Dataset schema (field, type, observed size range):

  query            string, 9 to 9.05k characters
  document         string, 10 to 222k characters
  metadata         dict
  negatives        list, 30 items
  negative_scores  list, 30 items
  document_score   string, 4 to 10 characters
  document_rank    string, 2 distinct values
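For orientation, a minimal sketch of how records with this schema could be loaded and inspected using the Hugging Face datasets library. The dataset id below is a placeholder, not the real one, and the single "train" split is an assumption; the triplet entry in each record's metadata indicates that (query, document, negatives) are meant to be consumed together as retrieval-training triplets.

from datasets import load_dataset

# Placeholder id -- substitute the actual dataset path.
ds = load_dataset("example/code-retrieval-triplets", split="train")
row = ds[0]

print(row["query"])                # natural-language description of a function
print(row["document"])             # the matching (positive) code snippet
print(len(row["negatives"]))       # 30 hard-negative code snippets
print(row["negative_scores"][:3])  # similarity scores for the first few negatives
print(row["document_score"], row["document_rank"])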
Returns an iterator over all ngrams for n in range(ngramRange) given a list of tokens.
def range_ngrams(tokens, ngramRange=(1, 2)):
    return chain(*(n_grams(tokens, i) for i in range(*ngramRange)))
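A self-contained usage sketch of the snippet above. The itertools import and the n_grams helper are not part of the record; the helper is modelled on the variant that appears among the negatives below.

from itertools import chain

def n_grams(tokens, n):
    # All contiguous n-token windows, as tuples.
    return zip(*[tokens[i:] for i in range(n)])

def range_ngrams(tokens, ngramRange=(1, 2)):
    return chain(*(n_grams(tokens, i) for i in range(*ngramRange)))

tokens = "the quick brown fox".split()
print(list(range_ngrams(tokens, (1, 3))))
# [('the',), ('quick',), ('brown',), ('fox',),
#  ('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')]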
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ngramize(items: List[str], ngram_range=(1, 1)) -> Generator[List[str], Any, None]:\n\n ngrams = []\n ranges = [(0, i) for i in range(ngram_range[0], ngram_range[1] + 1)]\n for r in ranges:\n ngrams += list(zip(*[items[j:] for j in range(*r)]))\n\n formatted_ngrams = [' '.join(item) for item in ngrams]\n\n yield formatted_ngrams", "def build_ngrams(tokens, n=2):\n ngrams = zip(*(islice(group, idx, None) for idx, group in enumerate(tee(tokens, n))))\n return ngrams", "def n_grams(tokens, n):\n return zip(*[tokens[i:] for i in range(n)])", "def get_ngrams(tokens, min_n, max_n):\n all_ngrams = list()\n n_tokens = len(tokens)\n for i in range(n_tokens):\n for j in range(i + min_n, min(n_tokens, i + max_n) + 1):\n all_ngrams.append(\" \".join(tokens[i:j]))\n return all_ngrams", "def create_ngrams(word_list, n):\n yield zip(*[word_list[i:] for i in range(n)])", "def create_ngrams(self, tokens):\n ngrams = []\n for i in range(len(tokens)- self.N + 1):\n ngrams.append(tuple(tokens[i:i+self.N]))\n return ngrams", "def add_ngram(sequences, token_indice, ngram_range=2):\n new_sequences = []\n for input_list in sequences:\n new_list = input_list[:]\n for ngram_value in range(2, ngram_range + 1):\n for i in range(len(new_list) - ngram_value + 1):\n ngram = tuple(new_list[i:i + ngram_value])\n if ngram in token_indice:\n new_list.append(token_indice[ngram])\n new_sequences.append(new_list)\n\n return new_sequences", "def add_ngram(sequences, token_indice, ngram_range=2):\n new_sequences = []\n for input_list in sequences:\n new_list = input_list[:]\n for i in range(len(new_list) - ngram_range + 1):\n for ngram_value in range(2, ngram_range + 1):\n ngram = tuple(new_list[i:i + ngram_value])\n if ngram in token_indice:\n new_list.append(token_indice[ngram])\n new_sequences.append(new_list)\n\n return new_sequences", "def generate_ngrams(iterable, n):\n return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])", "def add_ngram(sequences, token_indice, ngram_range=2):\n new_sequences = []\n for input_list in sequences:\n new_list = input_list[:]\n for ngram_value in range(2, ngram_range + 1):\n for i in range(len(new_list) - ngram_value + 1):\n ngram = tuple(new_list[i:i + ngram_value])\n if ngram in token_indice:\n new_list.append(token_indice[ngram])\n new_sequences.append(new_list)\n\n return new_sequences", "def get_ngrams(s, ngram_range=1):\n # tokens = s.split()\n # return filter(lambda token: len(token)>1, tokens)\n # return bigrams(s.split()) # NLTK bigrams method\n words = s.split()\n return [' '.join(words[i:i+ngram_range]) for i in range(len(words)-1)]", "def n_grams(tokens, n=1):\n shiftToken = lambda i: (el for j,el in enumerate(tokens) if j>=i)\n shiftedTokens = (shiftToken(i) for i in range(n))\n tupleNGrams = zip(*shiftedTokens)\n return tupleNGrams", "def ngrams(text, n):\n return chain(*[ngrams_(text, i) for i in range(n + 1)])", "def generate_ngram(corpus,n=2):\r\n def generate_ngram_str(text,n):\r\n text = tokenizer.tokenize(text)\r\n for i in range(0, len(text)-n+1):\r\n yield text[i:i+n]\r\n if isinstance(corpus,str):\r\n for ngram in generate_ngram_str(corpus,n):\r\n yield ngram\r\n elif isinstance(corpus, (list, types.GeneratorType)):\r\n for text in corpus:\r\n for ngram in generate_ngram_str(text,n):\r\n yield ngram", "def ngrams_(text, n):\n return zip(*[text[i:] for i in range(n)])", "def ngrams(text, n):\n grams = zip(*[text[i:] for i in range(n)])\n return [''.join(gram) for gram in grams]", "def n_gram(list, n):\n ngrams = 
zip(*[list[i:] for i in range(n)])\n return [\" \".join(ngram) for ngram in ngrams]", "def ngrams(iterable, n=1):\n return zip(*(iterable[i:] for i in range(n)))", "def make_ngrams(texts,n,ngram_mod):\r\n return [turnmod(doc,n,ngram_mod) for doc in texts]", "def _create_ngrams(tokens, n):\n\n ngrams = collections.Counter()\n for ngram in (tuple(tokens[i:i + n]) for i in xrange(len(tokens) - n + 1)):\n ngrams[ngram] += 1\n return ngrams", "def add_ngram(self, sequences, token_indice, ngram_range=2):\n new_sequences = []\n for input_list in sequences:\n new_list = input_list[:]\n for ngram_value in range(2, ngram_range + 1):\n for i in range(len(new_list) - ngram_value + 1):\n ngram = tuple(new_list[i:i + ngram_value])\n if ngram in token_indice:\n new_list.append(token_indice[ngram])\n new_sequences.append(new_list)\n\n return new_sequences", "def find_all_ngrams(dataset, n):\n return zip(*[dataset[i:] for i in xrange(n)])", "def get_ngrams(self,\n tokens,\n min_n=1,\n max_n=1):\n ## Check Inputs\n if min_n > max_n:\n raise ValueError(\"min_n must be less than max_n\")\n if min_n == 0:\n raise ValueError(\"min_n should be greater than 0\")\n ## Initialize N-Gram List\n all_ngrams = []\n ## Clean Input Tokens\n tt = \"text_tokenized\"\n cleaned_tokens = self._data_filter.filter_user_data([{tt:tokens}])\n if len(cleaned_tokens) > 0:\n cleaned_tokens = cleaned_tokens[0][tt]\n filtered_tokens = [i for i in tokens if i in self._data_filter.filter_set]\n ## Generate N-Gram Tuples\n for n in range(min_n, max_n+1):\n all_ngrams.extend(list(zip(*[cleaned_tokens[i:] for i in range(n)])))\n if len(filtered_tokens) > 0:\n all_ngrams.extend([(ft, ) for ft in filtered_tokens])\n return all_ngrams", "def iter_ngrams(self, sentence, n):\n return [tuple(sentence[i : i+n]) for i in range(len(sentence)-n+1)]", "def get_all_ngrams():\n grams = ()\n for i in range(0, 40):\n text_i = read_file(str(i))\n curr_grams = ngramize(text_i, n)\n grams = chain(grams, curr_grams)\n return grams", "def get_ngrams(s, ngmin=1, ngmax=1, tokenizer=list, separator=\"|\"):\n ngrams = [[] for x in range(ngmin, ngmax + 1)]\n s = tokenizer(s)\n for i, ch in enumerate(s):\n for ngsize in range(ngmin, ngmax + 1):\n if (i + ngsize) <= len(s):\n ngrams[ngsize - 1].append(separator.join(s[i:i+ngsize]))\n return ngrams", "def get_ngrams(seq, n):\n return", "def ngrams(words, n=1):\n return [tuple(words[j:j + n]) for j in range(len(words) - n + 1)]", "def _ngrams(self, string_):\n def find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n ngrams = []\n tokens = string_.split()\n\n for size in range(1, self._ngram_range + 1):\n tuples = find_ngrams(tokens, size)\n concatenated = [\"_\".join(tuple_) for tuple_ in tuples]\n ngrams.extend(concatenated)\n\n return \" \".join(ngrams)", "def find_ngrams(input_list, n=3):\n return zip(*[input_list[i:] for i in range(n)])" ]
[ "0.77018356", "0.7479281", "0.74121964", "0.74022996", "0.7253089", "0.7199664", "0.7115451", "0.70956486", "0.708296", "0.70244306", "0.6999506", "0.6986396", "0.6947342", "0.6940619", "0.6927285", "0.6926673", "0.6888308", "0.68828356", "0.6871958", "0.682038", "0.6817774", "0.67455", "0.67187554", "0.6716223", "0.66872066", "0.66798", "0.6666912", "0.6643475", "0.6624193", "0.6617692" ]
0.8547429
0
Clear the summary dictionary.
def clear_summary(self):
    self._summary.clear()
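For context, a minimal sketch of a class this method could belong to. The class name, the _summary initialisation, and add_stat are assumptions; only clear_summary itself comes from the record above.

class ReportBuilder:
    # Hypothetical host class for the clear_summary method.
    def __init__(self):
        self._summary = {}

    def add_stat(self, key, value):
        self._summary[key] = value

    def clear_summary(self):
        self._summary.clear()

builder = ReportBuilder()
builder.add_stat("rows_processed", 128)
builder.clear_summary()
print(builder._summary)  # {}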
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_summaries(self):\n\n\t\tself.summaries = [{ key: 0 for key in self.keys() }, 0]\n\t\tif self.encoded_summary == 1:\n\t\t\tfor i in range(len(self.encoded_keys)):\n\t\t\t\tself.encoded_counts[i] = 0", "def clearSummary(self):\n self.summary(DiagnosticStatus.OK, '')", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def reset(self):\n self.stats = {}", "def clear_summaries(self):\n if tf.gfile.Exists(str(self.info.summary_path)):\n tf.gfile.DeleteRecursively(str(self.info.summary_path))", "def clear_summary_stats(self, field=ALL):\n stats = self.stats\n\n if stats:\n stats.pop(field, None)\n self.update({self.STATS: stats})", "def clear(self):\n #for counterName in self.counters:\n # del self.counters[counterName]\n self.counters={}\n self.title=None", "def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0", "def clear(self):\n self.counts = [{} for _ in range(len(self.counts))]", "def clear(self):\n if self.debug:\n print(\"DIMS cleared\")\n self.sp_dicts.clear()", "def clear(self) :\n self.__dict__ = {}", "def clear_stats(self):\n self._stats = None", "def clear(self):\n self._store = {}", "def clear(self):\n self.__dict__.clear()", "def reset_metric_stats(self):\n self.__stats_table = {}", "def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0", "def reset_metric_stats(self):\n\n self.__stats_table = {}", "def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}", "def clear(self):\n self._map = {}", "def clear(self):\n for key in self.__data.keys():\n del self.__data[key]", "def reset(self):\n\n self.results = {}", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def clear(self):\r\n\r\n\t\tself.ITerm = 0.0\r\n\t\tself.DTerm = 0.0\r\n\t\tself.last_error = 0.0\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def clear():\n global d\n for key in d.keys():\n del d[key]", "def clear(self):\n\n self.size = 0\n\n self.table = [[]] * 100\n\n self.keys_set = set()\n\n self.keys_ref = [[]] * 100", "def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0", "def removeAll(self):\n self.pDict.clear()", "def clear():" ]
[ "0.81930006", "0.7744242", "0.7526364", "0.7524949", "0.7348229", "0.72681165", "0.72200364", "0.71623015", "0.71330416", "0.70410275", "0.7039112", "0.6935825", "0.6934039", "0.69223726", "0.6921015", "0.68289196", "0.68142843", "0.6779447", "0.6761134", "0.6751369", "0.6727308", "0.66825765", "0.6652785", "0.66342634", "0.6608094", "0.6602574", "0.65860724", "0.6559999", "0.6550161", "0.6535711" ]
0.8200098
0
Regrid ORAS4 to MOM.
def test_oras4_to_mom(self, input_dir, output_dir):

    output = os.path.join(output_dir, 'mom_oras4_temp.nc')
    if os.path.exists(output):
        os.remove(output)

    src_name = 'ORAS4'
    src_data_file = os.path.join(input_dir, 'thetao_oras4_1m_2014_grid_T.nc')
    dest_name = 'MOM'
    dest_data_file = output

    args = [src_name, src_data_file, 'temp', dest_name, dest_data_file]

    my_dir = os.path.dirname(os.path.realpath(__file__))
    cmd = [os.path.join(my_dir, '../', 'regrid_simple.py')] + args
    ret = sp.call(cmd)
    assert(ret == 0)

    # Check that outputs exist.
    check_output_fields('MOM', output)
    assert(os.path.exists(output))
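For illustration, a plain-Python sketch of the regridding call the test drives through a subprocess. The argument order is taken from the test above; treating regrid_simple.py as an executable on a known path is an assumption.

import os
import subprocess as sp

def regrid_oras4_temp_to_mom(input_dir, output_dir, script='./regrid_simple.py'):
    # Argument order mirrors the test:
    # <src_name> <src_data_file> <variable> <dest_name> <dest_data_file>
    src_file = os.path.join(input_dir, 'thetao_oras4_1m_2014_grid_T.nc')
    dest_file = os.path.join(output_dir, 'mom_oras4_temp.nc')
    cmd = [script, 'ORAS4', src_file, 'temp', 'MOM', dest_file]
    return sp.call(cmd)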
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_oras4_to_mom1(self, input_dir, output_dir):\n\n output = os.path.join(output_dir, 'mom1_oras4_temp.nc')\n if os.path.exists(output):\n os.remove(output)\n\n src_name = 'ORAS4'\n src_data_file = os.path.join(input_dir, 'so_oras4_1m_2014_grid_T.nc')\n dest_name = 'MOM1'\n dest_data_file = output\n\n args = [src_name, src_data_file, 'salt', dest_name, dest_data_file]\n\n my_dir = os.path.dirname(os.path.realpath(__file__))\n cmd = [os.path.join(my_dir, '../', 'regrid_simple.py')] + args\n ret = sp.call(cmd)\n assert(ret == 0)\n\n # Check that outputs exist.\n check_output_fields('MOM', output)\n assert(os.path.exists(output))", "def generate_mos(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, devname_mos_boundary, devname_mos_body,\n devname_mos_dmy, m=1, m_dmy=0, origin=np.array([0,0])):\n pg = placement_grid\n rg12 = routing_grid_m1m2\n pfix = objectname_pfix\n\n # placement\n imbl0 = laygen.relplace(name=\"I\" + pfix + 'BL0', templatename=devname_mos_boundary, gridname=pg, xy=origin)\n refi=imbl0\n if not m_dmy==0:\n imdmyl0 = laygen.relplace(name=\"I\" + pfix + 'DMYL0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyl0\n else:\n imdmyl0 = None\n im0 = laygen.relplace(name=\"I\" + pfix + '0', templatename=devname_mos_body, gridname=pg, refobj=refi, shape=[m, 1])\n refi=im0\n if not m_dmy==0:\n imdmyr0 = laygen.relplace(name=\"I\" + pfix + 'DMYR0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyr0\n else:\n imdmyr0 = None\n imbr0 = laygen.relplace(name=\"I\" + pfix + 'BR0', templatename=devname_mos_boundary, gridname=pg, refobj=imdmyr0)\n md=im0.elements[:, 0]\n #route\n #gate\n rg0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['G0'], refobj1=md[-1].pins['G0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['G0'], gridname=rg12)\n #drain\n rdl0=laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=md[0].pins['D0'], refobj1=md[-1].pins['D0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 1], refobj=_md.pins['D0'], gridname=rg12)\n #source\n rs0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['S0'], refobj1=md[-1].pins['S1'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['S0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=md[-1].pins['S1'], gridname=rg12)\n #dmy\n if m_dmy>=2:\n mdmyl=imdmyl0.elements[:, 0]\n mdmyr=imdmyr0.elements[:, 0]\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyl[0].pins['D0'], refobj1=mdmyl[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyr[0].pins['D0'], refobj1=mdmyr[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyl[0].pins['S0'], refobj1=mdmyl[-1].pins['S1'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyr[0].pins['S0'], refobj1=mdmyr[-1].pins['S1'])\n for _mdmyl in mdmyl:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyl.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyl.pins['S0'], gridname=rg12)\n for _mdmyr in mdmyr:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyr.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyr.pins['S1'], gridname=rg12)\n return [imbl0, imdmyl0, im0, imdmyr0, imbr0]", "def expand(self, model_pcoll):\n return (\n model_pcoll\n | 'Transforming the NDB models into Apache Beam entities' >> (\n 
beam.Map(job_utils.get_beam_entity_from_ndb_model))\n | 'Writing the NDB models to the datastore' >> (\n self.datastoreio.WriteToDatastore(feconf.OPPIA_PROJECT_ID))\n )", "def configure(self):\n super(ProjectionMatrix, self).configure()\n if self.sensors is None:\n self.sensors = self.skin_air.sensors\n\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.skin_air.sensors = self.sensors\n self.skin_air.sensors_to_surface, self.skin_air.sensor_locations = self.sensors.sensors_to_surface(self.skin_air)\n\n # Create OpenMEEG objects from TVB objects.\n self.om_head = self.create_om_head()\n self.om_sources = self.create_om_sources()\n self.om_sensors = self.create_om_sensors()\n\n # Calculate based on type of sources\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source_matrix = self.surface_source() #NOTE: ~1 hr\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source_matrix = self.dipole_source()\n\n # Calculate based on type of sensors\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.om_head2sensor = self.head2eeg()\n elif isinstance(self.sensors, sensors_module.SensorsMEG):\n self.om_head2sensor = self.head2meg()\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source2sensor = self.surf2meg()\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source2sensor = self.dip2meg()\n\n #NOTE: ~1 hr\n self.om_inverse_head = self.inverse_head(inv_head_mat_file = \"hminv_uid\")", "def omim(context, version, build, to_json, outfile):\n version = version or 1.0\n LOG.info(\"Running scout export omim\")\n adapter = context.obj['adapter']\n \n if not to_json:\n # print the headers\n click.echo(\"##panel_id=OMIM-aut\")\n click.echo(\"##institute=cust002\")\n click.echo(\"##version={0}\".format(version))\n click.echo(\"##date={0}\".format(datetime.date.today()))\n click.echo(\"##display_name=OMIM\")\n click.echo(\"##[email protected]\")\n click.echo(\"#hgnc_id\\thgnc_symbol\")\n \n nr_omim = 0\n all_genes = adapter.all_genes(build=str(build))\n nr_genes = all_genes.count()\n \n json_genes = []\n for gene in all_genes:\n keep = False\n # A omim gene is recognized by having phenotypes\n if gene.get('phenotypes'):\n gene.pop('_id')\n for phenotype in gene['phenotypes']:\n if phenotype['status'] in ['established', 'provisional']:\n keep = True\n if keep:\n nr_omim += 1\n if to_json:\n json_genes.append(gene)\n else:\n click.echo(\"{0}\\t{1}\".format(gene['hgnc_id'], gene['hgnc_symbol']))\n \n if to_json:\n if outfile:\n json.dump(json_genes, outfile, sort_keys=True, indent=4)\n else:\n print(json.dumps(json_genes, sort_keys=True, indent=4))\n \n \n LOG.info(\"Nr of genes in total: %s\" % nr_genes)\n LOG.info(\"Nr of omim genes: %s\" % nr_omim)\n LOG.info(\"Nr of genes outside mim panel: %s\" % (nr_genes - nr_omim))", "def mirrorTransformations(self):\n\n currentSelection = cmds.ls(sl=True)\n\n # get the mirror module\n networkNode = self.returnNetworkNode\n mirrorModule = cmds.getAttr(networkNode + \".mirrorModule\")\n moduleName = cmds.getAttr(networkNode + \".moduleName\")\n parent = cmds.getAttr(networkNode + \".parentModuleBone\")\n\n # get mirror module instance and information\n mirrorInst = self.returnMirrorModuleInst\n\n # turn off aim mode\n mirrorInst.aimMode_Setup(False)\n\n # turn off coplanar mode IF it exists on the module\n try:\n state = mirrorInst.coplanarBtn.isChecked()\n if state:\n mirrorInst.coplanarBtn.setChecked(False)\n mirrorInst.coplanarMode()\n except:\n pass\n\n 
moverTypes = self.returnJointMovers\n for moverType in moverTypes:\n for jointMover in moverType:\n attrs = cmds.listAttr(jointMover, keyable=True)\n\n for attr in attrs:\n value = cmds.getAttr(jointMover + \".\" + attr)\n\n mirrorMover = jointMover.partition(moduleName)[2]\n mirrorMover = mirrorModule + mirrorMover\n mirrorAttrs = [\"translateX\", \"translateY\", \"translateZ\"]\n\n if attr in mirrorAttrs:\n cmds.setAttr(mirrorMover + \".\" + attr, value * -1)\n else:\n cmds.setAttr(mirrorMover + \".\" + attr, value)\n\n cmds.select(clear=True)\n if len(currentSelection) > 0:\n cmds.select(currentSelection)\n\n # turn aim mode on\n mirrorInst.aimMode_Setup(True)\n\n # extend functionality\n self.mirrorTransformations_Custom()", "def execute(self):\n self._odom_msg.header.stamp = rospy.Time.now()\n # query base state from robot and store in odom msg\n position, orientation, linear_velocity, angular_velocity = self._robot.get_base_state()\n [self._odom_msg.pose.pose.position.x,\n self._odom_msg.pose.pose.position.y,\n self._odom_msg.pose.pose.position.z] = position\n [self._odom_msg.pose.pose.orientation.x,\n self._odom_msg.pose.pose.orientation.y,\n self._odom_msg.pose.pose.orientation.z,\n self._odom_msg.pose.pose.orientation.w] = orientation\n [self._odom_msg.twist.twist.linear.x,\n self._odom_msg.twist.twist.linear.y,\n self._odom_msg.twist.twist.linear.z] = linear_velocity\n [self._odom_msg.twist.twist.angular.x,\n self._odom_msg.twist.twist.angular.y,\n self._odom_msg.twist.twist.angular.z] = angular_velocity\n self._publisher.publish(self._odom_msg)\n\n tf_msg = TransformStamped()\n tf_msg.header.frame_id = self._odom_msg.header.frame_id\n tf_msg.child_frame_id = self._odom_msg.child_frame_id\n tf_msg.transform.translation = self._odom_msg.pose.pose.position\n tf_msg.transform.rotation = self._odom_msg.pose.pose.orientation\n tf_msg.header.stamp = rospy.Time.now()\n self._br.sendTransform(tf_msg)", "def remap(self,newMasters,modMap,objMaps=[]):\n #--Masters\n self.tes3.masters = newMasters\n #--File mapping\n modMapKeys = modMap.keys()\n #--Remap iObjs\n cells_id = self.cells_id\n reObjNum = re.compile('[0-9A-Z]{8}$')\n for (iMod,objMap) in objMaps:\n cellIds = objMap.keys()\n for cellId in cellIds:\n cellObjMap = objMap[cellId]\n #--Save \n cell = cells_id.get(cellId)\n if not cell: continue\n #--Objects\n objects = cell.getObjects()\n for object in objects.list():\n #--Different mod?\n if object[0] != iMod:\n pass\n #--Cell deleted?\n elif cellObjMap == -1:\n objects.remove(object)\n #--Remapped object?\n elif object[1] in cellObjMap:\n (newIObj,objId) = cellObjMap[object[1]]\n objIdBase = reObjNum.sub('',objId) #--Strip '00001234' id num from object\n #--Mismatched object id?\n if objId != objIdBase:\n #print 'Mismatch:',object[:3]\n pass \n #--Deleted object?\n elif newIObj == -1:\n #print 'Deleted',object[:3]\n objects.remove(object)\n #--Remapped object?\n else:\n #print 'Remapped',object[:3],'to',newIObj\n newObject = self.remapObject(object,iMod,newIObj)\n objects.replace(object,newObject)\n self.updateScptRefs()\n #--Remap iMods\n if not modMapKeys: return\n for cell in self.cells:\n objects = cell.getObjects()\n for object in objects.list():\n #--Remap IMod\n iMod = object[0]\n #--No change?\n if iMod not in modMapKeys: \n pass\n #--Object deleted?\n elif modMap[iMod] == -1:\n objects.remove(object)\n #--Object not deleted?\n else:\n newObject = self.remapObject(object,modMap[iMod])\n objects.replace(object,newObject)\n self.updateScptRefs()", "def reproject(self, *args, 
**kwargs):\n pass", "def test_from_t4_to_topology(self):\n Molecule.from_polymer_pdb(\n get_data_file_path(\"proteins/T4-protein.pdb\")\n ).to_topology()", "def test_openmdao_good_1(self):\n updates = [\n #['MAT1', 3, 10.0], # 3 is E -> set to 10.0\n #['MAT1', 4, 10.0], # 3 is G -> set to 10.0\n ['GRID', 1, 3, 10.0], # 3 is x1 -> set to 10.0\n ['GRID', 1, 4, 20.0], # 4 is x2 -> set to 20.0\n ['CPENTA', 9, 2, 10], # 2 is property_id -> set to 10\n ['CPENTA', 9, 3, 20], # 3 is node1 -> set to 20\n ['PSOLID', 4, 1, 2], # 1 is material_id\n ['PARAM', 'WTMASS', 1, 'WTMASs'], # key\n ['PARAM', 'WTMASS', 2, 0.0025], # value1\n ['PCOMP', 1, 2, 1.],\n ['PCOMP', 1, 3, 2.],\n ['CTETRA', 8, 3, 1], # nid[0]\n ['CTETRA', 8, 4, 2], # nid[1]\n ['CTETRA', 8, 5, 3], # nid[2]\n ['CTETRA', 8, 6, 4], # nid[3]\n ]\n #GRID 1 0 0. 0. 0. 0\n #GRID 2 0 1. 0. 0. 0\n #GRID 3 0 1. 1. 0. 0\n #GRID 4 0 0. 1. 0. 0\n #CPENTA 9 4 21 22 23 24 25 26\n #PSOLID 4 1 0\n #CTETRA 8 4 11 12 13 15\n\n bdf_filename = os.path.join(mesh_utils_path, 'test_mass.dat')\n\n model = BDF(debug=False)\n model.read_bdf(bdf_filename)\n pcomp_updates = [\n ['PCOMP', 1, 15, 'YES_A', 'souts_0'],\n ['PCOMP', 1, 19, 'YES_B', 'souts_1'],\n\n ['PCOMP', 1, 25, 'YES_C', 'souts_2'],\n #['PCOMP', 1, 29, 'YES_D', 'souts_3'],\n ]\n for iupdate in updates:\n card_type, itype, ifield, value = iupdate\n card = model.update_card(card_type, itype, ifield, value)\n\n for iupdate in pcomp_updates:\n card_type, itype, ifield, value, field_name = iupdate\n card = model.update_card(card_type, itype, ifield, value)\n if '_' in field_name:\n field_name2, index = field_name.split('_')\n index = int(index)\n actual = getattr(card, field_name2)[index]\n assert actual == value, 'field_name=%r ifield=%s value=%s actual=%s\\n%s' % (\n field_name, ifield, value, actual, card.print_raw_card())\n #if card_type == 'PCOMP':\n #print(card)", "def poseReaderRig(objs,space=1,name=None, nameOverride=0):\n if not pm.pluginInfo(\"poseReader\",q=1,loaded=1):\n pm.loadPlugin(\"poseReader.so\")\n if len(objs)<=0:\n pm.error((\"poseReaderUI: You must select one or more objects to create a poseReader node for!\"),sl=0)\n poses=[]\n # Store created nodes for sel at end\n obj=''\n for obj in objs:\n Obj=pm.util.capitalize(obj)\n # new to maya 6, tho it is a script....\n if name == None:\n pose=pm.createNode(\"poseReader\",n=(\"poseReader_\" + Obj + \"Shape#\"))\n else:\n if nameOverride==0:\n pose=pm.createNode(\"poseReader\",n=(\"poseReader_\" + Obj+name+'Shape'))\n elif nameOverride==1:\n if name[-5:]=='Shape':\n pose=pm.createNode(\"poseReader\",n=name)\n else:\n pose=pm.createNode(\"poseReader\",n=name+'Shape')\n attr=\"worldMatrix\"\n if space == 2:\n attr=\"matrix\"\n\n pm.connectAttr((obj + \".\" + attr),(pose + \".worldMatrixLiveIn\"),f=1)\n xform=pm.listRelatives(pose,p=1)[0]\n pm.connectAttr((xform + \".\" + attr),(pose + \".worldMatrixPoseIn\"),f=1)\n poses.append(xform)\n # Actually store xform for sel.\n # Make a keyable attr people can actually see and use.\n pm.addAttr(pose,ln=\"weight\",k=1)\n pm.connectAttr((pose + \".outWeight\"),(pose + \".weight\"),f=1)\n # Parent to same parent that object has.\n # Very important if using local space.\n parent=pm.listRelatives(obj,p=1)[0]\n if parent != \"\":\n pm.parent(xform,parent)\n # match rotate order of obj\n rotOrder = pm.getAttr(obj+'.rotateOrder')\n xform.attr('rotateOrder').set(rotOrder)\n # Snap xform to same as obj\n pCons=pm.pointConstraint(obj,xform,w=1)\n oCons=pm.orientConstraint(obj,xform,w=1)\n pm.delete(pCons,oCons)\n 
# Also make up animCurve for animCurve mode\n animCurve=pm.createNode('animCurveUU')\n pm.setKeyframe(animCurve,itt=\"flat\",v=1.0,ott=\"flat\",f=0.0)\n pm.setKeyframe(animCurve,itt=\"spline\",v=0.85,ott=\"spline\",f=0.25)\n pm.setKeyframe(animCurve,itt=\"spline\",v=0.15,ott=\"spline\",f=0.75)\n pm.setKeyframe(animCurve,itt=\"flat\",v=0.0,ott=\"flat\",f=1.0)\n pm.connectAttr((animCurve + \".message\"),(pose + \".msgAnimCurve\"),f=1)\n pm.connectAttr((animCurve + \".output\"),(pose + \".animCurveOutput\"),f=1)\n\n pm.select(poses,r=1)\n # Now if we have more than one pose...connect them up to a multiTrigger node\n nPoses=len(poses)\n if nPoses>1:\n trig=pm.createNode(\"multiTrigger\")\n # Make a keyable attr people can actually see and use.\n pm.addAttr(trig,ln=\"weight\",k=1)\n pm.connectAttr((trig + \".outWeight\"),(trig + \".weight\"),f=1)\n i=0\n for i in range(0,nPoses):\n pm.connectAttr((poses[i] + \".weight\"),(trig + \".inputValues[\" + str(i) + \"]\"),f=1)\n pm.select(poses,trig,r=1)\n return pose", "def _rebuild(self):\n for shape, record in iter(self):\n self.write_row(shape, record)\n self.__isBuilt = True", "def parse_observable_rmes(self,tokenized_lines):\n\n tokenized_lines_iterator = iter(tokenized_lines) # so that we can read through sequentially\n\n observable_matrix_header = next(tokenized_lines_iterator,None)\n while (observable_matrix_header):\n\n # parse header\n conversions = (int,int,float,int,float,int,int,int)\n (observable_index,sector_index,J_bra,gex_bra,J_ket,gex_ket,rows,cols)=[\n conversion(x)\n for (x,conversion) in zip(observable_matrix_header,conversions)\n ]\n\n # determine canonicalization\n Jg_pair = ((J_bra,gex_bra),(J_ket,gex_ket))\n (Jg_pair_canonical,flipped,canonicalization_factor) = tools.canonicalize_Jg_pair(\n Jg_pair,tools.RMEConvention.kRose\n )\n\n # prepare matrix key\n observable_name = self.params[\"observable_names\"][observable_index]\n\n # read matrix\n lines = itertools.islice(tokenized_lines_iterator,rows)\n numbers = [[float(x) for x in row] for row in lines]\n matrix = np.array(numbers,dtype=float)\n\n # canonicalize matrix for storage\n if (flipped):\n matrix = (1/canonicalization_factor)*matrix.transpose()\n\n # store matrix\n observable_dict = self.observables.setdefault(observable_name,dict())\n observable_dict[Jg_pair_canonical] = matrix\n ## print(\"Key:\",key)\n ## print(self.observables[key])\n\n # attempt to read next header\n observable_matrix_header = next(tokenized_lines_iterator,None)", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, \"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", 
i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def matrix_import(request, simulation, demandsegment):\n try:\n # Create a set with all existing OD pairs in the OD matrix.\n matrix = demandsegment.matrix\n pairs = Matrix.objects.filter(matrices=matrix)\n existing_pairs = set(pairs.values_list('p_id', 'q_id'))\n # Create a dictionary to map the centroid user ids with the centroid\n # objects.\n centroids = get_query('centroid', simulation)\n centroid_mapping = dict()\n centroid_id_mapping = dict()\n for centroid in centroids:\n centroid_mapping[centroid.user_id] = centroid\n centroid_id_mapping[centroid.user_id] = centroid.id\n # Convert the imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n if encoded_file.name.split(\".\")[-1] == 'tsv':\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n else:\n reader = csv.DictReader(tsv_file, delimiter=',')\n # For each imported OD pair, if the pair already exists in t\n # For each imported OD pair, if the pair already exists in the OD Matrix,\n # it is stored to be updated, else it is stored to be created.\n to_be_updated = set()\n to_be_created = list()\n for row in reader:\n pair = (\n centroid_id_mapping[int(row['origin'])],\n centroid_id_mapping[int(row['destination'])]\n )\n if pair in existing_pairs:\n to_be_updated.add((*pair, float(row['population'])))\n elif float(row['population']) > 0:\n to_be_created.append(\n Matrix(p=centroid_mapping[int(row['origin'])],\n q=centroid_mapping[int(row['destination'])],\n r=float(row['population']),\n matrices=matrix)\n )\n if to_be_updated:\n # Create a mapping between the values (p, q, r) and the ids.\n pair_values = set(pairs.values_list('id', 'p_id', 'q_id'))\n pair_mapping = dict()\n for pair in pair_values:\n pair_mapping[pair[1:]] = pair[0]\n pair_values = set(pairs.values_list('id', 'p_id', 'q_id', 'r'))\n # Find the pairs that really need to be updated (i.e. 
r is also\n # different).\n pair_values = set(pairs.values_list('p_id', 'q_id', 'r'))\n to_be_updated = to_be_updated.difference(pair_values)\n # Retrieve the ids of the pairs to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [pair_mapping[pair[:2]] for pair in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)]\n for chunk in chunks:\n cursor.execute(\n \"DELETE FROM Matrix \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the centroids ids and the centroid objects.\n centroid_id_mapping = dict()\n for centroid in centroids:\n centroid_id_mapping[centroid.id] = centroid\n # Now, create the updated pairs with the new values.\n to_be_created += [\n Matrix(p=centroid_id_mapping[pair[0]],\n q=centroid_id_mapping[pair[1]],\n r=pair[2],\n matrices=matrix)\n for pair in to_be_updated\n ]\n # Create the new OD pairs in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 20000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n for chunk in chunks:\n Matrix.objects.bulk_create(chunk, chunk_size)\n # Update total.\n pairs = pairs.all() # Update queryset from database.\n matrix.total = int(\n demandsegment.scale * pairs.aggregate(Sum('r'))['r__sum']\n )\n matrix.save()\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(reverse(\n 'metro:matrix_view', args=(simulation.id, demandsegment.id,)\n ))\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': 'matrix',\n }\n return render(request, 'metro_app/import_error.html', context)", "def __sm2gsm(self, dataDict):\n\n b = (dataDict.getData('bx'),dataDict.getData('by'),dataDict.getData('bz'))\n v = (dataDict.getData('vx'),dataDict.getData('vy'),dataDict.getData('vz'))\n\n for i,time in enumerate(dataDict.getData('time_min')):\n d = self.startDate + datetime.timedelta(minutes=time)\n\n # Update magnetic field\n b_gsm = pyLTR.transform.SMtoGSM(b[0][i], b[1][i], b[2][i], d)\n \n dataDict.setData('bx', b_gsm[0], i)\n dataDict.setData('by', b_gsm[1], i)\n dataDict.setData('bz', b_gsm[2], i)\n\n # Update Velocity\n v_gsm = pyLTR.transform.SMtoGSM(v[0][i], v[1][i], v[2][i], d)\n\n dataDict.setData('vx', v_gsm[0], i)\n dataDict.setData('vy', v_gsm[1], i)\n dataDict.setData('vz', v_gsm[2], i)", "def reiniciarMatrix(self):\n self.matrixMAPA = []\n self.rellenarMatrix()", "def load_OM_outputs(file_path):\r\n # Transform the .xls database into panda type\r\n excel = pd.ExcelFile(file_path)\r\n\r\n # Collect data from a particular tab\r\n OM_outputs = excel.parse('OM', header=0, index_col=0)\r\n\r\n return OM_outputs", "def regrid_model(cube, regridcube):\n\n regridcube.coord('latitude').standard_name = 'latitude'\n regridcube.coord('longitude').standard_name = 'longitude'\n\n model_units = cube.coord('latitude').units\n regridcube.coord('latitude').units = model_units\n regridcube.coord('longitude').units = model_units\n\n new_model_cube = cube.regrid(regridcube, iris.analysis.Linear())\n\n return new_model_cube", "def __init__(self, **kwargs):\n super(ProjectionMatrix, self).__init__(**kwargs) \n LOG.debug(str(kwargs))\n\n #OpenMEEG attributes\n self.om_head = None\n self.om_sources = None\n self.om_sensors = None\n self.om_head2sensor = None\n\n self.om_inverse_head = None\n self.om_source_matrix = None\n self.om_source2sensor = None #For 
MEG, not used for EEG", "def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main", "def RR_convert(Path = r'U:\\Refinancing analysis\\Model',\\\n file_name = r'\\port_pro_let_kyk_regular_Datafloor.xls',\\\n ind_Path = r'U:\\Refinancing analysis\\Model',\\\n ind_file_name = r'\\port_pro_let_kyk_rank_Datafloor.xls'):\n # #Rent Roll Converter\n # ##1 Set up the environment\n # imports moved to top\n\n # ##2 Reading from excel file generated by LMS (at-property)\n df = pd.read_excel(Path + file_name, encoding = 'utf-8')\n # ##3 simple tidy up of the column names\n df = (df.rename(columns = lambda x: x.replace(' ', '_'))\n .dropna(axis=1, how='all') \n )\n # ####LMS data was generated from a database, so it is most likely type consistent\n df = (df.assign(contract_months = lambda x: list(map(months_between, x[u'契約期間 異動日'], x[u'Lease_Term_ End'])),\n base_rent = lambda d: d[u'Rent_Monthly(Yen)']/1000.0,\n rent_start = lambda d: pd.to_datetime(d[u'賃料発生日'], dayfirst = True)\n ) \n )\n # ##4 Group by tenant code, lease no., floor and units\n # ####lease no. 
alone wouldn't work\n \n lease_grp = df.groupby(['Tenant_Code', 'Lease_No._', u'Floor', u'Space_Category Zone/Type'])\n # ##5 Data Transformation to a more handy set - close to rent roll format\n #Format of the result\n cols = [ 'BLDG', 'FL', 'UNITS', 'ENG NAME', 'CHINESE NAME', 'GFA', 'L_START', 'L_END', 'E_RENT', 'B_RENT', 'ET_DATE', 'TA', 'Deposit', 'Ls_no']\n #this would be clearer if rolled into an function and apply then concatate\n #initialize the output\n RentRoll = pd.DataFrame(columns = cols, index = range(len(lease_grp)))\n i = 0 #index of the new dataframe\n for key, single_ls in lease_grp:\n\n bldg, fl, units, e_name, c_name, gfa, l_start, l_end, effective_rent, face_rent, et, ta, deposit, lsno = None, None, None, None, None, None, None, None, None, None, None, None, None, None\n #simple data fields just copy from the 1st row\n \n bldg = int(single_ls.iloc[0]['Building_Code'][-1])\n fl = int(single_ls.iloc[0]['Floor'][:-1])\n units = single_ls.iloc[0][u'Space_Category Zone/Type']\n gfa = round(float(single_ls.iloc[0][u'Leased_Area(m2)']),2)\n ta = single_ls.iloc[0][u'Tenant_Code']\n lsno = single_ls.iloc[0]['Lease_No._']\n deposit = round(single_ls[u'Deposit_including_non-refunding_portion(Yen)_'].max() / 1000, 2)\n \n #debugging for SDTJ\n #if ta == \"SHI01\":\n # pdb.set_trace()\n \n # split tenant name is a bit tricky, spin it off as a function #TODO more Regex magic \n e_name, c_name = split_t_name(single_ls.iloc[0][u'Tenant_Name'])\n \n #Nominal start and end of the lease, span of the accounting lease\n l_start = min(single_ls[u'契約期間 異動日'])\n l_end = max(single_ls[u'Lease_Term_ End']) + Day(1) - Second(1) # move to the end of day\n length = months_between(l_start, l_end)\n \n #copy the original durations of each sub-period\n contract_months = single_ls.contract_months.copy()\n \n #treat fitout period by reducing the first row of duration - operated on contract_months copy\n if pd.notnull(single_ls.iloc[0][u'賃料発生日']): #there are fitout period, assume it is always at the beginning \n actual_begin = pd.to_datetime(single_ls.iloc[0][u'賃料発生日'], dayfirst = True)\n fitout_end = actual_begin - pd.Timedelta('1 Day')\n fitout_months = months_between(single_ls.iloc[0][u'契約期間 異動日'], fitout_end)\n contract_months.iloc[0] -= fitout_months\n \n #regular and iregular rentfree treatment is mutually exclusive in at property system\n #counter-intuitively, the \"iregular\" rent-free was easy to deal with, effective rent is just the weighted avg\n if pd.isnull(single_ls.iloc[0][u'フリーレント開始月']): # if the rent free was not recorded as begin and end months \"regular\" rent free\n effective_rent = sum(single_ls.base_rent * contract_months) / length / gfa\n \n #for regular rent-free, workout how many months needs to be reduced from actual rent paying months\n else: # if the rent free was recorded as begin and end months \"regular\" rent free\n regular_rf_mths = int(single_ls.iloc[0][u'フリーレント終了月']) - int(single_ls.iloc[0][u'フリーレント開始月']) + 1\n regular_rf_mths = regular_rf_mths * round(length / 12, 0) # here is the problem see if round 0 help!\n effective_rent = sum(single_ls.base_rent * (contract_months - regular_rf_mths) ) / length / gfa\n \n #face rent has the added complication of denoting rentfree with 0 base rent, so need to reduce the denominator\n face_rent = single_ls.base_rent.astype(float).max() / gfa\n \n #Markdown the cancellation date for the monthly rent roll reconciliation\n if pd.notnull(single_ls.iloc[-1]['Cancellation_Date']):\n et = 
pd.to_datetime(single_ls.iloc[-1]['Cancellation_Date']) + Day(1) - Second(1)\n else:\n et = pd.NaT\n\n #push the row into the new dataframe\n RentRoll.iloc[i] = [bldg, fl, units, e_name, c_name, gfa, l_start, l_end, effective_rent, face_rent, et, ta, deposit, lsno]\n i += 1\n \n RentRoll.L_START = pd.to_datetime(RentRoll.L_START, errors = 'coerce')\n RentRoll.L_END = pd.to_datetime(RentRoll.L_END, errors = 'coerce')\n RentRoll.ET_DATE = pd.to_datetime(RentRoll.ET_DATE, errors = 'coerce')\n RentRoll.GFA = pd.to_numeric(RentRoll.GFA, errors = 'coerce')\n RentRoll.E_RENT = pd.to_numeric(RentRoll.E_RENT, errors = 'coerce')\n RentRoll.B_RENT = pd.to_numeric(RentRoll.B_RENT, errors = 'coerce')\n RentRoll['Real_END'] = np.where(pd.notnull(RentRoll.ET_DATE), RentRoll.ET_DATE, RentRoll.L_END)\n \n \n df_rank = pd.read_excel(ind_Path + ind_file_name, encoding = 'utf-8')\n\n group_ta = df_rank.groupby(by = 'TenantCode')\n\n industries = group_ta[u'Type of Industry'].unique()\n d = pd.Series({i:industries[i][0] for i in industries.index})\n d = pd.DataFrame(d, columns= ['Industry',]).reset_index().rename(columns = {'index': 'TA'})\n RentRoll = pd.merge(RentRoll, d, on = 'TA')\n #RentRoll['CHINESE NAME'] = RentRoll['CHINESE NAME'].str.decode('utf8')\n \n \n (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(Path+file_name)\n \n RentRoll.to_excel('RentRoll_%s.xlsx' % time.strftime('%Y-%m-%d', time.localtime(mtime)), encoding = 'utf-8')\n return RentRoll", "def runOpenMM(parm, inpcrdFile, system, rad, K, Indices, solvate, out_dcd, out_csv, out_rst ):\n\n \n def newIntegrator():\n integrator = mm.LangevinIntegrator(\n 300.0 * u.kelvin,\n 10.0 / u.picosecond,\n 2.0 * u.femtosecond)\n return integrator\n\n def pmdStructureToOEMol(parm, resname):\n\n from oeommtools.utils import openmmTop_to_oemol\n mask = \"!(:%s)\" %resname\n structure_LIG = parmed.load_file( '../2gmx_wat.prmtop', xyz = '../equilibration/rst/step8.rst.125000' )\n structure_LIG.strip(mask)\n pos = structure_LIG.positions\n top = structure_LIG.topology\n molecule = openmmTop_to_oemol(top, pos, verbose=False)\n OEPerceiveBondOrders(molecule)\n OEAssignAromaticFlags(molecule)\n OEFindRingAtomsAndBonds(molecule)\n\n return molecule\n \n def getAtomIndices( structure, resname ):\n \"\"\"\n Get atom indices of a ligand from ParmEd Structure.\n Arguments\n ---------\n resname : str\n String specifying the resiue name of the ligand.\n structure: parmed.Structure\n ParmEd Structure object of the atoms to be moved.\n Returns\n -------\n atom_indices : list of ints\n list of atoms in the coordinate file matching lig_resname\n \"\"\"\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand\n\n\n \"\"\"\n Rotate the torsion to an angle rad using openeye toolkits\n \"\"\" \n molecule = pmdStructureToOEMol( parm, \"LIG\" )\n atom_indices_ligand = getAtomIndices( parm, \"LIG\" )\n\n\n dihedral_atoms = [\"C10\", \"C9\", \"C3\", \"C2\" ]\n atom1 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[0]))\n atom2 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[1]))\n atom3 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[2]))\n atom4 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[3]))\n if OESetTorsion(molecule, atom1, atom2, atom3, atom4, rad ) == False :\n print(\"Torsional bond couldn't be rotated. 
Please enter correct atoms!\"); \n exit()\n\n # Update ligand positions in nc_sim\n updated_pos = molecule.GetCoords()\n\n for index, atomidx in enumerate(atom_indices_ligand): \n parm.positions[atomidx] = np.array(updated_pos[index])*u.nanometers\n\n \"\"\"\n harmonically restrain dihedral angle\n see units, http://docs.openmm.org/6.3.0/userguide/theory.html\n \"\"\"\n pi = np.pi\n harmonic = mm.CustomTorsionForce(\"k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0); pi = %.5f\" % pi);\n harmonic.addPerTorsionParameter(\"theta0\");\n harmonic.addPerTorsionParameter(\"k\");\n system.addForce(harmonic)\n harmonic.addTorsion(Indices[0], Indices[1], Indices[2], Indices[3], (rad, K))\n\n # Restraint non-moving part of the ligand\n restraintWt = 200 #kcal/mol/A2\n # define the custom force to restrain atoms to their starting positions\n force_restr = mm.CustomExternalForce('k_restr*periodicdistance(x, y, z, x0, y0, z0)^2')\n # Add the restraint weight as a global parameter in kcal/mol/A^2\n force_restr.addGlobalParameter(\"k_restr\", restraintWt*u.kilocalories_per_mole/u.angstroms**2)\n # Define the target xyz coords for the restraint as per-atom (per-particle) parameters\n force_restr.addPerParticleParameter(\"x0\")\n force_restr.addPerParticleParameter(\"y0\")\n force_restr.addPerParticleParameter(\"z0\")\n alch_list = ['C9', 'H92', 'H93', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H4', 'H5', 'H6']\n for idx, atom_crd in enumerate( parm.positions ):\n name=parm.atoms[idx].name;\n resname=parm.atoms[idx].residue.name;\n if resname == \"LIG\":\n if not name in alch_list:\n xyz = parm.positions[idx].in_units_of(u.nanometers)/u.nanometers\n force_restr.addParticle(idx, xyz)\n system.addForce( force_restr )\n\n # build simulaion\n platform = mm.Platform.getPlatformByName('CUDA')\n integ1 = newIntegrator()\n simulation = app.Simulation(parm.topology, system, integ1)\n simulation.context.setPositions( parm.positions )\n\n # Set Box dimensions\n inpcrd = app.AmberInpcrdFile( inpcrdFile );\n if inpcrd.boxVectors is not None:\n simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)\n\n print('RESTARTING simulation from a previous State..........%s' %inpcrdFile)\n velocities = parm.velocities \n simulation.context.setVelocities( inpcrd.velocities ) \n\n # perform minimization\n print('Minimizing...')\n simulation.minimizeEnergy( tolerance = 0.5 * kilojoule/mole )\n \n # adding simulation reporters\n simulation.context.setVelocitiesToTemperature(300*u.kelvin)\n simulation.reporters.append(app.DCDReporter(out_dcd, 1000))\n simulation.reporters.append(app.StateDataReporter(csv_file, 1000, step=True, potentialEnergy=True, totalEnergy=True, volume=True,temperature=True, separator='\\t'))\n restrt = RestartReporter( out_rst, 10000000, parm.ptr('natom') );\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n\n\n print('Production run at NVT...')\n simulation.step(5000000) # 10 ns\n \n # saving last restart\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n return", "def _transform_to_odom(self, detection, timeout=3.0):\n self.swarmie.xform.waitForTransform(\n self.rovername + '/odom',\n detection.pose.header.frame_id,\n detection.pose.header.stamp,\n rospy.Duration(timeout)\n )\n\n return self.swarmie.xform.transformPose(self.rovername + '/odom',\n detection.pose)", "def tomatrix(self, 
ai_patch):\n V = self.space\n# print \"------------\"\n# print \"geo.npatchs : \", V.geometry.npatchs\n# print \"patch id : \", ai_patch\n# print \"dim : \", V.dim\n# print \"shape : \", V.geometry[ai_patch].shape\n if V.dim == 1 :\n [li_n_1] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_1d ( self.id, ai_patch \\\n , li_n_1 )\n if V.dim == 2 :\n [li_n_1, li_n_2] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_2d ( self.id, ai_patch \\\n , li_n_1, li_n_2 )\n if V.dim == 3 :\n [li_n_1, li_n_2, li_n_3] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_3d ( self.id \\\n , ai_patch, li_n_1, li_n_2, li_n_3 )", "def generate_obs_grid(start_date, end_date, storm_report_path, model_grid_path, proj_str):\n\n grid = xr.open_dataset(model_grid_path)\n for coord in ['lon', 'lat']:\n grid[coord].values = grid[coord].astype('float32')\n valid_dates = pd.date_range(start_date, end_date, freq='1h')\n\n obs_list = []\n\n for report_type in ['filtered_torn', 'filtered_wind', 'filtered_hail']:\n\n ds_list = []\n\n obs = combine_storm_reports(valid_dates.min(), valid_dates.max(), storm_report_path, report_type)\n\n for valid_date in valid_dates:\n\n ds = grid.expand_dims('time').assign_coords(valid_time=('time', [valid_date]))\n ds[report_type.split('_')[-1]] = ds['lat'] * 0\n\n obs_sub = obs[obs['Actual_Date'] == valid_date]\n obs_indx = find_coord_indices(ds['lon'].values, ds['lat'].values, obs_sub['Lon'], obs_sub['Lat'], proj_str)\n for i in obs_indx:\n if i is not None:\n ds[report_type.split('_')[-1]][i[0], i[1]] += 1\n else:\n continue\n ds_list.append(ds)\n\n obs_list.append(xr.concat(ds_list, dim='time'))\n\n return xr.merge(obs_list)", "def Projection(W, TYPE_PROJ = proj_l11ball, ETA = 100, AXIS = 0, ETA_STAR = 100, device = \"cpu\" ): \n \n #global TYPE_PROJ, ETA, ETA_STAR, AXIS, device \n if TYPE_PROJ == 'No_proj':\n W_new = W\n if (TYPE_PROJ == proj_l1ball or TYPE_PROJ == proj_l11ball or TYPE_PROJ == proj_l11ball_line ):\n W_new = TYPE_PROJ(W, ETA, device)\n if TYPE_PROJ == proj_l21ball or TYPE_PROJ == proj_l12ball:\n W_new = TYPE_PROJ(W, ETA, AXIS, device = device)\n if TYPE_PROJ == proj_nuclear:\n W_new = TYPE_PROJ(W, ETA_STAR, device=device)\n return W_new", "def EERToARM(entities):\n\n entities = np.array(readEER(entities)) # read in array of entities from JSON file\n relations, log = EERtoARM.transform(entities)\n JSONRelations = writeARM(list(relations))\n return JSONRelations, log", "def fix(self):\n gAsset = cmds.ls(type='gAsset')\n\n trans = cmds.listRelatives(gAsset[0], p=True)\n meshes = cmds.listRelatives(trans, ad=True, type='mesh')\n for mesh in meshes:\n if mesh:\n try:\n cmds.addAttr(mesh, ln=\"grid_renderGeo\", at='double', dv=1)\n cmds.setAttr(\n '{0}.grid_renderGeo'.format(mesh), e=False, keyable=False, lock=True)\n except:\n pass\n\n self.run()" ]
[ "0.6696749", "0.5472097", "0.5131155", "0.5071717", "0.5061862", "0.5038171", "0.49968898", "0.496674", "0.4937408", "0.4932757", "0.49031186", "0.4871228", "0.4843777", "0.48415455", "0.48366535", "0.4836101", "0.48330456", "0.48307824", "0.48289937", "0.4805085", "0.4799185", "0.47989914", "0.4786731", "0.47794548", "0.4770354", "0.47649178", "0.47547337", "0.4748466", "0.47304687", "0.47235486" ]
0.6836795
0
Regrid ORAS4 to MOM 1 degree.
def test_oras4_to_mom1(self, input_dir, output_dir):

    output = os.path.join(output_dir, 'mom1_oras4_temp.nc')
    if os.path.exists(output):
        os.remove(output)

    src_name = 'ORAS4'
    src_data_file = os.path.join(input_dir, 'so_oras4_1m_2014_grid_T.nc')
    dest_name = 'MOM1'
    dest_data_file = output

    args = [src_name, src_data_file, 'salt', dest_name, dest_data_file]

    my_dir = os.path.dirname(os.path.realpath(__file__))
    cmd = [os.path.join(my_dir, '../', 'regrid_simple.py')] + args
    ret = sp.call(cmd)
    assert(ret == 0)

    # Check that outputs exist.
    check_output_fields('MOM', output)
    assert(os.path.exists(output))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_oras4_to_mom(self, input_dir, output_dir):\n\n output = os.path.join(output_dir, 'mom_oras4_temp.nc')\n if os.path.exists(output):\n os.remove(output)\n\n src_name = 'ORAS4'\n src_data_file = os.path.join(input_dir, 'thetao_oras4_1m_2014_grid_T.nc')\n dest_name = 'MOM'\n dest_data_file = output\n\n args = [src_name, src_data_file, 'temp', dest_name, dest_data_file]\n\n my_dir = os.path.dirname(os.path.realpath(__file__))\n cmd = [os.path.join(my_dir, '../', 'regrid_simple.py')] + args\n ret = sp.call(cmd)\n assert(ret == 0)\n\n # Check that outputs exist.\n check_output_fields('MOM', output)\n assert(os.path.exists(output))", "def Magnus4(self,direction='x'):\n self.reset()\n self.mol.orthoDen()\n self.mol.orthoFock()\n h = -1j*self.stepsize\n for idx,time in enumerate((self.time)):\n if direction.lower() == 'x':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[0]))\n elif direction.lower() == 'y':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[1]))\n elif direction.lower() == 'z':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[2]))\n # record pulse envelope for later plotting, etc.\n self.shape.append(self.pulse(time))\n curDen = np.copy(self.mol.PO)\n \n self.addField(time + 0.0*self.stepsize,direction=direction)\n k1 = h*self.mol.FO \n Q1 = k1\n U = expm(0.5*Q1)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 0.5*self.stepsize,direction=direction)\n k2 = h*self.mol.FO\n Q2 = k2 - k1\n U = expm(0.5*Q1 + 0.25*Q2)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n\n self.addField(time + 0.5*self.stepsize,direction=direction)\n k3 = h*self.mol.FO\n Q3 = k3 - k2\n U = expm(Q1 + Q2)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n\n self.addField(time + 1.0*self.stepsize,direction=direction)\n k4 = h*self.mol.FO\n Q4 = k4 - 2*k2 + k1\n L = 0.5*Q1 + 0.25*Q2 + (1/3.)*Q3 - (1/24.)*Q4\n L += -(1/48.)*self.mol.comm(Q1,Q2)\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 0.5*self.stepsize,direction=direction)\n k5 = h*self.mol.FO\n Q5 = k5 - k2 \n L = Q1 + Q2 + (2/3.)*Q3 + (1/6.)*Q4 - (1/6.)*self.mol.comm(Q1,Q2)\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 1.0*self.stepsize,direction=direction)\n k6 = h*self.mol.FO\n Q6 = k6 -2*k2 + k1\n L = Q1 + Q2 + (2/3.)*Q5 + (1/6.)*Q6\n L += -(1/6.)*self.mol.comm(Q1, (Q2 - Q3 + Q5 + 0.5*Q6))\n\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n # density and Fock are done updating, wrap things up\n self.mol.unOrthoFock() \n self.mol.unOrthoDen() \n self.mol.computeEnergy()\n self.Energy.append(np.real(self.mol.energy))", "def _r90(self,m):\n\n return np.rot90(m,1)", "def translatetoCOM(ccdata):\n natoms = ccdata.natom\n com = np.zeros(3, dtype=np.float)\n totalmass = 0\n for i in range(natoms):\n print(ccdata.atomcoords[0][i])\n print(ccdata.atommasses[i])\n com += ccdata.atommasses[i] * ccdata.atomcoords[0][i]\n totalmass += ccdata.atommasses[i]\n\n com /= totalmass\n\n for i in range(natoms):\n ccdata.atomcoords[0][i] -= com", "def getMatrix(self) -> CMatrix4:\n ...", "def Magnus2(self,direction='x'):\n self.reset()\n self.mol.orthoDen()\n self.mol.orthoFock()\n h = -1j*self.stepsize\n for idx,time in enumerate((self.time)):\n if direction.lower() == 'x':\n 
self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[0]))\n elif direction.lower() == 'y':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[1]))\n elif direction.lower() == 'z':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[2]))\n\n # record pulse envelope for later plotting, etc.\n self.shape.append(self.pulse(time))\n curDen = np.copy(self.mol.PO)\n \n self.addField(time + 0.0*self.stepsize,direction=direction)\n k1 = h*self.mol.FO \n U = expm(k1)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 1.0*self.stepsize,direction=direction)\n L = 0.5*(k1 + h*self.mol.FO)\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n # density and Fock are done updating, wrap things up\n self.mol.unOrthoFock() \n self.mol.unOrthoDen() \n self.mol.computeEnergy()\n self.Energy.append(np.real(self.mol.energy))", "def milano4ord(grid,name,bus_name,data_dict):\n\n sin = sym.sin\n cos = sym.cos \n\n # inputs\n V = sym.Symbol(f\"V_{bus_name}\", real=True)\n theta = sym.Symbol(f\"theta_{bus_name}\", real=True)\n p_m = sym.Symbol(f\"p_m_{name}\", real=True)\n v_f = sym.Symbol(f\"v_f_{name}\", real=True) \n omega_coi = sym.Symbol(\"omega_coi\", real=True) \n \n # dynamic states\n delta = sym.Symbol(f\"delta_{name}\", real=True)\n omega = sym.Symbol(f\"omega_{name}\", real=True)\n e1q = sym.Symbol(f\"e1q_{name}\", real=True)\n e1d = sym.Symbol(f\"e1d_{name}\", real=True)\n\n # algebraic states\n i_d = sym.Symbol(f\"i_d_{name}\", real=True)\n i_q = sym.Symbol(f\"i_q_{name}\", real=True) \n p_g = sym.Symbol(f\"p_g_{name}\", real=True)\n q_g = sym.Symbol(f\"q_g_{name}\", real=True)\n\n # parameters\n S_n = sym.Symbol(f\"S_n_{name}\", real=True)\n Omega_b = sym.Symbol(f\"Omega_b_{name}\", real=True) \n H = sym.Symbol(f\"H_{name}\", real=True)\n T1d0 = sym.Symbol(f\"T1d0_{name}\", real=True)\n T1q0 = sym.Symbol(f\"T1q0_{name}\", real=True)\n X_d = sym.Symbol(f\"X_d_{name}\", real=True)\n X_q = sym.Symbol(f\"X_q_{name}\", real=True)\n X1d = sym.Symbol(f\"X1d_{name}\", real=True)\n X1q = sym.Symbol(f\"X1q_{name}\", real=True)\n D = sym.Symbol(f\"D_{name}\", real=True)\n R_a = sym.Symbol(f\"R_a_{name}\", real=True)\n K_delta = sym.Symbol(f\"K_delta_{name}\", real=True)\n params_list = ['S_n','Omega_b','H','T1d0','T1q0','X_d','X_q','X1d','X1q','D','R_a','K_delta','K_sec']\n \n # auxiliar\n v_d = V*sin(delta - theta) \n v_q = V*cos(delta - theta) \n p_e = i_d*(v_d + R_a*i_d) + i_q*(v_q + R_a*i_q) \n omega_s = omega_coi\n \n # dynamic equations \n ddelta = Omega_b*(omega - omega_s) - K_delta*delta\n domega = 1/(2*H)*(p_m - p_e - D*(omega - omega_s))\n de1q = 1/T1d0*(-e1q - (X_d - X1d)*i_d + v_f)\n de1d = 1/T1q0*(-e1d + (X_q - X1q)*i_q)\n\n # algebraic equations \n g_i_d = v_q + R_a*i_q + X1d*i_d - e1q\n g_i_q = v_d + R_a*i_d - X1q*i_q - e1d\n g_p_g = i_d*v_d + i_q*v_q - p_g \n g_q_g = i_d*v_q - i_q*v_d - q_g \n \n # dae \n f_syn = [ddelta,domega,de1q,de1d]\n x_syn = [ delta, omega, e1q, e1d]\n g_syn = [g_i_d,g_i_q,g_p_g,g_q_g]\n y_syn = [ i_d, i_q, p_g, q_g]\n \n grid.H_total += H\n grid.omega_coi_numerator += omega*H*S_n\n grid.omega_coi_denominator += H*S_n\n\n grid.dae['f'] += f_syn\n grid.dae['x'] += x_syn\n grid.dae['g'] += g_syn\n grid.dae['y_ini'] += y_syn \n grid.dae['y_run'] += y_syn \n \n if 'v_f' in data_dict:\n grid.dae['u_ini_dict'].update({f'{v_f}':{data_dict['v_f']}})\n grid.dae['u_run_dict'].update({f'{v_f}':{data_dict['v_f']}})\n else:\n 
grid.dae['u_ini_dict'].update({f'{v_f}':1.0})\n grid.dae['u_run_dict'].update({f'{v_f}':1.0})\n\n if 'p_m' in data_dict:\n grid.dae['u_ini_dict'].update({f'{p_m}':{data_dict['p_m']}})\n grid.dae['u_run_dict'].update({f'{p_m}':{data_dict['p_m']}})\n else:\n grid.dae['u_ini_dict'].update({f'{p_m}':1.0})\n grid.dae['u_run_dict'].update({f'{p_m}':1.0})\n\n grid.dae['xy_0_dict'].update({str(omega):1.0})\n grid.dae['xy_0_dict'].update({str(e1q):1.0})\n grid.dae['xy_0_dict'].update({str(i_q):0.5})\n\n \n # outputs\n grid.dae['h_dict'].update({f\"p_e_{name}\":p_e})\n grid.dae['h_dict'].update({f\"v_f_{name}\":v_f})\n grid.dae['h_dict'].update({f\"p_m_{name}\":p_m})\n \n for item in params_list: \n grid.dae['params_dict'].update({f\"{item}_{name}\":data_dict[item]})\n \n # if 'avr' in syn_data:\n # add_avr(grid.dae,syn_data)\n # grid.dae['u_ini_dict'].pop(str(v_f))\n # grid.dae['u_run_dict'].pop(str(v_f))\n # grid.dae['xy_0_dict'].update({str(v_f):1.5})\n\n # if 'gov' in syn_data:\n # add_gov(grid.dae,syn_data) \n # grid.dae['u_ini_dict'].pop(str(p_m))\n # grid.dae['u_run_dict'].pop(str(p_m))\n # grid.dae['xy_0_dict'].update({str(p_m):0.5})\n\n # if 'pss' in syn_data:\n # add_pss(grid.dae,syn_data) \n\n p_W = p_g * S_n\n q_var = q_g * S_n\n\n return p_W,q_var", "def MS_to_galactic():\n return matrix_transpose(MS_MATRIX)", "def mol2psi4(mol: Chem.Mol, conformer_id: int = 0) -> psi4.core.Molecule:\n\n assert type(mol) == Chem.Mol\n atoms = mol.GetAtoms()\n string = \"\\n\"\n for _, atom in enumerate(atoms):\n pos = mol.GetConformer(conformer_id).GetAtomPosition(atom.GetIdx())\n string += \"{} {} {} {}\\n\".format(atom.GetSymbol(), pos.x, pos.y, pos.z)\n string += \"units angstrom\\n\"\n return psi4.geometry(string)", "def rotate_orbit(self):\n try:\n ang = self.orbit_speed * self.time_scale / self.refresh_rate\n self.obj.rotate(angle=ang, axis=vector(0, 1, 0), origin=self.star.obj.pos)\n self.sum_ang += ang\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def reorient_obj(obj, step_ang, plane):\n start_angle = 0\n end_angle = math.pi / 2\n min_area = math.inf\n best_angle = 0\n start_axis = array.array(\"d\", obj.Centroid)\n end_axis = []\n index = [0] * 3\n\n if plane == \"xy\":\n index = [1, 1, 0]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1], obj.Centroid[2] + 1])\n elif plane == \"xz\":\n index = [1, 0, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1] + 1, obj.Centroid[2]])\n elif plane == \"yz\":\n index = [0, 1, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0] + 1, obj.Centroid[1], obj.Centroid[2]])\n\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n while start_angle <= end_angle:\n obj.Rotate3D(start_axis, end_axis, step_ang)\n # compute the area\n dims = [(max_pt[0] - min_pt[0]), (max_pt[1] - min_pt[1]), (max_pt[2] - min_pt[2])]\n curr_area = 1\n for dim in dims:\n if dim > 0:\n curr_area *= dim\n if curr_area < min_area:\n min_area = curr_area\n best_angle = start_angle\n start_angle += step_ang\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n # rotate the object using the best angle\n obj.Rotate3D(start_axis, end_axis, best_angle)", "def arm_mirror():\n MOVEMENTS.disable_all_joints()\n while True:\n for i in range(3):\n angle = MOVEMENTS.get_raw_angle(i*2)\n 
MOVEMENTS.set_raw_angle(i*2 +1, angle)\n sleep(0.01)", "def normalise(self):\n for at in self.atoms:\n if at.x < 0. :\n at.x = self.coordx + at.x\n if at.y < 0. :\n at.y = self.coordy + at.y\n if at.z < 0. :\n at.z = self.coordz + at.z", "def set_matrix(self):\n theta1 = -90\n theta2 = 105\n theta3 = 180\n\n if self.number > 8:\n theta2 = 75\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glPushMatrix()\n glRotatef(theta2, 0.0, 1.0, 0.0)\n glRotatef(theta1, 1.0, 0.0, 0.0)\n glRotatef(theta3, 0.0, 0.0, 1.0)\n matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n glPopMatrix()\n glPopMatrix()\n return matrix", "def ortho(self):\r\n\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n x = _vec3(m11, m21, m31)\r\n y = _vec3(m12, m22, m32)\r\n z = _vec3(m13, m23, m33)\r\n\r\n xl = x.length()\r\n xl*=xl\r\n y = y - ((x*y)/xl)*x\r\n z = z - ((x*z)/xl)*x\r\n\r\n yl = y.length()\r\n yl*=yl\r\n z = z - ((y*z)/yl)*y\r\n\r\n return mat4( x.x, y.x, z.x, m14,\r\n x.y, y.y, z.y, m24,\r\n x.z, y.z, z.z, m34,\r\n m41, m42, m43, m44)", "def rotations4(polycube, axis):\r\n for i in range(4):\r\n yield rot90(polycube, i, axis)", "def renorm(self):\n self.U /= (np.sum(np.abs(self.U)**2)*self.dx)**0.5", "def rotate90(self):", "def deg2rad_inplace(a):", "def get_m1(self):\n\n pass", "def postTransform(self, R:Rotation) -> None:\n for i,m in enumerate(self.milestones):\n assert len(m) == 18\n mq = m[:9]\n mv = m[9:]\n self.milestones[i] = so3.mul(mq,R) + so3.mul(mv,R)", "def galactic_to_MS():\n return MS_MATRIX", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def MiyamotoNagaiAccel(self, M, rd, r):\n R = np.sqrt(r[0]**2 + r[1]**2) #Finding magnitude of x and y compnets\n zd = rd/5. 
#Calculating \"zd\"\n B = rd + np.sqrt(r[2]**2 + zd**2) #Calclating \"B\"\n zstuff = 1/np.sqrt(r[2]**2 + zd**2) #Calculating stuff that only appears in z componet\n MNa = -self.G*M/(R**2+B**2)**1.5 * r * np.array([1,1,zstuff]) #Putting it all together\n\n return MNa", "def runOpenMM(parm, inpcrdFile, system, rad, K, Indices, solvate, out_dcd, out_csv, out_rst ):\n\n \n def newIntegrator():\n integrator = mm.LangevinIntegrator(\n 300.0 * u.kelvin,\n 10.0 / u.picosecond,\n 2.0 * u.femtosecond)\n return integrator\n\n def pmdStructureToOEMol(parm, resname):\n\n from oeommtools.utils import openmmTop_to_oemol\n mask = \"!(:%s)\" %resname\n structure_LIG = parmed.load_file( '../2gmx_wat.prmtop', xyz = '../equilibration/rst/step8.rst.125000' )\n structure_LIG.strip(mask)\n pos = structure_LIG.positions\n top = structure_LIG.topology\n molecule = openmmTop_to_oemol(top, pos, verbose=False)\n OEPerceiveBondOrders(molecule)\n OEAssignAromaticFlags(molecule)\n OEFindRingAtomsAndBonds(molecule)\n\n return molecule\n \n def getAtomIndices( structure, resname ):\n \"\"\"\n Get atom indices of a ligand from ParmEd Structure.\n Arguments\n ---------\n resname : str\n String specifying the resiue name of the ligand.\n structure: parmed.Structure\n ParmEd Structure object of the atoms to be moved.\n Returns\n -------\n atom_indices : list of ints\n list of atoms in the coordinate file matching lig_resname\n \"\"\"\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand\n\n\n \"\"\"\n Rotate the torsion to an angle rad using openeye toolkits\n \"\"\" \n molecule = pmdStructureToOEMol( parm, \"LIG\" )\n atom_indices_ligand = getAtomIndices( parm, \"LIG\" )\n\n\n dihedral_atoms = [\"C10\", \"C9\", \"C3\", \"C2\" ]\n atom1 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[0]))\n atom2 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[1]))\n atom3 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[2]))\n atom4 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[3]))\n if OESetTorsion(molecule, atom1, atom2, atom3, atom4, rad ) == False :\n print(\"Torsional bond couldn't be rotated. 
Please enter correct atoms!\"); \n exit()\n\n # Update ligand positions in nc_sim\n updated_pos = molecule.GetCoords()\n\n for index, atomidx in enumerate(atom_indices_ligand): \n parm.positions[atomidx] = np.array(updated_pos[index])*u.nanometers\n\n \"\"\"\n harmonically restrain dihedral angle\n see units, http://docs.openmm.org/6.3.0/userguide/theory.html\n \"\"\"\n pi = np.pi\n harmonic = mm.CustomTorsionForce(\"k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0); pi = %.5f\" % pi);\n harmonic.addPerTorsionParameter(\"theta0\");\n harmonic.addPerTorsionParameter(\"k\");\n system.addForce(harmonic)\n harmonic.addTorsion(Indices[0], Indices[1], Indices[2], Indices[3], (rad, K))\n\n # Restraint non-moving part of the ligand\n restraintWt = 200 #kcal/mol/A2\n # define the custom force to restrain atoms to their starting positions\n force_restr = mm.CustomExternalForce('k_restr*periodicdistance(x, y, z, x0, y0, z0)^2')\n # Add the restraint weight as a global parameter in kcal/mol/A^2\n force_restr.addGlobalParameter(\"k_restr\", restraintWt*u.kilocalories_per_mole/u.angstroms**2)\n # Define the target xyz coords for the restraint as per-atom (per-particle) parameters\n force_restr.addPerParticleParameter(\"x0\")\n force_restr.addPerParticleParameter(\"y0\")\n force_restr.addPerParticleParameter(\"z0\")\n alch_list = ['C9', 'H92', 'H93', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H4', 'H5', 'H6']\n for idx, atom_crd in enumerate( parm.positions ):\n name=parm.atoms[idx].name;\n resname=parm.atoms[idx].residue.name;\n if resname == \"LIG\":\n if not name in alch_list:\n xyz = parm.positions[idx].in_units_of(u.nanometers)/u.nanometers\n force_restr.addParticle(idx, xyz)\n system.addForce( force_restr )\n\n # build simulaion\n platform = mm.Platform.getPlatformByName('CUDA')\n integ1 = newIntegrator()\n simulation = app.Simulation(parm.topology, system, integ1)\n simulation.context.setPositions( parm.positions )\n\n # Set Box dimensions\n inpcrd = app.AmberInpcrdFile( inpcrdFile );\n if inpcrd.boxVectors is not None:\n simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)\n\n print('RESTARTING simulation from a previous State..........%s' %inpcrdFile)\n velocities = parm.velocities \n simulation.context.setVelocities( inpcrd.velocities ) \n\n # perform minimization\n print('Minimizing...')\n simulation.minimizeEnergy( tolerance = 0.5 * kilojoule/mole )\n \n # adding simulation reporters\n simulation.context.setVelocitiesToTemperature(300*u.kelvin)\n simulation.reporters.append(app.DCDReporter(out_dcd, 1000))\n simulation.reporters.append(app.StateDataReporter(csv_file, 1000, step=True, potentialEnergy=True, totalEnergy=True, volume=True,temperature=True, separator='\\t'))\n restrt = RestartReporter( out_rst, 10000000, parm.ptr('natom') );\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n\n\n print('Production run at NVT...')\n simulation.step(5000000) # 10 ns\n \n # saving last restart\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n return", "def edr3ToICRF(pmra, pmdec, ra, dec, G):\n if G >= 13:\n return pmra, pmdec\n\n ra = u.Quantity(ra, unit=u.deg)\n dec = u.Quantity(dec, unit=u.deg)\n pmra = u.Quantity(pmra, unit=u.mas / u.year)\n pmdec = u.Quantity(pmdec, unit=u.mas / u.year)\n\n table1 = np.array([[0.0, 9.0, 18.4, 33.8, -11.3],\n [9.0, 9.5, 14.0, 30.7, 
-19.4],\n [9.5, 10.0, 12.8, 31.4, -11.8],\n [10.0, 10.5, 13.6, 35.7, -10.5],\n [10.5, 11.0, 16.2, 50.0, 2.1],\n [11.0, 11.5, 19.4, 59.9, 0.2],\n [11.5, 11.75, 21.8, 64.2, 1.0],\n [11.75, 12.0, 17.7, 65.6, -1.9],\n [12.0, 12.25, 21.3, 74.8, 2.1],\n [12.25, 12.5, 25.7, 73.6, 1.0],\n [12.5, 12.75, 27.3, 76.6, 0.5],\n [12.75, 13.0, 34.9, 68.9, -2.9]]).T\n\n g_min = table1[0]\n g_max = table1[1]\n # pick the appropriate omegaXYZ for the source ’s magnitude :\n omega_x = table1[2][(g_min <= G) & (g_max > G)][0]*(u.mas/u.year)/1000.0\n omega_y = table1[3][(g_min <= G) & (g_max > G)][0]*(u.mas/u.year)/1000.0\n omega_z = table1[4][(g_min <= G) & (g_max > G)][0]*(u.mas/u.year)/1000.0\n pmra_corr = -1 * np.sin(dec) * np.cos(ra) * omega_x - np.sin(dec) * np.sin(ra) * omega_y + np.cos(dec) * omega_z\n pmdec_corr = np.sin(ra) * omega_x - np.cos(ra) * omega_y\n pmra_icrf = pmra - pmra_corr\n pmdec_icrf = pmdec - pmdec_corr\n return pmra_icrf, pmdec_icrf", "def molToPsi4(self):\n mol = self.molfile\n mol = Chem.AddHs(mol)\n AllChem.EmbedMolecule(mol, useExpTorsionAnglePrefs=True, useBasicKnowledge=True)\n AllChem.UFFOptimizeMolecule(mol)\n atoms = mol.GetAtoms()\n string = string = \"\\n\"\n for i, atom in enumerate(atoms):\n pos = mol.GetConformer().GetAtomPosition(atom.GetIdx())\n string += \"{} {} {} {}\\n\".format(atom.GetSymbol(), pos.x, pos.y, pos.z)\n string += \"units angstrom\\n\"\n return string, mol", "def exo4_q1(mu,n,x0,y0):\r\n #liste.append([x0,y0])\r\n absc = x0\r\n ordn = y0\r\n L = np.array([absc])\r\n M = np.array([ordn])\r\n for i in range(0,n):\r\n absc = exo2_1(absc,mu)\r\n ordn = exo2_1(ordn,mu)\r\n L = np.append(L,[absc])\r\n M = np.append(M,[ordn])\r\n return L,M", "def comp_angle_opening_magnet(self):\n\n if self.W1 > 0:\n Rbo = self.get_Rbo()\n return float(2 * arcsin(self.W1 / (2 * Rbo)))\n else:\n return self.comp_angle_magnet()", "def preTransform(self,R: Rotation) -> None:\n for i,m in enumerate(self.milestones):\n self.milestones[i] = so3.mul(R,m)" ]
[ "0.6384693", "0.5832228", "0.5281928", "0.5183972", "0.51822543", "0.51589733", "0.5147658", "0.51439065", "0.5104023", "0.5096983", "0.50910795", "0.5061224", "0.50440603", "0.50376153", "0.50355136", "0.50339186", "0.5033682", "0.5008212", "0.49874973", "0.49730933", "0.49718088", "0.49646896", "0.4957999", "0.49542636", "0.4950976", "0.49444336", "0.49413007", "0.49251217", "0.4923517", "0.4918185" ]
0.66791886
0
Update amqp client relation hooks IFF leader node is ready. Client nodes are considered ready once the leader has already run amqp_changed.
def update_clients(): if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready(): for rid in relation_ids('amqp'): for unit in related_units(rid): amqp_changed(relation_id=rid, remote_unit=unit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _updateHeartbeat (self) :\r\n for pw, conn in self.clients :\r\n if conn : # we do have at least one client, enable heartbeat if needed\r\n self.have_clients = True\r\n return\r\n \r\n self.have_clients = False", "def _handle_coordinator_update(self) -> None:\n self._attr_is_on = self.relay.active\n self.async_write_ha_state()", "def set_ready(self, ready):\n\n if ready != self.ready:\n self.ready = ready\n self.send_heartbeat() # because ready has changed", "def on_connect(self, mqtt_client, userdata, flags, rc):\n global connack\n logging.debug(\"DEBUG - Connected to broker\")\n connack = True", "async def on_ready():\n # Set presence of bot\n await client.change_presence(status=discord.Status.online)\n change_status.start()\n\n # Checks for new / removed guilds after downtime\n guilds.check_guilds(client)\n\n # States, that the bot is ready\n print(\"{} is logged in as user {}\".format(appearance.bot_name, client.user.name))", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to broker\")\n client.connected_flag = True\n else:\n print(\"Connection failed\")\n client.connected_flag = False", "async def on_ready():\r\n for each_guild in client.guilds:\r\n if each_guild.name == PRIMARY_GUILD_NAME:\r\n print(\"Locked In 😎\\n\") # we are where we want to be\r\n elif each_guild.name == TESTING_GUILD_NAME:\r\n print(f\"{client.user} is connected to {each_guild.name}, which is recognized as a Testing \"\r\n f\"Guild\\n\")\r\n else:\r\n print(\"Name's didn't match 🤔\")\r\n print(f'{client.user} has successfully connected to {each_guild.name}! 😁\\n')\r\n await client.change_presence(activity=discord.Game('RDO - Wagon Stealing')) # sets the bots Activity\r", "async def on_ready(self):\n if not hasattr(self.bot, 'uptime'):\n self.bot.uptime = datetime.utcnow()\n\n # Check if user desires to have something other than online\n status = config.STATUS_TYPE.lower()\n status_type = {\"idle\": discord.Status.idle, \"dnd\": discord.Status.dnd}\n\n # Check if user desires to have a different type of activity\n activity = config.ACTIVITY_TYPE.lower()\n activity_type = {\"listening\": 2, \"watching\": 3, \"competing\": 5}\n\n await self.bot.change_presence(\n activity=discord.Game(type=activity_type.get(activity, 0), name=config.ACTIVITY),\n status=status_type.get(status, discord.Status.online)\n )\n\n # Indicate that the bot has successfully booted up\n print(f'Ready: {self.bot.user} | Servers: {len(self.bot.guilds)}')", "def process_clients():\n for client in state.CLIENT_LIST:\n if client.active and client.cmd_ready:\n logging.debug(\"Found a message, processing...\")\n msg_processor(client)", "async def on_ready():\r\n logging.info(f'{bot.user} has connected to Discord!')\r\n for guild in bot.guilds:\r\n await update_roles(guild)\r\n await check_guild_rules(guild)", "def on_connect( client, userdata, flag, rc ):\n if ( rc == 0 ):\n client.connected_flag = True\n logging.info( \"Connected to Broker! Returned code: %s\\n\" %rc )\n else:\n logging.info( \"Failed to connect. 
Returned code: %s\\n\" %rc )", "def _availability_message_received(self, msg: ReceiveMessage) -> None:\n self._available = msg.payload == \"online\"\n self.async_write_ha_state()", "def _update_on_active(self):\n pass", "def _r_on_connection_established(self, protocol):\n print(\"Client connected\")\n self._current_protocol = protocol\n\n for d in self._waiting_for_connection:\n d.callback(True)\n self._waiting_for_connection = []", "def update_callback(self):\n self.schedule_update_ha_state(True)", "def update_callback(self):\n self.schedule_update_ha_state(True)", "def connection_ready(self, active: bool):\n self._connection_ready = active", "def ready(self):\n self.update({self.STATE: self.STATE_READY})", "def slot_client_connected(self, _sender, _data):\r\n self.check_connect_ready()", "def clientConnected(self):\n self.running = True\n for d in self._callWhenReady:\n d.callback(None)", "def set_on_ready_to_recv (self, on_ready):\n self.receiving.on_data = lambda channel, size: on_ready(self, size)", "async def on_ready(self):\n logger.info('Bot is now ready and connected to Discord.')\n guild_count = len(self.guilds)\n logger.info(f'Connected as {self.user.name}#{self.user.discriminator} to {guild_count} guild{\"s\" if guild_count > 1 else \"\"}.')\n\n with self.get_session() as session:\n for guild in self.guilds:\n _guild: Guild = session.query(Guild).get(guild.id)\n if _guild is None:\n logger.warning(\n f'Guild {guild.name} ({guild.id}) was not inside database on ready. Bot was disconnected or did not add it properly...')\n session.add(Guild(id=guild.id))\n\n # TODO: Scan all messages on start for current period and check for new periods/updated vote counts.", "def async_update_callback(self) -> None:\n self._attr_is_on = self._switch.on", "def serverExplicitReady (self):\n self.server_ready.set()", "def _ready(cls):\n sync_call(cls.ready)", "async def on_ready():\n await bot.change_presence(status=discord.Status.online, activity=discord.Game(activity))", "def on_connected(self):\n logger.info('connection to redis resumed')\n for chid in self.clients.iterkeys():\n self.subscriber.psubscribe(chid)", "async def async_added_to_hass(self) -> None:\n if not (last_state := await self.async_get_last_state()):\n return\n self._attr_is_on = last_state.state == STATE_ON\n\n if self._attr_is_on:\n evt.async_call_later(self.hass, OFF_DELAY, self.off_delay_listener)", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n self._attr_is_on = await self.relay.set_active(True)\n self.async_write_ha_state()" ]
[ "0.5726184", "0.5629322", "0.5541219", "0.551056", "0.5471582", "0.54037356", "0.5383622", "0.53529847", "0.5351824", "0.5332944", "0.53299505", "0.5322766", "0.5321517", "0.53153884", "0.530414", "0.5285929", "0.5285929", "0.52854335", "0.52835894", "0.526965", "0.5262245", "0.5252558", "0.5235089", "0.5229949", "0.5213754", "0.51921093", "0.51873285", "0.51832396", "0.51747507", "0.5168643" ]
0.7011619
0
Affiche les nombres impairs entre `n` et `m` inclus. >>> affiche_impairs(42, 51) 43 45 47 49 51
def affiche_impairs(n: int, m: int) -> None: # Trois versions proposées ## Version itérative classique i = n if i % 2 == 0: i += 1 while i <= m: print(i, end=" ") i += 2 print() ## Version fonctionnelle ; 1 ligne #print(" ".join(map(str, range(n - n%2 + 1, m + 1, 2)))) ## Autre version fonctionnelle, avec *unpack* ; hors programme #impairs = list(range(n - n%2 + 1, m + 1, 2)) #print(*impairs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def imprimir_matriz(matriz):\n for i in matriz:\n print(i)", "def imprime(nota_fiscal):\n\n print(\"Imprimindo nota fiscal %s\" % nota_fiscal.cnpj)", "def _compute_imprimitivity(self):\n m = floor(self._.d / 2)\n self._.antipodal = all(full_simplify(\n self._.b[i] - self._.c[self._.d - i]) == 0\n for i in range(self._.d) if i != m)\n self._.bipartite = all(a == 0 for a in self._.a)\n if self._.antipodal:\n try:\n self._.r = integralize(\n 1 + self._.b[m] / self._.c[self._.d - m])\n except TypeError:\n raise InfeasibleError(\"covering index not integral\")\n if self._.d >= 2:\n if self._.d == 2:\n b = [self._.b[0]/(self._.b[1]+1)]\n c = [Integer(1)]\n else:\n b = self._.b[:m]\n c = list(self._.c[1:m+1])\n if is_divisible(self._.d, 2):\n c[-1] *= self._.r\n scheme = self._get_class()(tuple(b), tuple(c))\n else:\n scheme = ASParameters(P=[[1]])\n self._.antipodal_subscheme = self.add_subscheme(scheme,\n self.ANTIPODAL)\n if self._.bipartite:\n if self._.d >= 2:\n b = tuple(self._.b[2*i]*self._.b[2*i+1]/self._.c[2]\n for i in range(m))\n c = tuple(self._.c[2*i+1]*self._.c[2*i+2]/self._.c[2]\n for i in range(m))\n scheme = self._get_class()(b, c)\n else:\n scheme = ASParameters(P=[[1]])\n self._.bipartite_subscheme = self.add_subscheme(scheme,\n self.BIPARTITE)", "def nome_impresso(self, nome_impresso):\n self._nome_impresso = nome_impresso", "def imprimir_menu():\n print(\"Que desea realizar en la matriz\")\n print(\"1. Presentar el nro Central \")\n print(\"2. Presentar los nros en forma espiral desde el centro \")\n print(\"3. Multiplos del nro central\")", "def _fill_impropers_cross_maps(self) -> None:\n impropers, cross_maps = [], []\n for residue in self.residues:\n for improper in residue.impropers:\n impropers.append([self._id_to_index[x] for x in improper])\n for cross_map in residue.cross_maps:\n cross_maps.append([self._id_to_index[x] for x in cross_map])\n self.impropers, self.cross_maps = impropers, cross_maps", "def assert_simplex_incidence(M,n):\n assert M.shape[1] == n, 'Incidence matrix: wrong size'\n assert (M.sum(axis=0)-1).nonzero()[0].size == 0, \\\n 'Incidence matrix: columns should sum to 1'\n assert M.nnz == n, 'Incidence matrix: should be n nonzero values'", "def imputer(seq, n=500):\n cur = len(seq)\n if cur < n:\n return np.concatenate((seq, np.zeros(n - cur)))\n return seq[: n]", "def amount_of_stairs(n):\n\n matrix = [[0] * n for i in range(n)]\n\n for i in range(0, n):\n for j in range(1, i):\n matrix[i][j] = sum(matrix[i - j - 1][:j])\n matrix[i][i] = 1\n\n # print_matrix(matrix)\n return sum(matrix[n-1])", "def imprimir_opciones(lista):\n for i, elem in enumerate(lista):\n print(\"{}. 
{}\".format(i + 1, elem))", "def perm4missing(flights, col, N):\n\n return ...", "def imprimir(self, screen):\n\n self.cursor.imprimir(screen)\n\n for opcion in self.opciones:\n opcion.imprimir(screen)", "def test_pruned_impropers(self, molecule, n_impropers, n_pruned):\n mol = Molecule.from_smiles(molecule)\n assert mol.n_impropers == n_impropers\n assert len(mol.smirnoff_impropers) == n_pruned\n assert len(mol.amber_impropers) == n_pruned\n\n # Order not guaranteed, so cannot zip and compare directly\n for smirnoff_imp in mol.smirnoff_impropers:\n # Convert SMIRNOFF-style improper into AMBER-style\n mod_imp = (\n smirnoff_imp[1],\n smirnoff_imp[0],\n smirnoff_imp[2],\n smirnoff_imp[3],\n )\n assert mod_imp in mol.amber_impropers", "def fn(i, seen=set(), ans=0):\n if i == n: ans += 1\n for j in range(n):\n place = {(\"col\", j), (\"diag\", i-j), (\"anti\", i+j)}\n if not (place & seen): \n seen |= place\n ans = fn(i+1, seen, ans)\n seen -= place \n return ans", "def getImpropers(self):\n try:\n return self._improperList\n except AttributeError:\n pass\n self._improperList = []\n if 'CHARMM_IMPROPERS' in self._raw_data:\n forceConstant = self._raw_data[\"CHARMM_IMPROPER_FORCE_CONSTANT\"]\n phase = self._raw_data[\"CHARMM_IMPROPER_PHASE\"]\n improperPointers = self._raw_data[\"CHARMM_IMPROPERS\"]\n forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)\n for ii in range(0,len(improperPointers),5):\n if int(improperPointers[ii])<0 or int(improperPointers[ii+1])<0:\n raise Exception(\"Found negative improper atom pointers %s\"\n % ((improperPointers[ii],\n improperPointers[ii+1],\n improperPointers[ii+2],\n improperPointers[ii+3]),))\n iType = int(improperPointers[ii+4])-1\n self._improperList.append((int(improperPointers[ii])-1,\n int(improperPointers[ii+1])-1,\n abs(int(improperPointers[ii+2]))-1,\n abs(int(improperPointers[ii+3]))-1,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(phase[iType])))\n return self._improperList", "def test_intra_num_helix(self, n):\n def ia_scaling(x): return (\n ((x - 2) * (x - 1)) / 2) * 16 if x > 0.0 else 0.0\n helix = isambard.specifications.Helix(n)\n helix.assign_force_field(self.ff)\n buff_interactions = isambard.buff.find_intra_ampal(\n helix, 1.52 * (n + 1))\n self.assertEqual(len(buff_interactions), ia_scaling(n))", "def intershow(Iab):\n from numpy import array, product, reshape, choose\n from string import join\n\n assert (type(Iab) is tuple) and (len(Iab) == 2),'not proper fortmat of hit-or-miss template'\n A,Bc = Iab\n S = seunion(A,Bc)\n Z = intersec(S,0)\n n = product(S.shape)\n one = reshape(array(n*'1','c'),S.shape)\n zero = reshape(array(n*'0','c'),S.shape)\n x = reshape(array(n*'.','c'),S.shape)\n saux = choose( S + seunion(Z,A), ( x, zero, one))\n s = ''\n for i in xrange(saux.shape[0]):\n s=s+(join(list(saux[i]))+' \\n')\n return s", "def imprimir_mochila(especiales_nivel, especiales_en_mochila, datos_de_especiales):\n print(\"\"\" MOCHILA\n| SIMBOLO | TECLA | CANTIDAD | DESCRIPCIÓN |\"\"\")\n for simbolo, info in datos_de_especiales.items():\n if simbolo not in especiales_nivel: continue\n print(f\"\"\"\n {simbolo} {info[2]} {especiales_en_mochila[simbolo]} {info[3]}\"\"\")", "def icao(mesaj):\n fsicao = open(\"mesaj.icao_intrare\", \"w\")\n for cuvant in mesaj.split(' '):\n for litera in cuvant:\n fsicao.write(' '.join(extrage_litere(litera.lower())))\n fsicao.write(' ')\n fsicao.write('\\n')\n fsicao.close()", "def fn(i, m):\n if i + 2*m >= len(piles): return 
prefix[-1] - prefix[i]\n ans = -inf \n for ii in range(1, 2*m+1): \n if i+ii < len(prefix): \n ans = max(ans, prefix[i+ii] - prefix[i] - fn(i+ii, max(m, ii)))\n return ans", "def readMFAPairs(mfaFile1, mfaFile2):\n def fn(file):\n return \"\".join([ i[:-1] for i in open(file, 'r').readlines()[1:] ])\n j = [0]\n def fn2(i):\n if i == '-':\n return GAP\n k = j[0]\n j[0] += 1\n return k\n mfa1 = fn(mfaFile1)\n mfa2 = fn(mfaFile2)\n mfa2 = [ fn2(i) for i in mfa2 ]\n assert len(mfa1) == len(mfa2)\n return [ mfa2[i] for i in xrange(0, len(mfa1)) if mfa1[i] != '-' ]", "def _Ih(numberPoints):\n Id = np.ones(numberPoints+1)\n Id[0] = 0\n Id[numberPoints] = 0\n return sp.diags(Id)", "def nome_impresso(self):\n return self._nome_impresso", "def pullback_irr(self, M, p1, p2):\n pass", "def climbing_stairs(n):\n\tif n < 2:\n\t\treturn 1\n\tif n == 2:\n\t\treturn 2\n\treturn climbing_stairs(n-1) + climbing_stairs(n-2)", "def add_fe_iim_miss(self):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n self.logger.info(\"Adding Identity Indicator (MAC) flow table miss\"\n \" flow entry to dpid=%s\", self.dpid)\n match = parser.OFPMatch()\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n inst = [parser.OFPInstructionActions(\n ofproto.OFPIT_APPLY_ACTIONS, actions)]\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_iim,\n priority=0, match=match, instructions=inst)\n self.datapath.send_msg(mod)", "def _elimination_matrix(dim):\n n = dimension_to_number_of_triangular_elements(dim)\n\n counter = np.zeros((dim, dim), int) - 1\n counter[np.tril_indices(dim)] = np.arange(n, dtype=int)\n\n columns = [_unit_vector_or_zeros(i, n) for i in counter.ravel(\"F\")]\n\n eliminator = np.column_stack(columns)\n return eliminator", "def pipi(pkgs:str)->int:\n return pipil(pkgs.split())", "def giniImpurity(rows, resCol=None):\n if not resCol: #create the dictionary of counts for each class using pure python\n total = len(rows)\n counts = __uniqueCounts(rows)\n else: #Create the dictionary of counts for each class using pandas.\n assert 'index' in dir(rows)\n total = len(rows)\n counts = __uniqueCountsPandas(rows, resCol)\n imp = 1 #Initialize the gini-impurity at 1\n #Implement the formula for calculating gini-impurity\n fracs = [float(x)/total for x in counts.values()]\n for x in fracs:\n imp -= x*x\n return imp", "def IC(text, ncol):\n text = scrub_string(text)\n A = str_to_matrix(scrub_string(text), ncol)\n cum = 0\n for col in A:\n N = len(col)\n cum += sum(n * (n - 1) for n in Counter(col).values()) / (\n N * (N - 1) / LETTER_CNT\n )\n return cum / ncol" ]
[ "0.51183224", "0.47331733", "0.46972212", "0.4431077", "0.4413606", "0.4408542", "0.43618786", "0.43256623", "0.42876646", "0.42723647", "0.42626938", "0.42480263", "0.42155024", "0.4205314", "0.4193979", "0.41894197", "0.41849914", "0.41778257", "0.41758695", "0.4172205", "0.41632187", "0.41630915", "0.4160791", "0.41604075", "0.41362724", "0.41163594", "0.40858206", "0.40817028", "0.4076219", "0.40713298" ]
0.73484516
0
Plots an individual chip in a subaxis
def plot_chip(self, aid, nRows, nCols, px, fulldraw=True, **kwargs): ibs = self.ibs if aid in [self.aid1, self.aid2]: # Bold color for the matching chips lw = 5 text_color = np.array((135, 206, 235, 255)) / 255.0 else: lw = 2 text_color = None pnum = (nRows, nCols, px) if not fulldraw: # not doing full draw so we have to clear any axes # that are here already manually ax = self.fig.add_subplot(*pnum) self.clear_parent_axes(ax) # ut.embed() # logger.info(subax) viz_chip_kw = { 'fnum': self.fnum, 'pnum': pnum, 'nokpts': True, 'show_name': True, 'show_gname': False, 'show_aidstr': True, 'notitle': True, 'show_num_gt': False, 'text_color': text_color, } if False and ut.is_developer(): enable_chip_title_prefix = True viz_chip_kw.update( { 'enable_chip_title_prefix': enable_chip_title_prefix, 'show_name': True, 'show_aidstr': True, 'show_viewcode': True, 'show_num_gt': True, 'show_quality_text': True, } ) viz_chip.show_chip(ibs, aid, **viz_chip_kw) ax = pt.gca() pt.draw_border(ax, color=kwargs.get('color'), lw=lw) if kwargs.get('make_buttons', True): # divider = pt.ensure_divider(ax) butkw = { # 'divider': divider, 'ax': ax, 'size': '13%' # 'size': '15%' } # Chip matching/naming options nid = ibs.get_annot_name_rowids(aid) annotation_unknown = ibs.is_nid_unknown([nid])[0] if not annotation_unknown: # remove name callback = functools.partial(self.unname_annotation, aid) self.append_button( 'remove name (' + ibs.get_name_texts(nid) + ')', callback=callback, **butkw, ) else: # new name callback = functools.partial(self.mark_annotation_as_new_name, aid) self.append_button('mark as new name', callback=callback, **butkw) if ( nid != self.nid2 and not ibs.is_nid_unknown([self.nid2])[0] and not self.is_split_case ): # match to nid2 callback = functools.partial(self.rename_annotation, aid, self.nid2) text = 'match to name2: ' + ibs.get_name_texts(self.nid2) self.append_button(text, callback=callback, **butkw) if nid != self.nid1 and not ibs.is_nid_unknown([self.nid1])[0]: # match to nid1 callback = functools.partial(self.rename_annotation, aid, self.nid1) text = 'match to name1: ' + ibs.get_name_texts(self.nid1) self.append_button(text, callback=callback, **butkw) other_nid_list = self.get_other_nids() for other_nid in other_nid_list: if other_nid == nid: continue # rename nid2 callback = functools.partial(self.rename_annotation, aid, other_nid) text = 'match to: ' + ibs.get_name_texts(other_nid) self.append_button(text, callback=callback, **butkw) return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_channels(dat, chanaxis=-1, otheraxis=-2):\n ax = []\n n_channels = dat.data.shape[chanaxis]\n for i, chan in enumerate(dat.axes[chanaxis]):\n if i == 0:\n a = plt.subplot(10, n_channels / 10 + 1, i + 1)\n else:\n a = plt.subplot(10, n_channels / 10 + 1, i + 1, sharex=ax[0], sharey=ax[0])\n ax.append(a)\n x, y = dat.axes[otheraxis], dat.data.take([i], chanaxis)\n a.plot(dat.axes[otheraxis], dat.data.take([i], chanaxis).squeeze())\n a.set_title(chan)\n plt.axvline(x=0)\n plt.axhline(y=0)", "def cplot(self, figure, i, n):\n xx, yy = np.meshgrid(range(self.L), range(self.L))\n ax = figure.add_subplot(2,2,n)\n plt.setp(ax.get_yticklabels(), visible=False)\n plt.setp(ax.get_xticklabels(), visible=False) \n plt.pcolormesh(xx, yy, self.config, cmap=plt.cm.RdBu);\n plt.title('Time=%d'%i, fontsize=20)\n plt.xlabel('X', fontsize=12)\n plt.ylabel('Y',fontsize=12) \n plt.axis('tight') \n self.ax = ax", "def plot(self):\n pass", "def plot(\n self,\n color_map={\n \"ex\": (1, 0.2, 0.2),\n \"ey\": (1, 0.5, 0),\n \"hx\": (0, 0.5, 1),\n \"hy\": (0.5, 0.2, 1),\n \"hz\": (0.2, 1, 1),\n },\n channel_order=None,\n ):\n\n if channel_order is not None:\n ch_list = channel_order()\n else:\n ch_list = self.channels\n\n n_channels = len(self.channels)\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0)\n ax_list = []\n for ii, comp in enumerate(ch_list, 1):\n try:\n color = color_map[comp]\n except KeyError:\n color = (0, 0.4, 0.8)\n if ii == 1:\n ax = plt.subplot(n_channels, 1, ii)\n else:\n ax = plt.subplot(n_channels, 1, ii, sharex=ax_list[0])\n self.dataset[comp].plot.line(ax=ax, color=color)\n ax.grid(which=\"major\", color=(0.65, 0.65, 0.65), ls=\"--\", lw=0.75)\n ax.grid(which=\"minor\", color=(0.85, 0.85, 0.85), ls=\"--\", lw=0.5)\n ax.set_axisbelow(True)\n if ii != len(ch_list):\n plt.setp(ax.get_xticklabels(), visible=False)\n\n ax_list.append(ax)", "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "def plot_data(self):", "def plot(self):\n\t\tself.plotOfSpect()", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def ad_sub_scatter(V,y,fig,ax,plotpos,x_label,y_label):\n ax = fig.add_subplot(plotpos)\n c = ['k','b','m','c']\n #s = [0.8,1,1,1]\n for i in range(len(V)):\n ax.scatter(V[i],y[i],color=c[i], s=5)\n ax.set_xlabel(x_label, fontsize = 16)\n ax.set_ylabel(y_label, fontsize = 16)\n ax.tick_params(axis='both' , labelsize = 12.0)\n return ax", "def plot_pcs_slice_sub(self,data_in,large_slice,plot_slice,\n indiv=0,color_array=None,sz=8):\n fig = plt.figure(figsize=(sz,6))\n gs = GridSpec(sz+len(self.states_list),1)\n feature_ax = plt.subplot(gs[:sz,:])\n stateseq_ax = plt.subplot(gs[sz+1])\n\n if color_array is None:\n color_array = self._get_colors()\n\n r_plot_slice = list(map(lambda x: large_slice[0] + x, plot_slice))\n z, perm = relabel_model_z(self,index=indiv)\n z = z[r_plot_slice]\n stateseq_norep, durations = rle(z)\n\n max_ = ceil(data_in.max()-data_in.min()) +1\n data_in=data_in[:,plot_slice]\n ttime = np.arange(data_in.shape[1])\n for ii in range(0,data_in.shape[0]):\n 
feature_ax.plot(ttime,data_in[ii,:] + ii*max_,'k')\n\n feature_ax.set_xlim((0,len(plot_slice)))\n feature_ax.set_ylim((data_in.min()-1,data_in.shape[0]*max_-1))\n feature_ax.set_yticks([])\n feature_ax.set_xticks([])\n\n stateseq_ax.imshow(z[:,np.newaxis].T,aspect='auto',\n cmap=ListedColormap(color_array),vmin=0,vmax=len(perm))\n stateseq_ax.set_yticks([])\n stateseq_ax.set_xticks([])\n\n for ii, pos in enumerate(durations.cumsum()):\n if durations[ii] >=1:\n feature_ax.axvline(pos,\n color=color_array[stateseq_norep[ii]],\n linestyle=':')\n return", "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "def show_tileselection(image, tile_selection, tile_dim=[200, 200]):\n fig, ax = plt.subplots()\n ax.imshow(image, cmap='gray')\n for r in np.arange(image.shape[0]+1, step=200):\n ax.plot([0, image.shape[1]], [r, r], 'r')\n for c in np.arange(image.shape[1]+1, step=200):\n ax.plot([c, c], [0, image.shape[0]], 'r') \n for tiler, tilec in zip(tile_selection[0], tile_selection[1]):\n ax.plot([tilec*tile_dim[0], tilec*tile_dim[0]], [tiler*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.plot([(tilec+1)*tile_dim[0], (tilec+1)*tile_dim[0]], [tiler*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.plot([tilec*tile_dim[0], (tilec+1)*tile_dim[0]], [tiler*tile_dim[0], tiler*tile_dim[0]], color=[0, 1, 0])\n ax.plot([tilec*tile_dim[0], (tilec+1)*tile_dim[0]], [(tiler+1)*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.set_xlim(-5, image.shape[1]+5)\n ax.set_ylim(image.shape[0]+5, -5)\n ax.axis('off')\n return fig, ax", "def plot_vis_crosshairs(fig, vis_data, title, crosscorr, ants, inputs, upper=True, units='', **kwargs):\n fig.subplots_adjust(wspace=0., hspace=0.)\n data_lim = np.max([np.abs(vis).max() for 
vis in vis_data])\n ax_lim = 1.05 * data_lim\n for n, (indexA, indexB) in enumerate(crosscorr):\n subplot_index = (len(ants) * indexA + indexB + 1) if upper else (indexA + len(ants) * indexB + 1)\n ax = fig.add_subplot(len(ants), len(ants), subplot_index)\n for vis in vis_data:\n ax.plot(vis[:, n].real, vis[:, n].imag, **kwargs)\n ax.axhline(0, lw=0.5, color='k')\n ax.axvline(0, lw=0.5, color='k')\n ax.add_patch(mpl.patches.Circle((0., 0.), data_lim, facecolor='none', edgecolor='k', lw=0.5))\n ax.add_patch(mpl.patches.Circle((0., 0.), 0.5 * data_lim, facecolor='none', edgecolor='k', lw=0.5))\n ax.axis('image')\n ax.axis([-ax_lim, ax_lim, -ax_lim, ax_lim])\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_frame_on(False)\n if upper:\n if indexA == 0:\n ax.xaxis.set_label_position('top')\n ax.set_xlabel(inputs[indexB][3:])\n if indexB == len(ants) - 1:\n ax.yaxis.set_label_position('right')\n ax.set_ylabel(inputs[indexA][3:], rotation='horizontal')\n else:\n if indexA == 0:\n ax.set_ylabel(inputs[indexB][3:], rotation='horizontal')\n if indexB == len(ants) - 1:\n ax.set_xlabel(inputs[indexA][3:])\n fig.text(0.5, 0.95 if upper else 0.05, title, ha='center', va='bottom' if upper else 'top')\n fig.text(0.95 if upper else 0.05, 0.5, 'Outer radius = %g %s' % (data_lim, units), va='center', rotation='vertical')", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def __call__(self, i):\n plt.subplot(self.nx, self.ny, i)\n return True", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def animate(i, subplot, xs, ys, cs):\n print(f\" Drawing generation {i}/{len(xs)} \\r\", end=\"\", flush=True)\n subplot.clear()\n subplot.set_title(f\"Gen {i}\")\n subplot.set_ylim(0, 1)\n subplot.set_xlim(0, 1)\n subplot.scatter(xs[i], ys[i], color=cs[i], s=individual_size)", "def EventSubsetDisplay( tubes, quantities, PMTFlatMapPositive, tubes_to_plot, title=\"Charge\", cutrange=[-1,-1], padding=10):\n PMTFlatMapPositive_values = [PMTFlatMapPositive[tube] for tube in tubes_to_plot]\n subset_x_values = np.array([value[0] for value in PMTFlatMapPositive_values])\n subset_y_values = np.array([value[1] for value in PMTFlatMapPositive_values])\n \n # set up dimensions for subset preimage with short tank data\n min_subplot_x_value = subset_x_values.min() - padding\n max_subplot_x_value = subset_x_values.max() + padding\n\n min_subplot_y_value = subset_y_values.min() - padding\n max_subplot_y_value = subset_y_values.max() + padding\n \n fig, ax= 
plt.subplots(figsize=[30,30])\n preimage = np.zeros( preimage_dimensions )\n\n subset_quantities = []\n for idx, tube in enumerate( tubes ):\n if cutrange[0] != cutrange[1]:\n if quantities[idx] < cutrange[0] or quantities[idx] > cutrange[1]:\n continue\n for dx in range(-3,4):\n for dy in range(-3,4):\n if abs(dx)==3 and abs(dy)==3:\n continue\n if tube in tubes_to_plot: \n #print( \"idx=\", idx, \" len(quantities)=\",len(quantities), \" tube=\", tube, \" len(PMTFlatMap)=\", len(PMTFlatMapPositive))\n preimage[ PMTFlatMapPositive[tube][1]+dx, PMTFlatMapPositive[tube][0]+dy ] = quantities[idx]\n subset_quantities.append(quantities[idx])\n \n subset_quantities = np.array(subset_quantities)\n\n imgmin = subset_quantities.min()\n imgmax = subset_quantities.max()\n \n if cutrange[0] != cutrange[1]:\n imgmin = cutrange[0]\n imgmax = cutrange[1]\n \n subset_image = preimage[min_subplot_y_value:max_subplot_y_value, min_subplot_x_value:max_subplot_x_value]\n \n im = ax.imshow( subset_image, extent = [min_subplot_x_value, max_subplot_x_value, min_subplot_y_value, max_subplot_y_value], vmin=imgmin, vmax=imgmax )\n\n fig.suptitle(title, fontsize=80)\n\n plt.rc('xtick', labelsize=24) \n plt.rc('ytick', labelsize=24) \n plt.xlabel('Distance CCW on perimeter from x-axis (cm)', fontsize=48)\n plt.ylabel('Y (cm)', fontsize=48)\n \n plt.set_cmap('gist_heat_r')\n\n # Create colourbar\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=24)\n\n # Fix title height\n plt.tight_layout()", "def myplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n \n for i in range(6):\n Z = absolute(data[i])**2\n grid[i].set_title(r\"$t={:03.1f}$\".format(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z, extent=(-2, 2, -2, 2), interpolation=\"gaussian\",origin=\"lower\",cmap=cmap,norm=LogNorm(vmin=1e-5,vmax=1))\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n #plt.colorbar(im, cax = grid.cbar_axes[0])\n #ticks = np.logspace(1e-6,1,7)\n #lf = LogFormatter(10, labelOnlyBase=False)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n ticks=np.logspace(1e-6,1e-1,6)\n fig.colorbar(im, ax=grid[5],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])", "def generate_singleatom_cs_plot(cs_data,resid=1,aname=\"CA\",frame=1):\n cs_data.set_index(['resSeq','name'], inplace=True)\n try:\n s=cs_data.loc[resid,aname]\n except KeyError as e: \n # If there are no atoms matching this selection\n return('', ['error'])\n \n framenum = cs_data.shape[1]\n x, y = s.index.to_series(), s\n\n # Establish sources\n source = ColumnDataSource(data=dict(\n x=x,\n y=y,\n frame=x.values,\n chemical_shift=y.values,\n ))\n\n # Basic plot setup: lineplot of cs values per frame\n p1 = figure(width=600, height=500, title='CS variation over frames: atom %s of residue %d' %(aname, resid))\n p1.line(\"x\", \"y\", source=source, line_width=0.5, color='#404387')\n #p1.sizing_mode = 'scale_width'\n\n\n # Vertical line slider: a vertical line in the plot that will move 
left or right depending on the selected frame\n frame_slider = Slider(start=0, end=framenum, value=frame, step=1, title=\"Frame\", name=\"frameslider\")\n frame_slider.width = 600\n vline = Span(location=frame_slider.value, dimension='height', line_color='#2377b4',line_width=2)\n p1.renderers.extend([vline])\n callback = CustomJS(args=dict(span=vline), code=\"\"\"\n span.location = cb_obj.value\n \"\"\")\n frame_slider.js_on_change('value', callback)\n\n\n # Remove bokeh tooltips from our plot, and put labels\n hoverlist = [('Frame', '@frame'),('Chemical shift', '@chemical_shift')]\n\n #Hover tool:\n p1.add_tools(HoverTool(tooltips=hoverlist, mode='vline'))\n p1.toolbar.active_drag = None\n p1.toolbar.active_scroll = None\n p1.toolbar.active_tap = None\n p1.toolbar_location = None\n p1.xaxis.axis_label = 'Frames'\n p1.yaxis.axis_label = 'Chemical Shift (ppm)'\n\n # Filter out non-numerical values from array\n measured = s.__array__()\n measured = np.array([ a for a in measured if type(a)==np.float64 ])\n # Histogram plot of cumulative cs values (to put alongside lineplot)\n hist, edges = np.histogram(measured, density=True, bins=100)\n x = np.linspace(measured.min(), measured.max(), 100)\n p1.y_range = Range1d(x.max()+1,x.min()-1)\n p1.x_range = Range1d(0, framenum)\n\n # instantiating the figure object for histogram and creating its plot\n graph = figure(width=300, height=500,title = \"Histogram\",)\n #graph.sizing_mode = 'scale_width'\n graph.hbar(x, right = hist, height = 0, color='#404387')\n graph.xaxis.axis_label = 'Frame density'\n graph.yaxis.visible = False\n graph.toolbar.active_drag = None\n graph.toolbar.active_scroll = None\n graph.toolbar.active_tap = None\n graph.toolbar.logo = None\n graph.toolbar_location = None\n graph.y_range = Range1d(x.max()+1,x.min()-1)\n\n # displaying the model\n p = column(row(p1,graph),frame_slider)\n \n # Names of atoms to represent in the structure\n avail_res_atoms = {'%d.%s'%(resid, aname):{'color': '#FF0000', 'display': False}} # Why red? Becasue red is the rose, that in yonder garden grow.... Just kidding. 
We should offer an option or something in the future\n\n return(p, avail_res_atoms)", "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "def __plot_si_cf_plane(self, ax=None) -> None:\n sns.scatterplot(self.cf, self.si, ax=ax)", "def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return", "def debugplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n Z0=data[0].real\n Z1=data[1].real\n Z2=data[2].real\n Z3=data[3].real\n Z4=data[4].real\n Z5=data[5].real\n \n Z=[Z0,Z1,Z2,Z3,Z4,Z5]\n \n for i in range(6):\n grid[i].set_title(r\"$t=%u\\Delta t$\"%(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z[i], extent=(-2, 2, -2, 2), interpolation=\"Nearest\",origin=\"lower\",cmap='seismic',vmin=-1,vmax=1)\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n fig.colorbar(im, ax=grid[2],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])", "def plot_pcs_slice(self,data_in,large_slice,plot_slice=None,\n num_pcs=4,indiv=0, color_array=None,fs=30,sz=4):\n if color_array == None:\n color_array = self._get_colors()\n # Plot params\n fig = plt.figure(figsize=(sz,6))\n gs = GridSpec(sz+len(self.states_list),1)\n feature_ax = plt.subplot(gs[:sz,:])\n data_in = data_in[:num_pcs,large_slice][::-1,:]\n max_ = ceil(data_in.max()-data_in.min()) + 1\n ttime = np.arange(data_in.shape[1])\n for ii in range(0,num_pcs):\n feature_ax.plot(ttime,data_in[ii,:]+ii*max_,'k')\n feature_ax.set_yticks(np.arange(num_pcs)*max_)\n feature_ax.set_yticklabels('')\n\n feature_ax.set_ylim((data_in.min()-1,num_pcs*max_-1))\n\n xlabel_= np.linspace(0,data_in.shape[1],5,dtype='int')\n feature_ax.set_xticks(xlabel_)\n feature_ax.set_xlim((xlabel_[0],xlabel_[-1]))\n feature_ax.set_xticklabels(list(map(str,xlabel_ // fs)))\n\n if not (plot_slice is None):\n feature_ax.axvline(plot_slice[0], color=color_array[0],linestyle=':',lw=2)\n feature_ax.axvline(plot_slice[-1], color=color_array[0],linestyle=':',lw=2)\n plot_pcs_slice_sub(self,data_in,large_slice,plot_slice,indiv,color_array)\n return", "def EventDisplay( tubes, quantities, PMTFlatMapPositive, title=\"Charge\", cutrange=[-1,-1] ):\n \n fig, ax= plt.subplots(figsize=[30,30])\n preimage = np.zeros( preimage_dimensions )\n \n imgmin = quantities.min()\n imgmax = quantities.max()\n\n for idx, tube in enumerate( tubes ):\n if cutrange[0] != cutrange[1]:\n if quantities[idx] < cutrange[0] or quantities[idx] > cutrange[1]:\n 
continue\n for dx in range(-3,4):\n for dy in range(-3,4):\n if abs(dx)==3 and abs(dy)==3:\n continue\n \n #print( \"idx=\", idx, \" len(quantities)=\",len(quantities), \" tube=\", tube, \" len(PMTFlatMap)=\", len(PMTFlatMapPositive))\n preimage[ PMTFlatMapPositive[tube][1]+dx, PMTFlatMapPositive[tube][0]+dy ] = quantities[idx]\n\n if cutrange[0] != cutrange[1]:\n imgmin = cutrange[0]\n imgmax = cutrange[1]\n \n im = ax.imshow( preimage, extent = [-positive_x_offset,positive_x_offset,-lower_endcap_offset,lower_endcap_offset], vmin=imgmin, vmax=imgmax )\n\n fig.suptitle(title, fontsize=80)\n\n plt.rc('xtick', labelsize=24) \n plt.rc('ytick', labelsize=24) \n plt.xlabel('Distance CCW on perimeter from x-axis (cm)', fontsize=48)\n plt.ylabel('Y (cm)', fontsize=48)\n\n plt.set_cmap('gist_heat_r')\n\n # Create colourbar\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=24)\n\n # Fix title height\n plt.subplots_adjust(top=0.5)\n plt.tight_layout()", "def drawCatplot(df, xColumn):\n plt.style.use('default')\n plt.style.use('dark_background')\n types = getSpectralTypes()\n colors = getColors()\n sns.set_palette(sns.color_palette(colors))\n \n sns.catplot(x=xColumn, y=\"spectral_type\", data=df, order=types, height=3, \n aspect=4);\n plt.show()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def debugmyplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n \n for i in range(6):\n Z = np.real(data[i])\n grid[i].set_title(r\"$t={:03.1f}$\".format(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z, extent=(-2, 2, -2, 2), interpolation=\"Gaussian\",origin=\"lower\",cmap='seismic',vmin=-1,vmax=1)\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n #plt.colorbar(im, cax = grid.cbar_axes[0])\n #ticks = np.logspace(1e-6,1,7)\n #lf = LogFormatter(10, labelOnlyBase=False)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n ticks=np.logspace(1e-6,1e-1,6)\n fig.colorbar(im, ax=grid[5],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])", "def make_plot(x,y):" ]
[ "0.63316065", "0.6027693", "0.596213", "0.5961878", "0.5896854", "0.5847748", "0.5820817", "0.58156234", "0.5796932", "0.5793897", "0.5792818", "0.5770452", "0.57648313", "0.5755763", "0.5755642", "0.57511485", "0.5740199", "0.5723509", "0.57168835", "0.56989443", "0.5689734", "0.56843454", "0.56745225", "0.56665474", "0.564739", "0.56368595", "0.56271505", "0.5621731", "0.56214607", "0.56185704" ]
0.60907084
1
All the annotations are given nid
def merge_all_into_nid(self, nid, event=None):
    aid_list = self.all_aid_list
    self.ibs.set_annot_name_rowids(aid_list, [nid] * len(aid_list))
    self.update_callback()
    self.backend_callback()
    self.show_page()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_annotations(self, img_id):\n return self._img_id2annotations.get(img_id, [])", "def load_annotations(self, ann_file, N, kind):\n\n self.coco = COCOPoint(ann_file, N=N, kind=kind)\n # The order of returned `cat_ids` will not\n # change with the order of the CLASSES\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info[\"filename\"] = info[\"file_name\"]\n data_infos.append(info)\n ann_ids = self.coco.get_ann_ids(img_ids=[i])\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos", "def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )", "def _get_annotations(self, node, offset_mngr):\n for anno in self._iterfind(node, 'annotation'):\n offsets = list(self._get_offsets(anno, offset_mngr))\n yield self._entity(anno, offsets)", "def load_annotations(self, index):\n anns_file = open(os.path.join(self.folder_path, self.image_ids[index] + '.json'))\n labels = json.load(anns_file)\n labels = labels[\"shapes\"]\n anns_file.close()\n return labels.copy()", "def load_annotations(self, image_index):\n\t\t\t# Get ground truth annotations.\n\t\t\tannotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)\n\t\t\tannotations = {'labels': np.empty((0,)), 'bboxes': np.empty((0, 4))}\n\n\t\t\t# If needed get info for masks.\n\t\t\tif self.mask:\n\t\t\t\timport cv2\n\n\t\t\t\t# Get image info.\n\t\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\t\tannotations['masks'] = []\n\n\t\t\t# Some images appear to miss annotations (like image with id 257034).\n\t\t\tif len(annotations_ids) == 0:\n\t\t\t\treturn annotations\n\n\n\t\t\t# Parse annotations\n\t\t\tcoco_annotations = self.coco.loadAnns(annotations_ids)\n\t\t\tfor idx, a in enumerate(coco_annotations):\n\t\t\t\t# Some annotations have basically no width / height, skip them.\n\t\t\t\tif a['bbox'][2] < 1 or a['bbox'][3] < 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\tannotations['labels'] = np.concatenate([annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)\n\t\t\t\tannotations['bboxes'] = np.concatenate([annotations['bboxes'], [[\n\t\t\t\t\ta['bbox'][0],\n\t\t\t\t\ta['bbox'][1],\n\t\t\t\t\ta['bbox'][0] + a['bbox'][2],\n\t\t\t\t\ta['bbox'][1] + a['bbox'][3],\n\t\t\t\t]]], axis=0)\n\n\t\t\t\t# If needed get annotations for masks.\n\t\t\t\tif self.mask:\n\t\t\t\t\tif 'segmentation' not in a:\n\t\t\t\t\t\traise ValueError('Expected \\'segmentation\\' key in annotation, got: {}'.format(a))\n\n\t\t\t\t\tmask = np.zeros((image_info['height'], image_info['width'], 1), dtype=np.uint8)\n\t\t\t\t\tfor seg in a['segmentation']:\n\t\t\t\t\t\tpoints = np.array(seg).reshape((len(seg) // 2, 2)).astype(int)\n\n\t\t\t\t\t\t# Draw mask.\n\t\t\t\t\t\tcv2.fillPoly(mask, [points.astype(int)], (1,))\n\n\t\t\t\t\tannotations['masks'].append(mask.astype(float))\n\n\n\t\t\treturn annotations", "def inspect_ann(node):\n if node.annotation is not None:\n return [{\"name\": \"annotation\", \"line\": node.annotation.lineno - 1, \"end_line\": node.annotation.end_lineno - 1,\n \"col_offset\": node.annotation.col_offset, 
\"end_col_offset\": node.annotation.end_col_offset,\n \"var_line\": node.lineno - 1, \"var_end_line\": node.end_lineno - 1, \"var_col_offset\": node.col_offset,\n \"var_end_col_offset\": node.end_col_offset}]\n else:\n return []", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)", "def get_alignable_annotations(self, root):\n\n aas = root.findall(\".//ALIGNABLE_ANNOTATION\")\n return {aa.attrib[\"ANNOTATION_ID\"]: aa for aa in aas}", "def _get_annotations(self) -> List[Dict[int, Dict[str, Any]]]:\n annotations = []\n for item in self.collector:\n data_file_type = os.path.basename(item).split(\".\")[-1]\n annotations.append(\n load_annotation_file(\n os.path.join(\n self.annotation_folder,\n os.path.basename(item).replace(data_file_type, \"json\"),\n )\n )\n )\n\n return annotations", "def get_annotations(xmlsent):\n annotations = []\n annotation_elements = xmlsent.findall(\".//{%s}a\" % NS)\n for element in annotation_elements:\n annotation = {}\n annotation['type'] = element.attrib.get('type')\n annotation['flavor'] = element.attrib.get('flavor')\n annotation['who'] = element.attrib.get('who')\n annotation['text'] = element.text\n annot = {'type': element.attrib.get('type'), 'flavor': element.attrib.get('flavor'), \n 'who': element.attrib.get('who'), 'text': element.text}\n annotations.append(annot)\n return annotations", "def parse_annotation(xml_path):\n\n tree = ET.parse(xml_path)\n root = tree.getroot()\n images = root.findall('image')\n print('[/] total number of image annotations present: {}'.format(len(images)))\n\n image2annotation = {}\n for image in images:\n image_id = image.attrib['id']\n image2annotation[image_id] = []\n\n for box in image.findall('box'):\n label = box.attrib['label']\n # skip if label is not head\n if label != \"head\":\n continue\n\n annotation = {}\n minx, miny = int(float(box.attrib['xtl'])), int(float(box.attrib['ytl']))\n maxx, maxy = int(float(box.attrib['xbr'])), int(float(box.attrib['ybr']))\n\n # parse attributes for the box and create labels accordingly\n safety_helmet, mask = False, False\n for attribute in box.findall('attribute'):\n if attribute.attrib['name'] == 'has_safety_helmet' and attribute.text == 'yes':\n safety_helmet = True\n elif attribute.attrib['name'] == 'mask' and attribute.text == 'yes':\n mask = True\n\n # 3 classes: mask+safety_helmet, safety_helmet and mask\n if safety_helmet and mask:\n class_label = \"mask+safety_helmet\"\n elif safety_helmet:\n class_label = \"safety_helmet\"\n elif mask:\n class_label = \"mask\"\n\n # save bbox coordinates and class label.\n annotation['bbox'] = [minx, miny, maxx, maxy]\n annotation['class'] = class_label\n image2annotation[image_id].append(annotation)\n\n\n return image2annotation", "def _parse_anno_info(self, annotations):\n gt_bboxes, gt_bboxes_ignore = [], []\n gt_masks, gt_masks_ignore = [], []\n gt_labels = []\n for ann in annotations:\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(ann['bbox'])\n gt_masks_ignore.append(ann.get('segmentation', None))\n else:\n gt_bboxes.append(ann['bbox'])\n gt_labels.append(ann['category_id'])\n gt_masks.append(ann.get('segmentation', None))\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], 
dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks)\n\n return ann", "def extract_annotations(xml_path, tsv_path):\n xml_opener = utilities.get_opener(xml_path)\n csv_opener = utilities.get_opener(tsv_path)\n with xml_opener(xml_path, \"rb\") as xml_file, csv_opener(tsv_path, \"wt\") as tsv_file:\n fieldnames = ['pubmed_id', 'type', 'identifier', 'offset', 'end']\n writer = csv.DictWriter(tsv_file, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n tag_generator = ET.iterparse(xml_file, tag=\"document\")\n\n for event, document in tqdm.tqdm(tag_generator):\n pubmed_id = document[0].text\n\n # cycle through all the annotation tags contained within document tag\n for annotation in document.iter('annotation'):\n\n # not all annotations will contain an ID\n if len(annotation) <= 3:\n continue\n\n for infon in annotation.iter('infon'):\n if infon.attrib[\"key\"] == \"type\":\n ant_type = infon.text\n else:\n ant_id = infon.text\n\n location, = annotation.iter('location')\n offset = int(location.attrib['offset'])\n end = offset + int(location.attrib['length'])\n row = {'pubmed_id': pubmed_id, 'type': ant_type, 'identifier': ant_id, 'offset': offset, 'end': end}\n writer.writerow(row)\n\n # prevent memory overload\n document.clear()", "def draw_annotations(ax, example, annotations_num, show_label=True):\n for n in range(annotations_num):\n anno = example['annos'][n]\n for q, category in enumerate(anno['names']):\n x, y, z, w, l, h, r = example['annos'][n]['boxes'][q]\n drawBoundingBoxes(ax, x, y, z, w, l, h, r, col='red')\n\n if show_label:\n ax.text(x, y, z+h, f\"{category}\", color='r', fontsize=8.0, rotation=math.degrees(r))", "def load_annos(self):\n data = None\n with open(self.anno_path, 'r') as file:\n if self.ext == '.json':\n data = json.load(file)\n\n # Label start at index 0\n if data is not None:\n for anno in data['annotations']:\n anno['category_id'] -= 1\n\n for anno in data['categories']:\n anno['id'] -= 1\n\n return data", "def load_annotation(self,index):\n return self._load_derived_cls_annotation(index)", "def showAnns(self, anns, bbox_only=False, ax=None):\n\n if len(anns) == 0:\n return 0\n\n if ax is None:\n ax = plt.gca()\n ax.set_autoscale_on(False)\n \n if 'segmentation' in anns[0] or 'keypoints' in anns[0]:\n datasetType = 'instances'\n elif 'caption' in anns[0]:\n datasetType = 'captions'\n else:\n raise Exception('datasetType not supported')\n \n if datasetType == 'instances':\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3))*0.6 + 0.4).tolist()[0]\n\n if bbox_only:\n [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']\n poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]]\n np_poly = np.array(poly).reshape((4,2))\n polygons.append(Polygon(np_poly))\n color.append(c)\n\n cat_id = ann['category_id']\n clss_txt = str(cat_id) + ':' + self.cats[cat_id]['name']\n show_class_name_plt([bbox_x, bbox_y], clss_txt, ax, c)\n continue\n\n if 'segmentation' in ann:\n if type(ann['segmentation']) == list:\n # polygon\n for seg in ann['segmentation']:\n poly = np.array(seg).reshape((int(len(seg)/2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann['image_id']]\n if 
type(ann['segmentation']['counts']) == list:\n rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])\n else:\n rle = [ann['segmentation']]\n m = mask.decode(rle)\n img = np.ones( (m.shape[0], m.shape[1], 3) )\n if ann['iscrowd'] == 1:\n color_mask = np.array([2.0,166.0,101.0])/255 \n if ann['iscrowd'] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:,:,i] = color_mask[i]\n ax.imshow(np.dstack( (img, m*0.5) ))\n \n if 'keypoints' in ann and type(ann['keypoints']) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1\n kp = np.array(ann['keypoints'])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk]>0):\n plt.plot(x[sk],y[sk], linewidth=3, color=c)\n plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)\n plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)\n ax.add_collection(p)\n \n elif datasetType == 'captions':\n for ann in anns:\n print(ann['caption'])\n\n return ax", "def _get_annotation(self, image_id):\n annotation_file = self.image_sets_dir / f'{image_id}.xml'\n objects = ET.parse(annotation_file).findall('object')\n boxes = []\n labels = []\n is_difficult = []\n for obj in objects:\n class_name = obj.find('name').text.lower().strip()\n if class_name in self.class_dict:\n bbox = obj.find('bndbox')\n\n x0 = float(bbox.find('xmin').text) - 1\n y0 = float(bbox.find('ymin').text) - 1\n x1 = float(bbox.find('xmax').text) - 1\n y1 = float(bbox.find('ymax').text) - 1\n boxes.append([x0, y0, x1, y1])\n\n labels.append(self.class_dict[class_name])\n\n is_difficult_str = obj.find('difficult').text\n is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)\n\n return (np.array(boxes, dtype=np.float32),\n np.array(labels, dtype=np.int64),\n np.array(is_difficult, dtype=np.uint8))", "def nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"term\": {\"nipsa\": True}})\n return query", "def add_annotation(self,\n node_attrs: Dict[str, Dict[str, Any]],\n edge_attrs: Dict[str, Dict[str, Any]],\n sentence_ids: Dict[str, str]) -> None:\n for node, attrs in node_attrs.items():\n self._add_node_annotation(node, attrs)\n\n for edge, attrs in edge_attrs.items():\n self._add_edge_annotation(edge, attrs, sentence_ids)", "def getAnnIds(\r\n self,\r\n imgIds=[], vidIds=[],\r\n catIds=[], areaRng=[],\r\n iscrowd=None):\r\n imgIds = imgIds \\\r\n if isinstance(imgIds, tuple) or isinstance(imgIds, list) \\\r\n else [imgIds]\r\n vidIds = vidIds \\\r\n if isinstance(vidIds, tuple) or isinstance(vidIds, list) \\\r\n else [vidIds]\r\n catIds = catIds \\\r\n if isinstance(catIds, tuple) or isinstance(catIds, list) \\\r\n else [catIds]\r\n\r\n if len(imgIds) == len(vidIds) == len(catIds) == len(areaRng) == 0:\r\n anns = self.dataset['annotations']\r\n else:\r\n imgIds = imgIds if (len(imgIds) > 0) or (len(vidIds) == 0) else \\\r\n [i['id'] for vidId in vidIds for i in self.videoToImgs[vidId]]\r\n if not len(imgIds) == 0:\r\n lists = [\r\n self.imgToAnns[imgId] for imgId in imgIds\r\n if imgId in self.imgToAnns\r\n ]\r\n anns = list(itertools.chain.from_iterable(lists))\r\n else:\r\n anns = 
self.dataset['annotations']\r\n anns = anns if len(catIds) == 0 else [\r\n ann for ann in anns if ann['category_id'] in catIds\r\n ]\r\n anns = anns if len(areaRng) == 0 else [\r\n ann for ann in anns\r\n if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]\r\n ]\r\n\r\n if not iscrowd == None:\r\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\r\n else:\r\n ids = [ann['id'] for ann in anns]\r\n\r\n return ids", "def annotation(self, ann_type: str = None):\n if ann_type is None: ann_type = self.ann\n if ann_type != self.ann:\n warnings.warn('Please note that the annotation type is mismatch with the dataset setting!')\n\n if ann_type == 'label':\n xml_path = self.xml_path.format(id=self.id)\n ann = int(ET.parse(xml_path).find('defective').text)\n elif ann_type == 'bbox':\n xml_path = self.xml_path.format(id=self.id)\n objs = ET.parse(xml_path).findall('bbox')\n ann = []\n for ix, bbox in enumerate(objs):\n y1 = int(float(bbox.find('ymin').text))\n y2 = int(float(bbox.find('ymax').text))\n x1 = int(float(bbox.find('xmin').text))\n x2 = int(float(bbox.find('xmax').text))\n ann.append((y1, y2, x1, x2))\n elif ann_type == 'mask':\n mask_path = self.mask_path.format(id=self.id)\n if os.path.exists(mask_path):\n ann = Image.open(mask_path).convert('L')\n else:\n ann = Image.fromarray(np.zeros((512, 512), dtype=np.uint8)).convert('L')\n elif ann_type == 'none':\n ann = []\n else:\n raise NotImplementedError\n return ann", "def notes_to_annotations(self):\n\n for sub_dir, text_name, file_names in anafora.walk(self.xml_dir, self.xml_regex):\n note_path = os.path.join(self.text_dir, text_name)\n xml_path = os.path.join(self.xml_dir, sub_dir, file_names[0])\n ref_data = anafora.AnaforaData.from_file(xml_path)\n\n # collect (annot_start, annot_end, annot_id) tuples\n add_annotations(self.note2times[note_path], ref_data, 'TIMEX3')\n add_annotations(self.note2times[note_path], ref_data, 'SECTIONTIME')\n add_annotations(self.note2times[note_path], ref_data, 'DOCTIME')\n add_annotations(self.note2events[note_path], ref_data, 'EVENT')\n\n # collect (src spans, targ spans, src id, targ id) tuples\n for rel in ref_data.annotations.select_type('TLINK'):\n src = rel.properties['Source']\n targ = rel.properties['Target']\n label = rel.properties['Type']\n if label == 'CONTAINS':\n src_start, src_end = src.spans[0]\n targ_start, targ_end = targ.spans[0]\n self.note2rels[note_path].append(\n (src_start, src_end, targ_start, targ_end, src.id, targ.id))\n\n # sort relation tuples by src arguments' offsets\n # self.note2rels[note_path].sort(key=lambda t: t[0])", "def get_analysis_annotations():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/analysis/annotations')\n\n annotations = []\n context_path = 'ThreatGrid.AnalysisResults.Sample.Id.Annotations'\n ec = {context_path: []} # type: ignore\n ips = demisto.get(r.json(), 'data.items.network') # type: ignore\n if ips:\n for k in ips:\n annotation = {\n 'IP': k,\n 'IP.Asn': ips[k].get('asn'),\n 'IP.City': ips[k].get('city'),\n 'IP.Country': ips[k].get('country'),\n 'IP.Org': ips[k].get('org'),\n 'IP.Region': ips[k].get('region'),\n 'IP.Timestamp': ips[k].get('ts')\n }\n annotations.append(annotation)\n ec[context_path].append(annotation)\n\n demisto.results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': r.json(),\n 'EntryContext': ec,\n 'HumanReadable': tableToMarkdown('ThreatGrid - Analysis Annotations', annotations, [\n 'IP', 'IP.Asn', 'IP.City', 'IP.Country', 'IP.Org', 
'IP.Region', 'IP.Timestamp'\n ])\n })", "def separate_annotations():\n data_root = '/home/ubuntu/datasets/YT-VIS/'\n ann_file = data_root + 'annotations/instances_train_sub.json'\n import json\n with open(ann_file, 'r') as f:\n ann = json.load(f)\n # ann['videos'] = ann['videos'][15]\n # video_id = [0]\n from tqdm import tqdm\n for id in tqdm(range(len(ann['videos']))):\n videos = []\n anns = []\n video = ann['videos'][id]\n video['id'] = 1\n videos.append(video)\n\n i = 1\n for a in ann['annotations']:\n if a['video_id'] == id + 1:\n anno = a\n anno['id'] = i\n anno['video_id'] = 1\n anns.append(anno)\n i += 1\n # anno = ann['annotations'][id]\n # anno['id'] = 1\n # anno['video_id'] = 1\n # anns.append(anno)\n\n file_name = videos[0]['file_names'][0].split('/')[0]\n\n ann_new = dict()\n ann_new['info'] = ann['info']\n ann_new['licenses'] = ann['licenses']\n ann_new['categories'] = ann['categories']\n ann_new['videos'] = videos\n ann_new['annotations'] = anns\n\n with open(data_root + 'train/Annotations/{}/{}_annotations.json'.format(file_name, file_name), 'w') as f:\n json.dump(ann_new, f, ensure_ascii=False)", "def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(imgIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids", "def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(imgIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids", "def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(imgIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n if not iscrowd == 
None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids" ]
[ "0.5965637", "0.594054", "0.58875173", "0.5866568", "0.5783522", "0.5778308", "0.57285047", "0.5727667", "0.5715991", "0.57081497", "0.5700986", "0.5677542", "0.5651312", "0.56260675", "0.5624124", "0.5497677", "0.54948866", "0.5488697", "0.5482274", "0.54618627", "0.5447039", "0.5419237", "0.53936124", "0.5380345", "0.5368331", "0.536555", "0.5356075", "0.53487456", "0.53487456", "0.53487456" ]
0.61062497
0
All nonjunk annotations are given the SAME new name
def merge_nonjunk_into_new_name(self, event=None):
    # Delete all original names
    aid_list = self.all_aid_list
    aid_list_filtered = ut.filterfalse_items(
        aid_list, self.ibs.get_annot_isjunk(aid_list)
    )
    # Rename annotations
    self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)
    self.update_callback()
    self.backend_callback()
    self.show_page()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_04_remove_annotations(self):\n self.addAnnotation(\"annotation1\", self.host.id, \"HOST\")\n self.removeAnnotation(self.added_annotations[-1].annotation.id)\n del self.added_annotations[-1]", "def fixing_annotation(key, n):\n if key + '.' + n not in self.propbank:\n key = key.replace('-', '_')\n return key + '.' + n", "def copy_annotations(source: str, target: str) -> str:\n if isinstance(source, AnnotatedStr):\n if not isinstance(target, AnnotatedStr):\n target = AnnotatedStr(target)\n target.optional = source.optional\n target.exists = source.exists\n target.phony = source.phony\n target.precious = source.precious\n return target", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def reset_annotations(self):\n # FIXME: this state does not make sense\n self.annotation_date_set = False\n self.annotation_comment_set = False\n self.annotation_type_set = False\n self.annotation_spdx_id_set = False", "def fix_name(self):\n self._name_fixed = True", "def load_annotations(\n self,\n names: Mapping[str, Annotation],\n ) -> None:\n for name, refers_to in names.items():\n self.logger.info(f\"load_annotations {name!r} : {refers_to!r}\")\n if not self.extended_name_path.match(name):\n raise ValueError(f\"Invalid name {name}\")\n\n context = self\n\n # Expand \"name1.name2....\": refers_to into [\"name1\", \"name2\", ...]: refers_to\n *path, final = self.ident_pat.findall(name)\n for name in path:\n ref = context.setdefault(name, Referent())\n if ref.container is None:\n ref.container = NameContainer(parent=self.parent)\n context = ref.container\n context.setdefault(final, Referent(refers_to))", "def test_issue_55():\n\n # full name change including stack trace\n\n @with_signature('bar()')\n def foo():\n return 'a'\n\n assert \"bar at\" in repr(foo)\n assert foo.__name__ == 'bar'\n assert foo() == 'a'\n\n # only metadata change\n\n @with_signature(None, func_name='bar')\n def foo():\n return 'a'\n\n if sys.version_info >= (3, 0):\n assert \"foo at\" in repr(foo)\n assert foo.__name__ == 'bar'\n assert foo() == 'a'", "def write_annotation(self, ann_file, img_path, new_img_name):\n if self.type == \"imagenet\":\n label = self.in_annotations[img_path]\n logger.debug(f\"Img {img_path}, imagenet label {label}\")\n ann_file.write(str(label) + \"\\n\")\n elif self.type == \"coco\":\n ann_file.write(\"detection_results {\\n\")\n for obj in self.in_annotations[img_path].keys():\n ann_file.write(\" objects {\\n\")\n ann_file.write(f\" class_id: {self.in_annotations[img_path][obj]['label']}\\n\")\n ann_file.write(\" bounding_box {\\n\")\n ann_file.write(f\" normalized_top: {self.in_annotations[img_path][obj]['normalized_bbox'][0]}\\n\")\n ann_file.write(f\" normalized_bottom: {self.in_annotations[img_path][obj]['normalized_bbox'][1]}\\n\")\n ann_file.write(f\" normalized_left: {self.in_annotations[img_path][obj]['normalized_bbox'][2]}\\n\")\n ann_file.write(f\" normalized_right: {self.in_annotations[img_path][obj]['normalized_bbox'][3]}\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(f' image_name: \"{new_img_name}\"\\n')\n ann_file.write(f' image_id: {int(new_img_name.split(\".\")[0])}\\n')\n ann_file.write(\"}\\n\")", "def _get_annotation(cls, name):\n return cls.__annotations__.get(name)", "def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)", "def dump_annotations(self):\n fname = 'annotations'\n if self.split is not None:\n fname = 'annotations_{}'.format(self.split)\n fname = 
os.path.join(self.dest_folder, '{}.json'.format(fname))\n self.save(self.dataset, fname, \"annotations\")", "def extract_annotations(self, min_annot=3):\n for g in self.games:\n annotation_list = [move.strip(\"{}\") for move in g.moves if move.strip().startswith(\"{\")]\n if len(annotation_list) < min_annot:\n continue\n\n annotation = \" \".join(annotation_list)\n self.annotations.append(annotation)", "def annotate_one(f_json):\n logger.info(f_json + '--->')\n \n filename = os.path.basename(f_json).split('.')[0] \n \n f_out = os.path.join(cfg.OUTPUT_PATH, filename + cfg.OUTPUT_SUFFIX) \n \n if not cfg.OUTPUT_OVERWRITE_EXISTING:\n if os.path.exists(f_out):\n logger.info(f_out + ' already exists')\n return f_out\n \n f_out = annotate_with_geonames(f_json, f_out)\n logger.info(f_out)\n \n return f_out", "def separate_annotations():\n data_root = '/home/ubuntu/datasets/YT-VIS/'\n ann_file = data_root + 'annotations/instances_train_sub.json'\n import json\n with open(ann_file, 'r') as f:\n ann = json.load(f)\n # ann['videos'] = ann['videos'][15]\n # video_id = [0]\n from tqdm import tqdm\n for id in tqdm(range(len(ann['videos']))):\n videos = []\n anns = []\n video = ann['videos'][id]\n video['id'] = 1\n videos.append(video)\n\n i = 1\n for a in ann['annotations']:\n if a['video_id'] == id + 1:\n anno = a\n anno['id'] = i\n anno['video_id'] = 1\n anns.append(anno)\n i += 1\n # anno = ann['annotations'][id]\n # anno['id'] = 1\n # anno['video_id'] = 1\n # anns.append(anno)\n\n file_name = videos[0]['file_names'][0].split('/')[0]\n\n ann_new = dict()\n ann_new['info'] = ann['info']\n ann_new['licenses'] = ann['licenses']\n ann_new['categories'] = ann['categories']\n ann_new['videos'] = videos\n ann_new['annotations'] = anns\n\n with open(data_root + 'train/Annotations/{}/{}_annotations.json'.format(file_name, file_name), 'w') as f:\n json.dump(ann_new, f, ensure_ascii=False)", "def _NiceNameToPreventCompilerErrors(self, attrname):\n # only emit the rhs of a multi part name e.g. 
undo.UndoItem will appear only as UndoItem\n if attrname.find(\".\") != -1:\n attrname = attrname.split(\".\")[-1] # take the last\n # Prevent compiler errors on the java side by avoiding the generating of java keywords as attribute names\n if attrname in javakeywords:\n attrname = \"_\" + attrname\n return attrname", "def _update_annotation_with_default(anno, name, default):\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation.name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_parameter_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n return complete_annotation", "def changeAnnotations(folderPathToReannotate, annotationTochange=[u'0.3', u'1.1']):\n # transform the annotation into a list if need be\n if type(annotationTochange) is str:\n annotationTochange = [annotationTochange]\n # get the annotation data\n sentEnList, sentFrList, sentRefList, sentAnnotList = getAnnotationData(folderPathToReannotate)\n # print the annotator cheat sheet\n printCheatSheet()\n # annotate only when we find the problematic old annotation\n for indexAnnot, oldAnnot in enumerate(list(sentAnnotList)):\n if oldAnnot in annotationTochange:\n src = sentEnList[indexAnnot] if u'en-fr' in sentRefList[indexAnnot] else sentFrList[indexAnnot]\n trgt = sentFrList[indexAnnot] if u'en-fr' in sentRefList[indexAnnot] else sentEnList[indexAnnot]\n print(u'{0} - {1}'.format(indexAnnot+1, src))\n print(u'{0} - {1}'.format(indexAnnot+1, trgt))\n # get the first part of the annotation (aligned or not)\n annotatorGeneralInput = input(u'Old annotation is {0}, what is the new one: '.format(oldAnnot))\n # make sure to have the right general annotation\n while True:\n if annotatorGeneralInput in [u'0', u'1', u'0.0', u'0.1', u'0.2',\n u'1.0', u'1.1', u'1.2', u'1.3', u'1.4', u'c', u'correction']:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorGeneralInput = input(u'Repeat annotation: ')\n if annotatorGeneralInput in [u'c', u'correct']:\n annotatorGeneralInput, sentAnnotList = correctionToAnnotation(sentAnnotList)\n # if we still need to specify what type of alignment or misalignment\n if annotatorGeneralInput in [u'0', u'1']:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n # get the second part of the annotation (aligned or not)\n annotatorSpecificInput = input(u'Specific type annotation: ')\n typeAnswers = [u'0', u'1', u'2'] if annotatorGeneralInput == 0 else [u'0', u'1', u'2', u'3', u'4']\n # make sure to have the right specific annotation\n while True:\n if annotatorSpecificInput in typeAnswers:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorSpecificInput = input(u'Repeat type annotation: ')\n # save to the list of annotations\n sentAnnotList[indexAnnot] = u'{0}.{1}'.format(annotatorGeneralInput, annotatorSpecificInput)\n # if the right answer was given in the right format right away\n else:\n # save to the list of annotations\n sentAnnotList[indexAnnot] = str(annotatorGeneralInput)\n # remove the lines from the terminal before getting to the next pair\n utilsOs.moveUpAndLeftNLines(3, slowly=False)\n # erase 
all remainder of the previous sentences and go back up again\n for e in range(2):\n print(u' ' * (max([len(src), len(trgt)]) + 6))\n utilsOs.moveUpAndLeftNLines(2, slowly=False)\n # remove format problematic annotations\n sentAnnotList = [annot if annot != u'1.1.0' else u'1.1' for annot in sentAnnotList]\n sentAnnotList = [annot if annot != u'0.1.0' else u'0.1' for annot in sentAnnotList ]\n # dump new annotation\n sentAnnotPath = u'{0}sampleAnnotation.tsv'.format(folderPathToReannotate)\n utilsOs.dumpRawLines(sentAnnotList, sentAnnotPath, addNewline=True, rewrite=True)", "def annotations(self, annotations):\n self._annotations = annotations", "def test_inspect_annotations_remove_all(tmp_path):\n matplotlib = pytest.importorskip(\"matplotlib\")\n import matplotlib.pyplot as plt\n\n matplotlib.use(\"Agg\")\n plt.close(\"all\")\n\n bids_root = setup_bids_test_dir(tmp_path)\n bids_path = _bids_path.copy().update(root=bids_root)\n events_tsv_fpath = bids_path.copy().update(suffix=\"events\", extension=\".tsv\").fpath\n\n # Remove all Annotations.\n raw = read_raw_bids(bids_path=bids_path, verbose=\"error\")\n raw.set_annotations(None)\n raw.load_data()\n raw.save(raw.filenames[0], overwrite=True)\n # Delete events.tsv sidecar.\n (bids_path.copy().update(suffix=\"events\", extension=\".tsv\").fpath.unlink())\n\n # Add custom Annotation.\n inspect_dataset(bids_path, find_flat=False)\n raw_fig = mne_bids.inspect._global_vars[\"raw_fig\"]\n _add_annotation(raw_fig)\n\n # Close window and save changes.\n key_event = KeyEvent(name=\"Close\", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n fig_dialog = mne_bids.inspect._global_vars[\"dialog_fig\"]\n key_event = KeyEvent(name=\"Save\", canvas=fig_dialog.canvas, key=\"return\")\n fig_dialog.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # events.tsv sidecar should have been created.\n assert events_tsv_fpath.exists()\n\n # Remove the Annotation.\n inspect_dataset(bids_path, find_flat=False)\n raw_fig = mne_bids.inspect._global_vars[\"raw_fig\"]\n data_ax = raw_fig.mne.ax_main\n\n key_event = KeyEvent(name=\"Annotations\", canvas=raw_fig.canvas, key=\"a\")\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n _fake_click(raw_fig, data_ax, [1.0, 1.0], xform=\"data\", button=3, kind=\"press\")\n\n # Close window and save changes.\n key_event = KeyEvent(name=\"Close\", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n fig_dialog = mne_bids.inspect._global_vars[\"dialog_fig\"]\n key_event = KeyEvent(name=\"Save\", canvas=fig_dialog.canvas, key=\"return\")\n fig_dialog.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # events.tsv sidecar should not exist anymore.\n assert not events_tsv_fpath.exists()", "def test_sets_name(self):\n scope = Scope()\n self.assertEqual(scope.__name__, None)\n\n @Scope\n def scope2(cls): pass\n\n self.assertEqual(scope2.__name__, 'scope2')", "def _move_quant_attributes_into_annotations(model):\n if onnx is None:\n raise ModuleNotFoundError(\"Installation of ONNX is required.\")\n\n model = copy.deepcopy(model)\n qaname = \"finn_datatype\"\n for n in model.graph.node:\n for a in n.attribute:\n mark_for_removal = False\n if a.name == \"weight_qnt\":\n # assume second input is weight, make sure it has an initializer\n w_tensor_name = n.input[1]\n assert w_tensor_name in [x.name for x in model.graph.initializer]\n tq = onnx.StringStringEntryProto(key 
= qaname, value = a.s)\n ta = onnx.TensorAnnotation(\n tensor_name = w_tensor_name,\n quant_parameter_tensor_names = [tq]\n )\n model.graph.quantization_annotation.append(ta)\n mark_for_removal = True\n elif a.name == \"activation_qnt\":\n a_tensor_name = n.output[0]\n tq = onnx.StringStringEntryProto(key = qaname, value = a.s)\n ta = onnx.TensorAnnotation(\n tensor_name = a_tensor_name,\n quant_parameter_tensor_names = [tq]\n )\n model.graph.quantization_annotation.append(ta)\n mark_for_removal = True\n if mark_for_removal:\n n.attribute.remove(a)\n return model", "def wantsNametag(self):\n return 0", "def write_class_name(annotation_file_path, class_name):\n root = etree.parse(annotation_file_path)\n objects = root.findall('object')\n \n for item in objects:\n name = item.find('name')\n name.text = class_name\n\n root.write(annotation_file_path, pretty_print=True)", "def reset_name_labels(infr):\n infr.print('reset_name_labels', 1)\n orig_names = infr.get_node_attrs('orig_name_label')\n infr.set_node_attrs('name_label', orig_names)", "def _validate_annotations(self):\n for i, (k, v) in enumerate(self._annotations_dict.items()):\n for index, annotation in enumerate(v):\n startOffset = int(annotation['startOffset'])\n endOffset = int(annotation['endOffset'])\n tweet = self._tweets_dict[k]\n annotatedText = annotation['annotatedText']\n\n realOffset = tweet.find(annotatedText)\n if realOffset != startOffset:\n #print(\"Fixing startOffset for {}. (annotated at position {}, but should be at {})\".format(k, startOffset, realOffset))\n\n diff = realOffset - startOffset\n annotation['startOffset'] = \"{}\".format(startOffset+diff)\n annotation['endOffset'] = \"{}\".format(endOffset+diff)", "def test_01_add_annotation(self):\n self.addAnnotation(\"annotation1\", self.host.id, \"HOST\")\n self.assertEqual(self.added_annotations[-1].annotation.annotation, \"annotation1\")", "def removeAnnotation(self,i=0):\n #print \"REMOVE %s\" % i\n map(undraw,self._annotations[i])\n del self._annotations[i]", "def copy_annotations(from_data, to_data, annot_type):\n\n for annot in from_data.annotations.select_type(annot_type):\n entity = anafora.AnaforaEntity()\n entity.id = annot.id\n entity.spans = annot.spans\n entity.type = annot.type\n to_data.annotations.append(entity)", "def markAsNeedsAnnotationsDictionary(self):\n self.needs_annotations_dict = True" ]
[ "0.62354994", "0.619226", "0.6170612", "0.6045546", "0.59696645", "0.5833827", "0.5801684", "0.5790386", "0.5656737", "0.5653905", "0.56522465", "0.56447643", "0.5641596", "0.5641287", "0.5592741", "0.5537441", "0.5534922", "0.5529436", "0.55039114", "0.54505855", "0.5441954", "0.54368514", "0.5423936", "0.54111886", "0.536936", "0.53683907", "0.53610975", "0.534927", "0.5347515", "0.534653" ]
0.6833057
0
Returns the ip address of the host default exterior access port/adapter.
def get_default_ip_address(): gws = netifaces.gateways() # get all gateways default = gws['default'] # get the default gw adapter = default[2][1] # get the adapter identifier realadapter = netifaces.ifaddresses(adapter) # get the adapter addr_dict = realadapter[2][0] # get the first ipv4 address tuple return addr_dict['addr']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_default_ip():\r\n if CONFIG.BIND_INTERFACE is None:\r\n default_gw = netifaces.gateways()['default']\r\n if netifaces.AF_INET in default_gw:\r\n preferred_interface = default_gw[netifaces.AF_INET][1]\r\n else:\r\n interfaces = netifaces.interfaces()\r\n preferred_interface = next((i for i in interfaces if i != 'lo'), interfaces[0])\r\n else:\r\n preferred_interface = CONFIG.BIND_INTERFACE\r\n return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']", "def get_host_ip_addr():\n return nova_conf.my_ip", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]", "def get_local_host_ip(self) -> str:", "def get_localhost_ip():\n try:\n return [\n (s.connect((NAME_SERVER, 80)), s.getsockname()[0], s.close())\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]\n ][0][1]\n except Exception:\n return '127.0.0.1'", "def get_internal_host(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return ip\n return fqdn", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def get_default_server_ip(cls):\n \n _position = cls.basic_parameters[1]\n \n return _position['server_ip']", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def get_ext_ip_addr(self, node_name):\n node = self._cloud.get_server(node_name)\n if node is None:\n raise CloudError('Cannot retrieve node/IP information. Is `node_name` set correctly?')\n return node.accessIPv4", "def get_ip_address(self):\n return self.adb.get_ip_address()", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def localhost_IP(self):\r\n return self._localhost_ip", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)", "def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"", "def getPublicIP():\n try:\n # Try to get the internet-facing IP by attempting a connection\n # to a non-existent server and reading what IP was used.\n with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:\n # 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so\n # there is guaranteed to be no one listening on the other\n # end (and we won't accidentally DOS anyone).\n sock.connect(('203.0.113.1', 1))\n ip = sock.getsockname()[0]\n return ip\n except:\n # Something went terribly wrong. 
Just give loopback rather\n # than killing everything, because this is often called just\n # to provide a default argument\n return '127.0.0.1'", "def address(self):\n \n return self.__ip", "def get_ip_address(self):\n raise NotImplementedError", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def external_IP(self):\r\n return self._external_ip", "def host(self):\n return '127.0.0.1'", "def getMyIP():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets\r\n return s.getsockname()[0]", "def ip_addr(self):\n return self.ip_addresses[0]" ]
[ "0.7631836", "0.7160805", "0.71426094", "0.7098438", "0.70112723", "0.6928257", "0.6869446", "0.68464303", "0.68386245", "0.6819427", "0.67678815", "0.67554003", "0.6754864", "0.6745649", "0.6723856", "0.6662521", "0.6648195", "0.6618535", "0.66100913", "0.65531534", "0.6540958", "0.65254396", "0.65250653", "0.6499877", "0.6487465", "0.6477267", "0.6477145", "0.64579415", "0.6442944", "0.64227873" ]
0.7446385
1
Accesses the training checkpoint.
def checkpoint(self):
    return self._checkpoint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint():", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. Start loading pre-trained model......')", "def checkpoint(self):\n return self.__checkpoint", "def checkpoint(self):\n save()", "def checkpoint(self, state: TrainState): # pragma: no cover\n if self.checkpointing:\n if not have_tf: # Flax checkpointing requires tensorflow\n raise RuntimeError(\n \"Tensorflow not available and it is\" \" required for Flax checkpointing.\"\n )\n checkpoint_save(state, self.workdir)", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def checkpoint(self):\n self.logger.info('Checkpointing Sampler')\n with open(self.resume_file, \"wb\") as f:\n pickle.dump(self, f)", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )", "def _restore(self):\n\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1]) # Robust enough?\n return restored_step\n logging.info('Starting training from scratch.')\n return 0", "async def checkpoint(cls) -> None:", "def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)", "def _get_checkpoint(self):\n ckpt = tf.train.get_checkpoint_state(self.model)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = ckpt.model_checkpoint_path\n else:\n raise RuntimeError('No checkpoint file found')\n return ckpt_path", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def _resume_from_checkpoint(model: tf.keras.Model,\n model_dir: str,\n train_steps: int) -> int:\n logging.info('Load from checkpoint is enabled.')\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n logging.info('latest_checkpoint: %s', latest_checkpoint)\n if not latest_checkpoint:\n logging.info('No checkpoint detected.')\n return 0\n\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint)\n model.load_weights(latest_checkpoint)\n initial_epoch = model.optimizer.iterations // train_steps\n logging.info('Completed loading from checkpoint.')\n logging.info('Resuming from epoch %d', initial_epoch)\n return int(initial_epoch)", "def checkpoint(self, timestamp=0.0, **keywords):\n self.services.debug('checkpoint() method called')\n pass", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in 
extra_state:\n save_checkpoint.best = extra_state['best']", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def get_checkpoint_data(self) -> Dict[str, Any]:\n # get ckpt file path from config.trainer.params.resume_from_checkpoint\n path = self.config.trainer.params.get(\"resume_from_checkpoint\", None)\n if path is not None:\n is_zoo = self.is_zoo_path(path)\n ckpt_filepath = path\n if is_zoo:\n folder = download_pretrained_model(path)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = None\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }\n\n is_zoo = False\n config = None\n ckpt = None\n # get ckpt file path from config.checkpoint\n ckpt_config = self.config.checkpoint\n suffix = \"best.ckpt\" if ckpt_config.resume_best else \"current.ckpt\"\n path = os.path.join(get_mmf_env(key=\"save_dir\"), suffix)\n ckpt_filepath = None\n resume_from_specified_path = (\n ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None\n ) and (not ckpt_config.resume or not PathManager.exists(path))\n if resume_from_specified_path:\n if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):\n ckpt_filepath = ckpt_config.resume_file\n elif ckpt_config.resume_zoo is not None:\n is_zoo = True\n folder = download_pretrained_model(ckpt_config.resume_zoo)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n raise RuntimeError(f\"{ckpt_config.resume_file} doesn't exist\")\n\n if ckpt_config.resume and PathManager.exists(path):\n ckpt_filepath = path\n\n if ckpt_filepath is not None:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def _resume(self):\n\n if self.resume_file is None:\n return None\n assert os.path.isfile(self.resume_file)\n logger.info(f'=> 
loading checkpoint {self.resume_file}')\n checkpoint = torch.load(self.resume_file)\n self.cur_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n self.best_rho = checkpoint['best_rho']\n self.best_nmse = checkpoint['best_nmse']\n self.cur_epoch += 1 # start from the next epoch\n\n logger.info(f'=> successfully loaded checkpoint {self.resume_file} '\n f'from epoch {checkpoint[\"epoch\"]}.\\n')", "async def checkpoint() -> None:\n await get_async_backend().checkpoint()", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.train_logger = checkpoint['logger']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))", "def parse_checkpoint(self):\n pass", "def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n if self.with_cuda:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(torch.device('cuda'))\n self.train_logger = checkpoint['logger']\n #self.config = checkpoint['config']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))" ]
[ "0.7333538", "0.728644", "0.71189874", "0.70072585", "0.6946537", "0.6767927", "0.67457914", "0.66684693", "0.6628645", "0.66275215", "0.6597199", "0.6591708", "0.6549104", "0.65430933", "0.6524163", "0.6517946", "0.64900166", "0.64351654", "0.6414265", "0.6412828", "0.63930583", "0.6381749", "0.63384396", "0.6337512", "0.6303258", "0.630132", "0.62980217", "0.6286469", "0.62825125", "0.6261222" ]
0.74751663
0
Access training loss metric objects for all tasks.
def training_losses(self):
    if self._training_losses is None:
        # Builds the per-task metrics and losses.
        # This is the total summed training loss of tasks in the joint training.
        self._training_losses = dict(
            total_loss=tf.keras.metrics.Mean("training_loss", dtype=tf.float32))
        for name in self.multi_task.tasks:
            self._training_losses[name] = tf.keras.metrics.Mean(
                "training_loss", dtype=tf.float32)
    return self._training_losses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def training_metrics(self):\r\n if self._training_metrics is None:\r\n # Builds the per-task metrics and losses.\r\n self._training_metrics = {}\r\n for name, task in self.multi_task.tasks.items():\r\n self._training_metrics[name] = task.build_metrics(training=True)\r\n return self._training_metrics", "def get_loss_funcs():\n\n def _eucl_loss(x, y):\n return K.sum(K.square(x - y)) / batch_size / 2\n\n losses = {}\n losses[\"weight_stage1_L1\"] = _eucl_loss\n losses[\"weight_stage1_L2\"] = _eucl_loss\n losses[\"weight_stage2_L1\"] = _eucl_loss\n losses[\"weight_stage2_L2\"] = _eucl_loss\n losses[\"weight_stage3_L1\"] = _eucl_loss\n losses[\"weight_stage3_L2\"] = _eucl_loss\n losses[\"weight_stage4_L1\"] = _eucl_loss\n losses[\"weight_stage4_L2\"] = _eucl_loss\n losses[\"weight_stage5_L1\"] = _eucl_loss\n losses[\"weight_stage5_L2\"] = _eucl_loss\n losses[\"weight_stage6_L1\"] = _eucl_loss\n losses[\"weight_stage6_L2\"] = _eucl_loss\n\n return losses", "def get_current_validation_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name+'_val'] = float(getattr(self, 'loss_' + name + '_val')) # float(...) works for both scalar tensor and float number\n return errors_ret", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def _training_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_train\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_train[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(targets, predictions)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. 
- accuracy\n return errors", "def test_compute_metrics(self):\n with self.test_session() as sess:\n tf.set_random_seed(1234)\n dut = _setup_trainer(self.tmpdir)\n\n sess.run(tf.global_variables_initializer())\n sess.run((dut.train_iterator.initializer,\n dut.train_metric_reset_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # Without update, it should be zero.\n self.assertEqual(train_mloss, 0.)\n\n sess.run((dut.train_op, dut.train_mean_loss_update_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # After update.\n self.assertAlmostEqual(train_mloss, 5.2298584)", "def train_loop_end(self):\r\n result = {}\r\n for task_name, loss in self.training_losses.items():\r\n result[task_name] = {loss.name: loss.result()}\r\n for task_name, task_metrics in self.training_metrics.items():\r\n result[task_name].update(\r\n {metric.name: metric.result() for metric in task_metrics})\r\n # Note that, the learning rate schedule is managed by the keras optimizer\r\n # internally, which respects the number of backward pass as `iterations`.\r\n # The learning rate schedule does not follow the trainer logical global\r\n # step of multiple tasks.\r\n if callable(self.optimizer.learning_rate):\r\n result[\"learning_rate\"] = self.optimizer.learning_rate(\r\n self.optimizer.iterations)\r\n else:\r\n result[\"learning_rate\"] = self.optimizer.learning_rate\r\n return result", "def loss_names(self):\n return ['loss']", "def compute_loss(self):", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) 
works for both scalar tensor and float number\n return errors_ret", "def _total_loss(self,\n collections=None,\n name=None):\n if collections is None:\n collections = [GKeys.LOSSES]\n loss_vars = []\n for key in collections:\n loss_vars.extend(tf.get_collection(GKeys.LOSSES))\n total_loss = tf.add_n(loss_vars, name=name)\n return total_loss", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def _validation_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_validate\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_validate[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(predictions, targets)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. 
- accuracy\n return errors", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def get_loss_names(self):\n losses = [tns.name[:-2].replace('loss_', '').split('/')[-1] for tns in tf.get_collection('losses')]\n return \"Losses: {}\".format(' '.join(losses))", "def _monitor_metrics(self):\n metrics = [\"loss\"]\n try:\n m = U.metrics_from_model(self.model)\n if isinstance(m, list):\n metrics.extend(m)\n except:\n pass\n if self.val_data is not None:\n for m in metrics[:]:\n metrics.append(\"val_%s\" % (m))\n return metrics", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = self.calculate_average_loss(self.validation_dataloader)", "def _get_loss(self):\n raise NotImplementedError", "def reduce_loss(self, all_loss):\n if self._gpu_num == 1:\n total_loss = all_loss[0]\n else:\n layer_loss = [all_loss[j] for j in range(self._gpu_num)]\n total_loss = tf.reduce_mean(layer_loss)\n\n return total_loss", "def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. regression loss for bbox \n\n return classification_loss, bbox_reg_loss", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def loss_opt_metric(self):\n self.loss = tf.keras.losses.SparseCategoricalCrossentropy()\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.init_lr)\n self.metric_loss = tf.keras.metrics.Mean()\n self.accuracy = tf.keras.metrics.SparseCategoricalAccuracy()", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def get_loss(self, inputs, outputs, add_summary=True):\n cfg = self.cfg()\n torch.autograd.set_detect_anomaly(True)\n # g_loss = tf.zeros(dtype=tf.float32, shape=[])\n g_loss = self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n r_loss = self.regularization_loss(cfg)\n# print(g_loss, r_loss)\n g_loss += r_loss\n # if cfg.proj_weight:\n # g_loss += self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n\n # if cfg.drc_weight:\n # g_loss += add_drc_loss(cfg, inputs, outputs, cfg.drc_weight, add_summary)\n #\n # if cfg.pc_rgb:\n # g_loss += add_proj_rgb_loss(cfg, inputs, outputs, cfg.proj_rgb_weight, add_summary, self._sigma_rel)\n #\n # if cfg.proj_depth_weight:\n # g_loss += add_proj_depth_loss(cfg, inputs, outputs, cfg.proj_depth_weight, self._sigma_rel, add_summary)\n #\n # if add_summary:\n # tf.contrib.summary.scalar(\"losses/total_task_loss\", g_loss)\n\n return g_loss", "def get_losses(self):\n if self.loss is not None:\n return [self.loss]\n else:\n return []", "def loss_(self, batch):\n raise NotImplementedError" ]
[ "0.67372334", "0.6436993", "0.6299553", "0.62310845", "0.6202372", "0.6157093", "0.61555773", "0.6127681", "0.6114597", "0.6095806", "0.6082403", "0.6082403", "0.6079341", "0.6064714", "0.6049644", "0.6047267", "0.6020964", "0.60065204", "0.5966593", "0.59373814", "0.5933041", "0.58981353", "0.58939964", "0.58860147", "0.58860147", "0.5859542", "0.585933", "0.5857735", "0.5849082", "0.5828064" ]
0.7341264
0
Access training metric objects for all tasks.
def training_metrics(self):
    if self._training_metrics is None:
        # Builds the per-task metrics and losses.
        self._training_metrics = {}
        for name, task in self.multi_task.tasks.items():
            self._training_metrics[name] = task.build_metrics(training=True)
    return self._training_metrics
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_multitask_metrics(metric_tasks = ()):\n\n @flax.struct.dataclass\n class MultiTaskMetric(metrics.Metric):\n \"\"\"MultiTaskMetric.\n\n This metric aggregates sub-metrics in the metric_dict and return the metrics\n of all of them by calling them separately.\n\n Attributes:\n tasks: A sequence of tasks to compute metrics over.\n \"\"\"\n tasks: Tasks = metric_tasks\n\n @classmethod\n @gin_utils.allow_remapping(name='get_multitask_metrics')\n def from_model_output(cls, outputs,\n labels):\n \"\"\"Accumulates model outputs for evaluation.\n\n Args:\n outputs: A dictionary with the following structure:\n key name: Task name.\n value content: A dictionary to corresponding task specific outputs.\n labels: A dictionary with the following structure:\n key name: Task name.\n value content: A dictionary corresponding task specific labels.\n\n Returns:\n A metric object initialized from the outputs and labels.\n\n Raises:\n KeyError: Missing task-specific outputs or labels.\n \"\"\"\n new_tasks = []\n for task in cls.tasks:\n task_outputs, task_labels = (\n task.filter_by_task(outputs), task.filter_by_task(labels))\n if not task_outputs:\n raise KeyError(f'No task outputs for task: {task.name}!')\n if task_labels is None:\n raise KeyError(f'No task labels for task: {task.name}!')\n\n metric = task.metric.from_model_output(task_outputs, task_labels)\n new_tasks.append(type(task)(metric=metric))\n\n return cls(tasks=new_tasks)\n\n def merge(self, other):\n new_tasks = []\n assert len(self.tasks) == len(other.tasks)\n for task, other_task in zip(self.tasks, other.tasks):\n metric = task.metric.merge(other_task.metric)\n new_tasks.append(type(task)(metric=metric))\n\n return type(self)(tasks=new_tasks)\n\n def reduce(self):\n new_tasks = []\n for task in self.tasks:\n metric = task.metric.reduce()\n new_tasks.append(type(task)(metric=metric))\n\n return type(self)(tasks=new_tasks)\n\n def compute(self):\n output_metric = {}\n for task in self.tasks:\n task_metric = task.metric.compute()\n output_metric.update(task.prepend_by_task(task_metric))\n\n return output_metric\n\n return MultiTaskMetric", "def compute_metrics(self):\n pass", "def write_training_metrics(self) -> None:\n self.trainer_metrics.write_training_metrics()", "def list_metrics(self):\n pass", "def get_metrics(self, add_metrics={}):\n tot_px_cnt = self.res * int(self.tensors['samples_evaluated'][0])\n\n if self.debug:\n sum_per_class = self.tensors['TP'] + self.tensors['TN'] + self.tensors['FP'] + self.tensors['FN']\n unique = sum_per_class.unique()\n assert len(unique) == 1, 'Expect to observe the exact same number for all classes.'\n assert unique[0] == self.tensors['PX_CNT'].sum() == tot_px_cnt, 'Expect exactly one type of prediction per pixel.'\n\n mask_non_observed = (self.tensors['PX_CNT']).bool()\n mask_bg = self.tensors['M']\n mask_combined = (self.tensors['M'] * mask_non_observed).bool() # in PyTorch 1.4 no logical AND\n\n if self.debug:\n assert mask_combined.sum() <= mask_bg.sum()\n assert mask_combined.sum() <= mask_non_observed.sum()\n \n accuracies = (self.tensors['TP'] + self.tensors['TN']) / tot_px_cnt\n acc = torch.mean(accuracies[mask_combined])\n acc_bg_included = torch.mean(accuracies[mask_non_observed])\n\n IoUs = self.tensors['TP'] / (tot_px_cnt - self.tensors['TN']) # per class: I/U, U = sum(TP,FP,FN) = all - TN\n mIoU = torch.mean(IoUs[mask_combined])\n mIoU_bg_included = torch.mean(IoUs[mask_non_observed])\n\n if self.debug:\n if torch.cuda.is_available():\n for i in [accuracies, acc, acc_bg_included, 
IoUs, mIoU, mIoU_bg_included]:\n assert i.is_cuda\n\n results = OrderedDict()\n\n for i in ['acc','mIoU']:\n for j in ['','_bg_included']:\n results[ i + j + '_' + self.fold ] = float(eval(i+j+'.cpu()'))\n\n for i in range(self.tensors['TP'].shape[0]):\n results['IoU_class_' + str(i) + '_' + self.fold] = float(IoUs[i].cpu())\n results['acc_class_' + str(i) + '_' + self.fold] = float(accuracies[i].cpu())\n\n if self.debug:\n for k in results:\n if isinstance(results[k], float) and not math.isnan(results[k]):\n # don't apply check to nans and str; we don't use exactly 1 due to smaller rounding error\n assert results[k] <= 1.0001, f'Failure for {k,results[k],type(results[k])}: any metric derived from the confusion matrix should be <= 1.'\n\n #for t in self.tensors:\n # results[t + '_' + self.fold] = self.tensors[t].cpu()\n\n if add_metrics:\n for k in add_metrics:\n results[k + '_' + self.fold] = float(add_metrics[k])\n\n return results", "def init_metrics():\n metrics = defaultdict(list)\n metrics['best_acc'] = 0.0\n metrics['best_loss'] = float('inf')\n metrics['best_epoch'] = 0\n return metrics", "def calculate_batch_metrics(self):\n pass", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def init_metrics(self):\n\n self.metrics = {}\n\n self.metrics['train_loss'] = np.zeros(0)\n self.metrics['test_loss'] = np.zeros(0)\n\n # self.orth_clf = LinearDecoder(self, self.q_, MeanClassifier)\n # self.metrics['train_orthogonality'] = np.zeros(0)\n # self.metrics['test_orthogonality'] = np.zeros(0)\n\n self.metrics['train_parallelism'] = np.zeros((0,self.q_)) \n self.metrics['test_parallelism'] = np.zeros((0,self.q_))", "def multi_task_metric_func(labels, logits):\n if labels.keys() != logits.keys():\n raise ValueError('Task names are different for labels and logits.')\n metric_ops = {}\n for task_name, label in labels.items():\n accuracy_metric_name = '{}/Eval/Accuracy/{}'.format(\n task_name, dataset_split_name)\n metric_ops[accuracy_metric_name] = tf.metrics.accuracy(\n label,\n tf.argmax(logits[task_name], 1),\n weights=build_weight_for_label(label))\n\n if add_summary:\n for name, value in metric_ops.items():\n tf.summary.scalar(name, value)\n return metric_ops", "def configure_metrics(self):\n allowed = list(METRIC_LOOKUP.keys()) + [None]\n metrics = nn.ModuleDict()\n for k, m in self.branch_metrics.items():\n for metric_name in m:\n if metric_name not in allowed:\n raise ValueError(\n f\"Illegal metric given. Got: {metric_name}. 
Allowed: {allowed}.\"\n )\n\n if metric_name is not None:\n metric = METRIC_LOOKUP[metric_name]()\n else:\n metric = None\n\n metrics[f\"{k}_{metric_name}\"] = metric\n\n return metrics", "def metrics(self):\n return {**self.prepend_name_dict(self._prefixes[0], self._train_metrics),\n **self.prepend_name_dict(self._prefixes[1], self.validator.metrics)}", "def metrics(self):\n return self.__metrics", "def train_scores(self) -> np.ndarray:\n return np.asarray(self.train_metric_dict[self.metric_name])", "def metrics(self):\n metrics_registry = getattr(self._thread_local, \"klio_metrics\", None)\n if not metrics_registry:\n self._thread_local.klio_metrics = self._get_metrics_registry()\n return self._thread_local.klio_metrics", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def set_metrics(self):", "def training_losses(self):\r\n if self._training_losses is None:\r\n # Builds the per-task metrics and losses.\r\n # This the total summed training loss of tasks in the joint training.\r\n self._training_losses = dict(\r\n total_loss=tf.keras.metrics.Mean(\"training_loss\", dtype=tf.float32))\r\n for name in self.multi_task.tasks:\r\n self._training_losses[name] = tf.keras.metrics.Mean(\r\n \"training_loss\", dtype=tf.float32)\r\n return self._training_losses", "def test_get_all_derived_metrics(self):\n pass", "def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)", "def get_all_metrics(self):\n metrics = {}\n for item in self.list_metrics():\n metric_name = item[2]\n metric = self.get_metric(\n item,\n existing_dict=metrics.get(metric_name, None))\n metrics[metric_name] = metric\n return metrics", "def get_metrics(self):\n return None", "def metrics(self):\n return self._metrics", "def 
metrics(self):\n return self._metrics", "def calculate_dataset_metrics(self):\n pass", "def get_metrics(self) -> dict:\n return self.metric_dict", "def metrics_group():", "def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()", "def test_create_metrics_dict(self):\n # binary tasks have 1 class at class definition.\n num_classes = 1\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' not in metrics_dict.keys()\n\n num_classes = 3\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' in metrics_dict.keys()\n assert 'iou_3' not in metrics_dict.keys()\n del metrics_dict" ]
[ "0.667385", "0.6412713", "0.6114214", "0.61097354", "0.61061174", "0.60684097", "0.602961", "0.5939189", "0.5872546", "0.58626574", "0.5764105", "0.57264847", "0.5720861", "0.57078487", "0.57022095", "0.5697843", "0.56895506", "0.56850576", "0.56800485", "0.5675132", "0.5666509", "0.56649053", "0.56496096", "0.56290954", "0.56290954", "0.56215036", "0.5612163", "0.5610585", "0.560317", "0.5599793" ]
0.82266814
0
Unpickle and execute task.
def work(pickled_task):
    task = pickle.loads(pickled_task)
    return task.execute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, task):\n\n self._setup()\n\n runnable = load_from_module(task.task)\n runnable(*task.get_args(), **task.get_kwargs())", "def run_task(self) -> Task:", "def task_wrapper(serialized_task):\n task = pickle.loads(serialized_task)\n logging.info('Running %s', str(task))\n return cloudpickle.dumps(task())", "def execute_task(self):\n raise NotImplementedError(\"Execute Task method not implemented\")", "def run_operation(task):\n return task.run()", "def execute_task(self, task):\n t = threading.Thread(target=task)\n t.start()", "def run(self):\n task_func = getattr(self, self.task_data.get('task_type'))\n task_obj = task_func()\n return task_obj", "def exec(cls, *args, **kwargs):\n task = cls(*args, **kwargs)\n task.run()\n return task", "def task():", "def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()", "def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()", "def execute(self, task, script, **kwargs):\n locals().update(kwargs)\n exec(script)", "def run_task(self, task_id):\n raise NotImplementedError", "def do_unpickle(self, arg):\n try:\n from pickling import Pickling\n Pickling('output.pickle', arg).unpickle_it()\n print('The pickled file has been un-pickled')\n except FileNotFoundError as e:\n print(e)\n except():\n print(\"Error!!\")", "def call(self, task, **options):\n pass", "def _load(self, data):\n raise NotImplementedError(\"Don't know how to load the task\")", "def task():\n pass", "def task():\n pass", "def run(self, *args, **kwargs):\n if self.task_loader is None:\n if 'task' not in kwargs:\n if len(args) == 0 or not isinstance(args[0], self.flow_class.task_class):\n raise FlowRuntimeError('Function {} should be called with task instance', self.name)\n return self.func(*args, **kwargs)\n else:\n task = self.task_loader(self, *args, **kwargs)\n return self.func(task, *args, **kwargs)", "def _run(data):\n try:\n func, args, kwds = cPickle.loads(data)\n except Exception, e:\n raise deferred.PermanentTaskFailure(e)\n \n try:\n func(*args, **kwds)\n except TypeError:\n logging.debug(\"Deferred function arguments: %s %s\", args, kwds)\n raise", "def run(self):\n if self.type_task == \"Api-request\":\n self.config = ConfigApiRequestTask(**self.dynamic_configs)\n self.task = ApiRequestTask(\n priority=0, # fixed priority\n config=self.config\n )\n elif self.type_task == 'Db':\n self.config = ConfigDbTask(self.dynamic_configs)\n self.task = DbTask(\n priority=0,\n config=self.config\n )\n elif self.type_task == 'File':\n self.config = ConfigFileTask(self.dynamic_configs)\n self.task = FileTask(\n priority=0,\n config=self.config\n )\n \n try:\n self.result = self.task.execute()\n except Exception as e:\n self.errors = str(e)\n self.logger.error(f'Error executing task: {self.errors}')\n return False\n \n res = self.save_into_db()\n return res", "async def execute(self):", "def do_t(self, arg):\n self.do_task(arg)", "def run(self):\n# log.trace(\" run task %s \", self.name)\n return self.target.send(self.params)", "def run(self):\n try:\n self._run()\n except Exception as err:\n # TODO: Do Task Failure to run exception handling\n pass", "def calltask(self, name, **vars):\n if name in self._tasks:\n for entry in self._tasks[name]:\n entry.execute(vars)\n else:\n raise Error(\"No such task: {0}\".format(name))", "async def _execute(self):", "def run(self, *args, **kwargs):\n raise NotImplementedError('Tasks must define the run method.')", "def run(self, 
tmp=None, task_vars=None):\n if not task_vars:\n task_vars = {}\n\n result = ActionBase.run(self, tmp=tmp, task_vars=task_vars)\n result.update(\n self._execute_module(\n module_name=self.module_name,\n module_args=self._task.args,\n task_vars=task_vars,\n wrap_async=self._task.async_val\n )\n )\n\n return result", "def task(self):" ]
[ "0.6628442", "0.6553389", "0.64709336", "0.6245773", "0.62365663", "0.6235586", "0.6140546", "0.606011", "0.6013078", "0.6006409", "0.6006409", "0.58641344", "0.58462805", "0.58322525", "0.5796792", "0.57681733", "0.57168084", "0.57168084", "0.57036185", "0.5652299", "0.56480616", "0.5619827", "0.5603603", "0.5593533", "0.5587985", "0.5576912", "0.5573493", "0.55684394", "0.5535184", "0.55347687" ]
0.7416804
0
Pickle tasks and distribute work over parallel processes.
def execute(self, tasks: List[Task], progress_bar: bool = True):
    n_tasks = len(tasks)
    pickled_tasks = [pickle.dumps(task) for task in tasks]
    n_procs = min(self.n_procs, n_tasks)
    logger.info(f"Performing parallel task execution on {n_procs} "
                f"processes.")
    with Pool(processes=n_procs) as pool:
        results = pool.map(work,
                           tqdm(pickled_tasks, disable=not progress_bar))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tasks():", "def pool_job(self, func, inputs):\n\n if self.flag_use_mp:\n output = zip(*self._pool.map(func, inputs))\n self._consolidate_mp_logs()\n else:\n logger.info(\"Performing task serially\")\n output = self.serial_job(func, inputs)\n\n return output", "def work(pickled_task):\n task = pickle.loads(pickled_task)\n return task.execute()", "def do_tasks(self):\n\t\twork_ = self.TASK_LIMIT\n\t\twhile True:\n\t\t\tif len(self.tasks) == 0 or work_ <= 0:\n\t\t\t\tbreak\n\t\t\tself.tasks[0].work(self)\n\t\t\tif self.tasks[0].completed:\n\t\t\t\tself.tasks.pop(0)\n\n\t\t\twork_ -= 1", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()", "def job_workflow(workflow, jobfiles, jwcl=WCL()):\n #pylint: disable=protected-access,expression-not-assigned,lost-exception\n global pool\n global results\n global stop_all\n global jobfiles_global\n global job_track\n global keeprunning\n global donejobs\n global result_lock\n global lock_monitor\n\n infullnames = {}\n with open(workflow, 'r') as workflowfh:\n # for each wrapper execution\n lines = workflowfh.readlines()\n sys.stdout.flush()\n inputs = {}\n # read in all of the lines in dictionaries\n for linecnt, line in enumerate(lines):\n wrapnum = miscutils.fwsplit(line.strip())[0]\n task = parse_wrapper_line(line, linecnt)\n #task['logfile'] = None\n wcl = WCL()\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n # get fullnames for inputs and outputs\n ins, _ = intgmisc.get_fullnames(wcl, wcl, None)\n del wcl\n # save input filenames to eliminate from junk tarball later\n infullnames[wrapnum] = []\n for isect in ins:\n for ifile in ins[isect]:\n infullnames[wrapnum].append(ifile)\n jobfiles['infullnames'].extend(ifile)\n inputs[wrapnum] = (task, copy.deepcopy(jobfiles), jwcl, ins)\n job_track[task['wrapnum']] = (task['logfile'], jobfiles)\n # get all of the task groupings, they will be run in numerical order\n tasks = jwcl[\"fw_groups\"].keys()\n tasks.sort()\n # loop over each grouping\n manager = mp.Manager()\n for task in tasks:\n results = [] # the results of running each task in the group\n # get the maximum number of parallel processes to run at a time\n nproc = int(jwcl[\"fw_groups\"][task][\"fw_nthread\"])\n procs = miscutils.fwsplit(jwcl[\"fw_groups\"][task][\"wrapnums\"])\n tempproc = []\n # pare down the list to include only those in this run\n for p in procs:\n if p in inputs.keys():\n tempproc.append(p)\n procs = tempproc\n if nproc > 1:\n numjobs = len(procs)\n # set up the thread pool\n pool = mp.Pool(processes=nproc, maxtasksperchild=2)\n outq = manager.Queue()\n errq = manager.Queue()\n with lock_monitor:\n try:\n donejobs = 0\n # update the input files now, so that it only contains those from the current taks(s)\n for inp in procs:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n # attach all the grouped tasks to the pool\n [pool.apply_async(job_thread, args=(inputs[inp] + (outq, errq, True,),), callback=results_checker) for inp in procs]\n pool.close()\n time.sleep(10)\n while donejobs < numjobs and keeprunning:\n count = 0\n while count < 2:\n count = 0\n try:\n msg = outq.get_nowait()\n print msg\n except:\n count += 1\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n count += 1\n time.sleep(.1)\n 
except:\n results.append(1)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n raise\n\n finally:\n if stop_all and max(results) > 0:\n # wait to give everything time to do the first round of cleanup\n time.sleep(20)\n # get any waiting messages\n for _ in range(1000):\n try:\n msg = outq.get_nowait()\n print msg\n except:\n break\n for _ in range(1000):\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n break\n if not result_lock.acquire(False):\n lock_monitor.wait(60)\n else:\n result_lock.release()\n # empty the worker queue so nothing else starts\n terminate(force=True)\n # wait so everything can clean up, otherwise risk a deadlock\n time.sleep(50)\n del pool\n while True:\n try:\n msg = outq.get(timeout=.1)\n print msg\n except:\n break\n\n while True:\n try:\n errm = errq.get(timeout=.1)\n sys.stderr.write(errm)\n except:\n break\n # in case the sci code crashed badly\n if not results:\n results.append(1)\n jobfiles = jobfiles_global\n jobfiles['infullnames'] = list(set(jobfiles['infullnames']))\n if stop_all and max(results) > 0:\n return max(results), jobfiles\n # if running in single threaded mode\n else:\n temp_stopall = stop_all\n stop_all = False\n\n donejobs = 0\n for inp in procs:\n try:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n results_checker(job_thread(inputs[inp] + (sys.stdout, sys.stderr, False,)))\n except:\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n results = [1]\n jobfiles = jobfiles_global\n if results[-1] != 0:\n return results[-1], jobfiles\n stop_all = temp_stopall\n\n\n return 0, jobfiles", "def submit_tasks(self, fun, shared_data, *argss, **kwdss):\n self.shared_data = shared_data\n ids = self.add_tasks(fun, *argss, **kwdss)\n # Launches the new tasks if the previous ones have stopped.\n if self.thread is None or not self.thread.is_alive():\n self.run_thread()\n return ids", "def queue_tasks(self, layer, tasks) -> int:\n pass", "def parallel(files):\n return list(map(join_process, list(map(start_process, files))))", "async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download", "def parallel_pc(task_function, task_iterable, nproc):\n import multiprocessing\n\n work_queue = multiprocessing.Queue()\n results_queue = multiprocessing.Queue()\n\n loader = get_worker_processes(\n _load_data,\n (task_iterable, work_queue, nproc),\n nproc=1,\n allow_scalar=True\n )\n workers = get_worker_processes(\n _process_data,\n (task_function, work_queue, results_queue),\n nproc=nproc,\n )\n\n # Start the processing\n LOG.debug('Starting producer process...')\n loader.start()\n LOG.debug('Starting consumer processes...')\n for worker in workers:\n worker.start()\n\n # Convert the results to a list - there is one 'finished' entry\n # from each process, so need to get them all. Need to interleave\n # this portion with the actual processing (i.e. before the join)\n # to avoid the pipe used by the Queue filling up and hanging the\n # joins (see, e.g. 
http://stackoverflow.com/q/11854519/24895)\n LOG.info('Converting results to a final list...')\n percent_threshold = 0\n task_results = []\n for _ in range(nproc):\n for element in iter(results_queue.get, FINISHED):\n task_results.append(element)\n len_task_iterable = len(list(task_iterable))\n if len_task_iterable < 1:\n len_task_iterable = 1\n if (100*len(task_results)/len_task_iterable) > percent_threshold:\n LOG.info('{0:.0f}% - tasks complete'.format(percent_threshold))\n percent_threshold += 5\n LOG.info('{0:.0f}% - tasks complete'.format(100))\n\n LOG.debug('Waiting for loader to finish...')\n loader.join()\n LOG.debug('Loader finished...')\n\n LOG.debug('Waiting for workers to finish...')\n for id, worker in enumerate(workers):\n worker.join()\n LOG.debug('Worker %d finished...',id)\n LOG.debug('All workers finished...')\n\n return task_results", "def in_parallel(*args):\n \n # Execute each in a thread and return them all.\n return ThreadPool(len(args)).map(lambda x: x(), args)", "def worker(file_paths, out_queue):\n\t\toutdict = {}\n\t\tfor path in file_paths:\n\t\t\toutdict[n] = run_muscle(path)\n\t\tout_queue.put(outdict)", "def parallel_map(\n task,\n values,\n task_args=None,\n task_kwargs=None,\n num_cpus=None,\n progress_bar=None,\n):\n # TODO: if QuTiP's parallel_map catches up, we can remove this function,\n # and put QuTiP's parallel_map into __all__ to maintain krotov's interface.\n if task_args is None:\n task_args = ()\n if task_kwargs is None:\n task_kwargs = {}\n\n if num_cpus is None:\n num_cpus = multiprocessing.cpu_count()\n\n if progress_bar is None:\n progress_bar = BaseProgressBar()\n if progress_bar is True:\n progress_bar = TextProgressBar()\n\n progress_bar.start(len(values))\n nfinished = [0]\n\n def _update_progress_bar(x):\n nfinished[0] += 1\n progress_bar.update(nfinished[0])\n\n if USE_LOKY:\n Executor = LokyReusableExecutor\n if USE_THREADPOOL_LIMITS:\n Executor = partial(\n LokyReusableExecutor,\n initializer=_process_threadpool_limits_initializier,\n )\n else:\n Executor = ProcessPoolExecutor\n\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n with _threadpool_limits(limits=1):\n with Executor(max_workers=num_cpus) as executor:\n jobs = []\n try:\n for value in values:\n args = (value,) + tuple(task_args)\n job = executor.submit(task, *args, **task_kwargs)\n job.add_done_callback(_update_progress_bar)\n jobs.append(job)\n res = [job.result() for job in jobs]\n except KeyboardInterrupt as e:\n raise e\n\n progress_bar.finished()\n return res", "def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result", "def _compute_gps_parallel(dataset, number_gp, t_min, t_max, output_root,\n number_processes, gp_dim, **kwargs):\n p = Pool(number_processes, maxtasksperchild=10)\n\n # Pool and map can only really work with single-valued functions\n partial_gp = partial(_compute_gp_all_passbands, dataset=dataset,\n number_gp=number_gp, t_min=t_min, t_max=t_max,\n output_root=output_root, gp_dim=gp_dim, **kwargs)\n\n dataset_gps = p.map(partial_gp, dataset.object_names, 
chunksize=10)\n p.close()\n\n for i in range(len(dataset.object_names)):\n obj = dataset.object_names[i]\n obj_gps = dataset_gps[i]\n dataset.models[obj] = obj_gps\n print('Models fitted with the Gaussian Processes values.')", "def task_mapper(task_function, task_iterable, parallel_procs=None):\n\n num_procs = get_num_processors(parallel_procs)\n\n if num_procs == 0:\n LOG.debug('Using serial task processor...')\n return serial_pc(task_function, task_iterable)\n else:\n LOG.debug('Using %d-parallel task processors...', num_procs)\n return parallel_pc(task_function, task_iterable, num_procs)", "def pmap(func, seq,\n chunksize=1, nworkers=mp.cpu_count(),\n fargs=None, parallel=True):\n if fargs:\n nworkers = len(fargs)\n else:\n fargs = [None] * nworkers\n\n the_end = random_string()\n create_que = mp.Queue if parallel else Queue\n create_worker = mp.Process if parallel else th.Thread\n\n # Opening multiple ques sounds dumb in a way\n # but this is a easier way to implement the ordered version of\n # parrallel map. It's just that there is a limit in the number of\n # ques in the OS. Of course you wouldn't make more than 20 processes.\n que1s = [create_que(1) for _ in range(nworkers)]\n que2s = [create_que(1) for _ in range(nworkers)]\n\n def insert1(seq, que1s):\n for chunks in grouper(grouper(seq, chunksize, the_end),\n nworkers, the_end):\n for que1, chunk in zip(que1s, chunks):\n que1.put(chunk)\n for que1 in que1s:\n que1.put(the_end)\n\n w0 = create_worker(target=insert1, args=(seq, que1s))\n w0.daemon = True\n w0.start()\n\n def insert2(func, que1, que2):\n while True:\n chunk = que1.get()\n if chunk == the_end:\n que2.put(the_end)\n return\n else:\n result = []\n for x in chunk:\n if x != the_end:\n try:\n result.append(func(x))\n except Exception as error:\n que2.put(the_end)\n str_x = str(x)\n if len(str_x) > 100:\n str_x = str_x[:80] + ' ... 
' + str_x[-20:]\n print('child worker error: ' + repr(error), str_x)\n return\n que2.put(result)\n\n for farg, que1, que2 in zip(fargs, que1s, que2s):\n if farg:\n # passing lexical closure\n # you can just do 'lambda x: func(farg, x)' for parallel version\n # because Python just copies args for each process\n # but it wouldn't work for thread version\n newfunc = (lambda farg: lambda x: func(farg, x))(farg)\n else:\n newfunc = func\n # don't replace the above with something like:\n # newfunc = A if test else B\n # causes a \"can't pickle\" error, I have no idea why.\n w = create_worker(target=insert2, args=(newfunc, que1, que2))\n w.daemon = True\n w.start()\n\n while True:\n for que2 in que2s:\n result = que2.get()\n if result == the_end:\n return\n else:\n yield from result\n\n # all the processes and threads are set to daemon\n # hence no need to terminate them manually\n # I might be wrong in the long run though.", "def run_in_parallel(self):\n\t\tfor p in self.parallel_threads:\n\t\t\tp.start()\n\t\tfor p in self.parallel_threads:\n\t\t\tp.join()", "def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)", "def process_files(exp_folders):\n pool = mp.Pool()\n results = pool.imap_unordered(read_and_serialize, exp_folders)\n\n stat = []\n for res in results:\n print(res)\n stat.append(res)\n\n pool.close()\n pool.join()", "def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):\n\tif cores == None:\n\t\tcores = multiprocessing.cpu_count()\n\tdef wrapper(f):\n\t\tdef execute(*multiargs):\n\t\t\tresults = []\n\t\t\tlen(list(zip(*multiargs)))\n\t\t\tN = len(multiargs[0])\n\t\t\tif info:\n\t\t\t\tprint(\"running %i jobs on %i cores\" % (N, cores))\n\t\t\ttaskQueue = queue.Queue(len(multiargs[0]))\n\t\t\t#for timenr in range(times):\n\t\t\t#\ttaskQueue.put(timenr)\n\t\t\tfor tasknr, _args in enumerate(zip(*multiargs)):\n\t\t\t\ttaskQueue.put((tasknr, list(_args)))\n\t\t\t#for timenr in range(times):\n\t\t\t#\tresult = f(*args, **kwargs)\n\t\t\t#\tresults.append(result)\n\t\t\texecutions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]\n\t\t\tif info:\n\t\t\t\tinfoobj = infoclass(len(multiargs[0]), executions)\n\t\t\t\tinfoobj.start()\n\t\t\tfor i, execution in enumerate(executions):\n\t\t\t\texecution.setName(\"T-%d\" % i)\n\t\t\t\texecution.start()\n\t\t\t#if 1:\n\t\t\t#\twatchdog = Watchdog(executions)\n\t\t\t#\twatchdog.start()\n\t\t\terror = False\n\t\t\tfor execution in executions:\n\t\t\t\tlog(\"joining:\",execution.getName())\n\t\t\t\ttry:\n\t\t\t\t\texecution.join()\n\t\t\t\texcept BaseException:\n\t\t\t\t\terror = True\n\t\t\t\tresults.extend(execution.results)\n\t\t\t\tif execution.error:\n\t\t\t\t\terror = True \n\t\t\tif info:\n\t\t\t\tinfoobj.join()\n\t\t\tif error:\n\t\t\t\tprint(\"error\", file=sys.stderr)\n\t\t\t\tresults = None\n\t\t\t\traise Exception(\"error in one or more of the executors\")\n\t\t\telse:\n\t\t\t\tresults.sort(cmp=lambda a, b: cmp(a[0], b[0]))\n\t\t\t\tresults = [k[1] for k in results]\n\t\t\t\t#print \"bla\", results\n\t\t\t\tif flatten:\n\t\t\t\t\tflatresults = []\n\t\t\t\t\tfor 
result in results:\n\t\t\t\t\t\tflatresults.extend(result)\n\t\t\t\t\tresults = flatresults\n\t\t\treturn results\n\t\treturn execute\n\treturn wrapper", "def run(self, funcs: dict, tasks: dict) -> Optional[dict]:\n # Number of tasks must == number of functions\n assert len(funcs) == len(tasks)\n\n # Keep track of some progress for the user\n progress = 1\n\n # if we don't have tasks, don't run\n if not tasks:\n return None\n\n # results will also have the same key to look up\n finished = dict()\n results = []\n\n try:\n pool = multiprocessing.Pool(self.workers, init_worker)\n\n self.start()\n for key, params in tasks.items():\n func = funcs[key]\n result = pool.apply_async(multi_wrapper, multi_package(func, [params]))\n\n # Store the key with the result\n results.append((key, result))\n\n while len(results) > 0:\n pair = results.pop()\n key, result = pair\n result.wait()\n progress += 1\n finished[key] = result.get()\n\n self.end()\n pool.close()\n pool.join()\n\n except (KeyboardInterrupt, SystemExit):\n logger.error(\"Keyboard interrupt detected, terminating workers!\")\n pool.terminate()\n sys.exit(1)\n\n except:\n logger.error(\"Error running task\")\n\n return finished", "def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)", "def start(self):\n\n while len(self.task_order) > 0:\n # Get the task to run, set it up, and run it\n task = self.task_order[0]\n\n # In the case of a sublist, we'll run all in parallel\n if type(task) is list:\n running_jobs = []\n job_handles = []\n print(\"Starting following tasks in parallel:\")\n for sub_task in task:\n # Add the job to a list to run. Note, each task has a\n # system object within it.\n running_jobs.append(self.task_list[sub_task])\n # If we want to keep using the same system as before\n # then assign it here.\n if running_jobs[-1].persist_system:\n running_jobs[-1].system = self.global_system\n running_jobs[-1].system.name = running_jobs[-1].task_name\n\n # Run all job\n job_handles.append(running_jobs[-1].run())\n print(\"\\t%s\" % sub_task)\n\n # Wait for jobs to finish\n for j in job_handles:\n j.wait()\n\n # Read in the data from each job\n self.data = []\n for j in running_jobs:\n j.read_results()\n self.data.append(j.data)\n\n # Check conditionals\n conditional_jobs = []\n for j in running_jobs:\n if j.conditional(j.data):\n conditional_jobs.append(j.conditional_sim_name)\n if len(conditional_jobs) > 0:\n if len(conditional_jobs) == 1:\n conditional_jobs = conditional_jobs[0]\n # Overwrite the previous task jobs and run conditionals\n self.task_order[0] = conditional_jobs\n continue\n\n # Check callbacks. 
Note, callbacks are only run if\n # conditionals are false.\n for j in running_jobs:\n if j.callback is not None:\n j.callback(self, j)\n\n # Remove the last simulation and continue\n del self.task_order[0]\n else:\n running_job = self.task_list[task]\n # Setup\n if running_job.persist_system:\n running_job.system = self.global_system\n running_job.system.name = running_job.task_name\n # Run\n print(\"Starting the following task:\")\n print(\"\\t%s\" % task)\n job_handle = running_job.run()\n\n job_handle.wait()\n\n # Read in the results of the simulation\n running_job.read_results()\n\n # If we have a conditional simulation to run, check and do so.\n # Note, in the case of a conditional, callback is not run!\n if running_job.conditional(running_job.data):\n self.task_order[0] = running_job.conditional_sim_name\n self.data = running_job.data\n continue\n\n # Store the data from the last simulation here\n self.data = running_job.data\n\n if running_job.callback is not None:\n running_job.callback(self, running_job)\n\n # Else, remove the finished simulation and continue\n del self.task_order[0]", "def run(self):\n tasks = []\n for stream in self.streams:\n task = mp.Process(target=self.record, args=[stream])\n task.start()\n tasks.append(task)\n for t in tasks:\n t.join()", "def joinall(self, tasks):\n map(lambda evt_pid1: (evt_pid1[0].wait(), evt_pid1[1]), tasks)", "def forqs_parallel(configs):\n pool = Pool(21)\n pool.map(forqs_sim, configs)\n pool.close()\n pool.join()", "def RunTasksInProcessPool(task, inputs, processes=None, onexit=None):\n if not processes:\n # - Use >=16 processes by default, in case it's a network-bound operation.\n # - Try to use all of the CPUs, in case it's a CPU-bound operation.\n processes = min(max(16, multiprocessing.cpu_count()), len(inputs))\n\n with Manager() as manager:\n # Set up output queue.\n out_queue = manager.Queue()\n fn = lambda idx, task_args: out_queue.put((idx, task(*task_args)))\n\n # Micro-optimization: Setup the queue so that BackgroundTaskRunner\n # doesn't have to set up another Manager process.\n queue = manager.Queue()\n\n with BackgroundTaskRunner(fn, queue=queue, processes=processes,\n onexit=onexit) as queue:\n for idx, input_args in enumerate(inputs):\n queue.put((idx, input_args))\n\n return [x[1] for x in sorted(out_queue.get() for _ in range(len(inputs)))]", "def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):\n # proc_pool = Local variable proc_pool for Pool of processes\n # log_level = log_level\n # count_total = Total counter of items to distribute/play/indicate progress\n # len(itemslist)\n\n log_level = logging.getLogger().getEffectiveLevel()\n logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',\n __name__, a_fn.__name__, nprocs)\n # if log_level <= logging.WARNING:\n # if args is not None:\n # for i, arg in enumerate(args):\n # logging.info('===mprocessing f():[%s] arg[%s]={%s}',\n # a_fn.__name__, i, arg)\n\n # if __name__ == '__main__':\n logging.debug('===Multiprocessing=== Setting up logger!')\n # CODING No need for such low level debugging to stderr\n # multiprocessing.log_to_stderr()\n logger = multiprocessing.get_logger()\n logger.setLevel(log_level)\n\n logging.debug('===Multiprocessing=== Logging defined!')\n\n # ---------------------------------------------------------\n # chunk\n #\n # Divides an iterable in slices/chunks of size size\n #\n def chunk(iter_list, size):\n \"\"\"\n Divides an iterable in slices/chunks of size size\n\n >>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10], 3):\n ... len(a)\n 3\n 3\n 3\n 1\n \"\"\"\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())\n\n proc_pool = []\n lockdb = multiprocessing.Lock()\n running = multiprocessing.Value('i', 0)\n mutex = multiprocessing.Lock()\n count_total = len(itemslist)\n\n size = (len(itemslist) // int(nprocs)) \\\n if ((len(itemslist) // int(nprocs)) > 0) \\\n else 1\n\n logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',\n len(itemslist), int(nprocs), size)\n\n # Split itemslist in chunks to distribute accross Processes\n for splititemslist in chunk(itemslist, size):\n logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',\n len(splititemslist), size)\n logging.debug('===type(splititemslist)=[%s]', type(splititemslist))\n logging.debug('===Job/Task Process: Creating...')\n proc_task = multiprocessing.Process(\n target=a_fn, # argument function\n args=(lockdb,\n running,\n mutex,\n splititemslist,\n count_total,\n cur,))\n proc_pool.append(proc_task)\n logging.debug('===Job/Task Process: Starting...')\n proc_task.start()\n NPR.niceprint('===Job/Task Process: [{!s}] Started '\n 'with pid:[{!s}]'\n .format(proc_task.name,\n proc_task.pid),\n verbosity=3,\n logalso=logging.DEBUG)\n\n # Check status of jobs/tasks in the Process Pool\n if log_level <= logging.DEBUG:\n NPR.niceprint('===Checking Processes launched/status:',\n verbosity=3, logalso=logging.DEBUG)\n for j in proc_pool:\n NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),\n verbosity=3, logalso=logging.DEBUG)\n\n # Regularly print status of jobs/tasks in the Process Pool\n # Prints status while there are processes active\n # Exits when all jobs/tasks are done.\n while True:\n if not any(multiprocessing.active_children()):\n logging.debug('===No active children Processes.')\n break\n for prc in multiprocessing.active_children():\n logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())\n proc_task_active = prc\n NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n proc_task_active.join(timeout=60)\n NPR.niceprint('===Waited for 60s on '\n '{!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n # Wait for join all jobs/tasks in the Process Pool\n # All should be done by now!\n for j in proc_pool:\n j.join()\n NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'\n .format(j.name, j.is_alive(), j.exitcode),\n verbosity=2)\n\n logging.warning('===Multiprocessing=== pool joined! '\n 'All processes finished.')\n\n # Will release (set to None) the lockdb lock control\n # this prevents subsequent calls to\n # use_lock( nuLockDB, False)\n # to raise exception:\n # ValueError('semaphore or lock released too many times')\n logging.info('===Multiprocessing=== pool joined! '\n 'Is lockdb None? [%s]. Setting lockdb to None anyhow.',\n lockdb is None)\n lockdb = None\n\n # Show number of total files processed\n NPR.niceprocessedfiles(running.value, count_total, True)\n\n return True" ]
[ "0.6510637", "0.6504562", "0.6131207", "0.6020679", "0.59689945", "0.59663326", "0.59335184", "0.5931897", "0.58953154", "0.58685786", "0.58604807", "0.582191", "0.5813244", "0.5809672", "0.5804818", "0.5782704", "0.57782567", "0.577521", "0.57745594", "0.5769893", "0.5763244", "0.576062", "0.57537174", "0.57498103", "0.57349366", "0.5719693", "0.57078964", "0.5692456", "0.56788105", "0.5671163" ]
0.6563995
0
Sets up this window with a simulator, a display and optionally a control widget.
def __init__(self, simulator, display, control=None, **kwargs):
    super(ZasimMainWindow, self).__init__(**kwargs)

    self.setAttribute(Qt.WA_DeleteOnClose)

    self.simulator = simulator
    self.display = display
    self.control = control

    central_widget = QWidget(self)

    if self.control is None:
        self.control = ControlWidget(self.simulator, parent=central_widget)

    layout = QVBoxLayout(central_widget)

    sim_name = QLabel(str(self.simulator), self)
    # make text selectable and links (if any) clickable
    sim_name.setTextInteractionFlags(Qt.TextBrowserInteraction)
    # there are some nasty long names if base gets bigger than 2.
    sim_name.setWordWrap(True)
    layout.addWidget(sim_name)

    scroller = QScrollArea()
    scroller.setWidget(self.display)
    layout.addWidget(scroller)

    layout.addWidget(self.control)
    self.control.setObjectName("control")

    self.setCentralWidget(central_widget)
    self.setup_menu()

    self.elementary_tool = None
    #self.comp_dlg = None
    self.new_dlg = None

    self.resetter = ResetDocklet(self)
    self.addDockWidget(Qt.RightDockWidgetArea, self.resetter)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _display_setup(self):\r\n display_file = \"{}/display.json\".format(self.settings_dir)\r\n with open(display_file) as json_file:\r\n win_settings = json.load(json_file)\r\n self.win = visual.Window(**win_settings)\r\n framerate = self.win.fps()\r\n self.frame_duration = 1.0/framerate\r\n self.mouse = event.Mouse(visible=False, win=self.win)", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def setup(self):\n self.ui.setup_window()", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def setup(self):\n \n # Define ui file to be used as a graphical interface\n # This file can be edited graphically with Qt Creator\n # sibling_path function allows python to find a file in the same folder\n # as this python module\n self.ui_filename = sibling_path(__file__, \"ant_watch_plot.ui\")\n \n #Load ui file and convert it to a live QWidget of the user interface\n self.ui = load_qt_ui_file(self.ui_filename)\n\n # Measurement Specific Settings\n # This setting allows the option to save data to an h5 data file during a run\n # All settings are automatically added to the Microscope user interface\n self.settings.New('save_video', dtype = bool, initial = False)\n self.settings.New('track_ant',dtype = bool, initial = False)\n self.settings.New('pixel_size', dtype = float, initial = 0.05547850208, ro = True)\n self.settings.New('binning', dtype = int, initial = 16, ro = True)\n self.settings.New('threshold', dtype = int, initial = 85, ro = False)\n self.settings.New('proportional', dtype = float, initial = 0.12, ro = False)\n self.settings.New('integral', dtype = float, initial = 0, ro = False)\n self.settings.New('derivative', dtype = float, initial = 0.05, ro = False)\n \n # x and y is for transmitting signal\n self.settings.New('x',dtype = float, initial = 32, ro = True, vmin = 0, vmax = 63.5)\n self.settings.New('y',dtype = float, initial = 32, ro = True, vmin = 0, vmax = 63.5)\n \n # Define 
how often to update display during a run\n self.display_update_period = 0.01\n \n \n # Convenient reference to the hardware used in the measurement\n self.track_cam = self.app.hardware['track_cam']\n self.wide_cam = self.app.hardware['wide_cam']\n self.recorder = self.app.hardware['flirrec']\n self.daqmotor = self.app.hardware['daqmotor']\n \n #setup experiment condition\n self.track_cam.settings.frame_rate.update_value(50)\n self.track_cam.read_from_hardware()", "def _prep_window(self, parent=None):\n self.toolkit.app.initialize()\n if not self.initialized:\n self.setup(parent)\n self.resize_to_initial()\n self.update_minimum_size()\n self.update_maximum_size()", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def __init__(self, *args, **kwargs):\r\n\t\tsuper(MainWindow, self).__init__(*args, **kwargs)\r\n\t\t\r\n\t\tself.setupUi(self)\r\n\t\t\r\n\t\tself.outputString = \"\" #this will be the content of the textEdit widget\r\n\t\tself.stopSim = False\r\n\t\t\r\n\t\tself.numBarriers = 10 #the default number of barriers \r\n\t\t\r\n\t\tself.createSim()\r\n\t\t\r\n\t\tself.sld.valueChanged.connect(self.changeBarriers) # connect slider to function\r\n\t\tself.runButton.pressed.connect(self.runSim) # connects button to function\r\n\t\tself.createButton.pressed.connect(self.createSim) # connects button to function\r\n\t\t\r\n\t\tself.show()", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def setup(self, callback=False, display=\"lcd\"):\n self.display_medium = display\n self._setup_gpio_in()\n if callback:\n self._add_event_detect()\n self._add_event_callback()", "def __init__(self, inner_widget=None):\n super(GameEngineUI, self).__init__(parent=qtutils.get_maya_window())\n self.setupUi(self)\n\n self.start_btn = StartButton(self)\n self.inner_widget_vlay.addWidget(self.start_btn)\n\n self.inner_widget = inner_widget\n if inner_widget is not None:\n self.inner_widget_vlay.addWidget(inner_widget)\n # end if\n\n self.game_engine = GameEngine()\n\n # Maximize\n desktop = QtGui.QApplication.instance().desktop()\n available_geometry = desktop.screenGeometry(QtGui.QCursor().pos())\n self.setGeometry(available_geometry.x(), 0, available_geometry.width(), available_geometry.height())", "def showDisplay(self, type=\"DEFAULT\"):\n gd = mamba.getDisplayer() # <- trick to ensure the root windows is created and hidden\n if type==\"DEFAULT\":\n # First if there is any display already opened it is showed\n no_display = True\n if self._displayUsr:\n self._displayUsr.show()\n 
no_display = False\n if self._displayVtk:\n self._displayVtk.show()\n no_display = False\n if self._displayPjt:\n self._displayPjt.show()\n no_display = False\n \n if no_display:\n # If no display is yet open we create one\n # preferentially using user defines display\n # or if not VTK\n if self._displayerUsr:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayUsr.updateim()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()\n \n elif type==\"USER\":\n if self._displayerUsr:\n if self._displayUsr:\n self._displayUsr.show()\n else:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayUsr.updateim()\n \n elif type==\"PROJECTION\":\n if self._displayerPjt:\n if self._displayPjt:\n self._displayPjt.show()\n else:\n self._displayPjt = self._displayerPjt(self.name)\n if self._displayPjt:\n self._displayPjt.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayPjt.updateim()\n \n elif type==\"VTK\":\n if self._displayerVtk:\n if self._displayVtk:\n self._displayVtk.show()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()", "def displaySetup(app, **options):\n\n display = app.display\n\n display.setup(**options)", "def __init__(self, master, **kwargs):\n GenericWindow.__init__(self, master, **kwargs)\n self.states = None\n self.master = master\n self.display()", "def __init__(self, controller, target_configuration, title, width, height, force_focus = True):\n\n\t\tConfigurationWindow.__init__(self, controller, target_configuration, title, width, height, force_focus)\n\n\t\tself.controller = controller\n\n\t\t# Display on/off toggle variables\n\t\tself.show_title = BooleanVar()\n\t\tif self.controller.plot_configuration.get_property('show_title') == True: self.show_title.set(True)\n\t\telse: self.show_title.set(False)\n\n\t\t# Chart title\n\t\tself.title_label = Label(self, text = \"Title:\")\n\t\tself.title_label.grid(row = 0, column = 0)\n\n\t\tself.title_checkbox = Checkbutton(self, text = \"Display\", var = self.show_title, onvalue = True, offvalue = False)\n\t\tif self.show_title.get(): self.title_checkbox.select()\n\t\tself.title_checkbox.grid(row = 0, column = 1)\n\n\t\tself.title_field = Entry(self)\n\t\tself.title_field.insert(0, self.controller.plot_configuration.get_property('title'))\n\t\tself.title_field.grid(row = 0, column = 2)\n\n\t\t# Plot width\n\t\tself.plot_width_label = Label(self, text = \"Plot Width (200 - 1000):\")\n\t\tself.plot_width_label.grid(row = 1, column = 0)\n\t\tself.plot_width_field = Entry(self)\n\t\tself.plot_width_field.insert(0, self.controller.plot_configuration.get_property('plot_width'))\n\t\tself.plot_width_field.grid(row = 1, column = 2)\n\n\t\t# Plot height\n\t\tself.plot_height_label = Label(self, text = \"Plot Height (200 - 1000):\")\n\t\tself.plot_height_label.grid(row = 2, column = 0)\n\t\tself.plot_height_field = Entry(self)\n\t\tself.plot_height_field.insert(0, self.controller.plot_configuration.get_property('plot_height'))\n\t\tself.plot_height_field.grid(row = 2, column = 2)\n\n\t\t# Margin top\n\t\tself.margin_top_label = 
Label(self, text = \"Top Margin (15 - 200):\")\n\t\tself.margin_top_label.grid(row = 3, column = 0)\n\t\tself.margin_top_field = Entry(self)\n\t\tself.margin_top_field.insert(0, self.controller.plot_configuration.get_property('margin_top'))\n\t\tself.margin_top_field.grid(row = 3, column = 2)\n\n\t\t# Margin left and right\n\t\tself.margin_left_label = Label(self, text = \"Left and Right Margins (15 - 200):\")\n\t\tself.margin_left_label.grid(row = 4, column = 0)\n\t\tself.margin_left_field = Entry(self)\n\t\tself.margin_left_field.insert(0, self.controller.plot_configuration.get_property('margin_left'))\n\t\tself.margin_left_field.grid(row = 4, column = 1)\n\t\tself.margin_right_field = Entry(self)\n\t\tself.margin_right_field.insert(0, self.controller.plot_configuration.get_property('margin_right'))\n\t\tself.margin_right_field.grid(row = 4, column = 3)\n\n\t\t# Margin bottom\n\t\tself.margin_bottom_label = Label(self, text = \"Bottom Margin (15 - 200):\")\n\t\tself.margin_bottom_label.grid(row = 5, column = 0)\n\t\tself.margin_bottom_field = Entry(self)\n\t\tself.margin_bottom_field.insert(0, self.controller.plot_configuration.get_property('margin_bottom'))\n\t\tself.margin_bottom_field.grid(row = 5, column = 2)\n\n\t\t# Save and cancel buttons\n\t\tsave_button = Button(self, text = \"Save and Update\", command = self.validate)\n\t\tsave_button.grid(row = 6, column = 1)\n\t\tcancel_button = Button(self, text = \"Cancel\", command = self.destroy)\n\t\tcancel_button.grid(row = 6, column = 2)", "def ui_setup(self):\n loader = QUiLoader()\n file = QFile('./user_interface/form/main_window.ui')\n file.open(QFile.ReadOnly)\n self._window = loader.load(file)\n file.close()\n\n status_bar = QStatusBar(self._window)\n status_bar.showMessage(__copyright__)\n self._window.setStatusBar(status_bar)\n self._window.setWindowIcon(QIcon('./user_interface/media/bucketing_icon.jpeg'))\n self._window.setWindowTitle('PySide2 Project - Basic UI Framework')\n\n self._option_panel = OptionPanel()\n self._option_panel.add_button('DekBan', './user_interface/media/dekban.png')\n self._option_panel.add_button('Charlie', './user_interface/media/charlie.jpeg')\n self._option_panel.add_button('Simon', './user_interface/media/Simon.jpeg')\n\n # Add widget to main layout\n main_layout = self._window.main_layout\n main_layout.itemAtPosition(0, 0).setAlignment(QtCore.Qt.AlignCenter)\n main_layout.itemAtPosition(0, 1).setAlignment(QtCore.Qt.AlignVCenter)\n main_layout.addWidget(self._option_panel, 2, 0, 1, 1)\n\n # Add page widget to stack\n self._pages['item'] = ItemWidget()\n self._pages['text1'] = TextPage(text=PAUSE_TEXT)\n self._pages['text2'] = TextPage(text=STOP_TEXT)\n\n for index, name in enumerate(self._pages):\n print('pages {} : {} page'.format(index, name))\n self._window.widget_stack.addWidget(self._pages[name].widget)\n\n self._window.widget_stack.setCurrentIndex(0)\n\n # Build up signal / slot\n self._option_panel.currentItemChanged.connect(self.set_page)", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def initializeUI(self):\n self.setGeometry(100, 100, 300, 200)\n self.setWindowTitle('Event Handling Example')\n\n self.show()", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def setup_gui(self):\n central_widget = QWidget(self)\n central_widget.setObjectName('central_widget')\n self.label = QLabel('Hello World')\n self.input_field = QLineEdit()\n change_button = QPushButton('Change text')\n close_button = QPushButton('close')\n quit_button = 
QPushButton('quit')\n central_layout = QVBoxLayout()\n button_layout = QHBoxLayout()\n central_layout.addWidget(self.label)\n central_layout.addWidget(self.input_field)\n # a separate layout to display buttons horizontal\n button_layout.addWidget(change_button)\n button_layout.addWidget(close_button)\n button_layout.addWidget(quit_button)\n central_layout.addLayout(button_layout)\n central_widget.setLayout(central_layout)\n self.setCentralWidget(central_widget)\n # create a system tray icon. Uncomment the second form, to have an\n # icon assigned, otherwise you will only be seeing an empty space in\n # system tray\n self.systemtrayicon = QSystemTrayIcon(self)\n self.systemtrayicon.show()\n # set a fancy icon\n self.systemtrayicon.setIcon(QIcon.fromTheme('help-browser'))\n change_button.clicked.connect(self.change_text)\n quit_button.clicked.connect(QApplication.instance().quit)\n close_button.clicked.connect(self.hide)\n # show main window, if the system tray icon was clicked\n self.systemtrayicon.activated.connect(self.icon_activated)", "def init_ui(self):\n\n # Display default values until first update\n self.window.pcBatteryDisplay.setValue(100)\n self.window.pcCpuDisplay.setValue(0)", "def __init__(self, settings, **kwargs):\n super(MainScreen, self).__init__(**kwargs)\n Clock.schedule_interval(self.update_status_fields, 1*CLOCK_SPEED)\n self.settings = settings\n self.ml_interface.debug = self.debug\n self.ml_interface.mer_ip_address = self.settings[\"ip_address\"]\n self.ml_interface.software_version = str(self.settings[\"software_version\"])\n self.ml_interface.software_title = self.settings[\"title\"]\n self.ml_interface.speed = str(self.settings[\"speed\"])\n self.ml_interface.speed_out = str(self.settings[\"speed_out\"])\n self.standby_position = str(\n self.settings[\"standby_position\"]\n )\n self.requested_position = str(\n self.settings[\"default_requested_position\"]\n )\n self.ml_interface.standby_position = self.standby_position\n self.ml_interface.requested_position = self.requested_position\n self.title = self.ml_interface.software_title\n self.settingsWindow = SettingsWindow(ml_object=self.ml_interface, main_screen = self)\n self.infoWindow = InfoWindow(ml_object=self.ml_interface)\n self.testWindow = TestWindow(self, ml_interface=self.ml_interface)\n if not self.debug:\n self.ml_interface.initialize_poll_connection_thread()\n self.read_thread = threading.Thread(target=self.ml_interface.initialize_read_thread)\n self.read_thread.start()\n self.ml_interface.update_ml()\n self.ml_interface.write()\n self.set_requested_position()\n self.set_standby_position()", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def SetWindow(self, w):\r\n\r\n self.window = w", "def init_window(self, size, screen=None):\n # enforce minimum size\n (mw, mh), (w, h) = config.minsize, size\n if w < mw or h < mh:\n size = mw, mh\n\n # init view surface and pass it to screen\n self.view = pygame.display.set_mode(size, pygame.RESIZABLE)\n self.view.fill((0, 0, 0))\n if screen is not None:\n screen.resize_view()", "def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)", "def display(self):\n self.main_window = tk.Tk()\n self.main_window.title('ACO Simulator')\n self.status_text = tk.StringVar()\n self.status_text.set(\"Click start to run the simulation.\")\n\n self.start_btn_text = tk.StringVar()\n self.start_btn_text.set(\"Start\")\n self.pause_btn_text 
= tk.StringVar()\n self.pause_btn_text.set(\"End Simulation\")\n\n self.grid_frame = tk.Frame(master=self.main_window, relief=tk.RAISED, borderwidth=1)\n self.grid_frame.grid(padx=10, pady=10)\n\n for y in range(self.rows):\n row = []\n for x in range(self.columns):\n frame = tk.Frame(master=self.grid_frame, width=10, height=10, bg='blue')\n frame.grid(row=y, column=x, padx=1, pady=1)\n row.append(frame)\n self.grid.append(row)\n\n frame = tk.Frame(master=self.main_window)\n frame.grid(padx=10, pady=5, columnspan = self.columns)\n self.status = tk.Label(master=frame, textvariable=self.status_text)\n self.status.pack()\n\n frame = tk.Frame(master=self.main_window)\n frame.grid(padx=10, pady=5, columnspan = self.columns)\n self.submit_btn = tk.Button(master=frame, textvariable=self.start_btn_text, width=15)\n self.submit_btn.pack()\n self.submit_btn.bind(\"<Button-1>\", self.start_aco)\n\n frame = tk.Frame(master=self.main_window)\n frame.grid(padx=10, pady=5, columnspan = self.columns)\n self.pause_btn = tk.Button(master=frame, textvariable=self.pause_btn_text, width=15)\n self.pause_btn.pack()\n self.pause_btn.bind(\"<Button-1>\", self.end_aco)\n\n frame = tk.Frame(master=self.main_window, width=10, height=15)\n frame.grid(columnspan = self.columns)\n\n self.main_window.mainloop()" ]
[ "0.68346995", "0.6504775", "0.6488538", "0.64603406", "0.6286316", "0.6179651", "0.61078423", "0.6093484", "0.60897326", "0.6019644", "0.6019345", "0.6001433", "0.599808", "0.59722495", "0.59695345", "0.59521645", "0.5940673", "0.59181553", "0.5897244", "0.5888673", "0.58857524", "0.58837855", "0.5882095", "0.5861368", "0.5833963", "0.58270437", "0.58223933", "0.58217376", "0.58205587", "0.58146614" ]
0.76192385
0
Attach an extra display to the control. Those displays are updated whenever a step occurs.
def attach_display(self, display): self.extra_displays.append(display) self.addDockWidget(Qt.RightDockWidgetArea, display) #self.display_attached.emit(display)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_displayhook(self):\n pass", "def bs_addHeadsUpDisplay():\n # remove all headsUpDisplay.\n if pm.windows.headsUpDisplay(lh=True):\n for each in pm.windows.headsUpDisplay(lh=True):\n pm.windows.headsUpDisplay(each, rem=True)\n # add new heads up displays.\n pm.windows.headsUpDisplay('sceneNameHUD', l='Scene Name:- ', allowOverlap=True, b=0, s=4, dataFontSize='small',\n command=bspb_sceneName)\n pm.windows.headsUpDisplay('artistNameHUD', l='Artist Name:- ', allowOverlap=True, b=1, s=5, dataFontSize='small',\n command=bspb_artistName)\n pm.windows.headsUpDisplay('dateTimeHUD', l='Date And Time:- ', allowOverlap=True, b=0, s=5, dataFontSize='small',\n command=bspb_dateTime)\n pm.windows.headsUpDisplay('frameCounterHUD', l='Frame Number:- ', allowOverlap=True, b=1, s=9, dataFontSize='small',\n command=bspb_frameCounter)\n pm.windows.headsUpDisplay('focalLengthHUD', l='Focal Length:- ', allowOverlap=True, b=0, s=9, dataFontSize='small',\n command=bspb_focalLength)\n pm.windows.headsUpDisplay('camNameHUD', l='Cam :- ', allowOverlap=True, b=0, s=7, dataFontSize='small',\n command=bspb_getCurrentCam)\n # add colors in heads up display.\n # pm.mel.eval(\"displayColor -dormant headsUpDisplayLabels 19\")\n # pm.mel.eval(\"displayColor -dormant headsUpDisplayValues 14\")\n # add expressions.\n bspb_frameCounterUpdate()\n bspb_focalLengthUpdate()", "def show(self, display):\n if self.visible == True:\n pg.draw.rect(display, self.bgColor, self.panel)\n\n for element in self.elements:\n element.show(display)", "def display(self):\n self.displaycontrol |= self.LCD_DISPLAYON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)", "def updateDisplay(self):\n if self._displayPjt:\n self._displayPjt.updateim()\n if self._displayUsr:\n self._displayUsr.updateim()\n if self._displayVtk:\n self._displayVtk.updateim()", "def setDisplay(self):\n self.graph_display=[self.complexCompose(self.coefficients,(t+1)/self.display_number)[-1] for t in range(self.display_number)]", "def add_to_product_display(self, product):\n self.product_displays.add(product)", "def show(self):\r\n display(self.grid_part)", "def showDisplay(self, type=\"DEFAULT\"):\n gd = mamba.getDisplayer() # <- trick to ensure the root windows is created and hidden\n if type==\"DEFAULT\":\n # First if there is any display already opened it is showed\n no_display = True\n if self._displayUsr:\n self._displayUsr.show()\n no_display = False\n if self._displayVtk:\n self._displayVtk.show()\n no_display = False\n if self._displayPjt:\n self._displayPjt.show()\n no_display = False\n \n if no_display:\n # If no display is yet open we create one\n # preferentially using user defines display\n # or if not VTK\n if self._displayerUsr:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayUsr.updateim()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()\n \n elif type==\"USER\":\n if self._displayerUsr:\n if self._displayUsr:\n self._displayUsr.show()\n else:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayUsr.updateim()\n \n elif type==\"PROJECTION\":\n if self._displayerPjt:\n 
if self._displayPjt:\n self._displayPjt.show()\n else:\n self._displayPjt = self._displayerPjt(self.name)\n if self._displayPjt:\n self._displayPjt.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayPjt.updateim()\n \n elif type==\"VTK\":\n if self._displayerVtk:\n if self._displayVtk:\n self._displayVtk.show()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()", "def updateDisplay(self, msg):\n t = msg.data\n self.displayLbl.SetLabel(\"%s\" % t)\n self.SetTitle(\"%s\" % t)", "def changeDisplay1(self):\n\n print (\"--Changing to display 1--\")\n self.display1Button.setDown(True)\n self.display2Button.setDown(False)\n self.statustext.setText(\"Changed to Display 1\")\n self.photo.setPixmap(QtGui.QPixmap(self.firstScreen))\n self.ActivePhoto = self.firstScreen", "def show(self):\r\n wlight.lightController.redraw()", "def display(self):\n count = 0\n self.displays[0].start() # call only once to support shift chain\n for d in self.displays:\n d.output(self.data[count])\n count += 1\n self.displays[0].latch() # call only once to support shift chain", "def show( self ):\n if self.changed:\n self._update_ax() \n self.changed = False", "def update_display(self) -> None:\n if self.display is None:\n raise RuntimeError(\n \"Tried to update the display, but a display hasn't been \"\n \"created yet! To create a display for the renderer, you must \"\n \"call the `make_display()` method.\"\n )\n\n self.display.blit(self.surface, [0, 0])\n pygame.display.update()\n\n # Sounds:\n if self.audio_on and self.game.sound_cache is not None:\n sound_name = self.game.sound_cache\n self.sounds[sound_name].play()", "def update_displays(self):\n for key, value in self.lnp.settings:\n if key in list(self.controls.keys()):\n if isinstance(self.controls[key], Entry):\n self.controls[key].delete(0, END)\n self.controls[key].insert(0, value)\n else:\n self.controls[key][\"text\"] = (\n self.controls[key][\"text\"].split(':')[0] + ': ' +\n value)", "def _init_display(self):\n raise NotImplementedError", "def changeDisplay2(self):\n\n print (\"--Changing to display 2--\")\n self.display1Button.setDown(False)\n self.display2Button.setDown(True)\n self.statustext.setText(\"Changed to display 2\")\n self.photo.setPixmap(QtGui.QPixmap(self.secondScreen))\n self.ActivePhoto = self.secondScreen", "def display(self):\n self.tabbar.refresh()\n self.noact_pane.display()\n self.act_pane.display()\n self.statusbar.set_strings(self.act_pane.act_tab.build_status_line())\n self.statusbar.refresh()", "def setDisplayMode(self):\n self.step = (self.max_step + int(self.include))\n self.display = Fourier.inverseTransform(\n self.coefficients, self.display_number)", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def start_flash(display):\n ck_display[str(display)].configure(relief=tkinter.RAISED, bd=0, highlightbackground='blue', highlightthickness=8)", "def display( self, value=True ):\n\t\tif value:\n\t\t\tself._displaycontrol |= LCD_DISPLAYON\n\t\telse:\n\t\t\tself._displaycontrol &= (0xFF ^ LCD_DISPLAYON)\n\t\tself.command( LCD_DISPLAYCONTROL | self._displaycontrol )", "def open_display (self, *display_args, **kw):\n new_disp = kw.get('cls', Display)((0, 0, 0, 0), *display_args)\n self.displays.append(new_disp)\n return (new_disp, self._arrange_displays())", "def displaySetup(app, **options):\n\n 
display = app.display\n\n display.setup(**options)", "def enable_screen_and_show_control_buttons(self):\n event_logger.debug(\"Activating display\")\n rpi_utils.toggle_screen_state(\"on\")\n self.show_control_buttons()", "def show( self ):\n if self.visible == 1:#ohnheiser hack and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()", "def onShow(self):\n pass" ]
[ "0.6375533", "0.63675547", "0.6366979", "0.63276595", "0.6158632", "0.6107422", "0.60964465", "0.59870285", "0.58940417", "0.58865005", "0.58281237", "0.57941467", "0.57307637", "0.5726875", "0.570152", "0.56921536", "0.56437284", "0.56373584", "0.5606878", "0.5606859", "0.55937386", "0.55937386", "0.55937386", "0.5590819", "0.55734605", "0.5570712", "0.5563851", "0.55587405", "0.5546299", "0.5541527" ]
0.7561709
0
Detach an extra attached display from the control.
def detach_display(self, display): self.extra_displays.remove(display) self.removeDockWidget(display) #self.display_detached.emit(display)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_display (self, display):\n z = display.z\n self.layers[z].remove(display)\n # remove layer if empty now\n if not self.layers[z]:\n del self.layers[z]\n # remove display from overlaps/overlapped lists\n for disp in display.overlaps:\n disp.overlapped.remove(display)\n for disp in display.overlapped:\n disp.overlaps.remove(display)\n disp.dirty = True\n # this class stores some extra attributes in displays\n display.destroy('overlaps', 'overlapped')\n self.displays.remove(display)", "def detach(self, phy_layer):\n self._attached_phys.remove(phy_layer)", "def close_display (self, display):\n display.destroy()\n self.displays.remove(display)", "def bs_removeHeadsUpDisplay():\n # remove all headsUpDisplay.\n if pm.windows.headsUpDisplay(lh=True):\n for each in pm.windows.headsUpDisplay(lh=True):\n pm.windows.headsUpDisplay(each, rem=True)\n # remove resolution gates.\n shotCam = pm.PyNode('shot_cam')\n # add resolution gates.\n pm.camera(shotCam, e=True, dsa=False, dfc=False, displayFilmGate=False, displayResolution=False,\n displaySafeTitle=False)\n pm.setAttr(shotCam + '.displayGateMaskOpacity', 0)\n pm.setAttr(shotCam + '.displayGateMaskColor', [0, 0, 0], type='double3')\n pm.setAttr(shotCam + '.displayGateMask', 0)\n # delete expression.\n pm.delete('focalLengthUpdateEXP')\n pm.delete('frameCounterUpdateEXP')", "def hideDisplay(self):\n if self._displayPjt:\n self._displayPjt.hide()\n if self._displayUsr:\n self._displayUsr.hide()\n if self._displayVtk:\n self._displayVtk.hide()", "def detach(self, overlay):\n # See #868\n for i, a in enumerate(self.animations):\n a.layout = a.layout.clone()\n if overlay and i:\n a.preclear = False", "def noDisplay(self):\n self.displaycontrol &= ~self.LCD_DISPLAYON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)", "def removeSeparatrix(self):\n if self._separatrixOverlayHandle is not None:\n self._separatrixOverlayHandle.remove()\n self._separatrixOverlayHandle = None\n\n self.overlaySeparatrix = False", "def detach(self):\n raise io.UnsupportedOperation", "def release_displays():\n for _disp in displays:\n _disp._release() # pylint: disable=protected-access\n displays.clear()", "def destroy(self):\r\n self.visible = False", "def detach_hidden(self, zero=False):\n if zero:\n self.hidden = self._make_hidden(self.batch_size)\n else:\n self.hidden[0].detach()", "def hide_outputpad(self, frame2, outputpad):\n frame2.pack_forget()\n outputpad.pack_forget()", "def detach(self):\n raise NotImplementedError()", "def detach_plot(self):\n detached = tk.Toplevel(self)\n detached.wm_title(\"Glycoprotein\")\n fig = mpl.figure.Figure(figsize=(5, 4), dpi=100)\n ax = fig.add_subplot(111)\n chid = self.chain.get()\n\n l = len(self.myGlycosylator.sequences[chid])\n sequons = [k for k in self.myGlycosylator.sequons.keys() if chid in k[:len(chid)]]\n trees = self.original_glycans.copy()\n trees.update(self.linked_glycans)\n self.myDrawer.draw_glycoprotein(l, self.myGlycosylator.get_start_resnum(chid), sequons, ax = ax, axis = 0,\n trees = trees, names = self.names, sequon_color = self.sequon_colors)\n ax.axis('equal')\n ax.axis('off')\n\n canvas = FigureCanvasTkAgg(fig, master=detached)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n toolbar = NavigationToolbar2TkAgg(canvas, detached)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)", "def detach_hidden(self, zero=False):\n if 
zero:\n self.hidden = self._make_hidden(self.batch_size)\n else:\n self.hidden = self.hidden.detach()", "def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))", "def do_undisplay(self, arg):\n try:\n del self._get_display_list()[arg]\n except KeyError:\n print('** %s not in the display list **' % arg, file=self.stdout)", "def clear_display(self) -> None:\n pass", "def hide(self):\n self._dev.hide()", "def unHide(self):\n self.visible = True", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):", "def attach_display(self, display):\n self.extra_displays.append(display)\n self.addDockWidget(Qt.RightDockWidgetArea, display)\n #self.display_attached.emit(display)", "def _stop_display(self):\n if self._proc and not self._proc.returncode:\n pid = self._proc.pid\n os.kill(pid, signal.SIGKILL)", "def detachFromPlotItem(self):\n raise NotImplementedError() # TODO", "def do_hf_unhide(self, arg):\n self.show_hidden_frames = True\n self.refresh_stack()", "def detach(self):\n\n self._check_if_stopped()\n\n self.rotation_speed = 0\n self._position = None\n self._pi.set_servo_pulsewidth(self._pin, 0)", "def detach(cls, factory, attrib_name):\n cls._to_attach.remove((factory, attrib_name))", "def detach(self, observer: Observer) -> None:\n pass", "def detach(self, observer: Observer) -> None:\n pass" ]
[ "0.65239346", "0.6328763", "0.62478584", "0.61699164", "0.6071742", "0.5924774", "0.5723696", "0.56696635", "0.5666436", "0.56627667", "0.56161225", "0.5611194", "0.55860007", "0.557813", "0.5575845", "0.5551016", "0.5550515", "0.55281574", "0.5518822", "0.55140615", "0.5483433", "0.5480573", "0.5478855", "0.54731953", "0.54218537", "0.5411136", "0.5408775", "0.5402455", "0.5389477", "0.5389477" ]
0.82741654
0
Runs the given functions using the code in the given directory. Returns True if all went well, together with a string for the user
def run_functions( functions, dir ): res = [] for i in range(0, len(functions)): res.append(functions[i].execute(dir)) return summarize_as_html(res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_programs_in_directory(directory):\n files = [f for f in os.listdir(directory) if f.endswith(DECAF_SUFFIX)]\n files.sort()\n files = [os.path.join(directory, f) for f in files]\n\n all_passed = True\n for f in files:\n if not check_return_value(f):\n all_passed = False\n\n return all_passed", "def main():\n if os.path.isdir(path):\n for filename in os.listdir(path):\n if filename.endswith('.asm'):\n execute_asm_file(path + '/' + filename, filename)\n else:\n execute_asm_file(path, path[path.rfind(\"/\") + 1:])", "def run(self):\n\t\tif not self.is_valid:\n\t\t\traise ValueError('Paths are not valid')\n\t\t\n\t\tfrom manipulators.run import all_functions\n\t\tinput_file = open(self.input_path, 'rb')\n\t\tinput_csv = csv.reader(input_file, delimiter=',')\n\t\toutput_file = open(self.output_path, 'wb')\n\t\toutput_csv = csv.writer(output_file, delimiter=',')\n\t\tall_functions(input_csv, output_csv)", "def test_function_runs(self):\n\t\tanalyse_text(self.filename)", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def run_functions(self):\n for function in self.functions:\n try:\n function()\n except Exception as err:\n logger.exception(\n f\"[red]Failed running and collecting data for function: {function.__name__}[/red]\"\n )\n logger.error(traceback.format_exc())\n logger.error(f\"[red]{err}[/red]\")\n logger.error(\"Continuing..\")", "def _run_callback(dir, ejobs):\n\n args = (join(dir, 'callback'), join(dir, 'config'))\n args += tuple(join(ejobs, f) for f in _FILES_TO_SEND)\n\n _logger.info('Homework evaluated; sending results')\n _logger.debug('calling %s', args)\n\n try:\n env = _env_with_python_module_search_path()\n check_call(args=args, env=env)\n except:\n _logger.error('Sending results failed')\n raise", "def test_test_directory_function_call(self):\n calls = []\n\n def callee(calls):\n \"\"\"Test function for counting calls.\"\"\"\n calls.append(1)\n\n self.logger.info(\"STEP: Initialize the workspace.\")\n with Workspace(Mock()) as workspace:\n self.workspace = workspace\n\n self.logger.info(\n \"STEP: Enter a test directory with a method call registered.\"\n )\n with workspace.test_directory(\"dir1\", callee, calls):\n self.logger.info(\"STEP: Verify that the method was called.\")\n self.assertEqual(len(calls), 1)\n\n self.logger.info(\n \"STEP: Enter a test directory with the same identifer and a \"\n \"method call registered.\"\n )\n with workspace.test_directory(\"dir1\", callee, calls):\n self.logger.info(\"STEP: Verify that the method was not called.\")\n self.assertEqual(len(calls), 1)", "def test_parse_function_pass(self):\n def function1():\n pass\n steps = test_parser.parse_function_steps(function1)\n assert steps == []\n\n def function2():\n\n pass\n steps = test_parser.parse_function_steps(function2)\n assert steps == []\n\n def function3():\n print('foo')\n pass\n steps = test_parser.parse_function_steps(function3)\n assert len(steps) == 2\n assert steps[0]['function_name'] == 'print'\n assert steps[1]['code'] == 'pass'", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n 
testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def run_one_test(expected, func):\n answer = func(expected)\n if answer == expected:\n print(\".\")\n print(\"ok\")\n else:\n print(\"F\")\n print(f\"error: expected {expected}, got {answer}\")", "def run(self):\n\n pwd = self.chdir()\n if pwd is None: return -1\n res = mkstuff.run_cmd(self.bindir + '/' + self.func + ' ' + self.args)\n os.chdir(pwd)\n return res", "def run_test(standart_app, testing_app, *scenario_files):\n passed = True\n\n for scenario_file in scenario_files:\n print 'Run scenario: ', scenario_file\n\n (standart_output, standart_exit_code) = _run_scenario(standart_app, scenario_file)\n if standart_exit_code != 0:\n print 'Standart application finish scenario with code: ', standart_exit_code\n passed = False\n continue\n\n (testing_output, testing_exit_code) = _run_scenario(testing_app, scenario_files)\n if testing_exit_code != 0:\n print 'Testing application finish scenario with code: ', scenario_file\n passed = False\n continue\n\n pass_scenario = _compare_output(standart_output, testing_output)\n\n print 'Pass' if pass_scenario else 'Failed'\n\n if not pass_scenario:\n passed = False\n\n return passed", "async def perform_with_expected_code(steps, func, *agrs, expected_code=0):\n from indy.error import IndyError\n from libraries.result import Status\n try:\n await func(*agrs)\n steps.get_last_step().set_message(\"Can execute without exception.\")\n steps.get_last_step().set_status(Status.FAILED)\n return None\n except IndyError as E:\n if E.error_code == expected_code:\n steps.get_last_step().set_status(Status.PASSED)\n return None\n else:\n print(\"Indy error\" + str(E))\n steps.get_last_step().set_message(str(E))\n return E\n except Exception as Ex:\n print(\"Exception\" + str(Ex))\n return Ex", "def main():\n diagnostics = setup_logging_and_errors()\n args = parse_arguments()\n diagnostics[\"logger\"].setLevel(getattr(logging, args.log_level))\n function = import_function(args.function)\n function(args)\n exit_if_error_handler_fired(diagnostics[\"error_handler\"])", "def run(funcs, *args, **kwargs):\n argv = kwargs.pop('argv', None)\n if argv is None:\n argv = sys.argv[1:]\n parser = _create_parser(funcs, *args, **kwargs)\n with _colorama_text():\n args = parser.parse_args(argv)\n # Workaround for http://bugs.python.org/issue9253#msg186387\n if not hasattr(args, '_func'):\n parser.error('too few arguments')\n return _call_function(parser, args._func, args)", "def run(self, fnames):\n logging.info(\"Starting\")\n for fname in fnames:\n linted = self.run_pylint(fname=fname)\n if linted:\n custom_ok, override_standard = self.check_custom_rules()\n override = custom_ok and override_standard\n success = self.check_no_silent_crash(override=override)\n if success:\n self.eval_results(custom_ok, override)\n exit_code = self.report_results()\n if not self.keep_results:\n self.clean_up()\n sys.exit(exit_code)", "def main():\n print(\"It works!!! 
;-)\")\n ###TODO### do something with the various methods/functions of this file", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def run(input_string):\n funclist =[query_is_empty,\n parentheses_are_uneven,\n operators_with_no_words_in_between,\n operator_following_opening_parenthesis_or_before_closing_parenthesis,\n quotation_marks_are_uneven,\n operators_within_exact_phrase,\n distance_must_be_between_1_and_999]\n errorcount = 0\n errorlist = []\n for func in funclist:\n if func(input_string) is False:\n errorcount += 1\n errorlist.append(\"Error: {}\".format(func.__name__))\n if errorcount != 0:\n return \"{} Errors found.\".format(errorcount), errorlist\n else:\n return True, []", "def main(tests, sources, fail_fast, config=None):\n try:\n smokr.run_tests(tests.split(','), sources.split(','),\n fail_fast, config)\n return 0\n except AssertionError:\n sys.exit(1)", "def runAction(plugin_dir, module_name, function_name, function_args, request, client_info):\n\n \n\n # We will load plugins every time this function is called. It is maybe a little\n # inefficient but it means we don't have to restart when new plugins are added.\n plugins = importPlugins(plugin_dir)\n\n # Find the corresponding module object\n if module_name not in plugins:\n print \"Module {} is not in the plugin list\".format(module_name)\n return False\n\n module_obj = plugins[module_name]\n\n # And the function, if it's present (and is a function)\n if function_name not in module_obj.__dict__:\n print \"Function {} not in module function list\".format(function_name)\n return False\n\n function = module_obj.__dict__[function_name]\n\n if type(function) != types.FunctionType:\n print \"Function {} exists but is not a function! 
({})\".format(function_name, type(function))\n return False\n \n # Parse the args into a list, possibly with param substitution happening\n function_args = _parseArgs(function_args, client_info)\n\n response = function(function_args, request=request, client_info=client_info)\n\n return response", "def performFunctionalize(args, modName, modSearch=\"__main__\", preArgs=(), postArgs=()):\n funcsList = args.list\n \n mod = sys.modules[modName]\n if(funcsList):\n funcs = _getModFunctions(modName, modSearch)\n if('*' in funcsList):\n funcsList = funcsList.replace('*', '')\n search = True\n else:\n search = False\n for f in funcs:\n if(funcsList == 'all' or (search and funcsList in f.__name__) or (not search and funcsList == f.__name__)):\n print('============================================================================================')\n _printHelp(mod, f.__name__)\n\n return\n\n \n #\n # Run the function as a command\n #\n if(args.func):\n if(not hasattr(mod, args.func)):\n print('No %s function found' % args.func)\n return\n \n func = args.func\n rfunc = getattr(mod, func)\n \n # Get any args they want used\n fargs = None\n if(args.args):\n fargs = [_parseValue(a) for a in args.args]\n \n # Deal with kwargs\n kwargs = dict()\n if(args.kwargs):\n for kw in args.kwargs:\n k, w = kw.split('=', 1)\n kwargs[k] = _parseValue(w)\n \n # Print out the docs about the function\n if(args.helpme):\n _printHelp(mod, func)\n return\n \n try:\n # Build arguments to send them\n theArgs = list()\n if(preArgs):\n theArgs += list(preArgs)\n if(fargs):\n theArgs += list(fargs)\n if(postArgs):\n theArgs += list(postArgs)\n \n # Call the function, if no args make special call (couldn't figure out another way)\n if(theArgs and kwargs):\n res = rfunc(*theArgs, **kwargs)\n elif(theArgs and not kwargs):\n res = rfunc(*theArgs)\n elif(not theArgs and kwargs):\n res = rfunc(**kwargs)\n else:\n res = rfunc()\n \n # Print results\n if(args.printResult == 'str'):\n print(res)\n elif(args.printResult == 'json'):\n print(_jsonPretty(res))\n except Exception as e:\n t = \", \".join(theArgs) + \", \" if theArgs else \"\"\n t += \", \".join([\"{}={}\".format(k, v) for k, v in kwargs.iteritems()])\n print \"Exception when calling {}({})\".format(args.func, t)\n print e\n _printHelp(mod, func)\n traceback.print_exc()\n else:\n print('Call with \"-h\" for help')\n return", "def test_main(self):\n results = main(0.1, files)\n # 1\n self.assertEqual(results, \"All Done Successfully\")\n results = main(0.1, get_files_bad_file_path())\n # 2\n self.assertIn(\"skipping to next\", results)\n results = main(0.1, get_files_bad_type())\n # 3\n self.assertIn(\"skipping to next\", results)\n results = main(0.1, get_files_bad_name_table())\n # 4\n self.assertIn(\"closing app. . 
.\", results)", "def test_all(self, func):\n passes = 0\n fails = []\n start = time.time()\n futures = {}\n # open an executor\n with getattr(concurrent.futures, self.executor)(max_workers=self.workers) as exec:\n # walk through datasets\n for pdir, sdir, files in os.walk(self.DATA_DIR):\n for file in files:\n # if the file needs processing, submit it into the queue\n filepath = osp.join(pdir, file)\n if self.file_should_be_processed(filepath):\n future = exec.submit(func, filepath)\n futures[future] = filepath\n\n # return results\n for test_num, future in enumerate(concurrent.futures.as_completed(futures)):\n stuff_to_print = [test_num, future.result()]\n if future.result() == 'Success':\n passes += 1\n if self.print_success_path:\n stuff_to_print.append(futures[future])\n else:\n fails += [futures[future]]\n print(*stuff_to_print)\n\n end = time.time() - start\n print('Processing of {} files took {:3.1f}s ({:3.2f}s/item). {} passed; {} failed.'.format(test_num, end, end/test_num, passes, len(fails)))\n if len(fails) > 0:\n pprint.pprint(\"Failures: {}\".format(fails))\n if self.write_failures_to_file:\n with open('failures_{}.txt'.format(osp.basename(self.DATA_DIR)), mode='w') as f:\n for file in fails:\n f.write(file + '\\n')\n print(\"Failures written to file\")", "def _Run(self, dir_exists):\n with patch(os.path, 'isdir', return_value=dir_exists):\n self.RunStage()", "def run(self):\n for function in self.functions:\n # Get all args and their defaults from the function definition\n argspec = inspect.getargspec(function)\n defaults = list(argspec.defaults) if argspec.defaults else []\n num_args = len(argspec.args)\n num_defaults = len(defaults)\n num_nodefaults = num_args - num_defaults\n # Match the function's args with those passed in the ezmake command\n args = {}\n arg_error = False\n for i, arg in enumerate(argspec.args):\n if self.kwargs.get(arg) is not None:\n args[arg] = self.kwargs[arg]\n elif self.flags.get(arg):\n args[arg] = self.flags[arg]\n elif i >= num_nodefaults:\n args[arg] = defaults[i - num_nodefaults]\n else:\n arg_error = True\n # If args are missing, rather than rewrite the TypeError logic\n # just call the function, knowing that it will raise a TypeError\n if arg_error:\n function(**args)\n # Handle extra arguments\n extra_args = []\n extra_kwargs = {}\n if argspec.varargs:\n extra_args = [a for a in self.args if a not in argspec.args]\n for f, _ in self.flags.items():\n extra_args.append(f'-{f}')\n if argspec.keywords:\n extra_kwargs = {k: v for k, v in self.kwargs.items()\n if k not in argspec.args}\n # Run function\n function(*args.values(), *extra_args, **extra_kwargs)", "def run_tests(self, cov, functionsToRun): # pragma: nested\n print(\"runed cases\")\n for context in functionsToRun:\n #print(context)\n info = context.split(\".\")\n suite_name =info[0]\n #print(suite_name)\n className = info[1]\n caseName = info[2]\n cov.start()\n suite = import_local_file(suite_name)\n #print(dir(suite))\n try:\n # Call all functions in this module\n for name in dir(suite):\n variable = getattr(suite, name)\n #print(\"variable.__name__\")\n #print(variable.__name__)\n if inspect.isclass(variable) and variable.__name__== className:\n obj = variable()\n \n memberNames = inspect.getmembers(variable,inspect.isfunction)\n \n for member in memberNames:\n if member[0].startswith('test_') and member[0] == caseName:\n \n print(context)\n getattr(obj, member[0])()\n #if inspect.isfunction(variable):\n # variable()\n finally:\n cov.stop()", "def main(path):\n directory = []\n if 
os.path.isdir(path):\n code_writer = cw.CodeWriter(os.path.join(path, os.path.basename(path))+\".asm\")\n directory = glob.iglob(os.path.join(path, \"*.vm\"))\n else:\n file_name = path[:-3]\n code_writer = cw.CodeWriter(file_name + \".asm\")\n directory.append(path)\n\n for file in directory:\n # removing the file extension and send it to the setFileName\n f = os.path.basename(file)[:-3]\n code_writer.setFileName(f)\n # creating a relevant parser object\n parser = ps.Parser(file)\n while parser.hasMoreCommands():\n cmd = parser.commandType()\n if cmd == gc.C_PUSH or cmd == gc.C_POP:\n code_writer.writePushPop(cmd, parser.arg1(), parser.arg2())\n if cmd == gc.C_ARITHMETIC:\n code_writer.writeArithmetic(parser.arg1())\n parser.advance()\n code_writer.close()", "def run_function(function_id):\n\n language = sys.modules[__name__] # to be used by the getattr\n\n global funcs\n funcName = funcs[function_id][1] # get the function name from the global dictionary funcs\n getattr(language, funcName)() #execute the chosen function" ]
[ "0.649745", "0.61049306", "0.601415", "0.5996975", "0.5956264", "0.59434325", "0.5774836", "0.5742658", "0.5614913", "0.5594323", "0.55386126", "0.5527245", "0.54437554", "0.5434158", "0.53976876", "0.5385297", "0.5377416", "0.5375581", "0.5370017", "0.53367186", "0.53345543", "0.533228", "0.5332003", "0.53218234", "0.5314452", "0.5311673", "0.5279047", "0.5243225", "0.52191246", "0.5217832" ]
0.726597
0
Method to generate dataset_name for tests. Will either use the name defined in the test configuration ("dataset_name"), or generate one using the Expectation name and index. In cases where the dataset is a list, then an additional index will be used.
def generate_dataset_name_from_expectation_name( dataset: dict, expectation_type: str, index: int, sub_index: int | None = None ) -> str: dataset_name: str if not sub_index: dataset_name = dataset.get( "dataset_name", f"{expectation_type}_dataset_{index}" ) else: dataset_name = dataset.get( "dataset_name", f"{expectation_type}_dataset_{index}_{sub_index}" ) dataset_name = _check_if_valid_dataset_name(dataset_name) return dataset_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dataset_name(self):\n raise NotImplementedError", "def construct_dataset_name(self, *args):\n raise NotImplementedError", "def dataset_name(self):\n return self.dataset.name", "def get_dataset_name(self):\n return self.dataset_name", "def dataset_name(self):\n return self._dataset_name", "def unique_dataset_name(prefix: str = \"selenium-dataset\"):\n return f'{prefix}-{uuid.uuid4().hex[:8]}'", "def make_dataset(dataset_name):\n return {\n\n 'duc': DUCDataset(),\n\n 'icsi-asr': ICSIASRDataset(),\n 'icsi-ht': ICSIHumanTranscriptDataset(),\n\n 'inspec-train': InspectTrainingDataset(),\n 'inspec-val': InspectValidationDataset(),\n 'inspec-test': InspectTestDataset(),\n\n 'nus': NUSDataset()\n\n }[dataset_name]", "def _dataset_name(self):\n return f'Libri{self.task}Mix'", "def get_dataset_name():\n return os.getenv(\"AICROWD_DATASET_NAME\", \"cars3d\")", "def get_data_name(self, idx):\n name = None\n if type(idx) is int:\n n = self.data_count()\n assert 0 <= idx <= n - 1, \"Bad data index\"\n name = self.data[idx].name\n return(name)", "def get_dataset_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_dataset_names')", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return self._dataset_id", "def create_1st_dataset_rtacltest1(driver, dataset_name):\n assert wait_on_element(driver, 5, '//tr[contains(.,\"tank\")]//mat-icon[text()=\"more_vert\"]', 'clickable')\n driver.find_element_by_xpath('//tr[contains(.,\"tank\")]//mat-icon[text()=\"more_vert\"]').click()\n assert wait_on_element(driver, 4, '//button[normalize-space(text())=\"Add Dataset\"]', 'clickable')\n driver.find_element_by_xpath('//button[normalize-space(text())=\"Add Dataset\"]').click() \n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()\n assert wait_on_element(driver, 5, '//button[@ix-auto=\"button__SAVE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()", "def getDatasetName(sitemover, datasetDict, lfn, pdsname):\n # (dsname_report is the same as dsname but might contain _subNNN parts)\n\n # get the dataset name from the dictionary\n if datasetDict:\n try:\n dsname = datasetDict[lfn]\n except Exception, e:\n tolog(\"!!WARNING!!2999!! 
Could not get dsname from datasetDict for file %s: %s, %s (using default %s)\" % (lfn, e, str(datasetDict), pdsname))\n dsname = pdsname\n else:\n dsname = pdsname\n\n # save the original dsname for the tracing report\n dsname_report = dsname\n\n # remove any _subNNN parts from the dataset name (from now on dsname will only be used to create SE destination paths)\n dsname = sitemover.removeSubFromDatasetName(dsname)\n\n tolog(\"File %s will go to dataset %s\" % (lfn, dsname))\n\n return dsname, dsname_report", "def dataset_records_index(dataset_id: str) -> str:\n return DATASETS_RECORDS_INDEX_NAME.format(dataset_id)", "def __get_dataset_name(self):\n d = gdal.Open(self.fname)\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n\n if 'data_var' in md:\n return md['data_var']\n else:\n fnames = d.GetFileList()\n if len(fnames) > 2:\n d = gdal.Open(fnames[1])\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n if 'data_var' in md:\n return md['data_var']\n else:\n return 'data'\n else:\n return 'data'", "def generate_dataset(self):\n sets = {\n \"train\": 10,\n \"test\": 5,\n }\n\n fields = {\n \"strings_list\": lambda x: str_to_ascii(self.generate_string_list(x)),\n \"data\": lambda x: np.random.randint(0, 10, (x, 10)),\n \"number\": lambda x: np.array(range(x)),\n \"field_with_a_long_name_for_printing\": lambda x: np.array(range(x)),\n }\n\n lists = {\n \"list_dummy_data\": np.array(range(10)),\n \"list_dummy_number\": np.array(range(10), dtype=np.uint8),\n }\n\n dataset = {}\n data_fields = {}\n for set_name in sets:\n dataset[set_name] = self.populate_set(sets[set_name], fields, lists)\n data_fields[set_name] = sorted(dataset[set_name].keys())\n\n return dataset, data_fields", "def create_dataset(project, dataset_name):\n dataset = dataset_name\n get_dataset = project.datasets.get(dataset_name=dataset)\n project.datasets.create(dataset_name=dataset_name)\n \n return get_dataset", "def _validate_dataset_name(self, dataset_name: Optional[str]) -> str:\n if dataset_name is None:\n if self.num_datasets > 1:\n raise ValueError(\"`dataset_name` is required if there are \"\n \"more than one datasets.\")\n dataset_name = next(iter(self._datasets))\n if dataset_name not in self._datasets:\n raise ValueError(\"Dataset not found: \", dataset_name)\n return dataset_name", "def get_dataset_url(self, dataset: Dict) -> str:\n return f\"{self.site_url}/dataset/{dataset['name']}\"", "def test_named_dataset(self):\n # NOTE this works despite the fact that child_spec has no data type but the builder has a data type because\n # get_subspec acts on the name and not necessarily the data type\n child_spec = DatasetSpec(doc='A test dataset specification', name='my_dataset')\n parent_spec = GroupSpec(doc='Something to hold a Bar', name='my_group', datasets=[child_spec])\n sub_builder = DatasetBuilder(\n name='my_dataset',\n data=[],\n attributes={\n 'data_type': 'FooData',\n 'namespace': CORE_NAMESPACE,\n 'object_id': -1\n }\n )\n GroupBuilder(name='my_group', datasets={'my_dataset': sub_builder}) # add sub_builder as a child to my_group\n result = self.type_map.get_subspec(parent_spec, sub_builder)\n self.assertIs(result, child_spec)", "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\"Dataset {0} is NOT supported\".format(dataset_name))\n\n 
# Performing pre-processing specifically for images datasets.\n if data_details['data type'] == 'image':\n x_train = _pre_process_images(x_train, data_details)\n x_test = _pre_process_images(x_test, data_details)\n\n return x_train, y_train, x_test, y_test", "def dataset_part_filename(dataset_part, num_data):\n if num_data >= 0:\n return '{}_data_{}.npz'.format(dataset_part, str(num_data))\n return '{}_data.npz'.format(dataset_part)", "def get_data_name(data_func, data_type, npoints, y_error_sigma, x_error_sigma):\n data_name = '{}_{}'.format(data_func.__name__, data_type)\n if data_func.__name__ != 'get_image':\n data_name += 'funcs'\n data_name += '_{}pts_{}ye'.format(npoints, y_error_sigma)\n if x_error_sigma is not None:\n data_name += '_{}xe'.format(x_error_sigma)\n return data_name.replace('.', '_')", "def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])" ]
[ "0.7306698", "0.7130462", "0.6957245", "0.69177663", "0.679696", "0.67210925", "0.63921684", "0.63351583", "0.6311843", "0.6262449", "0.60664064", "0.60308975", "0.60308975", "0.60308975", "0.5981057", "0.59641856", "0.5901834", "0.5868013", "0.5859498", "0.58271116", "0.58125246", "0.5789813", "0.57727224", "0.575592", "0.5746417", "0.5726159", "0.5718679", "0.5710243", "0.56982446", "0.56982446" ]
0.804055
0
Check that dataset_name (ie. table name) is valid before adding data to table.
def _check_if_valid_dataset_name(dataset_name: str) -> str: if not re.match(r"^[A-Za-z0-9_]+$", dataset_name): raise ExecutionEngineError( f"dataset_name: {dataset_name} is not valid, because it contains non-alphanumeric and _ characters." f"Please check your configuration." ) if len(dataset_name) >= MAX_TABLE_NAME_LENGTH: # starting from the end, so that we always get the index and sub_index new_dataset_name = dataset_name[-MAX_TABLE_NAME_LENGTH:] logger.info( f"dataset_name: '{dataset_name}' was truncated to '{new_dataset_name}' to keep within length limits." ) dataset_name = new_dataset_name while not re.match(r"^[A-Za-z]+$", dataset_name[0]): dataset_name = dataset_name[1:] return dataset_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_dataset_name(self, dataset_name: Optional[str]) -> str:\n if dataset_name is None:\n if self.num_datasets > 1:\n raise ValueError(\"`dataset_name` is required if there are \"\n \"more than one datasets.\")\n dataset_name = next(iter(self._datasets))\n if dataset_name not in self._datasets:\n raise ValueError(\"Dataset not found: \", dataset_name)\n return dataset_name", "def validate_dataset_name(reference_dataset_name):\n\n\tfield_name = \"reference_dataset_name\"\n\treference_dataset_name_errors = []\n\n\ttry:\n\t\tassert(re.match(character_regex, reference_dataset_name))\n\texcept AssertionError:\n\t\treference_dataset_name_errors.append(get_regex_mismatch_error_text(\n\t\t\t\t\t\t\t\t\t\t\t\tfield_name, character_regex))\n\n\ttry:\n\t\tassert(len(reference_dataset_name) <= field_length_limit)\n\texcept AssertionError:\n\t\treference_dataset_name_errors.append(get_field_length_error_text(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfield_name))\n\n\treturn reference_dataset_name_errors", "def validate_dataset(self):\n pass", "def isValidDataTypeName(name: unicode) -> bool:\n ...", "def validate_dataset_string(self, dataset):\r\n if dataset:\r\n if '/' not in dataset:\r\n raise ValueError('Dataset must be specified in the form of '\r\n '\\'{username}/{dataset-slug}\\'')\r\n\r\n split = dataset.split('/')\r\n if not split[0] or not split[1]:\r\n raise ValueError('Invalid dataset specification ' + dataset)", "def testBadNames(self):\n bad_dataset = self.badstr\n bad_table = self.badstr * 2\n # Ignore access to protected members\n # pylint: disable=W0212\n\n self.assertRaises(DOLAPI._DOLAPIError,\n self.auth.table,\n bad_dataset,\n self.table)\n\n self.assertRaises(DOLAPI._DOLAPIError,\n self.auth.table,\n self.dataset,\n bad_table)\n\n self.assertRaises(DOLAPI._DOLAPIError,\n self.auth.table,\n bad_dataset,\n bad_table)", "def __init__(self,data_name):\n\t\tif data_name.lower().strip() not in DATASETS.keys():\n\t\t\tprint(f\"{data_name} isn't a valid data name! 
One of \"+\", \".join(DATASETS.keys()))\n\t\t\traise Exception\n\n\t\tself.data_name = data_name.lower().strip()", "def _check_name(self):\n\t\tpass", "def on_the_add_dataset_page_input_the_dataset_name_my_acl_dataset(driver, dataset_name):\n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()", "def validate_name(name: str) -> None:\n\n # Disallow empty.\n if not name:\n raise CleanError('Feature set name cannot be empty.')\n\n # Require starting with a letter.\n if not name[0].isalpha():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - names must start with a letter.'\n )\n\n # Require only letters, numbers, and underscores.\n if not name.replace('_', '').isalnum():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only letters, numbers, and underscores are allowed.'\n )\n\n # Require all lowercase.\n if not name.islower():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only lowercase letters are allowed.'\n )\n\n # Disallow leading, trailing, or consecutive underscores.\n # (these will result in a '' in the split results which evals to False)\n if not all(name.split('_')):\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - leading, trailing, and consecutive underscores are'\n ' not allowed.'\n )", "def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))", "def test_slice_name(self):\n self.insert()\n data = self.tbl['name']\n assert self.check(self.idata[:, [0, 1]], data)", "def test_table_name(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n MetadataTemplate._table_name(self.study)", "def is_valid(self, dataset):\n pass", "def _ensure_dataset_None(dataset_name):\n if dataset_name is not None:\n raise MutantError(\"Don't try to provide a dataset_name on a single mutant (rather than the multi-dataset subclass)!\")\n # MAYBE-TODO this could be accomplished with a decorator instead, right?", "def check_schema_name(name: str):\n if not is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")", "def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )", "def test_invalid_columns():\n train = ((\"Lorem ipsum dolor sit amet\", 3),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train, named_columns=['some_random_col'])", "def _validate_data(df):\n if 
constants.IMAGE_URI_KEY not in df.columns:\n # or label_col not in df.columns:\n raise AttributeError(\n 'DataFrame must contain image_uri column {}.')\n if constants.LABEL_KEY not in df.columns:\n raise AttributeError(\n 'DataFrame must contain label column.')\n if constants.SPLIT_KEY not in df.columns:\n raise AttributeError(\n 'DataFrame must contain split column.')\n if list(df.columns) != constants.IMAGE_CSV_COLUMNS:\n raise AttributeError(\n 'DataFrame column order must be {}'.format(\n constants.IMAGE_CSV_COLUMNS))", "def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def test_white_space(self):\n with self.assertRaises(ValidationError):\n db_name_validator('http log')", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def _check_dataset_name_return_data(self, dataset_name, strict=False):\n if strict:\n _check_dataset_presence(self, dataset_name)\n elif dataset_name is None:\n raise MutantError(\"Cannot use None as dataset name!\")\n return self.by_dataset[dataset_name]", "def test_missing_dataset_name():\n svl_string = \"\"\"\n DATASETS\n \"bigfoot.csv\"\n BAR bigfoot\n X classification\n Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def _assert_valid_name(name, container):\n container.file.name_validation(container.directory, name)", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def name(self, the_name):\n if (len(the_name) < TempDataset.MIN_LEN\n or len(the_name) > TempDataset.MAX_LEN):\n raise ValueError\n self._name = the_name" ]
[ "0.7201624", "0.696584", "0.6844129", "0.67277294", "0.6414105", "0.632463", "0.62681", "0.6216825", "0.6144954", "0.61314577", "0.6130932", "0.6114927", "0.60404384", "0.6018948", "0.600851", "0.5978239", "0.5962034", "0.58974594", "0.58849335", "0.58839834", "0.5843434", "0.5840201", "0.5784512", "0.57510614", "0.5735127", "0.5729574", "0.5729549", "0.572822", "0.572822", "0.57151896" ]
0.7651157
0
Copied get_redshift_connection_url func from tests/test_utils.py
def _get_redshift_connection_string() -> str: host = os.environ.get("REDSHIFT_HOST") # noqa: TID251 port = os.environ.get("REDSHIFT_PORT") # noqa: TID251 user = os.environ.get("REDSHIFT_USERNAME") # noqa: TID251 pswd = os.environ.get("REDSHIFT_PASSWORD") # noqa: TID251 db = os.environ.get("REDSHIFT_DATABASE") # noqa: TID251 ssl = os.environ.get("REDSHIFT_SSLMODE") # noqa: TID251 if not host: raise ValueError( "Environment Variable REDSHIFT_HOST is required to run integration tests against Redshift" ) if not port: raise ValueError( "Environment Variable REDSHIFT_PORT is required to run integration tests against Redshift" ) if not user: raise ValueError( "Environment Variable REDSHIFT_USERNAME is required to run integration tests against Redshift" ) if not pswd: raise ValueError( "Environment Variable REDSHIFT_PASSWORD is required to run integration tests against Redshift" ) if not db: raise ValueError( "Environment Variable REDSHIFT_DATABASE is required to run integration tests against Redshift" ) if not ssl: raise ValueError( "Environment Variable REDSHIFT_SSLMODE is required to run integration tests against Redshift" ) url = f"redshift+psycopg2://{user}:{pswd}@{host}:{port}/{db}?sslmode={ssl}" return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_redshift_connection(config: Dict, logger):\n conn_str = \"host={host} dbname={db} user={user} password={paswd} port={port}\".format(\n host=config['CLUSTER']['HOST'],\n db=config['CLUSTER']['DB_NAME'],\n user=config['CLUSTER']['DB_USER'],\n paswd=config['CLUSTER']['DB_PASSWORD'],\n port=config['CLUSTER']['DB_PORT']\n )\n \n conn = psycopg2.connect(conn_str)\n \n conn.autocommit = True\n \n logger.warn('Redshift connection has been successfully established.')\n \n return conn", "def get_connection_string(src_or_dest):\n if src_or_dest == \"src\":\n try:\n SQL_SERVER = os.environ[\"CROP_SRC_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_SRC_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_SRC_SQL_SERVER, CROP_SRC_SQL_PASS\"\n )\n return None\n elif src_or_dest == \"dest\":\n try:\n SQL_SERVER = os.environ[\"CROP_DEST_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_DEST_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_DEST_SQL_SERVER, CROP_DEST_SQL_PASS\"\n )\n return None\n else:\n print(\"Error: need to specify 'src' or 'dest'\")\n SQL_USERNAME = os.environ[\"CROP_SQL_USERNAME\"]\n SQL_USER = f\"{SQL_USERNAME}@{SQL_SERVER}\"\n SQL_HOST = f\"{SQL_SERVER}.postgres.database.azure.com\"\n SQL_CONNECTION_STRING = \"%s://%s:%s@%s:%s\" % (\n SQL_ENGINE,\n SQL_USER,\n parse.quote(SQL_PASSWORD),\n SQL_HOST,\n SQL_PORT,\n )\n return SQL_CONNECTION_STRING", "def command_check_redshift_connection():\n # trying to get the keys from dwh.cfg file\n try: \n config = configparser.ConfigParser()\n config.read('aws-dwh.cfg')\n db_connection_string = config['DWH']['DWH_DB_CONNECTION_STRING']\n except Exception as e:\n print(\"Encountered following exception while trying to retrieve DWH_DB_CONNECTION_STRING from dwh.cfg file\")\n print(f\"{e}\")\n sys.exit(1)\n\n # now calling STS service with the credentials retrieved for verification\n if sql_helper.check_redshift_connection(db_connection_string):\n print(f\"Redshift connection to {db_connection_string} successful\")\n else:\n print(f\"Redshift connection to {db_connection_string} failed\")", "def test_connection():\n import psycopg2\n\n dwh_config = configparser.ConfigParser()\n dwh_config.read_file(open('./aws.cfg'))\n\n try:\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*dwh_config['CLUSTER'].values()))\n _ = conn.cursor()\n print('Connected to AWS Redshift cluster')\n conn.close()\n except Exception as e:\n print('Error connecting to AWS Redshift cluster:', e)", "def test_get_base_url():\n eq_(get_base_url(\"http://foo.com/bar/baz\"), \"http://foo.com\")\n eq_(get_base_url(\"https://foo.com:443/foo/bar\"), \"https://foo.com:443\")", "def test_url():\n return TEST_DATABASE_URL", "def test_get_conn_uri_non_existent_key(self):\n conn_id = \"test_mysql\"\n param = {\n 'Name': '/airflow/connections/test_postgres',\n 'Type': 'String',\n 'Value': 'postgresql://airflow:airflow@host:5432/airflow',\n }\n\n ssm_backend = SystemsManagerParameterStoreBackend()\n ssm_backend.client.put_parameter(**param)\n\n assert ssm_backend.get_conn_uri(conn_id=conn_id) is None\n assert [] == ssm_backend.get_connections(conn_id=conn_id)", "def GetURL(self, rel_url):\n return 'http://localhost:%d/%s' % (self.port, rel_url)", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def get_hostname(config):\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n 
aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n cluster_props = redshift.describe_clusters(\n ClusterIdentifier=CLUSTER_IDENTIFIER)['Clusters'][0]\n endpoint = cluster_props[\"Endpoint\"][\"Address\"]\n return endpoint", "def test_get_url_base_returns_url_base(self):\n # Arrange / Act\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).get_url_base()\n # Assert\n self.assertEqual(return_value, SERVER_URI)", "def _get_url(context, actual, attribute_name, port):\n return actual or _get_api_url(context, attribute_name, port)", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def test_get_host(self):\n pass", "def response_kafka_connection_url(self) -> str:\n return self._response_kafka_connection_url", "def _get_athena_connection_string(db_name_env_var: str = \"ATHENA_DB_NAME\") -> str:\n ATHENA_DB_NAME: Optional[str] = os.getenv(db_name_env_var)\n ATHENA_STAGING_S3: Optional[str] = os.getenv(\"ATHENA_STAGING_S3\")\n\n if not ATHENA_DB_NAME:\n raise ValueError(\n f\"Environment Variable {db_name_env_var} is required to run integration tests against AWS Athena\"\n )\n\n if not ATHENA_STAGING_S3:\n raise ValueError(\n \"Environment Variable ATHENA_STAGING_S3 is required to run integration tests against AWS Athena\"\n )\n\n url = f\"awsathena+rest://@athena.us-east-1.amazonaws.com/{ATHENA_DB_NAME}?s3_staging_dir={ATHENA_STAGING_S3}\"\n\n return url", "def get_db_connection_url():\n return os.environ[\"DATABASE_URL\"]", "def _get_connect_string(backend,\n user=\"openstack_citest\",\n passwd=\"openstack_citest\",\n database=\"openstack_citest\"):\n if backend == \"mysql\":\n backend = \"mysql+mysqldb\"\n elif backend == \"postgres\":\n backend = \"postgresql+psycopg2\"\n\n return (\"%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s\"\n % {'backend': backend, 'user': user, 'passwd': passwd,\n 'database': database})", "def test_get_connection(self: Any, mock_method: Any) -> None:\n extractor = SQLAlchemyExtractor()\n config_dict: Dict[str, Any] = {\n 'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',\n 'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;'\n }\n conf = ConfigFactory.from_dict(config_dict)\n extractor.init(Scoped.get_scoped_conf(conf=conf,\n scope=extractor.get_scope()))\n extractor._get_connection()\n mock_method.assert_called_with('TEST_CONNECTION', connect_args={})\n\n extractor = SQLAlchemyExtractor()\n config_dict = {\n 'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',\n 'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;',\n 'extractor.sqlalchemy.connect_args': {\"protocol\": \"https\"},\n }\n conf = ConfigFactory.from_dict(config_dict)\n extractor.init(Scoped.get_scoped_conf(conf=conf,\n scope=extractor.get_scope()))\n extractor._get_connection()\n mock_method.assert_called_with('TEST_CONNECTION', connect_args={\"protocol\": \"https\"})", "def get_connection(url):\n conn = psycopg2.connect(url)\n return conn", "def get_db_url_mysql(config):\n if 'DB_URL_TESTING' in config:\n return config['DB_URL_TESTING']\n\n return 'mysql+mysqlconnector://{}:{}@{}/{}' \\\n .format(config['DB_USER'],\n config['DB_PASS'],\n config['DB_HOST'],\n config['DB_NAME'])", "def test_baseurl(matrix):\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_public_baseurl()\n assert result == \"http://mock.fqdn:8008\"\n matrix.charm_config[\"enable-tls\"] = False\n matrix.external_port = 80\n result = 
matrix.get_public_baseurl()\n assert result == \"http://mock.fqdn\"\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_public_baseurl()\n assert result == \"https://mock.fqdn\"", "def get_connection(config, conn_cls=None):\n\n if conn_cls is None:\n conn_cls = Connection\n \n registry = config.registry\n\n uri = registry.settings.get(URI)\n greenlets = registry.settings.get(GREENLETS)\n\n if uri is None:\n raise ConfigurationError('There is no configured \"mongo.uri\"')\n\n # Spliting configs to get more than one uri\n if not isinstance(uri, list):\n uri = uri.splitlines()\n\n kargs = {\n 'use_greenlets': asbool(greenlets)\n }\n\n return conn_cls(uri, **kargs)", "def get_env_prefix(instrument):\n return \"crds://\"", "def _get_connection(rse, endpoint):\n\n key = \"connection:%s_%s\" % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Creating connection object\")\n result = None\n credentials = _get_credentials(rse, endpoint)\n if 'access_key' in credentials and credentials['access_key'] and \\\n 'secret_key' in credentials and credentials['secret_key'] and \\\n 'is_secure' in credentials and credentials['is_secure'] is not None:\n\n parsed = urlparse.urlparse(endpoint)\n hostname = parsed.netloc.partition(':')[0]\n port = parsed.netloc.partition(':')[2]\n\n result = boto.connect_s3(aws_access_key_id=credentials['access_key'],\n aws_secret_access_key=credentials['secret_key'],\n host=hostname,\n port=int(port),\n is_secure=credentials['is_secure'],\n calling_format=boto.s3.connection.OrdinaryCallingFormat())\n\n REGION.set(key, result)\n logging.debug(\"Created connection object\")\n else:\n raise exception.CannotAuthenticate(\"Either access_key, secret_key or is_secure is not defined for RSE %s endpoint %s\" % (rse, endpoint))\n except exception.RucioException as e:\n raise e\n except:\n raise exception.RucioException(\"Failed to get connection for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result", "def _get_snowflake_connection_string() -> str:\n sfUser = os.environ.get(\"SNOWFLAKE_USER\") # noqa: TID251\n sfPswd = os.environ.get(\"SNOWFLAKE_PW\") # noqa: TID251\n sfAccount = os.environ.get(\"SNOWFLAKE_ACCOUNT\") # noqa: TID251\n sfDatabase = os.environ.get(\"SNOWFLAKE_DATABASE\") # noqa: TID251\n sfSchema = os.environ.get(\"SNOWFLAKE_SCHEMA\") # noqa: TID251\n sfWarehouse = os.environ.get(\"SNOWFLAKE_WAREHOUSE\") # noqa: TID251\n sfRole = os.environ.get(\"SNOWFLAKE_ROLE\") or \"PUBLIC\" # noqa: TID251\n\n url = f\"snowflake://{sfUser}:{sfPswd}@{sfAccount}/{sfDatabase}/{sfSchema}?warehouse={sfWarehouse}&role={sfRole}\"\n\n return url", "def _getRemoteUrlTheOldWay(self):\n utool = getUtility(IURLTool)\n if self.remote_url:\n return utool() + '/' + self.remote_url\n else:\n return utool()", "def get_service_url():\n return get_config_handler().get_service_url()", "def get_url(self):\n return radon.cfg.protocol_cassandra + self.uuid", "def get_service_connection_string(service):\n service = service.upper()\n raw_host_port = os.environ['%s_PORT' % service]\n # Remove leading tcp:// or similar\n host_port = raw_host_port.split(\"://\")[1]\n return host_port" ]
[ "0.6826129", "0.6124717", "0.6010275", "0.5927962", "0.5850685", "0.5733345", "0.5726986", "0.5710287", "0.56841594", "0.56101626", "0.5597106", "0.55189204", "0.55136573", "0.5498949", "0.54822123", "0.5470598", "0.5457574", "0.5453781", "0.5428834", "0.54282737", "0.54261154", "0.53848", "0.53729427", "0.5363071", "0.53500235", "0.5320815", "0.52734905", "0.5245884", "0.5244926", "0.5244408" ]
0.7788936
0
Copied get_awsathena_connection_url and get_awsathena_db_name funcs from tests/test_utils.py
def _get_athena_connection_string(db_name_env_var: str = "ATHENA_DB_NAME") -> str: ATHENA_DB_NAME: Optional[str] = os.getenv(db_name_env_var) ATHENA_STAGING_S3: Optional[str] = os.getenv("ATHENA_STAGING_S3") if not ATHENA_DB_NAME: raise ValueError( f"Environment Variable {db_name_env_var} is required to run integration tests against AWS Athena" ) if not ATHENA_STAGING_S3: raise ValueError( "Environment Variable ATHENA_STAGING_S3 is required to run integration tests against AWS Athena" ) url = f"awsathena+rest://@athena.us-east-1.amazonaws.com/{ATHENA_DB_NAME}?s3_staging_dir={ATHENA_STAGING_S3}" return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get():\n assert sdb.get(\"sdb://salt/foo\") == \"sdb://salt/foo\"", "def test_url():\n return TEST_DATABASE_URL", "def test_db_connection(env_setup, env_table):\n test_string = DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": env_table})\\\n .test_connection()\n assert test_string is not None", "def test_run_athena_query(self):\n self.client.athena_client = MockAthenaClient()\n\n query_success, query_results = self.client.run_athena_query(\n query='SHOW DATABASES;'\n )\n\n assert_true(query_success)\n assert_equal(query_results['ResultSet']['Rows'], [{'Data': [{'test':'test'}]}])", "def test_connection_prefix_none_value(self, mock_get_secret):\n kwargs = {'connections_prefix': None}\n\n ssm_backend = SystemsManagerParameterStoreBackend(**kwargs)\n\n assert ssm_backend.get_conn_uri(\"test_mysql\") is None\n mock_get_secret.assert_not_called()", "def getDBSApi():\n if 'testbed' in dbs3_url:\n dbs3_url_reader = dbs3_url + '/dbs/int/global/DBSReader'\n else:\n dbs3_url_reader = dbs3_url + '/dbs/prod/global/DBSReader'\n\n from dbs.apis.dbsClient import DbsApi\n\n\n #this needs to come after /data/srv/wmagent/current/apps/wmagent/etc/profile.d/init.sh is sourced \n dbsApi = DbsApi(url = dbs3_url_reader)\n return dbsApi", "def test_db_connection():\n\n from cwf2neo.neo4j import Neo4j\n\n db = Neo4j()\n\n assert db.graph.database.name", "def setUp(self):\n self.a = backend.dbconnection.DBConnect()", "def test_analytics_synonyms(self):\n class Query:\n \"\"\" A class to execute analytics queries \"\"\"\n\n def __init__(self, server, username, password):\n self.restconn = RestConnection(server)\n\n def execute(self, query):\n return self.restconn.execute_statement_on_cbas(query, None)\n\n def get_synonyms(self):\n synonyms = set()\n\n for result in json.loads(self.execute(\"select * from Metadata.`Synonym`\"))['results']:\n synonym = result['Synonym']\n synonym_name = synonym['SynonymName']\n synonym_target = synonym['ObjectDataverseName'] + '.' 
+ synonym['ObjectName']\n synonym_dataverse = synonym['DataverseName']\n synonyms.add((synonym_name, synonym_target, synonym_dataverse))\n\n return synonyms\n\n def get_synonyms_count(self):\n return json.loads(self.execute(\"select count(*) as count from Metadata.`Synonym`;\"))['results'][0]['count']\n\n class Dataset:\n\n def __init__(self, name, bucket, clause=None):\n self.name, self.bucket, self.clause = name, bucket, clause\n\n def get_where_clause(self):\n return f\" WHERE {self.clause}\" if self.clause else \"\"\n\n class Synonym:\n\n def __init__(self, name, target):\n self.name, self.target = name, target\n\n class Dataverse:\n\n def __init__(self, name):\n self.name = name\n self.datasets = set()\n self.synonyms = set()\n\n def add_dataset(self, dataset):\n self.datasets.add(dataset)\n\n def add_synonym(self, synonym):\n self.synonyms.add(synonym)\n\n def next_dataset_name(self):\n return f\"dat_{len(self.datasets)}\"\n\n def next_synonym_name(self):\n return f\"syn_{len(self.synonyms)}\"\n\n class Analytics:\n\n def __init__(self, query):\n self.query, self.dataverses = query, set()\n\n def add_dataverse(self, dataverse):\n self.dataverses.add(dataverse)\n\n def next_dataverse_name(self):\n return f\"dtv_{len(self.dataverses)}\"\n\n def pick_target_for_synonym(self):\n choices = [f\"{dataverse.name}.{dataset.name}\" for dataverse in self.dataverses for dataset in dataverse.datasets]\n\n if choices:\n return choice(choices)\n\n return None\n\n def create(self):\n # Create daterverses and datasets\n for dataverse in self.dataverses:\n self.query.execute(f\"CREATE dataverse {dataverse.name}\")\n\n for dataset in dataverse.datasets:\n self.query.execute(f\"CREATE DATASET {dataverse.name}.{dataset.name} ON {dataset.bucket}{dataset.get_where_clause()}\")\n\n # Create synonyms\n for dataverse in self.dataverses:\n for synonym in dataverse.synonyms:\n self.query.execute(f\"CREATE analytics synonym {dataverse.name}.{synonym.name} FOR {synonym.target}\")\n\n def delete(self):\n for dataverse in self.dataverses:\n for dataset in dataverse.datasets:\n self.query.execute(f\"DROP DATASET {dataverse.name}.{dataset.name}\")\n\n for synonym in dataverse.synonyms:\n self.query.execute(f\"DROP analytics synonym {dataverse.name}.{synonym.name}\")\n\n self.query.execute(f\"DROP dataverse {dataverse.name}\")\n\n class AnalyticsTest:\n\n def __init__(self, backup, no_of_dataverses, no_of_datasets, no_of_synonyms, analytics_server):\n # The base class\n self.backup = backup\n\n # Test parameters\n self.no_of_dataverses, self.no_of_datasets, self.no_of_synonyms = no_of_dataverses, no_of_datasets, no_of_synonyms\n\n # The number of synonyms that get created\n self.no_of_synonyms_created = no_of_dataverses * no_of_synonyms\n\n # The object thats used to run queries on the server running analytics\n self.query = Query(analytics_server, analytics_server.rest_username, analytics_server.rest_password)\n\n # The object that represents our current model of analytics\n self.analytics = Analytics(self.query)\n\n def test_analytics(self):\n # Define the analytics model (i.e. 
which dataverses, datasets and synonyms are present)\n for i in range(self.no_of_dataverses):\n dataverse = Dataverse(self.analytics.next_dataverse_name())\n self.analytics.add_dataverse(dataverse)\n\n for j in range(self.no_of_datasets):\n dataset = Dataset(dataverse.next_dataset_name(), 'default')\n dataverse.add_dataset(dataset)\n\n for j in range(self.no_of_synonyms):\n synonym = Synonym(dataverse.next_synonym_name(), self.analytics.pick_target_for_synonym())\n dataverse.add_synonym(synonym)\n\n # Create dataverses, datasets and synonyms\n self.analytics.create()\n self.backup.assertEqual(self.query.get_synonyms_count(), self.no_of_synonyms_created)\n\n # Create a repository\n self.backup.backup_create()\n\n # Take a backup\n self.backup.backup_cluster()\n\n # Delete all analytics related stuff\n self.analytics.delete()\n self.backup.assertEqual(self.query.get_synonyms_count(), 0)\n\n # Perform a one off restore\n self.backup.backup_restore()\n synonyms = self.query.get_synonyms()\n\n # Check synonyms have been restored\n for dataverse in self.analytics.dataverses:\n for synonym in dataverse.synonyms:\n self.backup.assertIn((synonym.name, synonym.target, dataverse.name), synonyms)\n\n # The server that will be reprovisioned with analytics\n analytics_server = self.restore_cluster_host = self.servers[2]\n\n # Add a server and provision it with analytics\n self.add_server_with_custom_services(analytics_server, services=[\"cbas\"])\n\n # A little sleep for services to warmup\n self.assertTrue(RestConnection(analytics_server).wait_until_cbas_is_ready(100))\n\n # Run the analytics test\n AnalyticsTest(self, self.input.param(\"dataverses\", 5), self.input.param(\"datasets\", 5), self.input.param(\"synonyms\", 5), analytics_server).test_analytics()", "def _wh_connect():\n utils = Utility()\n config = utils.CONFIG\n conn = utils.get_plugin(config.PATH_CONNECTION_MANAGERS)\n conn.connect(config.URL_TEST_DB)\n return conn", "def _setup_aws_clients(self) -> None:", "def test_check_database_exists(self):\n query_result = [{'streamalert': True}]\n self.client.athena_client = MockAthenaClient(results=query_result)\n\n assert_true(self.client.check_database_exists())", "def _connect(self):\r\n if not self._db:\r\n import boto\r\n sdb = boto.connect_sdb()\r\n if not self.domain_name:\r\n self.domain_name = boto.config.get(\"DB\", \"sequence_db\", boto.config.get(\"DB\", \"db_name\", \"default\"))\r\n try:\r\n self._db = sdb.get_domain(self.domain_name)\r\n except SDBResponseError, e:\r\n if e.status == 400:\r\n self._db = sdb.create_domain(self.domain_name)\r\n else:\r\n raise\r\n return self._db", "def build_db_uri() -> str:\n\n return \"{DB_DRIVER}://{DB_USERNAME}:{DB_PASSWD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\".format(**{\n 'DB_DRIVER': os.environ.get('DB_DRIVER', ''),\n 'DB_HOST': os.environ.get('DB_HOST', ''),\n 'DB_PORT': os.environ.get('DB_PORT', ''),\n 'DB_NAME': os.environ.get('DB_NAME', ''),\n 'DB_USERNAME': os.environ.get('DB_USERNAME', ''),\n 'DB_PASSWD': os.environ.get('DB_PASSWD', '')\n })", "def test_connection():\n import psycopg2\n\n dwh_config = configparser.ConfigParser()\n dwh_config.read_file(open('./aws.cfg'))\n\n try:\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*dwh_config['CLUSTER'].values()))\n _ = conn.cursor()\n print('Connected to AWS Redshift cluster')\n conn.close()\n except Exception as e:\n print('Error connecting to AWS Redshift cluster:', e)", "def test_get_conn_uri_non_existent_key(self):\n conn_id = \"test_mysql\"\n param = {\n 
'Name': '/airflow/connections/test_postgres',\n 'Type': 'String',\n 'Value': 'postgresql://airflow:airflow@host:5432/airflow',\n }\n\n ssm_backend = SystemsManagerParameterStoreBackend()\n ssm_backend.client.put_parameter(**param)\n\n assert ssm_backend.get_conn_uri(conn_id=conn_id) is None\n assert [] == ssm_backend.get_connections(conn_id=conn_id)", "def test_connection_duplication():", "def get_db_connection_url():\n return os.environ[\"DATABASE_URL\"]", "def _get_connect_string(backend,\n user=\"openstack_citest\",\n passwd=\"openstack_citest\",\n database=\"openstack_citest\"):\n if backend == \"mysql\":\n backend = \"mysql+mysqldb\"\n elif backend == \"postgres\":\n backend = \"postgresql+psycopg2\"\n\n return (\"%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s\"\n % {'backend': backend, 'user': user, 'passwd': passwd,\n 'database': database})", "def test_get_host(self):\n pass", "def test_check_database_exists_invalid(self):\n query_result = None\n self.client.athena_client = MockAthenaClient(results=query_result)\n\n assert_false(self.client.check_database_exists())", "def test_urls(self):\n base_test_url = 'http://{}:{}/'.format(TESTING_CONFIG['host'],\n TESTING_CONFIG['port'])\n self.conn._host_url == base_test_url\n self.conn.aheader_url == base_test_url + 'analysis_header'\n self.conn.atail_url == base_test_url + 'analysis_tail'\n self.conn.dref_url == base_test_url + 'data_reference'\n self.conn.dref_header_url == base_test_url + 'data_reference_header'", "def get_db_parameters(connection_name: str = \"default\") -> dict[str, Any]:\n os.environ[\"TZ\"] = \"UTC\"\n if not IS_WINDOWS:\n time.tzset()\n\n connections = {\n \"default\": CONNECTION_PARAMETERS,\n \"client_failover\": CLIENT_FAILOVER_PARAMETERS,\n \"admin\": CONNECTION_PARAMETERS_ADMIN,\n }\n\n chosen_connection = connections[connection_name]\n if \"account\" not in chosen_connection:\n pytest.skip(f\"{connection_name} connection is unavailable in parameters.py\")\n\n # testaccount connection info\n ret = {**DEFAULT_PARAMETERS, **chosen_connection}\n\n # snowflake admin account. 
Not available in GH actions\n for k, v in CONNECTION_PARAMETERS_ADMIN.items():\n ret[\"sf_\" + k] = v\n\n if \"host\" in ret and ret[\"host\"] == DEFAULT_PARAMETERS[\"host\"]:\n ret[\"host\"] = ret[\"account\"] + \".snowflakecomputing.com\"\n\n if \"account\" in ret and ret[\"account\"] == DEFAULT_PARAMETERS[\"account\"]:\n print_help()\n sys.exit(2)\n\n # a unique table name\n ret[\"name\"] = \"python_tests_\" + str(uuid.uuid4()).replace(\"-\", \"_\")\n ret[\"name_wh\"] = ret[\"name\"] + \"wh\"\n\n ret[\"schema\"] = TEST_SCHEMA\n\n # This reduces a chance to exposing password in test output.\n ret[\"a00\"] = \"dummy parameter\"\n ret[\"a01\"] = \"dummy parameter\"\n ret[\"a02\"] = \"dummy parameter\"\n ret[\"a03\"] = \"dummy parameter\"\n ret[\"a04\"] = \"dummy parameter\"\n ret[\"a05\"] = \"dummy parameter\"\n ret[\"a06\"] = \"dummy parameter\"\n ret[\"a07\"] = \"dummy parameter\"\n ret[\"a08\"] = \"dummy parameter\"\n ret[\"a09\"] = \"dummy parameter\"\n ret[\"a10\"] = \"dummy parameter\"\n ret[\"a11\"] = \"dummy parameter\"\n ret[\"a12\"] = \"dummy parameter\"\n ret[\"a13\"] = \"dummy parameter\"\n ret[\"a14\"] = \"dummy parameter\"\n ret[\"a15\"] = \"dummy parameter\"\n ret[\"a16\"] = \"dummy parameter\"\n return ret", "def logic_db_connection():\n try:\n boto_session = boto3.Session(profile_name='loidsig')\n except:\n boto_session = boto3.Session()\n sm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n )\n get_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_DB')\n cred_dict = ast.literal_eval(get_secret_value_response['SecretString'])\n db_user, db_pass = cred_dict['username'], cred_dict['password']\n db_host, db_port, db_name = cred_dict['host'], cred_dict['port'], cred_dict['dbname']\n\n try:\n conn = psycopg2.connect(\n host=db_host,\n port=db_port,\n user=db_user,\n password=db_pass,\n database=db_name,\n )\n except Exception as e:\n print(\"Unable to connect to postgres! 
Error: {}\".format(e))\n raise\n return conn", "def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db", "def test_get_url_base_returns_url_base(self):\n # Arrange / Act\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).get_url_base()\n # Assert\n self.assertEqual(return_value, SERVER_URI)", "def _get_db_connection(name='ace'):\n\n if name is None:\n name = 'ace'\n\n #if _cached_db_connections_enabled():\n #return _get_cached_db_connection(name)\n\n config_section = 'ace'\n if name:\n config_section = 'database_{}'.format(name)\n\n if config_section not in saq.CONFIG:\n raise ValueError(\"invalid database {}\".format(name))\n\n _section = saq.CONFIG[config_section]\n kwargs = {\n 'db': _section['database'],\n 'user': _section['username'],\n 'passwd': _section['password'],\n 'charset': 'utf8'\n }\n\n if 'hostname' in _section:\n kwargs['host'] = _section['hostname']\n\n if 'port' in _section:\n kwargs['port'] = _section.getint('port')\n \n if 'unix_socket' in _section:\n kwargs['unix_socket'] = _section['unix_socket']\n\n if 'ssl_ca' in _section or 'ssl_key' in _section or 'ssl_cert' in _section:\n kwargs['ssl'] = {}\n\n if 'ssl_ca' in _section and _section['ssl_ca']:\n path = abs_path(_section['ssl_ca'])\n if not os.path.exists(path):\n logging.error(\"ssl_ca file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['ca'] = path\n\n if 'ssl_key' in _section and _section['ssl_key']:\n path = abs_path(_section['ssl_key'])\n if not os.path.exists(path):\n logging.error(\"ssl_key file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['key'] = path\n\n if 'ssl_cert' in _section and _section['ssl_cert']:\n path = _section['ssl_cert']\n if not os.path.exists(path):\n logging.error(\"ssl_cert file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['cert'] = path\n\n logging.debug(\"opening database connection {}\".format(name))\n return pymysql.connect(**kwargs)\n #return pymysql.connect(host=_section['hostname'] if 'hostname' in _section else None,\n #port=3306 if 'port' not in _section else _section.getint('port'),\n #unix_socket=_section['unix_socket'] if 'unix_socket' in _section else None,\n #db=_section['database'],\n #user=_section['username'],\n #passwd=_section['password'],\n #charset='utf8')", "def test_get_client(self):\n ec2 = get_client()\n self.assertEqual(ec2._endpoint.host, 'https://ec2.ap-northeast-1.amazonaws.com')", "def test_get_database(self):\r\n database = self.profile.get_database('testing.db')\r\n self.assertIsInstance(database, QtDBConnector)", "def test_verify_active_directory_works_after_failover_with_new_system_dataset(driver):\n pass" ]
[ "0.6231901", "0.612633", "0.5995813", "0.5969979", "0.5810007", "0.5721061", "0.56909764", "0.56657547", "0.5641794", "0.5621795", "0.5588193", "0.5584233", "0.551454", "0.55139333", "0.5426676", "0.5410052", "0.5409274", "0.54009277", "0.5392772", "0.5385202", "0.5337856", "0.5327973", "0.5320421", "0.5314456", "0.5296082", "0.5275799", "0.5262373", "0.5257943", "0.5248211", "0.5246062" ]
0.6868718
0
Copied get_snowflake_connection_url func from tests/test_utils.py
def _get_snowflake_connection_string() -> str: sfUser = os.environ.get("SNOWFLAKE_USER") # noqa: TID251 sfPswd = os.environ.get("SNOWFLAKE_PW") # noqa: TID251 sfAccount = os.environ.get("SNOWFLAKE_ACCOUNT") # noqa: TID251 sfDatabase = os.environ.get("SNOWFLAKE_DATABASE") # noqa: TID251 sfSchema = os.environ.get("SNOWFLAKE_SCHEMA") # noqa: TID251 sfWarehouse = os.environ.get("SNOWFLAKE_WAREHOUSE") # noqa: TID251 sfRole = os.environ.get("SNOWFLAKE_ROLE") or "PUBLIC" # noqa: TID251 url = f"snowflake://{sfUser}:{sfPswd}@{sfAccount}/{sfDatabase}/{sfSchema}?warehouse={sfWarehouse}&role={sfRole}" return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_url():\n return TEST_DATABASE_URL", "def get_db_connection_url():\n return os.environ[\"DATABASE_URL\"]", "def get_connection_string(src_or_dest):\n if src_or_dest == \"src\":\n try:\n SQL_SERVER = os.environ[\"CROP_SRC_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_SRC_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_SRC_SQL_SERVER, CROP_SRC_SQL_PASS\"\n )\n return None\n elif src_or_dest == \"dest\":\n try:\n SQL_SERVER = os.environ[\"CROP_DEST_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_DEST_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_DEST_SQL_SERVER, CROP_DEST_SQL_PASS\"\n )\n return None\n else:\n print(\"Error: need to specify 'src' or 'dest'\")\n SQL_USERNAME = os.environ[\"CROP_SQL_USERNAME\"]\n SQL_USER = f\"{SQL_USERNAME}@{SQL_SERVER}\"\n SQL_HOST = f\"{SQL_SERVER}.postgres.database.azure.com\"\n SQL_CONNECTION_STRING = \"%s://%s:%s@%s:%s\" % (\n SQL_ENGINE,\n SQL_USER,\n parse.quote(SQL_PASSWORD),\n SQL_HOST,\n SQL_PORT,\n )\n return SQL_CONNECTION_STRING", "def _get_redshift_connection_string() -> str:\n host = os.environ.get(\"REDSHIFT_HOST\") # noqa: TID251\n port = os.environ.get(\"REDSHIFT_PORT\") # noqa: TID251\n user = os.environ.get(\"REDSHIFT_USERNAME\") # noqa: TID251\n pswd = os.environ.get(\"REDSHIFT_PASSWORD\") # noqa: TID251\n db = os.environ.get(\"REDSHIFT_DATABASE\") # noqa: TID251\n ssl = os.environ.get(\"REDSHIFT_SSLMODE\") # noqa: TID251\n\n if not host:\n raise ValueError(\n \"Environment Variable REDSHIFT_HOST is required to run integration tests against Redshift\"\n )\n if not port:\n raise ValueError(\n \"Environment Variable REDSHIFT_PORT is required to run integration tests against Redshift\"\n )\n if not user:\n raise ValueError(\n \"Environment Variable REDSHIFT_USERNAME is required to run integration tests against Redshift\"\n )\n if not pswd:\n raise ValueError(\n \"Environment Variable REDSHIFT_PASSWORD is required to run integration tests against Redshift\"\n )\n if not db:\n raise ValueError(\n \"Environment Variable REDSHIFT_DATABASE is required to run integration tests against Redshift\"\n )\n if not ssl:\n raise ValueError(\n \"Environment Variable REDSHIFT_SSLMODE is required to run integration tests against Redshift\"\n )\n\n url = f\"redshift+psycopg2://{user}:{pswd}@{host}:{port}/{db}?sslmode={ssl}\"\n\n return url", "def test_get_connection(self: Any, mock_method: Any) -> None:\n extractor = SQLAlchemyExtractor()\n config_dict: Dict[str, Any] = {\n 'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',\n 'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;'\n }\n conf = ConfigFactory.from_dict(config_dict)\n extractor.init(Scoped.get_scoped_conf(conf=conf,\n scope=extractor.get_scope()))\n extractor._get_connection()\n mock_method.assert_called_with('TEST_CONNECTION', connect_args={})\n\n extractor = SQLAlchemyExtractor()\n config_dict = {\n 'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',\n 'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;',\n 'extractor.sqlalchemy.connect_args': {\"protocol\": \"https\"},\n }\n conf = ConfigFactory.from_dict(config_dict)\n extractor.init(Scoped.get_scoped_conf(conf=conf,\n scope=extractor.get_scope()))\n extractor._get_connection()\n mock_method.assert_called_with('TEST_CONNECTION', connect_args={\"protocol\": \"https\"})", "def test_get_conn_uri_non_existent_key(self):\n conn_id = \"test_mysql\"\n param = {\n 'Name': '/airflow/connections/test_postgres',\n 'Type': 'String',\n 'Value': 
'postgresql://airflow:airflow@host:5432/airflow',\n }\n\n ssm_backend = SystemsManagerParameterStoreBackend()\n ssm_backend.client.put_parameter(**param)\n\n assert ssm_backend.get_conn_uri(conn_id=conn_id) is None\n assert [] == ssm_backend.get_connections(conn_id=conn_id)", "def test_baseurl(matrix):\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_public_baseurl()\n assert result == \"http://mock.fqdn:8008\"\n matrix.charm_config[\"enable-tls\"] = False\n matrix.external_port = 80\n result = matrix.get_public_baseurl()\n assert result == \"http://mock.fqdn\"\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_public_baseurl()\n assert result == \"https://mock.fqdn\"", "def test_get_host(self):\n pass", "def get_service_url():\n return get_config_handler().get_service_url()", "def get_connection(url):\n conn = psycopg2.connect(url)\n return conn", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def get_usas_jdbc_url():\n if not CONFIG.DATABASE_URL:\n raise ValueError(\"DATABASE_URL config val must provided\")\n\n return get_jdbc_url_from_pg_uri(CONFIG.DATABASE_URL)", "def _get_connect_string(backend,\n user=\"openstack_citest\",\n passwd=\"openstack_citest\",\n database=\"openstack_citest\"):\n if backend == \"mysql\":\n backend = \"mysql+mysqldb\"\n elif backend == \"postgres\":\n backend = \"postgresql+psycopg2\"\n\n return (\"%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s\"\n % {'backend': backend, 'user': user, 'passwd': passwd,\n 'database': database})", "def initialized_db_url(pristine_db_engine):\n utils.initialize_database(pristine_db_engine)\n db_url = str(pristine_db_engine.url)\n return db_url", "def response_kafka_connection_url(self) -> str:\n return self._response_kafka_connection_url", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def get_server_url():\n try:\n url = os.environ['API_HOST']\n # print('[ OK ] Server url loaded: ', url)\n except KeyError:\n url = 'http://localhost:3300/'\n print('[ WARNING ] API_HOST environment variable was not found. 
default server url was set at: ', url)\n\n return url", "def get_database_connection(local_dev=True):\n if local_dev:\n conn = psycopg2.connect(os.getenv(\"LOCAL_DATABASE_URL\"))\n else:\n conn = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\n return conn", "def _connect_and_query(connection_string, query, *params):\n if not connection_string:\n logger.error(\"Connection string is empty, don't know where to connect\")\n return\n\n with SnowflakeController(connection_string) as snowflake:\n return snowflake._query(query, params)", "def create_connection(connection_name: str, **kwargs) -> SnowflakeConnection:\n ret = get_db_parameters(connection_name)\n ret.update(kwargs)\n connection = snowflake.connector.connect(**ret)\n return connection", "def test_get_url_base_returns_url_base(self):\n # Arrange / Act\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).get_url_base()\n # Assert\n self.assertEqual(return_value, SERVER_URI)", "def get_db_url_mysql(config):\n if 'DB_URL_TESTING' in config:\n return config['DB_URL_TESTING']\n\n return 'mysql+mysqlconnector://{}:{}@{}/{}' \\\n .format(config['DB_USER'],\n config['DB_PASS'],\n config['DB_HOST'],\n config['DB_NAME'])", "def _get_athena_connection_string(db_name_env_var: str = \"ATHENA_DB_NAME\") -> str:\n ATHENA_DB_NAME: Optional[str] = os.getenv(db_name_env_var)\n ATHENA_STAGING_S3: Optional[str] = os.getenv(\"ATHENA_STAGING_S3\")\n\n if not ATHENA_DB_NAME:\n raise ValueError(\n f\"Environment Variable {db_name_env_var} is required to run integration tests against AWS Athena\"\n )\n\n if not ATHENA_STAGING_S3:\n raise ValueError(\n \"Environment Variable ATHENA_STAGING_S3 is required to run integration tests against AWS Athena\"\n )\n\n url = f\"awsathena+rest://@athena.us-east-1.amazonaws.com/{ATHENA_DB_NAME}?s3_staging_dir={ATHENA_STAGING_S3}\"\n\n return url", "def _wh_connect():\n utils = Utility()\n config = utils.CONFIG\n conn = utils.get_plugin(config.PATH_CONNECTION_MANAGERS)\n conn.connect(config.URL_TEST_DB)\n return conn", "def getProjectURL():", "def get_connection(db_url=None):\n return engine(db_url).connect()", "def get_url(self):\n return self.db_url", "def get_online_vso_url():\n for mirror in DEFAULT_URL_PORT:\n if check_connection(mirror['url']):\n # Now we get the port URL from the WSDL and test that\n wsdl = zeep.wsdl.Document(mirror[\"url\"], zeep.Transport())\n # I think that accessing \"VSOiService\" here is equivalent to the\n # set_ns_prefix call in the build_client function below\n url = wsdl.services[\"VSOiService\"].ports[mirror[\"port\"]].binding_options[\"address\"]\n if not check_cgi_connection(url):\n continue\n return mirror", "def get_sauce_url(self):\n # Creates the url based on the username and accesskey given by the config\n saucelabs_config = self.config.integrations.get('saucelabs')\n\n if not saucelabs_config:\n raise IntegrationNotConfigured(\"Attempted to use SauceLabs, but SauceLabs is not configured.\")\n\n username = saucelabs_config['username']\n access_key = saucelabs_config['accesskey']\n\n if self.is_saucelabs_available():\n return 'https://{0}:{1}@ondemand.saucelabs.com:443'.format(username, access_key)\n raise NoAvailableCapacityException(\"SauceLabs has no available capacity.\")", "def gs_url():\n assert GS_TEST_URL is not None, \"Missing gs URL in environment variables\"\n return GS_TEST_URL" ]
[ "0.6306403", "0.593241", "0.5894464", "0.5894039", "0.5661682", "0.56526774", "0.56200486", "0.5572493", "0.551084", "0.54703426", "0.5458792", "0.54345244", "0.543016", "0.54166806", "0.53993094", "0.5395401", "0.5394583", "0.53854716", "0.53636956", "0.5357231", "0.53468376", "0.5343313", "0.5321365", "0.5305248", "0.5281693", "0.5269108", "0.5264421", "0.52511334", "0.52505696", "0.5234623" ]
0.7753926
0
Creates a temporary directory and absolute path to an ephemeral sqlite_db within that temp directory. Used to support testing of multitable expectations without creating temp directories at import.
def generate_sqlite_db_path(): tmp_dir = str(tempfile.mkdtemp()) abspath = os.path.abspath( # noqa: PTH100 os.path.join( # noqa: PTH118 tmp_dir, "sqlite_db" + "".join( [random.choice(string.ascii_letters + string.digits) for _ in range(8)] ) + ".db", ) ) return abspath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tempdir():\n\n # Create a directory and return the path\n return tempfile.mkdtemp()", "def mock_db(tmpdir_factory):\n filename = str(tmpdir_factory.mktemp(\"data\").join(\"test.db\"))\n create_test_db(filename)\n return filename", "def create_temp_dir():\n\n try:\n temp_dir = os.getenv('TEMP_FILE_DIR')\n\n if not isinstance(temp_dir, type(None)):\n if os.path.exists(temp_dir):\n LOGGER.warning('Temp Directory Already Exists.')\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n\n LOGGER.debug(f'Temp Dir: {temp_dir}')\n except Exception as ex:\n LOGGER.exception(ex)\n raise ex", "def tempdb():\n fd, minitwit.app.config['DATABASE'] = tempfile.mkstemp()\n minitwit.init_db()\n try:\n yield\n finally:\n os.close(fd)\n os.unlink(minitwit.app.config['DATABASE'])", "def get_temp_dir():\n return tempfile.mkdtemp()", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def _use_temp_directory(self):\n if not self._is_temp_dir:\n self._orig_base_data_dir = self._base_data_dir\n self._orig_base_logs_dir = self._base_logs_dir\n temp_dir = Path(tempfile.mkdtemp())\n self._base_data_dir = temp_dir / \"data\"\n self._base_logs_dir = temp_dir / \"logs\"\n self.db.change_path(\":memory:\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = True\n return temp_dir", "def create_temp_env_directory():\n return tempfile.mkdtemp(prefix=\"spack-\")", "def setupTestDbEnv():\n baseDirPath = setupTmpBaseDir()\n baseDirPath = os.path.join(baseDirPath, \"db/bluepea\")\n os.makedirs(baseDirPath)\n return setupDbEnv(baseDirPath=baseDirPath)", "def make_tempdir():\n global _TEMPDIR\n if not _TEMPDIR:\n _TEMPDIR = tempfile.mkdtemp()\n return _TEMPDIR", "def make_tempdir():\n return mkdtemp()", "def temp_dir() -> pathlib.Path:\n with tempfile.TemporaryDirectory(prefix=\"phd_\") as d:\n yield pathlib.Path(d)", "def create_temp_folder():\n path_join = os.path.join(tempfile.gettempdir(), id_generator(5))\n os.makedirs(path_join)\n return path_join", "def secure_temp_dir(context):\n tmpd = tempfile.TemporaryDirectory()\n context.tempdir = tmpd", "def create_temp_directory(reason):\n current_dir = os.getcwd()\n # change path accordingly - dir=current_dir\n # check temp dir - C:\\Users\\uC264789\\AppData\\Local\\Temp\n return tempfile.mkdtemp(prefix='temp-{0}-'.format(reason))", "def buildTempDirs(self):\r\n self.temproot = tempfile.mkdtemp()\r\n sys.path.append(self.temproot)\r\n def makedir(a, *p):\r\n path = os.path.join(a, *p)\r\n os.mkdir(path)\r\n return path\r\n\r\n self.temp_fake = makedir(self.temproot, '_fake')\r\n open(os.path.join(self.temp_fake, '__init__.py'), 'w').close()\r\n\r\n self.temp_fake_a = makedir(self.temp_fake, 'a')\r\n open(os.path.join(self.temp_fake_a, '__init__.py'), 'w').close()\r\n\r\n self.temp_fake_aa = makedir(self.temp_fake_a, 'aa')\r\n open(os.path.join(self.temp_fake_aa, '__init__.py'), 'w').close()\r\n open(os.path.join(self.temp_fake_aa, 'eggs.py'), 'w').close()\r\n open(os.path.join(self.temp_fake_aa, 'spam.py'), 'w').close()", "def tempdir(self):\n path = tempfile.gettempdir()\n return os.path.join(path, 'parquet-index-test-' + str(uuid.uuid4()))", "def get_tmp_dir():\n tmpdir_obj = tempfile.TemporaryDirectory()\n tmpdir = tmpdir_obj.name\n return tmpdir, tmpdir_obj", "def setup_workdir():\n return tempfile.mkdtemp(dir=\"/tmp\", prefix=\"python-fleure-tests-\")", "def get_new_temp_dir(self):\n return 
self.useFixture(fixtures.TempDir())", "def tempdir():\n return mkdtemp()", "def temp_dir():\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n return _temp_dir", "def makeTempDirParent():\n if not os.path.exists(os.path.join(os.curdir, '.tempTestDir')):\n os.mkdir(os.path.join(os.curdir, '.tempTestDir'))", "def bear_data_dir(tmp_path_factory):\n base = tmp_path_factory.mktemp(\"bear_data\")\n db = base.joinpath(\"database.sqlite\")\n files = base.joinpath(\"Local Files\")\n\n if not db.is_file():\n create_bear_db(db)\n\n if not files.is_dir():\n create_bear_files(files)\n\n return base", "def test_temp_dir(self):\r\n temp_dir = get_qiime_temp_dir()\r\n\r\n self.assertTrue(exists(temp_dir),\r\n \"temp_dir does not exist: %s\" % temp_dir)\r\n self.assertTrue(isdir(temp_dir),\r\n \"temp_dir is not a directory: %s\" % temp_dir)\r\n self.assertTrue(access(temp_dir, W_OK),\r\n \"temp_dir is not writable: %s\" % temp_dir)", "def temp_dir():\n temp = UNIT_TEST_DATA / 'temp'\n try:\n temp.mkdir(parents=True, exist_ok=True)\n yield temp\n finally:\n rmtree(temp)", "def tmp_data_directory(tmp_path_factory):\n return str(tmp_path_factory.mktemp(\"datathon-mlapp-starter\"))", "def make_tempdir(parent=None):\n tmpdir = tempfile.mkdtemp(prefix='rbtools.',\n dir=parent)\n tempdirs.append(tmpdir)\n\n return tmpdir", "def temp_dir(**kwargs):\n temp_dir = tempfile.mkdtemp(**kwargs)\n try:\n yield temp_dir\n finally:\n # Cleanup\n # ermm... this is breaking something (maybe bootstrapping replicates?), so leaving out for now\n #shutil.rmtree(temp_dir)\n pass", "def create_temp_dir(self, *args, **kwargs):\n temp_dir = tempfile.mkdtemp(*args, **kwargs)\n try:\n yield decode_path(temp_dir)\n finally:\n remove_directory(temp_dir)" ]
[ "0.7303786", "0.72319436", "0.7175902", "0.71247184", "0.71021247", "0.7007046", "0.6982505", "0.69429123", "0.6942816", "0.69313794", "0.6923916", "0.68669444", "0.68456614", "0.6796194", "0.6775696", "0.6754039", "0.6739042", "0.6726847", "0.67143714", "0.6703309", "0.6691542", "0.66735035", "0.6665949", "0.66261584", "0.65977925", "0.6592617", "0.6588109", "0.6587524", "0.65874714", "0.6582061" ]
0.735027
0
this function reads the CPI rate csv then returns a dataframe
def get_cpi_rates(): df = pd.read_csv('cpi_usa.csv', index_col=0) df.index = pd.to_datetime(df.index) df = df.resample('BAS').mean() # change sampling to business year start df.index = df.index.year # datetime to year df.columns = ['cpi_rate'] return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetRateData(directory):\n\n rt_data = pd.read_csv(directory)\n return rt_data", "def read_csv_ur10(self, csv_file):\r\n df = pd.read_csv(csv_file, sep=';', decimal=',', header=0)\r\n return df", "def read_csv():", "def _get_liwc_df(self) -> pd.DataFrame:\n data = pd.read_csv(self.path)\n data.index = pd.to_numeric(data['Filename'].str.rstrip('.txt'))\n return data", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def ReadMetrics( fileName ):\n DataDF=pd.read_csv(fileName,header=0,delimiter=',',parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n #print(DataDF.head())\n return( DataDF )", "def read_ipea(\n file_name:str,time_freq:str\n ) -> pd.DataFrame:\n df = pd.read_csv(f'../data/{time_freq}/{file_name}.csv',\n index_col='Date',parse_dates=True).fillna(0)\n df = df.pct_change().replace([np.inf, -np.inf, np.nan], 0)\n\n df = lagger(df,2,list(df))\n df.index = df.index - pd.Timedelta('1 days')\n df = df.resample('Q', convention='start').asfreq()\n\n return df", "def read_ticker(\n ticker:str) -> pd.DataFrame:\n df = pd.read_csv(f'../data/consolidate/{ticker}.csv',\n index_col='Unnamed: 0',parse_dates=True).fillna(0)\n\n df = df.loc[:, (df != 0).any(axis=0)] # removing 0 columns\n df = df.pct_change().replace([np.inf, -np.inf, np.nan], 0)\n\n return df", "def get_data(filename):\r\n return pd.read_csv(filename)", "def read_csv_data(url):\n\n csv_data = pd.read_csv(url)\n\n return csv_data", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def get_price_df(url):\n df = pd.read_csv(url).dropna()\n df.index = pd.to_datetime(df['Date'])\n df = df.drop(columns=['Date'])\n return df", "def load_csv():\n df = pd.read_csv(datafolder+filename, decimal=decimal).astype(\n {'min': 'float', 'max': 'float'})\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def load_extract(cryptocurrency):\n df = pd.read_csv(f'input_12mo/{cryptocurrency}.csv')\n df = df['Close'].copy()\n df = df[-183:].copy()\n return df", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. 
Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def get_cpi(interval: str) -> pd.DataFrame:\n s_interval = \"semiannual\" if interval == \"s\" else \"monthly\"\n url = f\"https://www.alphavantage.co/query?function=CPI&interval={s_interval}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n r = requests.get(url, headers={\"User-Agent\": get_user_agent()})\n\n if r.status_code != 200:\n return pd.DataFrame()\n\n data = pd.DataFrame(r.json()[\"data\"])\n data[\"date\"] = pd.to_datetime(data[\"date\"])\n data[\"CPI\"] = data[\"value\"].astype(float)\n data = data.drop(columns=[\"value\"])\n\n return data", "def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")", "def GetOpsRates():\n return GetDataFromCsvFile('ops_rates.csv')", "def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n \"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df", "def extract_df(self, file_path):\n df = pd.read_csv(file_path, sep=\";\")\n df.rename(columns={\"Get\": \"Currency\"}, inplace=True)\n df = df[df[\"Pay\"] == \"Chaos Orb\"]\n df = df[[\"League\", \"Date\", \"Currency\", \"Value\"]]\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n df[\"Date\"] = df[\"Date\"] - df.loc[0][\"Date\"]\n return df", "def get_data(fpath):\n\n visits = ['SC', 'BL', 'V01', 'V02', 'V03', 'V04', 'V05', 'V06', 'V07',\n 'V08', 'V09', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15']\n dtype = dict(PATNO=str,\n CLINICAL_EVENT=cdtype(visits, ordered=True),\n TESTNAME=str,\n TESTVALUE=str)\n\n fname = op.join(fpath, 'Current_Biospecimen_Analysis_Results.csv')\n data = pd.read_csv(fname, dtype=dtype)\n\n data['TESTVALUE'] = pd.to_numeric(data.TESTVALUE, errors='coerce')\n data = data.rename(columns=RENAME_COLS).assign(**ASSIGN_COLS)[RETAIN_COLS]\n data = data.dropna(axis=0, subset=['SCORE'])\n\n return data", "def reader(self):\n df = pd.read_csv(self.path)\n 
return df", "def load_predict(cryptocurrency):\n df = pd.read_csv(f'output_12mo/{cryptocurrency}.csv')\n #df = df.drop(0, axis=0).copy()\n #df = df['Close'].copy()\n #df = df[:-25].copy()\n return df", "def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):\n print \"Loading %s and creating DataFrame..\" % catalog\n df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)\n print \"..Done\\n----------\"\n return df_imported", "def readData(filename):\n #defining gobal variable (dataframe) to access it outside this function\n global dataframe\n #storing full CSV file into a dataframe(data structure)\n dataframe = pd.read_csv(filename)\n #type casting temperature column of dataframe to numeric data and ignoring '***' values\n dataframe['Temperature'] = pd.to_numeric(dataframe['Temperature'], errors='coerce')\n return dataframe", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)" ]
[ "0.6811379", "0.6593392", "0.6442805", "0.6427641", "0.64132476", "0.63819474", "0.63650864", "0.63588196", "0.6348105", "0.6342826", "0.63283473", "0.63253975", "0.63097906", "0.63020056", "0.63020056", "0.63020056", "0.62934697", "0.62911475", "0.619082", "0.6184108", "0.6173163", "0.6158802", "0.61554146", "0.6150623", "0.61362916", "0.61036724", "0.6088739", "0.6085816", "0.60793334", "0.6077265" ]
0.69035745
0
function D=l2distance(X,Z) Computes the Euclidean distance matrix.
def l2distance(X, Z=None):
    if Z is None:
        n, d = X.shape
        s1 = np.sum(np.power(X, 2), axis=1).reshape(-1,1)
        D1 = -2 * np.dot(X, X.T) + repmat(s1, 1, n)
        D = D1 + repmat(s1.T, n, 1)
        np.fill_diagonal(D, 0)
        D = np.sqrt(np.maximum(D, 0))
    else:
        n, d = X.shape
        m, _ = Z.shape
        s1 = np.sum(np.power(X, 2), axis=1).reshape(-1,1)
        s2 = np.sum(np.power(Z, 2), axis=1).reshape(1,-1)
        D1 = -2 * np.dot(X, Z.T) + repmat(s1, 1, m)
        D = D1 + repmat(s2, n, 1)
        D = np.sqrt(np.maximum(D, 0))
    return D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_l2_distance_matrix(features_queries, features_dataset):\n sx = np.sum(features_queries ** 2, axis=1, keepdims=True)\n sy = np.sum(features_dataset ** 2, axis=1, keepdims=True)\n\n return np.sqrt(-2 * features_queries.dot(features_dataset.T) + sx + sy.T)", "def chDist (x1,y1,z1,x2,y2,z2):\n\n # Great-circle distance for each point\n d = (x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2\n\n # L2-norm\n return d.sum()/(3*d.size)", "def distance(x, y, z):\n a = array([[(x[i, j] - y[j])**2\n\t\tfor j in range(x.shape[1])]\n\t\tfor i in range(x.shape[0])])\n b = array([[(x[i, j] - z[j])**2\n\t\tfor j in range(x.shape[1])]\n for i in range(x.shape[0])])\n return (sqrt(sum(a, 1)), sqrt(sum(b, 1)))", "def l2norm_(X, Xstar):\n return cdist(X, Xstar)", "def distance(x1, y1, z1, x2, y2, z2):\n return math.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)", "def euclidean_squared_distance(input1, input2):\n m, n = input1.size(0), input2.size(0)\n mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)\n mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat = mat1 + mat2\n distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)\n return distmat", "def get_correct_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i, j], [i, j]) / get_minor(L, [i], [i])\n return D", "def calculateEuclideanDistance(vector):\r\n global euclideanDistance\r\n # create linkage matrix with the distance metric as euclidean distance\r\n # calculate the distances of the clusters by starting as singletons\r\n # and in each iteration will merge the two clusters which have the smallest distance\r\n # returns array of length n - 1\r\n # Z[i] will tell us which clusters were merged in the i-th iteration\r\n # each row has format [cluster1, cluster1, dist, sample_count].\r\n euclideanDistance = linkage(vector, metric='euclidean')", "def dist(x1, x2, distance):\n if distance == 'l2':\n return np.sqrt(np.sum(np.square(x1 - x2)))\n elif distance == 'squared_l2':\n return np.sum(np.square(x1 - x2))\n else:\n raise Exception(\"The distance '%s' is not supported.\" % distance)", "def euclidean_distances(X, Y, squared=False, inverse=True):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n if X.shape[1] != Y.shape[1]:\n raise ValueError(\"Incompatible dimension for X and Y matrices\")\n\n if squared:\n return ssd.cdist(X, Y, 'sqeuclidean')\n\n #workaround for Numpy bug that destroys array structure:\n # np.double(np.asarray([[5,5]])) == array([[ 5., 5.]])\n # but np.double(np.asarray([[5]])) == 5.0 !!!\n if X.shape[1] == 1:\n XY = np.asarray([[np.sqrt(((X[0][0]-Y[0][0])**2))]])\n else:\n XY = ssd.cdist(X, Y)\n return np.divide(1.0, (1.0 + XY)) if inverse else XY", "def distance(XYZ1=np.array([0, 0, 0], dtype='float32'),\n XYZ2=np.array([1, 1, 1], dtype='float32')):\n a=XYZ2-XYZ1\n b=a**2\n c=b.sum()\n return np.sqrt(c)", "def distance((x,y,z),(x0,y0,z0)):\n return sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2)", "def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D", "def euclidean_distances(X, Y):\r\n\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in 
range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sqrt(np.sum((X[X_idx,:]-Y[Y_idx,:])**2))\r\n \r\n return D", "def nn_distance(xyz1, xyz2):\n return _op_library.nn_distance(xyz1, xyz2)", "def l2_distance(v1, v2):\n\treturn np.linalg.norm(np.array(v1) - np.array(v2))", "def dist2(X, Y, c=1, d=1):\n R1, t1 = Rt(X)\n R2, t2 = Rt(Y)\n R1i = R1.T\n # import IPython as ip\n # ip.embed()\n # term1 = np.linalg.norm(so2.algi(logm(R1i.dot(R2))))**2\n term1 = np.linalg.norm(so2.algi(so2.logm(R1i.dot(R2))))**2\n term2 = np.linalg.norm(t2 - t1)**2\n return c*term1 + d*term2", "def distance_3D(c1, c2):\n return np.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)", "def get_l2_distance(x1, x2, y1, y2):\n return ((x1-y1)**2 + (x2-y2)**2)**0.5", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def distancematrix(vec1, vec2):\n v1, v2 = np.meshgrid(vec1, vec2)\n return np.abs(v1 - v2)", "def l2_pairwise_distance(v1, v2):\n nrow = len(v1)\n ncol = len(v2)\n dist_mat = [[0 for _ in range(ncol)] for _ in range(nrow)]\n for i in range(nrow):\n for j in range(ncol):\n dist_mat[i][j] = math.sqrt((v1[i] - v2[j])**2)\n return dist_mat", "def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists", "def compute_distance(X, K_clusters):\n dis = np.linalg.norm((X-K_clusters),2,axis=1)**2\n return dis", "def _derive_euclidean_dm(self, cat_mat, dim):\r\n res_mat = []\r\n\r\n for i in range(dim):\r\n res_mat.append([0 for k in range(dim)])\r\n for j in range(i):\r\n res_mat[i][j] = self._vector_dist(cat_mat[i], cat_mat[j])\r\n res_mat[j][i] = res_mat[i][j]\r\n\r\n return DistanceMatrix(res_mat, self.DistanceMatrices[0].ids)", "def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))", "def compute_distances(Ls):\n if not isinstance(Ls, list):\n Ls = [Ls]\n\n dists = []\n for L in Ls:\n N,D = L.shape\n # 1xNxD - Nx1xD (L1 distance)\n dist = (np.abs(L[None,:,:] - L[:,None,:])).sum(axis=2)\n dists.append(dist)\n\n return dists", "def compute_dist_matrix(X1, X2, distance):\n N, M = X1.shape[0], X2.shape[0]\n dist_matrix = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n dist_matrix[i][j] = dist(X1[i], X2[j], distance=distance)\n return dist_matrix", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))" ]
[ "0.66955245", "0.6539834", "0.65245646", "0.6406857", "0.6404955", "0.63448393", "0.62766933", "0.62603134", "0.6219491", "0.6203552", "0.61489147", "0.61423534", "0.6136372", "0.6129909", "0.60990906", "0.6084675", "0.6078998", "0.60646284", "0.60477096", "0.5988069", "0.5983563", "0.5972538", "0.59650964", "0.5936434", "0.59357506", "0.5932708", "0.5927294", "0.5909327", "0.5896286", "0.5884926" ]
0.8402708
0
Generator that returns nothing
def emptyGenerator():
    return
    yield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __emptygen():\n if False:\n yield", "def nullcontext() -> Iterator[None]:\n yield", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "def no_none(decorated):\n def _func(*args, **kwargs):\n \"\"\"wrap generator\"\"\"\n for value in decorated(*args, **kwargs):\n if value is not None:\n yield value\n return _func", "def very_simple():\n yield 1", "def __iter__(self):\n for item in(self.data_):\n if(item!= None):#We dont want to yield spaces that have not been filled yet \n yield item", "def generator(self):\n return [None, 1]", "def __iter__(self):\n return iter(())", "def _generator(self):\n while not self._stopFlag:\n yield self._oneStep()\n self._cooperator = None", "def simple_generator():\n yield 'horse'\n # just going to do it...\n yield 'cow'\n yield 'mouse'", "def empty_iter_commits():\n\n def empty_list_gen():\n for p in []:\n yield p\n\n return empty_list_gen()", "def test_tasklet_yield_emtpy_list():\n\n @ndb.tasklet\n def test_it():\n nothing = yield []\n raise ndb.Return(nothing)\n\n assert test_it().result() == ()", "def none(self):", "def nothing():\n pass", "def noop():", "def iterwhite():\n while True:\n for n in rng.randn(100):\n yield n", "def nooutput(results):\n for _row in results:\n pass", "def source_iterator(self):\n while(True): # loop forever and never raise StopIteration\n for x in self.source.new_crashes():\n if x is None:\n yield None\n else:\n yield ((x,), {}) # (args, kwargs)\n else:\n yield None # if the inner iterator yielded nothing at all,\n # yield None to give the caller the chance to sleep", "def bool_generator():\n b = True\n while True:\n yield str(b)\n b = not b", "def _generator(self):\n\t\twhile(1):\n\t\t\ttry:\n\t\t\t\tm = self.messages.pop(0) # pop the first Flash2Message in the list\n\t\t\t\tyield m\n\t\t\texcept IndexError:\n\t\t\t\traise StopIteration", "def stop():\n raise StopIteration", "def __iter__(self):\n yield from self.gen", "def maybe_generator(obj):\n if isinstance(obj, types.GeneratorType):\n for elt in obj:\n yield elt\n else:\n yield obj", "def abc():\r\n yield \"a\"\r\n yield \"b\"\r\n yield \"c\"", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def __next__(self):\n\t\treturn next()", "def simple():\n yield 1\n yield 2\n yield 3", "def compact(seq):\n for item in seq:\n if item:\n yield item", "def empty_model() -> Model:\n yield Model()", "def __iter__(self):\n return self.new_generator()" ]
[ "0.83837897", "0.7599528", "0.7254626", "0.67886347", "0.67538154", "0.6752617", "0.67197376", "0.65942144", "0.6579768", "0.6470785", "0.6467969", "0.6385577", "0.63608426", "0.6305669", "0.6300992", "0.62990665", "0.6295552", "0.62473893", "0.62469494", "0.62340343", "0.6217735", "0.6216294", "0.61577374", "0.615064", "0.6144899", "0.613846", "0.6135096", "0.61253506", "0.6124949", "0.6119161" ]
0.8927865
0
GET method Get an instant task information.
def get(self, request):
    feedback = {
        'permission': True
    }
    try:
        task_id = request.GET.get('task_id', None)
        if task_id is None:
            feedback['data'] = ErrorCode.parameter_missing('task_id')
            raise natrix_exception.ParameterMissingException(parameter='task_id')
        try:
            uuid.UUID(hex=task_id)
            task = Task.objects.get(id=task_id, time_type='instant')
            serializer = task_serializer.InstantTaskSerializer(instance=task)
            feedback['data'] = {
                'code': 200,
                'message': u'Instant Task Info!',
                'info': serializer.data
            }
        except ValueError:
            feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')
            raise natrix_exception.ParameterInvalidException(parameter='task_id')
        except Task.DoesNotExist:
            feedback['data'] = ErrorCode.parameter_invalid(
                'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))
            raise natrix_exception.ParameterInvalidException(parameter='task_id')
        except natrix_exception.NatrixBaseException as e:
            logger.error(e.get_log())
            feedback['data'] = ErrorCode.sp_code_bug('Serializer error: {}'.format(e.get_log()))
        except Exception as e:
            logger.error(e)
            feedback['data'] = ErrorCode.sp_code_bug('Unknow error: {}'.format(e))
    except natrix_exception.NatrixBaseException as e:
        logger.info(e.get_log())
    return JsonResponse(data=feedback)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request):\n feedback = {\n 'permission': True\n }\n\n try:\n task_id = request.GET.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n try:\n uuid.UUID(hex=task_id)\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n try:\n task = Task.objects.get(id=task_id, time_type='instant')\n # response_count = success + wrong\n res = command_dispatcher.get_task_data(task.id)\n success = len(res.get('success'))\n wrong = len(res.get('error'))\n response_count = success + wrong\n\n time_delta = timezone.now() - task.create_time\n\n if task.status and ( response_count == task.terminal_count or time_delta.seconds > 120):\n task.status = False\n task.result_snapshot = json.dumps(res)\n task.save()\n\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant Task Status',\n 'info': {\n 'finished': not task.status,\n 'total': task.terminal_count,\n 'responses': response_count,\n 'success': success,\n 'wrong': wrong\n }\n }\n\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n\n return JsonResponse(data=feedback)", "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)", "def get_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n response = jsonify(content=task['content'])\n response.status_code = 200\n return response", "def get(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if not task == None:\n guid = \"%s\" % task.key().id_or_name()\n task_json = { \"id\": \"%s\" % guid, \"name\": task.name,\n \"priority\": task.priority, \"effort\": task.effort,\n \"projectId\": task.projectId,\n \"submitterId\": task.submitterId, \"assigneeId\": task.assigneeId,\n \"type\": task.type, \"developmentStatus\": task.developmentStatus,\n \"validation\": task.validation, \"description\": task.description,\n \"createdAt\": task.createdAt,\n \"updatedAt\": task.updatedAt }\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(404, \"Task not found\")", "def get(self, project_id, task_id):\n try:\n task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id},\n only=self.export_fields, include=('project',), raw=True)\n except Task.DoesNotExist:\n return {'message': \"unknown task\"}, 404\n return {'task': self.export(task)}, 200", "def get(self, id):\n task = get_task(get_db(), id)\n if not task:\n api.abort(404, f\"Invalid task with id: {id}\")\n return task_to_dict(task)", "def get(self):\n gid = self.get_query_argument('gid', None)\n\n if gid: # get a specified task\n self.write(update_fields(\n self._rpc.aria2.tellStatus(self._token, gid, TASK_FIELDS)))\n\n else: # get all tasks\n active_tasks = self._rpc.aria2.tellActive(self._token, TASK_FIELDS)\n waiting_tasks = self._rpc.aria2.tellWaiting(\n self._token, -1, 100, TASK_FIELDS)\n 
stopped_tasks = self._rpc.aria2.tellStopped(\n self._token, -1, 100, TASK_FIELDS)\n all_tasks = [\n update_fields(task) for task in\n itertools.chain(active_tasks, waiting_tasks, stopped_tasks)\n ]\n self.write({'tasks': all_tasks})", "def get_task(self):\n\n url='{url}/task'.format(url=config.SERVER_URL)\n\n try:\n res=request.urlopen(url,timeout=10).read()\n res=str(res,encoding='utf8')\n except Exception as e:\n check_server() # sleep until server is available\n try:\n res=request.urlopen(url,timeout=10).read()\n res=str(res,encoding='utf8')\n except:\n err_str='error: client -> get_task : ' \\\n 'unable to connect to server, exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)\n\n if 'no task' in res: # if server have no task uid ,return 'no task uid'\n err_str= 'error: client -> get_task : ' \\\n 'unable to get task, exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)\n\n try: # try to parse task str\n res=res.split(',')\n self.task_uid=res[0]\n self.task_type=res[1]\n except:\n err_str='error: client -> get_task : ' \\\n 'unable to split task str,exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)", "def get(self, task_id):\n try:\n return self.dal.task.get_by_id(task_id)\n except EntityNotFound:\n raise DoesNotExist()", "def task_get(context, task_id, session=None, force_show_deleted=False):\n task_ref = _task_get(context, task_id, session=session,\n force_show_deleted=force_show_deleted)\n return _task_format(task_ref, task_ref.info)", "def show(id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n task = estask.Task(kargs)\n try:\n dict_resp= task.show(id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"fail to get task list\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n\n try:\n task.print_show(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def _task_get(context, task_id, session=None, force_show_deleted=False):\n session = session or get_session()\n query = session.query(models.Task).options(\n sa_orm.joinedload(models.Task.info)\n ).filter_by(id=task_id)\n\n if not force_show_deleted and not context.can_see_deleted:\n query = query.filter_by(deleted=False)\n try:\n task_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"No task found with ID %s\", task_id)\n raise exception.TaskNotFound(task_id=task_id)\n\n # Make sure the task is visible\n if not _is_task_visible(context, task_ref):\n msg = \"Forbidding request, task %s is not visible\" % task_id\n LOG.debug(msg)\n raise exception.Forbidden(msg)\n\n return task_ref", "def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "def retrieve_task(self, task_id):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_TASK_STATUS,\n str(task_id)]))\n return r.json()", "def view_task(self, task_id):\n api_url = self.server_url + self.METHOD_VIEW_TASK + str(task_id)\n\n request = Request(api_url)\n\n log.info(\"Request to \" + api_url)\n try:\n response = request.get()\n except HTTPError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n except 
ConnectionError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n\n log.info(\"Response: \" + str(response))\n\n return response", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def _task_info_get(context, task_id, session=None):\n session = session or get_session()\n query = session.query(models.TaskInfo)\n query = query.filter_by(task_id=task_id)\n try:\n task_info_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"TaskInfo was not found for task with id %(task_id)s\",\n {'task_id': task_id})\n task_info_ref = None\n\n return task_info_ref", "def get(self, subresource, **kwargs):\n return getattr(RESTTask, subresource)(self, **kwargs)", "def taskdetail_get(td_id):\n return IMPL.taskdetail_get(td_id)", "def get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)", "def get_task(self, task_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE id=?\", (task_id,))\n return res.fetchone()", "def get(self, id):\n\n return self.client.get(\"external-task/{0}\".format(id))", "def get(self, controller, data, *args, **kwargs): \n task_manager = controller.get_task_manager()\n res = task_manager.get_all_tasks(details=True)\n resp = {\n u'task-instances':res,\n u'count':len(res)\n } \n return resp", "def fusion_api_get_task(self, param='', uri=None, api=None, headers=None):\n if uri is not None:\n # update fully qualified URL to relative URI\n uri = re.sub('^https://\\d*.\\d*.\\d*.\\d*', '', uri)\n return self.task.get(uri=uri, api=api, headers=headers, param=param)", "def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)", "def get_task_info(self):\n\n print()\n employee_name = self.task.get_employee_name()\n task_name = self.task.get_task_name()\n mins = self.task.get_time_spent()\n notes = self.task.get_notes()\n date = self.task.get_date()\n\n task = {\n 'employee_name': employee_name,\n 'task_name': task_name,\n 'mins': mins,\n 'notes': notes,\n 'date': date\n }\n\n return task", "def get(self):\n\n return task_service.get_tasks()", "def get_task(self, id):\n raise NotImplementedError()", "def get_task(task_id):\n return db.task.find_one({'_id': ObjectId(task_id)})" ]
[ "0.74831307", "0.7133121", "0.6826516", "0.6812052", "0.67514664", "0.6739813", "0.67362785", "0.67078495", "0.6705825", "0.66610193", "0.66112524", "0.66061497", "0.65405357", "0.65372086", "0.65368414", "0.6521287", "0.65013915", "0.64811116", "0.64549655", "0.64519477", "0.64484316", "0.6438015", "0.64167297", "0.6411057", "0.6346595", "0.63040006", "0.62929606", "0.62665755", "0.6245407", "0.62206364" ]
0.7734223
0
ingest from a particular sequence
def sequence_ingest(self,sequence):
    data=self.data
    counter=0
    for item in data[sequence]:
        datestring=item['specimenDate']
        date=fetchdate(datestring)
        row,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=item['areaCode'])
        row.areaname=item['areaName']
        row.dailyLabConfirmedCases=item['dailyLabConfirmedCases']
        row.totalLabConfirmedCases=item['totalLabConfirmedCases']
        row.changeInDailyCases=item['changeInDailyCases']
        row.dailyTotalLabConfirmedCasesRate=item['dailyTotalLabConfirmedCasesRate']
        row.previouslyReportedDailyCases=item['previouslyReportedDailyCases']
        row.previouslyReportedTotalCases=item['previouslyReportedTotalCases']
        row.changeInTotalCases=item['changeInTotalCases']
        row.save()
        counter+=1
    log.info(f'Processed: {counter} rows')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runingest(sdms):\n\n NotImplementedError", "def sequence_ingest(self,areacode):\n\t\tdata=self.data[self.data['Area code']==areacode]\n\t\tareaname=data['Area name'].unique().item()\n\t\tlog.debug(f'Ingesting cases from {areacode}: {areaname}')\n\t\t\n\t\tcounter=0\n\t\tfor day in data['Specimen date']:\n\t\t\tdate=fetchdate(day)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=areacode)\n\t\t\tthis_day=data[data['Specimen date']==day]\n\t\t\trow.areaname=areaname \n\t\t\t#add head(1) (faster than unique() ) to deal with some areas returned twice as part of both UTLA AND LTLA sequences\n\t\t\trow.dailyLabConfirmedCases=this_day['Daily lab-confirmed cases'].head(1).item()\n\t\t\trow.totalLabConfirmedCases=this_day['Cumulative lab-confirmed cases'].head(1).item()\n\t\t\trow.save()\n\t\t\tcounter+=1\n\t\tlog.debug(f'Processed: {counter} rows')", "def partition(seq):\n\n return 0", "def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence", "def extend_seq(mrnaseq, mrna_frag, total_length=50):\n #\n # Prepare sequences with no gaps\n #\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n #\n # check if the sequence is shorter\n #\n if len(mrna_frag_nogap) > total_length:\n syserr(\"mrnaseq_nogap: \", mrnaseq_nogap)\n syserr(\"mrna_frag_nogap: \", mrna_frag_nogap)\n syserr(\"mrnaseq: \", mrnaseq)\n syserr(\"mrna_frag: \", mrna_frag)\n raise Exception(\n \"Check your sequences maybe you should shrink, not extend them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean([i for i, x in enumerate(mrna_frag) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the extension of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length > total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = total_length - length\n quot = dif // 2 # this is explicit integer division\n l_ext = li - quot # TODO check if they are not lower than 0\n u_ext = ui + (dif - quot)\n if (l_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # extend left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n if (li - dif < 0):\n return mrnaseq_nogap[:ui + abs(li - dif)]\n else:\n return mrnaseq_nogap[li - dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n # if there is noting to extend to the right\n if ui + dif > len(mrnaseq_nogap):\n return mrnaseq_nogap[li - ((ui + dif) - len(mrnaseq_nogap)):]\n else:\n return mrnaseq_nogap[li:ui + dif]", "def imputer(seq, n=500):\n cur = len(seq)\n if cur < n:\n return np.concatenate((seq, np.zeros(n - cur)))\n return seq[: n]", "def shrink_seq(mrnaseq, mrna_frag, mrna_frag_target, total_length=50):\n # Prepare sequences with no gaps\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n if len(mrna_frag_nogap) < total_length:\n syserr(mrna_frag_nogap)\n syserr(mrnaseq)\n 
syserr(mrna_frag)\n syserr(mrna_frag_target)\n raise Exception(\n \"Check your sequences maybe you should extend, not shrink them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean(\n [i for i, x in enumerate(mrna_frag_target) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag_target))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the shrinkage of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length < total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = abs(total_length - length)\n quot = dif // 2 # this is explicit integer division\n l_ext = li + quot\n u_ext = ui - (dif - quot)\n if (u_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # trim left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li + dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li:ui - dif]", "def _feed(self, SeqSeqToken, token_to_index, index_to_token):\n for SeqToken in SeqSeqToken:\n for Token in SeqToken:\n if not token_to_index.has_key(Token):\n i = len(index_to_token)\n token_to_index[Token] = i\n index_to_token.insert(i, Token)", "def reassemble(self, seq, buf):\n # XXX - fastpath properly sequenced data.\n if seq == self.cur and not self.q:\n self.cur += len(buf)\n return buf\n # XXX - favor newer data\n heapq.heappush(self.q, (seq, buf))\n l = []\n while self.q:\n if self.q[0][0] <= self.cur:\n seq, buf = heapq.heappop(self.q)\n if seq != self.cur:\n # Reverse overlap. 
Trim left (empty string on rexmit)...\n buf = buf[self.cur-seq:]\n l.append(buf)\n self.cur += len(buf)\n else:\n break\n return ''.join(l)", "def abundance_greedy_clustering(amplicon_file, minseqlen, mincount, chunk_size, kmer_size):\n otu = []\n\n for seq, number in chimera_removal(amplicon_file, minseqlen, mincount, chunk_size, kmer_size):\n otu.append((seq, number))\n\n return otu", "def ingest(self, input):\n MedianStreaming.ingest(self,input)\n self.total += input\n self.count += 1\n return self.median_current, self.total, self.count", "def prepaire_sub_seq2seq_input(extraction_results, data_aug=2):\n input_smiles = []\n subs_for_merge = []\n for cand_id, can_res in extraction_results.items():\n src_sub, _, src_frag, tgt_frag, labeled_src, labeled_tgt = can_res\n src_sub_smi = canonicalize_smiles(Chem.MolToSmiles(src_sub))\n src_frag_smi = canonicalize_smiles(Chem.MolToSmiles(src_frag))\n input_smiles.append(smi_tokenizer(src_sub_smi) +\n ' | ' + smi_tokenizer(src_frag_smi))\n subs_for_merge.append(src_sub)\n for _ in range(data_aug):\n src_sub_smi = get_random_smiles(src_sub_smi)\n src_frag_smi = get_random_smiles(\n canonicalize_smiles(Chem.MolToSmiles(src_frag)))\n input_smiles.append(smi_tokenizer(src_sub_smi) +\n ' | ' + smi_tokenizer(src_frag_smi))\n subs_for_merge.append(src_sub)\n return input_smiles, subs_for_merge", "def abundance_greedy_clustering(amplicon_file, minseqlen, mincount, chunk_size,\n kmer_size):\n data = chimera_removal(amplicon_file, minseqlen, mincount, chunk_size, kmer_size)\n otu = []\n\n for sequence, count in data:\n otu.append((sequence, count))\n\n return otu", "def _extract_seq(self, genome, regions):\n logger.debug(\"Extracting sequences on the forward strand\")\n for region in regions:\n if self.window_size <= 0:\n seq_start = region.start\n seq_end = region.end\n else:\n seq_start = max(region.summit - self.extend, 0)\n seq_end = min(region.summit + self.extend,\n genome.chrom_sizes[region.chrom])\n self.seq_starts.append(seq_start)\n self.seq_ends.append(seq_end)\n self.sequences.append(\n genome.fetch_sequence(region.chrom, seq_start, seq_end))\n if self.strand in ['both', '-']:\n self._rc_seq()", "def CatFasta2(inFile,beginSeqIndex,endSeqIndex,fpout):#{{{\n cntSeq=0\n fpin = open(inFile, \"r\")\n buff = fpin.read(BLOCK_SIZE)\n brokenseq=\"\"; ##for the seq broken by BLOCK\n while buff:\n if cntSeq > endSeqIndex:\n break\n beg=0\n end=0\n while 1:\n if brokenseq:\n end=buff.find(\"\\n>\")\n if end >= 0:\n seq=brokenseq+buff[0:end]\n brokenseq=\"\"\n beg=end\n if cntSeq > beginSeqIndex and cntSeq <= endSeqIndex:\n fpout.write(\"%s\\n\"%seq)\n else:\n brokenseq += buff\n break\n\n beg=buff.find(\">\",beg)\n end=buff.find(\"\\n>\",beg+1)\n if beg >= 0:\n cntSeq+=1\n if end >=0:\n seq=buff[beg:end]\n beg=end\n if cntSeq > beginSeqIndex and cntSeq <= endSeqIndex:\n fpout.write(\"%s\\n\"%seq)\n else:\n brokenseq=buff[beg:]\n break\n else:\n brokenseq+=buff\n break\n buff = fpin.read(BLOCK_SIZE)\n if brokenseq:\n if cntSeq > beginSeqIndex and cntSeq <= endSeqIndex:\n fpout.write(\"%s\\n\"%brokenseq)\n\n fpin.close()\n return 0", "def __call__(self):\n\n self.ndx+=self.delta\n if not 0<=self.ndx<len(self.seq):\n if self.ndx>len(self.seq):\n self.ndx=len(self.seq) # In case this sequence has shrunk.\n if self.yoyo:\n self.delta*=-1\n self.ndx+=self.delta*2\n else:\n self.ndx=0\n return self.seq[self.ndx]", "def ingest_all(self):\n\t\tfor place in self.district_codes():\n\t\t\tself.sequence_ingest(place)\n\t\tif 
self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_cases',self.edition)", "def parse_proteome(fasta_file,kmer_size=12,out_base=\"kmers\",seq_per_file=50000,num_to_write=1000000):\n\n all_kmers = {}\n seq_name = None\n current_sequence = []\n\n # Parse fasta file, splitting into kmers as we go\n with open(fasta_file) as infile:\n for l in infile:\n\n if l.startswith(\">\"):\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n current_sequence = []\n seq_name = l[1:].strip()\n else:\n if seq_name is None or l.strip() == \"\":\n continue\n current_sequence.append(l.strip())\n\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n # Sort kmers\n to_sort = [(len(all_kmers[k]),k) for k in all_kmers.keys()]\n to_sort.sort(reverse=True)\n\n # kmers \n kmers = [k[1] for k in to_sort]\n\n if len(kmers) > num_to_write:\n kmers = kmers[:num_to_write]\n else:\n\n # If there are more single kmers than the total we want to get, grab a\n # random selection of them.\n single_kmers = [k[1] for k in to_sort if k[0] == 1]\n if num_to_write - len(kmers) > 0:\n to_grab = num_to_write - len(kmers)\n random.shuffle(single_kmers)\n kmers.extend(single_kmers[:to_grab])\n\n out = []\n counter = 0\n for k in kmers:\n\n # make sure kmer has only amino acids in it\n score = sum([1 for l in k if l not in \"ACDEFGHIKLMNPQRSTVWY\"])\n if score > 0:\n continue\n\n ids = \",\".join(all_kmers[k])\n out.append(\"{} {:5d} {}\\n\".format(k,len(all_kmers[k]),ids))\n\n if counter != 0 and counter % seq_per_file == 0:\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n print(counter,len(kmers))\n sys.stdout.flush()\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()\n\n out = []\n\n counter += 1\n\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()", "def sequence_from_thresh(match):\n if len(match) == 0:\n print(\"Couldn't find any audio in input clip\")\n exit(0)\n\n sequences = []\n cur_seq = [match[0]]\n cur_id = 1\n\n while cur_id < len(match):\n if match[cur_id] == match[cur_id - 1] + 1:\n cur_seq.append(match[cur_id])\n if cur_id == len(match) - 1:\n sequences.append(cur_seq)\n break\n else:\n sequences.append(cur_seq)\n cur_seq = [match[cur_id]]\n\n cur_id += 1\n if len(sequences) == 0:\n return [(match[0], match[0])]\n\n sequences = [(x[0], x[-1]) for x in sequences]\n\n return sequences", "def assemble(self):\n\n # Calculate overlaps between each pair of reads.\n\n for r1, r2 in combinations(self.reads, 2):\n self.calculate_overlap(r1, r2)\n\n # If there are equal reads, they overlap too\n\n for read in self.reads:\n if self.reads[read].visit_limit > 1:\n self.reads[read].overlaps[read] = 0\n\n # Find the read to start the DFS algorithm,\n # The good candidate is a read that can't be glued\n # to any other read on the right side.\n\n start_candidates = self.reads.copy()\n\n for read in self.reads:\n r = self.reads[read]\n for other_read in r.overlaps:\n if other_read in start_candidates:\n del start_candidates[other_read]\n\n if len(start_candidates):\n for read in start_candidates:\n if len(self.reads[read].overlaps):\n self.find_path(1, read)\n break\n else:\n\n # If there no 
good candidates where to start\n # the DFS algorithm, try each node.\n\n for read in self.reads:\n if len(self.reads[read].overlaps):\n self.find_path(1, read)\n if len(self.path) == self.num_reads:\n break\n\n # Assemble the original sequence:\n # start from the first node in the path,\n # glue subsequent reads, according to how\n # much they are supposed to protrude.\n\n self.sequence = self.path[0]\n\n if len(self.path) > 1:\n for i in range(len(self.path)-1):\n r = self.reads[self.path[i]]\n overlap = r.overlaps[self.path[i+1]]\n if overlap > 0:\n self.sequence += self.path[i+1][-overlap:]\n elif overlap < 0:\n self.sequence = self.sequence[:overlap]", "def _query_sequence_sources(self):\n if self.uniprot_id:\n self._query_uniprot()\n elif self.ncbi_id:\n self._query_ncbi()\n if \"mutations\" in self.metadata.keys():\n mutations = self.metadata[\"mutations\"].split()\n del self.metadata[\"mutations\"] # remove mutations, will be added subsequently\n for mutation in mutations:\n import re\n\n if mutation.startswith(\"ins\"): # insertion\n logger.debug(f\"Performing insertion {mutation} ...\")\n match = re.search(\"ins(?P<position>[0-9]+)(?P<insertion>[A-Z]+)\", mutation)\n self.insert(int(match.group(\"position\")), match.group(\"insertion\"))\n elif mutation.startswith(\"del\"): # deletion\n logger.debug(f\"Performing deletion {mutation} ...\")\n match = re.search(\n \"del(?P<first>[0-9]+)-(?P<last>[0-9]+)(?P<insertion>[A-Z]*)\",\n mutation,\n )\n self.delete(\n int(match.group(\"first\")),\n int(match.group(\"last\")),\n match.group(\"insertion\"),\n )\n else: # substitution\n logger.debug(f\"Performing substitution {mutation} ...\")\n self.substitute(mutation)\n if \"construct_range\" in self.metadata.keys():\n logger.debug(f\"Cropping sequence to construct {self.metadata['construct_range']} ...\")\n first, last = [int(x) for x in self.metadata[\"construct_range\"].split(\"-\")]\n self._sequence = self._sequence[first - 1 : last] # 1-indexed", "def group(seq):\n pass # replace with your solution", "def removeDuplicates(seq):\n\n pass", "def segment(data):", "def predict(self, seq, chunk_size = int(10e6)):\n\n seq_len = len(seq)\n num_chunks = int(numpy.ceil(float(seq_len) / float(chunk_size)))\n assert(num_chunks > 0)\n\n\tsys.stderr.write(\"number of chunks for contig: %i\\n\" % (num_chunks))\n\n start = 0\n stop = min(chunk_size, seq_len)\n\n out = []\n\n # iterate over chunks\n for chunk_idx in range(num_chunks):\n\n sys.stderr.write(\"processing chunk #%i\\n\" % (chunk_idx))\n\n assert (start < stop)\n chunk = seq[start:stop]\n\n assert(len(self.sensors) > 0)\n tf = CombinedFeatures()\n for i in xrange(len(self.sensors)):\n f = self.sensors[i].get_test_features(chunk, self.window)\n tf.append_feature_obj(f)\n\n sys.stderr.write(\"initialising kernel...\")\n self.kernel.init(self.svs, tf)\n sys.stderr.write(\"..done\\n\")\n\n self.svm.set_kernel(self.kernel)\n lab_out = self.svm.apply().get_values()\n\n assert(len(lab_out) > 0)\n out.extend(lab_out)\n\n # increment chunk\n start = stop\n stop = min(stop+chunk_size, seq_len)\n\n\n l = (-self.window[0]) * [-42]\n r = self.window[1] * [-42]\n\n # concatenate\n ret = l + out + r\n\n assert(len(ret) == len(seq))\n\n return ret", "def get_100_seq(sequence,seq_size,num_seqs_p_record):\n\n for i in range(num_seqs_p_record):\n ini = i * seq_size\n fin = (i + 1) * seq_size\n sub_seq = sequence[ini:fin]\n sub_seq.id = sub_seq.id + \"_\" + str(i) #Cambia el id del nuevo read\n if if_N_seq(sub_seq): #Mira si es una secuencia con muchas 'N'\n 
continue\n else:\n fragmented_genome.append(sub_seq)", "def train():\n\tA = collections.defaultdict(dict)\n\tB = collections.defaultdict(dict)\n\tpos_list = []\n\tword_list = []\n\tstr_buf = []\n\n\n\t# read each line and count A and B\n\tfor line in sys.stdin:\n\t\tline = line.split()\n\t\t# print(line)\n\t\tif len(line) == 3:\n\t\t\tstr_buf.append((str(line[0]), str(line[1])))\n\t\t\tword_list.append(str(line[0]))\n\t\t\tpos_list.append(str(line[1]))\n\n\t\telse:\n\t\t\t# if come to the end of a sentence\n\t\t\tif len(str_buf) != 0:\n\t\t\t\tstr_buf = [('<s>','BOS')] + str_buf + [('</s>', 'EOS')]\n\t\t\t\tword_list += ['<s>', '</s>']\n\t\t\t\tpos_list += ['BOS', 'EOS']\n\n\t\t\t\tfor i, s in enumerate(str_buf):\n\t\t\t\t\tif s[0] in B[s[1]]:\n\t\t\t\t\t\tB[s[1]][s[0]] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tB[s[1]][s[0]] = 1\n\n\t\t\t\t\tif s[0] != '</s>':\n\t\t\t\t\t\t# print('strbuf[i]:',str_buf[i], 's[1]', s[1])\n\t\t\t\t\t\tif str_buf[i+1][1] in A[s[1]]:\n\t\t\t\t\t\t\tA[s[1]][str_buf[i+1][1]] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tA[s[1]][str_buf[i+1][1]] = 1\n\n\t\t\t\tstr_buf = []\n\n\t# get unique POS list and word list\n\tpos_list_uniq = list(set(pos_list))\n\tword_list_uniq = list(set(word_list))\n\n\n\t# assume <UNK>, smoothing, normalize\n\tB_sm = collections.defaultdict(dict)\n\tA_sm = A.copy()\n\n\t# assume words apeear less than 2 times as <UNK>\n\tword_count = collections.Counter(word_list)\n\tfor pos in B:\n\t\tfor word in B[pos]:\n\t\t\tif word_count[word] > 1:\n\t\t\t\tB_sm[pos][word] = B[pos][word]\n\n\t\t\telse: # add <UNK> to B_sm\n\t\t\t\tword_list_uniq.remove(word)\n\t\t\t\tif '<UNK>' in B_sm[pos]:\n\t\t\t\t\tB_sm[pos]['<UNK>'] += 1\n\t\t\t\telse:\n\t\t\t\t\tB_sm[pos]['<UNK>'] = 1\n\n\tword_list_uniq += ['<UNK>']\n\n\t# add 1 smoothing\n\tfor pos in pos_list_uniq:\n\t\tfor word in word_list_uniq:\n\t\t\tif word in B_sm[pos]:\n\t\t\t\tB_sm[pos][word] += 1\n\t\t\telse:\n\t\t\t\tB_sm[pos][word] = 1\n\n\tfor prev in pos_list_uniq:\n\t\tfor next in pos_list_uniq:\n\t\t\tif next in A_sm[prev]:\n\t\t\t\tA_sm[prev][next] += 1\n\t\t\telse:\n\t\t\t\tA_sm[prev][next] = 1\n\n\t# delete instances like A[VB][BOS], A[EOS][VB],\n\t# B[VB]['</s>'], B[EOS]['Jack']\n\n\tfor pos in B_sm:\n\t\tfor word in B_sm[pos]:\n\t\t\tif (pos == 'BOS' and word != '<s>') or \\\n\t\t\t(pos == 'EOS' and word != '</s>') or \\\n\t\t\t(word == '<s>' and pos != 'BOS') or \\\n\t\t\t(word == '</s>' and pos != 'EOS'):\n\t\t\t\tB_sm[pos][word] = 0\n\n\tfor prev in A_sm:\n\t\tfor next in A_sm[prev]:\n\t\t\tif prev == 'EOS' or next == 'BOS':\n\t\t\t\tA_sm[prev][next] = 0\n\n\t# normalize\n\tfor pos in B_sm:\n\t\ts = sum(B_sm[pos].values())\n\t\tfor word in B_sm[pos]:\n\t\t\tif B_sm[pos][word] != 0:\n\t\t\t\tB_sm[pos][word] /= s\n\n\tfor prev in A_sm:\n\t\ts = sum(A_sm[prev].values())\n\t\tfor next in A_sm[prev]:\n\t\t\tif A_sm[prev][next] != 0:\n\t\t\t\tA_sm[prev][next] /= s\n\n\treturn A_sm, B_sm, word_list_uniq", "def reconstruct_input(self, ix):", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def reduce_archive(self):\n distance_dict = dict()\n clusters = []\n for i, pop1 in enumerate(self.archive):\n clusters.append([pop1])\n for j in range(i + 1, 
len(self.archive)):\n pop2 = self.archive[j]\n distance_dict[(pop1, pop2)] = np.linalg.norm(pop1.objective_values - pop2.objective_values, 2)\n while len(clusters) > self.archive_size:\n min_distance = np.inf\n min_clusters = (np.nan, np.nan)\n for i, cluster1 in enumerate(clusters):\n for j in range(i + 1, len(clusters)):\n cluster2 = clusters[j]\n distance = SPEA.cluster_distance(cluster1, cluster2, distance_dict)\n if distance < min_distance:\n min_distance = distance\n min_clusters = (i, j)\n cluster1 = clusters.pop(max(min_clusters))\n cluster2 = clusters.pop(min(min_clusters))\n clusters.append(cluster1 + cluster2)\n self.archive = []\n for cluster in clusters:\n self.archive.append(self.cluster_centroid(cluster, distance_dict))" ]
[ "0.6136106", "0.5331914", "0.53135365", "0.5175939", "0.50551826", "0.5026542", "0.49525687", "0.4925182", "0.48475435", "0.48164946", "0.47848186", "0.47665244", "0.4746681", "0.4724975", "0.4722248", "0.4703724", "0.46878073", "0.46619025", "0.4656287", "0.4650094", "0.46435857", "0.46430874", "0.46313924", "0.4626086", "0.46228155", "0.46147045", "0.46117342", "0.458181", "0.45703918", "0.4562413" ]
0.5506488
1
ingest from a particular areacode
def sequence_ingest(self,areacode):
    data=self.data[self.data['Area code']==areacode]
    areaname=data['Area name'].unique().item()
    log.debug(f'Ingesting cases from {areacode}: {areaname}')
    counter=0
    for day in data['Specimen date']:
        date=fetchdate(day)
        row,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=areacode)
        this_day=data[data['Specimen date']==day]
        row.areaname=areaname
        #add head(1) (faster than unique() ) to deal with some areas returned twice as part of both UTLA AND LTLA sequences
        row.dailyLabConfirmedCases=this_day['Daily lab-confirmed cases'].head(1).item()
        row.totalLabConfirmedCases=this_day['Cumulative lab-confirmed cases'].head(1).item()
        row.save()
        counter+=1
    log.debug(f'Processed: {counter} rows')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ingest_all(self):\n\t\tfor place in self.district_codes():\n\t\t\tself.sequence_ingest(place)\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_cases',self.edition)", "def temp_(code):\n\n # Does the partial match criteria include at least one OLC code?\n if centroid or northwest or southeast:\n # Is the specified UBID code valid?\n if buildingid.v2.isValid(code):\n # Separate the UBID code into three OLC codes.\n openlocationcodes = code.split(buildingid.v2.SEPARATOR_)\n\n # Extract the OLC codes.\n centroid_openlocationcode = openlocationcodes[buildingid.v2.INDEX_CENTROID_]\n northwest_openlocationcode = openlocationcodes[buildingid.v2.INDEX_NORTHWEST_]\n southeast_openlocationcode = openlocationcodes[buildingid.v2.INDEX_SOUTHEAST_]\n\n # Initialize new list of OLC codes.\n new_openlocationcodes = []\n\n if centroid:\n if drop_suffix_centroid > 0:\n # If the \"--centroid\" flag is set and the \"--drop-suffix-centroid\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n new_openlocationcodes.append(centroid_openlocationcode[:(-1 * drop_suffix_centroid)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(centroid_openlocationcode)\n\n if northwest:\n if drop_suffix_northwest > 0:\n # If the \"--northwest\" flag is set and the \"--drop-suffix-northwest\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n new_openlocationcodes.append(northwest_openlocationcode[:(-1 * drop_suffix_northwest)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(northwest_openlocationcode)\n\n if southeast:\n if drop_suffix_southeast > 0:\n # If the \"--southeast\" flag is set and the \"--drop-suffix-southeast\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n new_openlocationcodes.append(southeast_openlocationcode[:(-1 * drop_suffix_southeast)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(southeast_openlocationcode)\n\n if len(new_openlocationcodes) > 0:\n # If the new list of OLC codes is non-empty, then join\n # the OLC codes, and then return the result.\n return buildingid.v2.SEPARATOR_.join(new_openlocationcodes)\n else:\n # No result.\n return None\n else:\n # No result.\n return None\n else:\n # No result.\n return None", "def define_areas(\n pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = 
np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]", "def temp_(code):\n\n # Does the partial match criteria include at least one criterion?\n if centroid or north or east or south or west:\n match = buildingid.v3.RE_PATTERN_.match(code)\n\n # Is the specified UBID code valid?\n if match is None:\n return None\n else:\n # Initialize new list of OLC codes.\n new_openlocationcodes = []\n\n if centroid:\n centroid_openlocationcode = match.group(buildingid.v3.RE_GROUP_OPENLOCATIONCODE_)\n\n if drop_suffix_centroid > 0:\n # If the \"--centroid\" flag is set and the \"--drop-suffix-centroid\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n new_openlocationcodes.append(centroid_openlocationcode[:(-1 * drop_suffix_centroid)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(centroid_openlocationcode)\n\n if north:\n # If the \"--north\" flag is set, then append the Chebyshev\n # distance to the northern extent to the list.\n new_openlocationcodes.append(match.group(buildingid.v3.RE_GROUP_NORTH_))\n\n if east:\n # If the \"--east\" flag is set, then append the Chebyshev\n # distance to the eastern extent to the list.\n new_openlocationcodes.append(match.group(buildingid.v3.RE_GROUP_EAST_))\n\n if south:\n # If the \"--south\" flag is set, then append the Chebyshev\n # distance to the southern extent to the list.\n new_openlocationcodes.append(match.group(buildingid.v3.RE_GROUP_SOUTH_))\n\n if west:\n # If the \"--west\" flag is set, then append the Chebyshev\n # distance to the western extent to the list.\n new_openlocationcodes.append(match.group(buildingid.v3.RE_GROUP_WEST_))\n\n if len(new_openlocationcodes) > 0:\n # If the new list of OLC codes is non-empty, then join\n # the OLC codes, and then return the result.\n return buildingid.v3.SEPARATOR_.join(new_openlocationcodes)\n else:\n # No result.\n return None\n else:\n # No result.\n return None", "def main(filepath, maskpath):\n analytics.result = {}\n img_mask = nib.load(maskpath).get_fdata()\n print(\"loading\\n\", flush=True)\n # segmentation\n print(\"loading segmentation...\\n\", flush=True)\n seg = nib.load(filepath).get_fdata()\n # post processing\n print(\"applying some post processing...\\n\", flush=True)\n seg = apply_mask(seg, img_mask)\n seg_2d = binarize(seg, img_mask)\n print(\"End of slice processing\\n\", flush=True) \n distance_map, skel = analytics.distance(seg_2d)\n print(\"distance\\n\", flush=True)\n dist_per_label , skel= analytics.label_value(distance_map, skel)\n print(\"label_value\\n\", flush=True) \n analytics.get_analytics(seg, img_mask, dist_per_label, skel, verbose=True)\n print(\"got analytics\\n\", flush=True)", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def runingest(sdms):\n\n NotImplementedError", "def 
identify_peaks_amld_aeris(xCar, xDate, xDir, xFilename, outDir, processedFileLoc, Engineering, threshold='.1',\n rthresh = '.7',\n xTimeThreshold='5.0', minElevated='2', xB='102', basePerc='50',aeris=True):\n import csv, numpy\n import shutil\n from shapely.geometry import Point\n import pandas as pd\n import geopandas as gpd\n\n try:\n #amld = True\n baseCalc = float(basePerc)\n xABThreshold = float(threshold)\n minElevated = float(minElevated)\n rMin = float(rthresh)\n xDistThreshold = 160.0 # find the maximum CH4 reading of observations within street segments of this grouping distance in meters\n xSDF = 4 # multiplier times standard deviation for floating baseline added to mean\n\n xB = int(xB)\n xTimeThreshold = float(xTimeThreshold)\n fn = xDir + xFilename # set processed csv file to read in\n fnOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".csv\"\n fnShape = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".shp\"\n fnLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".log\"\n pkLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \"_info.csv\"\n jsonOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \".geojson\"\n infOut = processedFileLoc + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n ### TEST THING\n fn = xDir + xFilename # set raw text file to read in\n filenames = nameFiles(outDir,processedFileLoc,xCar,xDate,True)\n fnOut = filenames['fnOut']\n fnShape = filenames['fnShape']\n fnLog = filenames['fnLog']\n pkLog = filenames['pkLog']\n jsonOut = filenames['jsonOut']\n infOut = filenames['infOut']\n\n print(f\"{outDir}Peaks_{xCar}_{xDate}_info.csv\")\n fLog = open(fnLog, 'w')\n shutil.copy(infOut, pkLog)\n\n # convert lists to numpy arrays\n tempFile = pd.read_csv(fn)\n tempFile['ttot'] = tempFile['ttot'].astype(float)\n tempFile['ttot'] = tempFile['ttot'].astype(str)\n\n #tempFile.sort_values(by='ttot', ascending=True).reset_index(drop=True).to_csv(\n # '/Users/emilywilliams/Desktop/arg.csv')\n\n colnames = tempFile.columns\n\n aEpochTime = numpy.array(tempFile.iloc[:,colnames.get_loc('nearest10hz')])\n aDateTime = numpy.array(tempFile.apply(lambda x: x.DATE.replace('-','') + x.TIME.replace(':',''),axis=1))\n aLat = numpy.array(tempFile.iloc[:,colnames.get_loc('LAT')])\n aLon = numpy.array(tempFile.iloc[:,colnames.get_loc('LONG')])\n aCH4 = numpy.array(tempFile.iloc[:,colnames.get_loc('CH4')])\n aTCH4 = numpy.array(tempFile.iloc[:,colnames.get_loc('CH4')])\n aMean = numpy.zeros(len(aEpochTime))\n aCH4Mean_true = numpy.zeros(len(aEpochTime))\n aCH4STD= numpy.zeros(len(aEpochTime))\n aCH4Max= numpy.zeros(len(aEpochTime))\n aCH4Min= numpy.zeros(len(aEpochTime))\n aCH4Median= numpy.zeros(len(aEpochTime))\n\n aMeanC2H6 = numpy.zeros(len(aEpochTime))\n aThreshold = numpy.zeros(len(aEpochTime))\n aOdom = numpy.array(tempFile.apply(lambda x: x.VELOCITY*.1,axis=1).cumsum())\n aC2H6 = numpy.array(tempFile.iloc[:,colnames.get_loc('C2H6')])\n aC2C1 = numpy.array(tempFile.iloc[:,colnames.get_loc('C1C2')])\n aR = numpy.array(tempFile.iloc[:,colnames.get_loc('R')])\n if aeris:\n aBearingCCWE = numpy.array(tempFile.iloc[:, colnames.get_loc('bearing')])\n aBearingCWN = numpy.array(tempFile.iloc[:, colnames.get_loc('bearing')])\n aWS_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('r_avg')])\n aWD_CCWE_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('theta_avg')])\n aWD_CWN_cor = numpy.array(tempFile.iloc[:, 
colnames.get_loc('theta_avg')])\n\n if not aeris:\n aBearingCCWE = numpy.array(tempFile.iloc[:,colnames.get_loc('Bearing_ccwe')])\n aBearingCWN = numpy.array(tempFile.iloc[:,colnames.get_loc('Bearing_cwn')])\n aWS_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('airmar_ws')])\n aWD_CCWE_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('airmar_wd_cor_ccwe')])\n aWD_CWN_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('airmar_wd_cor_cwn')])\n\n arolling8= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_8')])\n arolling15= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_15')])\n arolling30= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_30')])\n arolling45= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_45')])\n arolling60= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_60')])\n\n arollingc2h6_15= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingc2h6_15')])\n arollingc2h6_30= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingc2h6_30')])\n arollingc2h6_45= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingc2h6_45')])\n\n arollingch4_60= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_60')])\n arollingch4_45= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_45')])\n arollingch4_30= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_30')])\n arollingch4_15= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_15')])\n\n\n\n xLatMean = numpy.mean(aLat)\n xLonMean = numpy.mean(aLon)\n #xCH4Mean = numpy.mean(aCH4)\n #xC2H6Mean = numpy.mean(aC2H6)\n #xC2C1Mean = numpy.mean(aC2C1)\n\n fLog.write(\"Day CH4_mean = \" + str(numpy.mean(aCH4)) +\n \", Day CH4 SD = \" + str(numpy.std(aCH4)) + \"\\n\")\n fLog.write(\"Day C2H6 Mean = \" + str(numpy.mean(aC2H6)) +\n \", Day C2H6 SD = \" + str(numpy.std(aC2H6)) + \"\\n\")\n fLog.write(\"Center lon/lat = \" + str(xLonMean) + \", \" + str(xLatMean) + \"\\n\")\n\n lstCH4_AB = []\n count = tempFile.shape[0]\n # generate list of the index for observations that were above the threshold\n for i in range(0, count - 2):\n if ((count - 2) > xB):\n topBound = min((i + xB), (count - 2))\n botBound = max((i - xB), 0)\n\n for t in range(min((i + xB), (count - 2)), i, -1):\n if aEpochTime[t] < (aEpochTime[i] + (xB / 2)):\n topBound = t\n break\n for b in range(max((i - xB), 0), i):\n if aEpochTime[b] > (aEpochTime[i] - (xB / 2)):\n botBound = b\n break\n\n xCH4Mean = numpy.percentile(aCH4[botBound:topBound], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[botBound:topBound], baseCalc)\n xCH4Mean_true = numpy.mean(aCH4[botBound:topBound])\n xCH4STD = numpy.std(aCH4[botBound:topBound])\n xCH4Min = numpy.min(aCH4[botBound:topBound])\n xCH4Max = numpy.max(aCH4[botBound:topBound])\n xCH4Median = numpy.percentile(aCH4[botBound:topBound],50)\n\n\n # xCH4SD = numpy.std(aCH4[botBound:topBound])\n else:\n xCH4Mean = numpy.percentile(aCH4[0:(count - 2)], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[0:(count - 2)], baseCalc)\n xCH4Mean_true = numpy.mean(aCH4[0:(count - 2)])\n xCH4STD = numpy.std(aCH4[0:(count - 2)])\n xCH4Min = numpy.min(aCH4[0:(count - 2)])\n xCH4Max = numpy.max(aCH4[0:(count - 2)])\n xCH4Median = numpy.percentile(aCH4[0:(count - 2)],50)\n\n\n # xCH4SD = numpy.std(aCH4[0:(count-2)])\n xThreshold = xCH4Mean + (xCH4Mean * xABThreshold)\n xThreshold_c2h6 = xC2H6Mean + (xC2H6Mean * xABThreshold)\n\n if (aCH4[i] > xThreshold and aR[i]>rMin):\n #if (aCH4[i] > xThreshold):\n lstCH4_AB.append(i)\n aMean[i] = xCH4Mean\n aMeanC2H6[i] = xC2H6Mean\n aThreshold[i] = xThreshold\n aCH4STD[i] = 
xCH4STD\n aCH4Max[i] = xCH4Max\n aCH4Min[i] = xCH4Min\n aCH4Mean_true[i] = xCH4Mean_true\n aCH4Median[i] = xCH4Median\n\n # now group the above baseline threshold observations into groups based on distance threshold\n lstCH4_ABP = []; xDistPeak = 0.0; xCH4Peak = 0.0;\n xTime = 0.0; cntPeak = 0; cnt = 0; sID = \"\"; sPeriod5Min = \"\"; prevIndex = 0;\n\n for i in lstCH4_AB:\n if (cnt == 0):\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n else:\n # calculate distance between points\n xDist = haversine(xLat1, xLon1, aLat[i], aLon[i])\n xDistPeak += xDist\n xCH4Peak += (xDist * (aCH4[i] - aMean[i]))\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n if (sID == \"\"):\n xTime = aEpochTime[i]\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n if ((aEpochTime[i] - aEpochTime[prevIndex]) > xTimeThreshold): # initial start of a observed peak\n cntPeak += 1\n xTime = aEpochTime[i]\n xDistPeak = 0.0\n xCH4Peak = 0.0\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n # print str(i) +\", \" + str(xDist) + \",\" + str(cntPeak) +\",\" + str(xDistPeak)\n #lstCH4_ABP.append(\n # [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i], aThreshold[i],\n # xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n # aUavg[i],aVavg[i],aWavg[i],aRavg[i],aThavg[i]])\n lstCH4_ABP.append(\n [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i],aCH4Mean_true[i],aCH4STD[i],\n aCH4Max[i],aCH4Min[i],aCH4Median[i], aThreshold[i],\n xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n aWD_CCWE_cor[i],aWD_CWN_cor[i],aWS_cor[i],aBearingCCWE[i],aBearingCWN[i],arolling8[i],\n arolling15[i],arolling30[i],arolling60[i],arollingc2h6_15[i],arollingc2h6_30[i],arollingc2h6_45[i],\n arollingch4_15[i],arollingch4_30[i],arollingch4_45[i],arollingch4_60[i]\n ])\n\n cnt += 1\n prevIndex = i\n\n # Finding peak_id larger than 160.0 m\n tmpsidlist = []\n for r in lstCH4_ABP:\n if (float(r[9]) > 160.0) and (r[0] not in tmpsidlist):\n tmpsidlist.append(r[0])\n cntPeak -= len(tmpsidlist)\n\n fLog.write(\"Number of peaks found: \" + str(cntPeak) + \"\\n\")\n print(f\"{xCar} \\t {xDate} \\t {xFilename} \\t {count} \\t {len(lstCH4_ABP)}\")\n\n # write out the observed peaks to a csv to be read into a GIS\n fOut = open(fnOut, 'w')\n # s = \"PEAK_NUM,EPOCHSTART,EPOCH,DATETIME,CH4,LON,LAT,CH4_BASELINE,CH4_THRESHOLD,PEAK_DIST_M,PEAK_CH4,TCH4,PERIOD5MIN\\n\"\n s = \"OP_NUM,OP_EPOCHSTART,OB_EPOCH,OB_DATETIME,OB_CH4,OB_LON,OB_LAT,OB_CH4_BASELINE,OB_CH4_MEAN,OB_CH4_STD,OB_CH4_MAX,OB_CH4_MIN,OB_CH4_MED,\" \\\n \"OB_CH4_THRESHOLD,OP_PEAK_DIST_M,OP_PEAK_CH4,OB_TCH4,OB_C2H6,\" \\\n \"OB_C2C1,OB_R,OB_C2H6_BASELINE,OB_PERIOD5MIN,ODOMETER,OB_WD_CCWE,OB_WD_CWN,OB_WS,\" \\\n \"OB_BEARING_CCWE,OB_BEARING_CWN,OB_R_8,OB_R_15,OB_R_30,OB_R_60,OB_C2H6_15,OB_C2H6_30,OB_C2H6_45,\" \\\n \"OB_CH4_15,OB_CH4_30,OB_CH4_45,OB_CH4_60\\n\"\n\n\n fOut.write(s)\n\n truecount = 0\n for r in lstCH4_ABP:\n if r[0] not in tmpsidlist:\n s = ''\n for rr in r:\n s += str(rr) + ','\n s = s[:-1]\n s += '\\n'\n fOut.write(s)\n truecount += 1\n fOut.close()\n fLog.close()\n\n openFile = pd.read_csv(fnOut)\n if openFile.shape[0] != 0:\n pkDistDf = openFile.copy().groupby('OP_NUM', as_index=False).apply(\n lambda x: max(x.ODOMETER) - min(x.ODOMETER))\n pkDistDf.columns = ['OP_NUM', 'OP_DISTANCE']\n openFile = pd.merge(openFile.copy(), 
pkDistDf)\n tempCount = openFile.groupby('OP_NUM', as_index=False).OP_EPOCHSTART.count().rename(\n columns={'OP_EPOCHSTART': 'Frequency'})\n tempCount = tempCount.loc[tempCount.Frequency >= minElevated, :]\n if tempCount.shape[0] == 0:\n print(f\"No Observed Peaks with enough Elevated Readings Found in the file: {xFilename}\")\n tempCount.to_csv(fnOut) ## added to deal with issue where it wasn't being filtered out\n elif tempCount.shape[0] != 0:\n oFile = pd.merge(openFile, tempCount, on=['OP_NUM'])\n openFile = oFile.copy()\n del (oFile)\n openFile[\"minElevated\"] = openFile.apply(lambda x: int(minElevated), axis=1)\n openFile['OB_CH4_AB'] = openFile.loc[:, 'OB_CH4'].sub(openFile.loc[:, 'OB_CH4_BASELINE'], axis=0)\n openFile['OB_C2H6_AB'] = openFile.loc[:, 'OB_C2H6'].sub(openFile.loc[:, 'OB_C2H6_BASELINE'],axis=0)\n openFile.to_csv(fnOut, index=False)\n\n\n fileWt = weighted_loc(openFile, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB',).loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'}).reset_index(drop=True)\n geometry_temp = [Point(lon, lat) for lon, lat in zip(fileWt['pk_LON'], fileWt['pk_LAT'])]\n crs = 'EPSG:4326'\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' WITH BUFFER DISTANCE\n gdf_buff = gpd.GeoDataFrame(fileWt, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n\n ##maybe this is the issue?\n #gdf_buff = gdf_buff.to_crs(epsg=32610)\n #gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(30)\n try:\n gdf_buff.to_file(jsonOut, driver=\"GeoJSON\")\n #gdf_buff.to_file('testthing.geojson', driver=\"GeoJSON\")\n except:\n print(\"Error Saving JSON File\")\n elif openFile.shape[0] == 0:\n print(f\"No Observed Peaks Found in the file:{xFilename}\")\n except ValueError:\n print(\"Error in Identify Peaks\")\n return False", "def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]", "def pick_area(data ,total_process, interval ,list_of_vars, list_of_areas, init_time=0, pr_height=None, ):\n \n \n \n #trying if the longitude values change from 0 to 360 or -180 to 180?\n \n if data['lon'].values[0] < 0:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [-142,-42,0,60],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [-100,-75,18,31],\n 'carribeans' : [-85,-60,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [-180, 180 ,0,90]}\n \n # -180 to 180 change the values given in the dictionary to relevant\n else:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [218,318,-10,70],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : 
[260,285,14,37],\n 'carribeans' : [275,300,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [0, 360 ,0,90]}\n \n \n \n places_dict = {}\n #looping in the list of areas\n say_pl = 1\n for pl in list_of_areas:\n variables_l = {}\n #looping in the list of variables\n say_var =1\n for var in list_of_vars:\n #check if data contains 'lev' coords.\n try:\n \n #wrap the data\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]), \n lev=pr_height).isel(time=slice(init_time, total_process, interval))\n \n #if no 'lev' coords exist.\n except:\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]),).isel(time=slice(init_time, total_process, interval))\n \n #append a single variable given by the user\n variables_l[var] = single\n \n \n #append all the variables with respect to their area of interest.\n places_dict[pl] = variables_l\n \n #return\n return places_dict", "def siftRegionsOfInterest(options,mapped_data_per_size_per_register,phase,cycle):\n for chromosome in sorted(mapped_data_per_size_per_register):\n # Make separate files for each chromosome\n output_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\"\n fhw=open(output_filename,\"w\")\n for register in sorted(mapped_data_per_size_per_register[chromosome]):\n start,end=0,0\n for coordinate in sorted(mapped_data_per_size_per_register[chromosome][register]):\n if start == 0:\n start = coordinate\n elif end == 0:\n if coordinate-start < phase*cycle:\n end = coordinate\n else:\n start = coordinate\n else:\n if coordinate-end < phase*cycle:\n end = coordinate\n else:\n fhw.write(str(register)+\"\\t\"+str(start)+\"\\t\"+str(end+phase-1)+\"\\n\")\n end=0\n start=coordinate\n if end!=0:\n fhw.write(str(register)+\"\\t\"+str(start)+\"\\t\"+str(end+phase-1)+\"\\n\")\n fhw.close()", "def load_shapefile_neighborhood(area):\n if os.path.isfile(\"data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('data/shp/Inzameling_huisvuil_080520.shp')\n elif os.path.isfile(\"../data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('../data/shp/Inzameling_huisvuil_080520.shp')\n if area:\n source = source[source['sdcode'].isin(list(area))]\n return list(source.geometry)", "def scan_ap(self, cut_off = 3480, r_ap = 12, r_an = 3):\r\n for trial in range(self.dimension):\r\n max_count, max_rank = self.pick_largest(cut_off = cut_off)\r\n if max_count >= 0:\r\n y,x = self.rank_yx(max_rank)\r\n print(\"Scan pos\", y,x,\" scanning\",trial,\"counts\", max_count)\r\n count_in, count_out = self.fit_galaxy(y,x,r_ap, r_an)\r\n count_sum = []\r\n local_bg = []\r\n for c in range(len(count_in)):\r\n if count_in[c] >= cut_off:\r\n count_sum.append(count_in[c])\r\n for c in range(len(count_out)):\r\n if count_out[c] <= cut_off:\r\n local_bg.append(count_out[c])\r\n no_c = len(count_in)\r\n if len(count_sum) >= int(np.pi * (r_ap **2) / 2): # Make sure it is not noise\r\n count_sum = np.array(count_sum).sum()\r\n if len(local_bg) != 0:\r\n total = 0\r\n for c in range(len(local_bg)):\r\n if 3*13.8 <= abs(local_bg[c] - 3419):\r\n total += local_bg[c]\r\n local_bg = total / len(local_bg)\r\n else:\r\n local_bg = 3419\r\n print(\"galaxy founded at \", y, x)\r\n self.galaxies.append(galaxy(y, x, r_ap, count_sum, bg_galaxy=local_bg, no_count = no_c))\r\n \r\n elif max_count == -1:\r\n print(\"aperture scan completed, number of galaxies found is\", len(self.galaxies))\r\n break", "def 
_processing( infile, rchr, dist, outf ):\n\n coords, sizes = build_dict(infile)\n qry_chrs = list(coords.keys())\n\n print(\"Primary\\tHaplotig\\tPrimary_Start\\tPrimary_end\\tHaplotig_Start\\tHaplotig_End\\tHaplotig_Length\", file=outf)\n for qchr in qry_chrs:\n refcoords = coords[qchr][0]\n qrycoords = coords[qchr][1]\n refst, refend, qryst, qryend = \\\n clustering( refcoords, sorted(qrycoords), sizes[qchr], dist )\n\n print(\"%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\" % \\\n (rchr, qchr, refst, refend, qryst, qryend, sizes[qchr]), file=outf)", "def computePValues(options,whole_mapped_data,mapped_data_per_size_per_register,phase,cycle):\n min_reads_mapped_to_a_phased_register=3\n min_reads_in_a_window=10\n chromosome_hits=[]\n for chromosome in sorted(mapped_data_per_size_per_register):\n chromosome_hits.append(chromosome)\n fhr=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\",\"r\")\n fhw=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\",\"w\")\n for line in fhr:\n register,start,end=line.strip().split()\n register=int(register)\n start=int(start)\n end=int(end)\n \n begin=start\n #print(chromosome,register,start,end)\n sys.stdout.flush()\n while begin+(phase*min_reads_mapped_to_a_phased_register) <= end+1:\n finish=begin+(phase*cycle)-1\n \n k=0\n for i in range(begin,finish+1):\n #print(chromosome,register,i,phase,start,end)\n try:\n k+=mapped_data_per_size_per_register[chromosome][register][i]\n except KeyError:\n pass\n #print(\"Next\")\n if k<min_reads_mapped_to_a_phased_register: \n begin+=phase\n continue\n \n num_all_reads=0\n for i in range(begin,finish+1):\n try:\n num_all_reads+=whole_mapped_data[chromosome][i]\n except KeyError:\n pass\n if num_all_reads<min_reads_in_a_window:\n begin+=phase\n continue\n \n n=0\n \"\"\"print(\"reached here\")\n sys.stdout.flush()\"\"\"\n # register_i is an iterator different from register\n for register_i in sorted(mapped_data_per_size_per_register[chromosome]):\n for i in range(begin,finish+1):\n try:\n n+=mapped_data_per_size_per_register[chromosome][register_i][i]\n except KeyError:\n pass\n \"\"\"if chromosome==\"Chr1\":\n print(str(n)+\" \"+str(num_all_reads)+\"\\n\")\"\"\"\n if n/num_all_reads<0.3:\n begin+=phase\n continue\n m=cycle*2\n pvalue=0\n for x in range(k,m+1):\n numerator=nCr((phase-1)*m,n-x)*nCr(m,x)\n pvalue+=numerator\n denominator=nCr(phase*m,n)\n pvalue=pvalue/denominator\n #print(chromosome,begin,finish,k,n,m,num_all_reads,pvalue,n/num_all_reads)\n if pvalue>=options.pvalue_cutoff:\n begin+=phase\n continue\n stuffs_to_be_printed_to_file=[register,begin,finish,k,n,m,num_all_reads,n/num_all_reads,pvalue]\n fhw.write(\"\\t\".join(map(str,stuffs_to_be_printed_to_file))+\"\\n\")\n sys.stdout.flush()\n begin+=phase", "def _readAndCombine(inputBed, withinBp):\n junct = {}\n\n # collapse a \n count = 0\n for line in open(inputBed):\n count += 1\n #if count % 100000==0: \n # print count \n if line.startswith(\"track\"):\n #out.write(line.strip()) \n #out.write(\" useScore=1\\n\") \n continue\n\n [chr, start, stop, name, score, strand, thStart, thStop, rgb, blockCount, blockSizes, blockStarts] = line.split(\"\\t\")\n score = float(score)\n if not junct.has_key(chr):\n junct[chr] = {}\n\n if int(blockCount) != 2:\n #print \"Illegal line does not have 2 blocks\" \n #print line \n continue\n\n start = int(start)\n stop = int(stop)\n [size1, 
size2] = [int(x) for x in blockSizes.split(\",\")[:2]]\n [start1, start2] = [int(x) for x in blockStarts.split(\",\")[:2]]\n leftEdge = start + size1\n rightEdge = start + start2 # start2 is relative to chr start \n intronLength = rightEdge - leftEdge\n\n toCombine = []\n for (other) in junct[chr].keys():\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, otherLength) = other\n if otherLength != intronLength:\n continue\n\n if otherMaxLeft < (leftEdge-withinBp) or otherMinLeft > (leftEdge+withinBp):\n continue\n\n if otherMaxRight < (rightEdge-withinBp) or otherMinRight > (rightEdge+withinBp):\n continue\n\n toCombine.append(other)\n\n allLines = [ (score, line, leftEdge, rightEdge) ]\n minLeft = maxLeft = leftEdge\n minRight = maxRight = rightEdge\n for (other) in toCombine:\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, intronLength) = other\n minLeft = min(minLeft, otherMinLeft)\n maxLeft = max(maxLeft, otherMaxLeft)\n minRight = min(minRight, otherMinRight)\n maxRight = max(maxRight, otherMaxRight)\n\n allLines.extend(junct[chr][other])\n del junct[chr][other]\n\n junct[chr][ (minLeft, maxLeft, minRight, maxRight, intronLength) ] = allLines\n\n return junct", "def segment(data):", "def main():\n # IMPORTANT: Specify a path to the new shapefile!\n data_dir = os.path.join(\"C:\\\\\",\"Users\",\"janni\",\"OneDrive\",\"Desktop\",\"data\")\n\n #Store route identification codes in to a list\n L_tracks=['\"tag_ident\"=72413','\"tag_ident\"=72417','\"tag_ident\"=73053','\"tag_ident\"=72364',\\\n '\"tag_ident\"=73054','\"tag_ident\"=79694','\"tag_ident\"=79698']\n\n if(os.path.isdir(data_dir)):\n print(\"Very good! You have chosen a valid directory!\")\n # load the point shapefile of the white-fronted goose manually!\n # access the active layer\n point_layer = iface.activeLayer()\n if not point_layer:\n print(\"Shape file failed to load!\")\n else:\n # 1\n addTimeAndDateObs(point_layer)\n print(\"-----------Created Date and Time objects-------------\")\n # 2\n addDistance(point_layer, L_tracks)\n print(\"-----------Distances calculation finished-------------\")\n # 3\n extractPoints(point_layer,Statistics(point_layer),data_dir)\n print(\"-----------Low distance points extracted and save to a new shapefile-------------\")\n print('Done')\n\n raster_fn = os.path.join(data_dir,\"Eurasia_Landcover.tif\")\n landuse_legend_fn = os.path.join(data_dir,'Eurasia_Landcover_Legend.csv')\n in_shape_fn = os.path.join(data_dir,\"lowDistance.shp\")\n out_shape_fn = os.path.join(data_dir,\"lowDistanceLanduseID.shp\")\n\n\n if(QgsProject.instance().mapLayersByName('lowDistanceLanduseID')==[]):\n processing.run(\"qgis:rastersampling\",\n {'COLUMN_PREFIX' : 'LanduseNr_',\n 'INPUT' : in_shape_fn,\n 'OUTPUT' : out_shape_fn,\n 'RASTERCOPY' : raster_fn})\n updated_shapefile = iface.addVectorLayer(out_shape_fn, '', 'ogr')\n else:\n updated_shapefile = QgsProject.instance().mapLayersByName('lowDistanceLanduseID')[0]\n #2\n convertIdFloatToInt(updated_shapefile)\n #3\n legend = preProcessLegend(landuse_legend_fn)\n #4\n convertIdToName(legend,updated_shapefile)\n #5\n plotLandUse(updated_shapefile,\"Pie\")\n print(\"-----------finished!-------------\")\n print(\"DONE! :)\")\n else:\n iface.messageBar().pushMessage(\"Error\", \"The directory does not exist. 
Please change data_dir in the code\",level = 1)\n print(\"Please specify a valid directory in the main function of Code_Distance.py!\")", "def analyze(self, program: ghidra.program.model.listing.Program) -> None:\n ...", "def _write_area_source_incmfd(src, lyr, max_np, max_hd):\n\n # Create the geometry\n lons, lats = _get_polygon(src)\n feat = ogr.Feature(lyr.GetLayerDefn())\n oring = ogr.Geometry(ogr.wkbLinearRing)\n for lon, lat in zip(lons, lats):\n oring.AddPoint(lon, lat, 0.0)\n oring.CloseRings()\n\n # Set standard parameters such as name, id, tectonic region\n for key in MAPPING_GENERAL.keys():\n feat.SetField(key, getattr(src, MAPPING_GENERAL[key]))\n\n # Set mfd parameters\n feat.SetField('mfd_type', 'IncrementalMFD')\n for key in MAPPING_MFD_INCR.keys():\n feat.SetField(key, getattr(src.mfd, MAPPING_MFD_INCR[key]))\n\n for i, occ in enumerate(src.mfd.occur_rates):\n tmp_str = 'or_%d' % (i+1)\n feat.SetField(tmp_str, occ)\n\n # Set geometry parameters\n for key in MAPPING_POLY_GEOM.keys():\n feat.SetField(key, getattr(src.geometry, MAPPING_POLY_GEOM[key]))\n\n # Set nodal plane distribution\n cnt = 1\n feat.SetField('num_npd', int(max_np))\n for npd in src.nodal_plane_dist:\n for key in MAPPING_NPD:\n tmp_str = '%s_%d' % (key, cnt)\n if key == 'weight':\n value = float(getattr(npd, MAPPING_NPD[key]))\n else:\n value = getattr(npd, MAPPING_NPD[key])\n feat.SetField(tmp_str, value)\n cnt += 1\n\n # Set hypocentral plane distribution\n cnt = 1\n feat.SetField('num_hdd', int(max_hd))\n for hdd in src.hypo_depth_dist:\n for key in MAPPING_HDD:\n tmp_str = '%s_%d' % (key, cnt)\n if key == 'hdd_w':\n value = float(getattr(hdd, MAPPING_HDD[key]))\n else:\n value = getattr(hdd, MAPPING_HDD[key])\n feat.SetField(tmp_str, value)\n cnt += 1\n\n # Creating the polygon and adding the geometry\n polygon = ogr.Geometry(ogr.wkbPolygon)\n polygon.AddGeometry(oring)\n feat.SetGeometry(polygon)\n\n if lyr.CreateFeature(feat) != 0:\n print \"Failed to create feature in shapefile.\\n\"\n sys.exit(1)\n\n polygon.Destroy()\n feat.Destroy()", "def get_subset_by_areas(sess_no, raw_path, \n align_on, from_time, to_time, \n target_areas,\n only_correct_trials = True, renorm = False, elec_type = 'grid' ):\n tinfo_path = raw_path + 'trial_info.mat'\n rinfo_path = raw_path + 'recording_info.mat'\n \n # get all data\n data_filtered = get_preprocessed_from_raw(sess_no, raw_path, \n align_on, from_time, to_time)\n \n # don't keep missing data // keep only_correct_trials if True\n \n responses = io.get_responses(tinfo_path)\n if only_correct_trials == False:\n ind_to_keep = (responses == responses).flatten()\n else:\n ind_to_keep = (responses == 1).flatten()\n \n #data1 =data1[ind_to_keep, :, :] # in the same time\n #data2 =data2[ind_to_keep, :, :]\n \n data_filtered = data_filtered[ind_to_keep,:,:]\n\n \n # select electrode and cut the additionnal time\n \n area_names = io.get_area_names(rinfo_path)\n \n idx = []\n for count, area in enumerate(area_names):\n if area in target_areas:\n idx.append(count) \n \n data_filtered = data_filtered[:, idx, :] \n \n\n ## change type \n data_filtered = data_filtered.astype(np.float32)\n \n if elec_type == 'single':\n data_filtered = data_filtered.reshape(data_filtered.shape[0]*data_filtered.shape[1], data_filtered.shape[2])\n data_filtered = np.expand_dims(data_filtered, axis=1)\n \n\n \n elif elec_type == 'average':\n data_filtered = np.mean(data_filtered, axis=1, keepdims=True)\n\n \n #elif elec_type == 'grid':\n #data_filtered = data_filtered\n\n elif elec_type != 
'grid':\n raise ValueError('Type \\'' + elec_type + '\\' not supported. Please ' + \n 'choose one of \\'single\\'|\\'grid\\'|\\'average\\'.')\n \n # renorm data : mean = 0 and var = 1\n if renorm == True :\n data_filtered = pp.renorm(data_filtered)\n \n ### variable for shape\n #n_chans1 = len(idx)\n \n #samples_per_trial = data_filtered.shape[2] \n \n return( data_filtered )", "def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? 
(y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] = np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]", "def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = 
morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def apply_algorithm(self):\n pass", "def add_building_output_locations(self,area_id,start,end,step,type='BUILDINGS_AS_HOLES'): \n print \"Getting buildings locations...\"\n \n dictionary = self.grid.get_building_output_locations(area_id,type)\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def analyze(self, start, end):\n return", "def combine_data(areas, employment):\n output = []\n\n for item in employment:\n for area in areas:\n if item['code'] == area['properties']['code']:\n geom = shape(area['geometry'])\n output.append({\n 'type': area['type'],\n 'geometry': mapping(geom.representative_point()),\n 'properties': {\n 'code': area['properties']['code'],\n # 'LSOA11NM': area['properties']['LSOA11NM'],\n 'employment': item['count']\n }\n })\n\n return output", "def totalavg(code, statistics=\"MEAN\", monthRange=[1, 12], yearRange=[2003, 2016],\n path=\"H:/GIS/SNODAS/SNODASproj.gdb/\", outpath=\"H:/GIS/SNODAS/SNODASproj.gdb/\"):\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',\n 
'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n\n # iterate over month range set here; default is 1 to 12 (Jan to Dec)\n for m in range(monthRange[0], monthRange[1] + 1):\n\n # this defines the dictionary key based on data type, month, and year\n g[code + '0000' + str(m).zfill(2)] = []\n\n # pick all tiff files from raw data folder of a data type\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here\n\n # create a list of rasters with the right code and month and year\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n # arcpy sa functions that summarize the daily data to monthly data\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata=\"DATA\")\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])", "def arounder(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tmaxCount = kargs.get('maxCount', 200)\n\t\tmaxArea = kargs.get('maxArea', 10000000)\n\t\tminArea = kargs.get('minArea', 0)\n\t\tmaxDist = kargs.get('maxDist', 0)\n\t\tignore = kargs.get('ignore', (0, 255, 0))\n\t\tcolor = kargs.get('color', (0, 0, 255))\n\t\tthick = kargs.get('thick', 1)\n\t\t\n\t\t# Image binaire issue de la détection\n\t\tbin = this._BINARY.copy()\n\t\tinput = bin.copy()\n\t\t\n\t\t# Modifie l'image de départ T__T\n\t\timage, contours, hierarchy = cv2.findContours(\n\t\t\tinput,\n\t\t\tcv2.RETR_LIST,\n\t\t\tcv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\t\t\n\t\t# Comptation\n\t\tfinger = None\n\t\tcount = len(contours)\n\t\tobjects, ignored = [], []\n\t\tif count < maxCount: #raise Exception('Too much noise, please quiet.')\n\t\t\t\n\t\t\t# Filtrage et localisation:\n\t\t\tfor contour in contours:\n\t\t\t\t\n\t\t\t\t# Filtrage des contours selon l'aire\n\t\t\t\tarea = cv2.contourArea(contour)\n\t\t\t\tif minArea <= area and area <= maxArea:\n\t\t\t\t\t\n\t\t\t\t\t# Calcul de la position\n\t\t\t\t\tobj = cv2.convexHull(contour)\n\t\t\t\t\tpoint = limiter(obj, maxDist)\n\t\t\t\t\t\n\t\t\t\t\t# Est-ce le point le plus bas ?\n\t\t\t\t\tif finger:\n\t\t\t\t\t\tif finger.y < point.y: finger = point\n\t\t\t\t\telse: finger = point\n\t\t\t\t\t\n\t\t\t\t\t# Enregistrement\n\t\t\t\t\tobjects.append(obj)\n\t\t\t\t\n\t\t\t\t# Sinon on l'ignore\n\t\t\t\telse: ignored.append(contour)\n\t\t\t\t\n\t\t\t### END FOR\n\t\t\n\t\t### END IF\n\t\telse: ignored = contours\n\t\t\n\t\t# On duplique l'image pour le rendu final\n\t\tthis._SCAN = scan = EmptyFrom(bin, 3)\n\t\tscan[:,:,0] = scan[:,:,1] = scan[:,:,2] = bin\n\t\t\n\t\t# Visuel\n\t\tprintf('%d/%d%60s\\r' % (len(objects), count, ''))\n\t\tcv2.drawContours(scan, ignored, -1, ignore, 1)\n\t\tcv2.drawContours(scan, objects, -1, color, thick)\n\t\t\n\t\t# Si on a trouvé\n\t\tif finger:\n\t\t\t\n\t\t\t# Affichage viseur\n\t\t\tscan[:, finger.x, :] = [255, 0, 0]\n\t\t\tscan[finger.y, :, :] = [127, 0, 0]\n\t\t\t\n\t\t\t# Calcul de la taille de l'image\n\t\t\tsize = D2Point(width(bin), height(bin))\n\t\t\t\n\t\t\t# Reformatage\n\t\t\torigin = +finger\n\t\t\tfinger /= size-1\n\t\t\tfinger.x = 1 - finger.x\n\t\t\tthis._BOTTOM = (origin-2).y == (size-4).y\n\t\t\n\t\t# Sinon on arrête de cliquer\n\t\telse: this._BOTTOM = False\n\t\t\n\t\t# On 
enregistre le truc\n\t\tthis._DETECTED = finger\n\t\t\n\t\treturn pyon(\n\t\t\tcontours = scan\n\t\t)", "def area(self):" ]
[ "0.5511954", "0.52550125", "0.5201155", "0.5177178", "0.51257527", "0.5105152", "0.5058143", "0.5027915", "0.49984464", "0.49700317", "0.49276492", "0.49143016", "0.49059305", "0.48697504", "0.48524132", "0.48495924", "0.48474264", "0.4834441", "0.48077965", "0.47952256", "0.47798565", "0.4768711", "0.4753542", "0.4744143", "0.4739221", "0.47345555", "0.47145098", "0.47053966", "0.46974152", "0.46900508" ]
0.6261241
0
add up total cases for a nation for integrity checks
def sum_cases(nation='England'):
    _sum = 0
    for _code in ons_week.stored_names:
        if ons_week.nation[_code] == nation:
            place = ons_week.stored_names[_code]
            _total = DailyCases.objects.filter(areaname=place).aggregate(Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
            if _total:
                _sum += _total
            else:
                log.info(f'No total for {place}')
    return _sum
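A minimal sketch of how the summed figure could feed the integrity check the query describes; the comparison against a separately stored national row (areaname='England') is an illustrative assumption and is not part of the function above.

from django.db.models import Max

england_sum = sum_cases('England')
# Hypothetical check: compare the per-area sum against an independently stored
# national cumulative total, assumed here to be kept under areaname='England'.
national = (DailyCases.objects.filter(areaname='England')
            .aggregate(Max('totalLabConfirmedCases'))
            .get('totalLabConfirmedCases__max'))
if national is not None and england_sum != national:
    log.info(f'Integrity check mismatch: sum of areas {england_sum} vs national {national}')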
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def foreign(x): # electing to count 'Indian Territory' as domestic\n if x == 'United States' or x == 'Indian Territory':\n return 0\n else:\n return 1", "def summarize(allowances):\n total_allowances = 0\n if isinstance(allowances, dict):\n for key, value in allowances.items():\n total_allowances = total_allowances + int(value)\n #end for\n else:\n total_allowances = allowances\n return total_allowances", "def test_sum_counts_by_consensus(self):\r\n #otu_table = parse_otu_table(self.otu_table)\r\n #otu_table = parse_biom_table(self.otu_table)\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 3)\r\n exp_result = {(\r\n 'Root', 'Bacteria', 'Actinobacteria'): array([1, 0, 2, 4]),\r\n ('Root', 'Bacteria', 'Firmicutes'): array([1, 3, 1, 1]),\r\n ('Root', 'Bacteria', 'Other'): array([1, 2, 1, 0])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)\r\n\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 2)\r\n exp_result = {('Root', 'Bacteria'): array([3, 5, 4, 5])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)\r\n\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 4)\r\n exp_result = {('Root', 'Bacteria', 'Actinobacteria', 'Actinobacteria'):\r\n array([1, 0, 2, 4]),\r\n ('Root', 'Bacteria', 'Firmicutes', '\"Clostridia\"'):\r\n array([1, 3, 1, 1]),\r\n ('Root', 'Bacteria', 'Other', 'Other'): array([1, 2, 1, 0])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)", "def total_amortization(self):\n return sum(self.table[\"amortization\"])", "def generate_organisation_addition(self):\n\t\treserved_columns = list()\n\t\ttotal_attandance = list()\n\t\tn = list()\n\t\tfor column in self.days[0].data.columns:\n\t\t\tif column.startswith('reserved_'):\n\t\t\t\treserved_columns.append(column)\n\t\t\t\ttotal_attandance.append(0)\n\t\t\t\tn.append(0)\n\n\t\tfor day in self.days:\n\t\t\tfor index, row in day.data.iterrows():\n\t\t\t\tfor i, column in enumerate(reserved_columns):\n\t\t\t\t\tif int(row[column]) > 0:\n\t\t\t\t\t\tweekend = True\n\t\t\t\t\t\tif int(row['day_of_week']) < 5:\n\t\t\t\t\t\t\tweekend = False\n\t\t\t\t\t\ttotal_attandance[i] += row['pool'] - self.get_average_for_month_at_time(int(row['month'])-1, int(row['hour']), int(row['minute']), weekend)\n\t\t\t\t\t\tn[i] += 1\n\n\t\tself.org_addition = dict()\n\t\tfor i, column in enumerate(reserved_columns):\n\t\t\tif n[i] > 0:\n\t\t\t\tself.org_addition[column] = total_attandance[i]/n[i]\n\t\t\telse:\n\t\t\t\tself.org_addition[column] = 0", "def test_getTotalIndividualCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getTotalIndividualCount(), 15)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getTotalIndividualCount(), 976)\r\n self.assertEqual(self.est3.getTotalIndividualCount(), 237)", "def treatInfections(self, amount, disease):\r\n if disease in self.city.diseaseCounts:\r\n self.city.diseaseCounts[disease] -= amount\r\n disease.addCubes(amount)", "def walesResults(dat):\n dat = dat.lower()\n if 'draw' in dat:\n res = 2\n elif 'wales' in dat:\n res = 1\n else:\n res = 0\n return res", "def get_num_cases(data, case_type, country, province=None):\n if province is not None:\n # number of cases for a province\n condition 
= ((data[case_type]['country/region'] == country) &\n (data[case_type]['province/state'] == province))\n result = data[case_type][condition].iloc[:, 4:].values.flatten()\n else:\n # number of cases for the full country/region\n result = data[case_type][\n data[case_type]['country/region'] == country\n ].iloc[:, 4:].sum(axis=0).values.flatten()\n return result", "def life_insurance_to_recive_total(self):\n pass", "def countContinent(self):\n if self.countryCounts == {}:\n print('No data to count')\n return\n else:\n for country in self.countryCounts:\n try:\n continent = pyccvt.country_alpha2_to_continent_code(country)\n except ModuleNotFoundError:\n print('Pycountry-convert module not installed')\n continue\n except KeyError:\n print('No continent for country with code %s.' % country)\n continue\n except:\n print('Error converting country to continent for country: %s' % country)\n continue # don't try and add if there isn't a valid continent\n\n if continent in self.continentCounts:\n self.continentCounts[continent] += 1\n else:\n self.continentCounts[continent] = 1", "def GOAL_TOTAL() -> int:\n return 21", "def check_how_much_to_paid(self, cells_number):\n if len(self.owner.countries[self.section]) == 2:\n return 10 * cells_number * 10000\n return 4 * cells_number * 10000", "def sum_crimes(cs:CrimeStatistics)-> int:\n # return 0 # stub\n #template from atomic\n crimes_total = (cs.violent_crimes+cs.property_crimes+cs.arson)\n return crimes_total", "def customer_acccounting(customer_orders):", "def test_countries_amount(self):\n countries_in_statistics_label = 'Countries in Statistics'\n\n target_html_object = html_target.activity_metric_with_id('countries_amount').format(\n '', countries_in_statistics_label\n )\n\n self.assertContains(self.response, target_html_object, 1)", "def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total", "def task2(self, doc) -> dict:\n country_count = {}\n match_records = []\n for entry in self.records:\n if (entry['event_type'] =='read'):\n if entry['subject_doc_id'] == doc:\n match_records.append(entry)\n for rec in match_records:\n if (rec['visitor_country'] in country_count):\n country_count[rec['visitor_country']] += 1\n else:\n country_count[rec['visitor_country']] = 1\n print(country_count)\n return country_count", "def getIncomeAndDeductions(self, paycheck, record):\n # the income category\n source = record[1].strip()\n # if it is not blank\n if source:\n # the pay rate for this category\n rate = record[2].strip()\n rate = float(rate) if rate else 0\n # the hours worked in this category\n hours = record[3].strip()\n hours = float(hours) if hours else 80\n # the amount earned\n amount = (record[4].strip())\n amount = float(amount) if amount else 0\n\n # adjust the hours earned by the salaried people\n if hours == 0 and amount > 0: hours = 80\n\n # make an income record\n income = Income(\n category = source.lower(),\n amount = amount,\n rate = rate, hours = hours,\n salary = 0 if rate else amount\n )\n # record\n paycheck.source[income.category] = income\n\n # the federal deductions\n source = record[6].strip()\n # if there\n if source:\n # get the amount\n amount = float(record[7].strip())\n # record\n 
paycheck.federal[source] = amount\n\n # the state deductions\n source = record[8].strip()\n # if there\n if source:\n # get the amount\n amount = float(record[9].strip())\n # record\n paycheck.state[source] = amount\n\n # the personal deductions\n source = record[10].strip()\n # if there\n if source:\n # get the amount\n amount = float(record[11].strip())\n # record\n paycheck.personal[source] = amount\n\n # all done\n return", "def total_area(self) :\n area = 0\n for i in self.residues :\n area += i.solvent_acc_area\n return area", "def testNumberIndividualsAddsUp(self):\n number = sum([x[1] for x in self.tree.get_species_abundances(reference=3)])\n number2 = sum([x[1] for x in self.tree.get_species_abundances(reference=3)])\n self.assertEqual(number, 3734)\n self.assertEqual(number2, 3734)", "def _abilities_all_units(self) -> Counter:\n abilities_amount = Counter()\n for unit in self.units + self.structures: # type: Unit\n for order in unit.orders:\n abilities_amount[order.ability] += 1\n if not unit.is_ready:\n if self.race != Race.Terran or not unit.is_structure:\n # If an SCV is constructing a building, already_pending would count this structure twice\n # (once from the SCV order, and once from \"not structure.is_ready\")\n abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1\n\n return abilities_amount", "def test_aggregate_nation(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'nation')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (1, 17)", "def check_sum(self) -> str:\n pass", "def allowance_total(self):\n total = 0\n for allowance in self.allowances:\n if allowance.is_active:\n total += allowance.amount\n return total", "def testhospital_vs_confirmed(self):\n data = load_covid_data(file)\n aim_day = data['evolution']['2020-03-16']\n # Artificial cut one value , it supposed to be 4 number\n aim_day['epidemiology']['confirmed']['total']['age'] = [10, 11, 12]\n try:\n cases_population = cases_per_population_by_age(data)\n except Exception as e:\n raise Exception", "def test_county(self):\n counties = self.geographies.find({ 'geoid': '15009' })\n\n self.assertEqual(counties.count(), 1)\n\n county = counties[0]\n\n self.assertEqual(county['sumlev'], config.SUMLEV_COUNTY)\n self.assertEqual(county['metadata']['NAME'], 'Maui County')\n self.assertEqual(county['metadata']['STATE'], '15')\n self.assertEqual(county['metadata']['COUNTY'], '009')\n\n pop_2000 = 128094 \n pop_2010 = 154834\n self._test_totalpop(county, pop_2000, pop_2010)", "def calculate_continent_daywise(countries_daywise_df):", "def the_cases(actor):\n if actor == \"Overijssel\":\n return ocases\n elif actor == \"Gorssel\":\n return gcases\n elif actor == \"Deventer\":\n return dcases\n else:\n print(\"Error\")\n return 0", "def total_end_for(self, age, sex):\n initial = getattr(self, '%s_total_beginning_%s' % (age, sex), 0)\n admitted = getattr(self, '%s_admitted_%s' % (age, sex), 0)\n out = getattr(self, '%s_total_out_%s' % (age, sex), 0)\n return (initial + admitted) - out" ]
[ "0.5884516", "0.5413104", "0.5396519", "0.534488", "0.5344416", "0.5311308", "0.52783656", "0.5254872", "0.5220287", "0.52202386", "0.5146511", "0.51194024", "0.5109611", "0.5081266", "0.5076306", "0.50641674", "0.5041381", "0.503695", "0.5023702", "0.5020459", "0.50109977", "0.49984866", "0.498571", "0.49637315", "0.4963442", "0.49615556", "0.49564365", "0.49516794", "0.49502146", "0.49175122" ]
0.75075555
0
This function registers the parameters of the model to Pypet. Parameters can be nested dictionaries. They are unpacked and stored recursively.
def _addParametersToPypet(self, traj, params):
    def addParametersRecursively(traj, params, current_level):
        # make dummy list if just string
        if isinstance(current_level, str):
            current_level = [current_level]
        # iterate dict
        for key, value in params.items():
            # if another dict - recurse and increase level
            if isinstance(value, dict):
                addParametersRecursively(traj, value, current_level + [key])
            else:
                param_address = ".".join(current_level + [key])
                value = "None" if value is None else value
                traj.f_add_parameter(param_address, value)

    addParametersRecursively(traj, params, [])
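A minimal usage sketch of the flattening behaviour described in the query, assuming a pypet Trajectory; the trajectory name and the parameter names below are illustrative assumptions, not values from the dataset.

from pypet import Trajectory

traj = Trajectory('example_run')  # illustrative trajectory name
nested = {"model": {"dt": 0.1, "sigma": None}, "duration": 2000}

# For a nested dict like `nested`, the recursion above amounts to registering
# one dot-separated address per leaf:
traj.f_add_parameter('model.dt', 0.1)
traj.f_add_parameter('model.sigma', "None")  # None values are stored as the string "None"
traj.f_add_parameter('duration', 2000)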
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def set_params(model, params): # type: ignore\n for p, p_new in zip(model.parameters(), params):\n p.data = p_new.data", "def addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n #### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n 
\n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return", "def set_params(self, **kwargs):\n\t\tself._treeType = kwargs.get('treeType', self._treeType)\n\t\tfor key, value in kwargs.items():\n\t\t\tif key in self._model_complexity_args:\n\t\t\t\tself._model_complexity_args[key] = value", "def define_parameters(self):", "def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n if isinstance(self.__dict__[key], Parameter):\n if isinstance(val, Parameter):\n self.__dict__[key] = val\n else:\n d = self.__dict__[key].__dict__\n self.__dict__[key] = Parameter(val, input_dimensional=d['_input_dimensional'],\n units=d['_units'],\n description=d['_description'],\n scale_object=d['_scale_object'],\n return_dimensional=d['_return_dimensional'])\n else:\n self.__dict__[key] = val", "def set_parameters(self, params):\n self.kp = params.pgain", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def _add_params(self, node_entry, idx):\n param_name = node_entry[\"name\"]\n assert param_name in self._params, (\n f\"The parameter {param_name} is not present\" \"in params dict provided.\"\n )\n value = self._params[param_name]\n numpy_array = value.numpy()\n tensor = numpy_helper.from_array(numpy_array, param_name)\n self._mc.add_initializers([tensor])\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype]\n input = onnx.helper.make_tensor_value_info(param_name, dtype, shape=numpy_array.shape)\n self._mc.add_inputs([input])", "def _ParseModelParameters(self, model_data):\n if model_data is None:\n\n return\n \n for m in model_data:\n\n try:\n \n modelname = m['modelname']\n\n runtime_parameters = []\n\n solverclass = None\n \n if m.has_key('solverclass'):\n\n solver_type = m['solverclass']\n\n if m.has_key('runtime_parameters'):\n 
\n for parameter in m['runtime_parameters']:\n\n component_name = parameter['component_name']\n field = parameter['field']\n val = parameter['value']\n\n runtime_parameters.append((component_name, field, val))\n\n\n self.StoreModelName(modelname=modelname,\n runtime_parameters=runtime_parameters,\n solver_type=solver_type)\n\n except Exception, e:\n\n print \"Error parsing model parameter for %s: %s\" % (modelname,e)\n \n # catch the exception and allow the parse to continue if the next\n # one is good\n \n continue", "def set_params(self, **kwargs):\n ...", "def set_params(self, **params):\n\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for regressor %s. '\n 'Check the list of available parameters '\n 'with `regressor.get_params().keys()`.' %\n (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self._regressor, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self", "def update_parameters(\n model_param: Dict[str, Union[float, List[float]]]\n ) -> Dict[str, float]:\n\n updated_param = {}\n\n for i, _ in enumerate(model_param[\"teff\"]):\n updated_param[f\"teff_{i}\"] = model_param[\"teff\"][i]\n updated_param[f\"radius_{i}\"] = model_param[\"radius\"][i]\n\n if \"parallax\" in model_param:\n updated_param[\"parallax\"] = model_param[\"parallax\"]\n elif \"distance\" in model_param:\n updated_param[\"distance\"] = model_param[\"distance\"]\n\n return updated_param", "def set_params(self, params):", "def updateParameters(self, paramDict):\n\n params = ['taux', 'mu', 'G', 'alpha_0', 'delta', 'p', 'I0', 'kparam']\n\n # Now set the parameters\n for k in paramDict.keys():\n mycode = 'self.' 
+ k + \"=paramDict[\\'\" + k + \"\\']\"\n exec(mycode)", "def _inject_params(self, params):\n\n params.extend([DomainParam(), InputTemplateFileParam(),\n OutputDirectoryParam(), LocaleParam(),\n WidthParam(), NoWrapParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def ApplyRuntimeParameters(self):\n \n if self.models is None or len(self.models) == 0:\n\n if self.verbose:\n\n print \"No model runtime parameters defined\"\n\n return\n\n num_models = len(self.models)\n\n if self.verbose:\n\n print \"Applying model runtime parameters to %d models\" % num_models\n\n for m in self.models:\n\n try:\n \n modelname = m['modelname']\n\n if self.verbose:\n\n print \"\\tSetting runtime parameters for '%s'\" % modelname\n\n\n self.SetModelName(modelname)\n \n if m.has_key('runtime_parameters') and not m['runtime_parameters'] is None:\n \n for parameter in m['runtime_parameters']:\n\n component_name = parameter[0]\n field = parameter[1]\n val = parameter[2]\n\n self.SetParameter(path=component_name, parameter=field, value=val)\n\n except Exception, e:\n\n print e\n\n continue\n\n # Now apply genericly set parameters\n\n if len(self._runtime_parameters) > 0:\n\n if self.verbose:\n\n print \"Applying generically set model runtime parameters\"\n\n \n for p in self._runtime_parameters:\n\n try:\n\n path = p['path'] \n parameter = p['parameter']\n value = p['value']\n service = None if not p.has_key('service') else p['service']\n\n self.SetParameter(path, parameter, value, service)\n \n except Exception, e:\n\n print e\n\n continue", "def saveParams(*args, **kwargs):\n\tapiMethod = args[0].get_text().lower()\n\t\n\tdef treeToDict(store, treeiter):\n\t\t\"\"\"\n\t\tProcess the tree to get parameter components, and ultimately the whole set.\n\t\tRow Index 0: Key\n\t\tRow Index 2: Value \n\t\t\"\"\"\n\t\twhile treeiter != None:\n\t\t\t# Determine if returning a dict or a listValue to thisLevelData. #\n\t\t\t# If thisLevelData has not been defined yet, define it to the type that matches first.#\n\t\t\t# If type match doesn't happen, well, whupz, you get an error. I should make it pop a dialog. 
#\n\t\t\t# Also, just ignore the row if the checkbox isn't checked #\n\t\t\tif store[treeiter][2]:\n\t\t\t\tkey = store[treeiter][0]\n\t\t\t\tuseParam = store[treeiter][2]\n\t\t\t\tif key == '':\n\t\t\t\t\tif 'thisLevelData' not in locals():\n\t\t\t\t\t\tthisLevelData = []\n\t\t\t\t\tisListVal = True\n\t\t\t\telse:\n\t\t\t\t\tif 'thisLevelData' not in locals():\n\t\t\t\t\t\tthisLevelData = {}\n\t\t\t\t\tisListVal = False\n\n\t\t\t\t# If no children, take the value set in the row #\n\t\t\t\tif store.iter_has_child(treeiter):\n\t\t\t\t\tchilditer = store.iter_children(treeiter)\n\t\t\t\t\tvalue = treeToDict(store, childiter)\n\t\t\t\telse:\n\t\t\t\t\tvalue = store[treeiter][1]\n\n\t\t\t\tif isListVal:\n\t\t\t\t\tthisLevelData.append(value)\n\t\t\t\telse:\n\t\t\t\t\tthisLevelData[key] = value\n\n\t\t\ttreeiter = store.iter_next(treeiter) # On to the next row\n\n\t\tif 'thisLevelData' in locals(): # If we actually saved params\n\t\t\treturn thisLevelData\n\t\telse:\n\t\t\treturn False\n\t\n\tstore = mainObj.widgets['paramStore']\n\tinitIter = mainObj.widgets['paramStore'].get_iter_first()\n\tmethodObj = mainObj.apiMethods[apiMethod]\n\tapiParams = treeToDict(store, initIter)\n\n\tif apiParams: # Only run the method if we have params\n\t\tmethodObj.clearParams() # This way we don't have anything left over that shouldn't be\n\t\tmethodObj.addParams(**apiParams)\n\telse:\n\t\tmethodObj.clearParams() # If there was anything set, it shouldn't be now", "def update_parameters(self,like_params):\n\n # get current dictionary with parameters, update and setup again\n params=self.get_params()\n\n for par in like_params:\n if par.name in params:\n params[par.name]=par.value\n\n self._setup_from_parameters(params)\n return", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return", "def add_parameters(params):\n\n items = [\n ('x', 'X manual', ''),\n ('z', 'Z manual', ''),\n ('automatic', 'Automatic', '')\n ]\n\n params.rotation_axis = bpy.props.EnumProperty(\n items = items,\n name = \"Rotation Axis\",\n default = 'automatic'\n )\n\n params.auto_align_extremity = bpy.props.BoolProperty(\n name='auto_align_extremity',\n default=False,\n description=\"Auto Align Extremity Bone\"\n )\n\n params.segments = bpy.props.IntProperty(\n name = 'limb segments',\n default = 2,\n min = 1,\n description = 'Number of segments'\n )\n\n params.bbones = bpy.props.IntProperty(\n name = 'bbone segments',\n default = 10,\n min = 1,\n description = 'Number of segments'\n )\n\n # Setting up extra layers for the FK and tweak\n params.tweak_extra_layers = bpy.props.BoolProperty(\n name = \"tweak_extra_layers\",\n default = True,\n description = \"\"\n )\n\n params.tweak_layers = bpy.props.BoolVectorProperty(\n size = 32,\n description = \"Layers for the tweak controls to be on\",\n default = tuple( [ i == 1 for i in range(0, 32) ] )\n )\n\n # Setting up extra layers for the FK and tweak\n params.fk_extra_layers = bpy.props.BoolProperty(\n name = \"fk_extra_layers\",\n default = True,\n description = \"\"\n )\n\n params.fk_layers = bpy.props.BoolVectorProperty(\n size = 32,\n description = \"Layers for the FK controls to be on\",\n default = tuple( [ i == 1 for i in range(0, 32) ] )\n )", "def 
set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n self.__dict__[key] = val\n\n if 'scale_params' in self.__dict__.keys():\n self.scale_params.set_params(dic)\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n self.atmospheric_params.set_params(dic)\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n self.atemperature_params.set_params(dic)\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n self.oceanic_params.set_params(dic)\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n self.ground_params.set_params(dic)\n\n if 'otemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)\n\n if 'gtemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['p'] = self.p\n return paramDict", "def save_parameters(self):\n self.read_parameters()\n group = NXprocess()\n group['model'] = self.composite_model\n group['data'] = self.data\n for m in self.models:\n group[m['name']] = self.get_model(m['model'])\n parameters = NXparameters(attrs={'model': m['class']})\n for n, p in m['parameters'].items():\n n = n.replace(m['model'].prefix, '')\n parameters[n] = NXfield(p.value, error=p.stderr,\n initial_value=p.init_value,\n min=str(p.min), max=str(p.max),\n vary=p.vary, expr=p.expr)\n group[m['name']].insert(parameters)\n group['title'] = 'Fit Model'\n group['model'] = self.get_model()\n self.write_group(group)", "def updateParameters(self,*args,**kwargs):\n for key in kwargs.keys():\n self._params[key] = kwargs[key]", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def _register_global_params(self, params):\n\n for name,obj in self.params().items():\n global_params.add(**{name:obj})\n\n for name,val in params.items():\n global_params.params(name).default=val\n\n params.update(global_params.get_param_values())\n params[\"name\"]=self.name" ]
[ "0.59744143", "0.5886846", "0.586683", "0.5800144", "0.57814205", "0.57720727", "0.5771282", "0.5759148", "0.57267505", "0.5726273", "0.57261705", "0.5721666", "0.5715509", "0.56774455", "0.56688505", "0.5631474", "0.56107694", "0.5597048", "0.5589862", "0.5588286", "0.5560929", "0.55561686", "0.55414796", "0.55333424", "0.5530313", "0.5513919", "0.5500566", "0.54790986", "0.5473702", "0.54469836" ]
0.66340107
0
If no evaluation function is given, we assume that a model will be simulated. This function will be called by pypet directly and therefore expects a pypet trajectory as an argument
def _runModel(self, traj): if self.useRandomICs: logging.warn("Random initial conditions not implemented yet") # get parameters of this run from pypet trajectory runParams = self.getParametersFromTraj(traj) if self.parameterSpace.star: runParams = flatten_nested_dict(flat_dict_to_nested(runParams)["parameters"]) # set the parameters for the model self.model.params.update(runParams) # get kwargs from Exploration.run() runKwargs = {} if hasattr(self, "runKwargs"): runKwargs = self.runKwargs # run it self.model.run(**runKwargs) # save outputs self._saveModelOutputsToPypet(traj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_trajectory():\n pass", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def make_predict_step(self):\n return self.make_eval_step()", "def evaluate(self, predictor_model) -> Any:\n raise NotImplementedError()", "def run_model(hyperparams, iteration): \n # Fixed random state\n rand_state = np.random.RandomState(1).get_state()\n np.random.set_state(rand_state)\n seed = np.random.randint(1, 2**31 - 1)\n tf.set_random_seed(seed)\n random.seed(seed)\n\n\n env = gym.make('CartPole-v1')\n env = DummyVecEnv([lambda: env])\n\n # Get all the current hyperparameter values\n hyperparams['timesteps_per_batch'] = hyperparams['timesteps_per_batch']\n for parameter_name in ['vf_stepsize', 'max_kl', 'gamma', 'lam']:\n hyperparams[parameter_name] = float(hyperparams[parameter_name])\n\n # Initialize model\n model = TRPO(MlpPolicy, env, \n verbose=1,\n timesteps_per_batch=hyperparams['timesteps_per_batch'],\n vf_stepsize=hyperparams['vf_stepsize'],\n max_kl=hyperparams['max_kl'],\n gamma=hyperparams['gamma'],\n lam=hyperparams['lam']\n )\n\n model.learn(total_timesteps=10000)\n model.save(\"trpo_cartpole_\" + str(iteration))\n \n result = evaluate(env, model)\n return result", "def eval_model(t,lat,lon,head,pitch,tide=0,temp=None,press=None):\n #get the sun positions for each timestamp, at our known lat,lon\n #sun_head, sun_zen = sunpos_mag(t,lat,lon,tide,temp,press,radians=True)\n sun_head = sunpos_mag(t, lat, lon, tide, temp, press, radians=True)\n sun_zen = sun_head[...,1]\n sun_head = sun_head[...,0]\n\n #TODO: input and output argument mismatch\n #get the ocean model aop values for each camera position\n aop = oceanaop(sun_head,sun_zen,head,pitch,1.33)\n return sun_zen,sun_head,aop", "def main(model_type, hyperparameter=None, data_version=\"version_6\", evaluation=False, first_loc=1, end_loc=7):\n # assign parameter save and load path\n parameter_path = f\"Model_parameters/{data_version}\"\n print(f'data path: {parameter_path}')\n\n if model_type == \"Regressor\":\n # preprocess for MLP preceptron\n X_train, y_train, X_del, y_del, X_test, y_test = \\\n pre_processing.merge_split(data_version=data_version, first_loc=first_loc, end_loc=end_loc)\n\n # training MLP preceptron\n regressor = Regressor.regression(X_train, y_train, X_test, y_test,\n hyperparameter=hyperparameter, version=data_version)\n\n # save model and prediction result\n Save_model.save_Preceptron(regressor, X_test, y_test, path=parameter_path, overwrite=True)\n\n # evaluate fitting process\n if evaluation:\n plot_learning_curve.evaluation_learning_curve(regressor, X_train, y_train,\n title=f\"{regressor.get_params()['hidden_layer_sizes']}\")\n\n elif model_type == \"Classifier\":\n # preprocess for MLP preceptron\n X_train, y_train, X_del, y_del, X_test, y_test = \\\n pre_processing.merge_split(data_version=data_version, first_loc=first_loc, end_loc=end_loc, regressor=False)\n\n # training MLP preceptron\n classifier = Classifier.classifier(X_train, y_train, X_test, y_test, hyperparameter=hyperparameter)\n\n # save model and prediction result\n Save_model.save_Preceptron(classifier, X_test, y_test, path=parameter_path)\n\n # evaluate MLP classifier\n if evaluation:\n confusion_matrix.confusion_matrix(classifier, X_test, y_test, target_name=None)", "def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n 
params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n 
return out.params.valuesdict()", "def respond(self,obs):\n if obs.timestep == 0:\n #If it's the first timestep, we have no clue. Since we don't even know if we are going to ask questions in the\n #future, we go ahead and init the inference engine for future use.\n self.p_obs = copy.deepcopy(obs)\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n #And set the knowledge source to inference so the next step we know where to look for in the upcoming step.\n self.knowledge.source[0] = ORIGIN.Inference\n\n #And pick a target station at random since we have to move forward.\n target_station = np.random.choice(self.tracking_stations) #pick a station at random.\n\n else:\n curr_k_id = self.knowledge.get_current_job_station_id()\n\n #Checking what knowledge we have.\n if (self.knowledge.source[curr_k_id]==ORIGIN.Answer):\n #Then we simply work on the station because we have an answer telling us that that's the station to work on.\n target_station = self.knowledge.station_order[curr_k_id]\n\n elif (self.knowledge.source[curr_k_id] == None):\n #which means we just finished a station in the last time-step. This calls for re-initalizing the inference_engine\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n target_station = np.random.choice(self.tracking_stations)\n\n elif (self.knowledge.source[curr_k_id]==ORIGIN.Inference):\n #Which means we have been working on a inference for a station.\n target_station = self.inference_engine.inference_step(self.p_obs,obs)\n self.knowledge.update_knowledge_from_inference(target_station)\n warnings.WarningMessage(\"Provision resetting inference_engine when a station is finished\")\n\n else:\n #it should never come to this.\n raise Exception(\"Some mistake around\")\n\n \"\"\"\n Okay, now that we know which station we should be headed to, we need to ensure the nitty-gritty details.\n Do we have a tool?\n If yes,\n if it matches our target station:\n destination: station\n else:\n destination: base\n else:\n destination: base\n \n Are we near our destination?\n Yes:\n Is it the base?\n Pick up the tool.\n else:\n execute work action.\n No:\n keep moving. 
\n \"\"\" \n\n if self.tool is not None:\n if self.tool == target_station:\n destination = obs.allPos[obs.stationIndices[target_station]]\n else:\n destination = global_defs.TOOL_BASE\n else:\n destination = global_defs.TOOL_BASE\n\n if utils.is_neighbor(self.pos,destination):\n if destination == global_defs.TOOL_BASE:\n #We are at the base to pick up a tool.\n desired_action = global_defs.Actions.NOOP\n self.tool = target_station\n else:\n #we are the station to work.\n desired_action = global_defs.Actions.WORK\n else:\n #Navigate to destination.\n desired_action = None\n\n obstacles = copy.deepcopy(obs.allPos).remove(self.pos)\n proposal = utils.generate_proposal(self.pos,destination,obstacles,desired_action)\n return proposal", "def TrainOneStep(self):\n pass", "def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):\n model.eval()\n if isinstance(dummy_input, torch.Tensor):\n dummy_input = [dummy_input]\n with torch.no_grad():\n model(*dummy_input)", "def eval_step(self, *args, **kwargs):\n raise NotImplementedError", "def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):\n if isinstance(dummy_input, torch.Tensor):\n dummy_input = [dummy_input]\n\n model.eval()\n with torch.no_grad():\n model(*dummy_input)", "def run_task(snapshot_config, *_):\n with LocalTFRunner(snapshot_config) as runner:\n env = TfEnv(gym.make('InvertedDoublePendulum-v2'))\n\n action_noise = GaussianStrategy(env.spec, max_sigma=0.1, min_sigma=0.1)\n\n policy = ContinuousMLPPolicy(env_spec=env.spec,\n hidden_sizes=[400, 300],\n hidden_nonlinearity=tf.nn.relu,\n output_nonlinearity=tf.nn.tanh)\n\n qf = ContinuousMLPQFunction(name='ContinuousMLPQFunction',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n qf2 = ContinuousMLPQFunction(name='ContinuousMLPQFunction2',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n replay_buffer = SimpleReplayBuffer(env_spec=env.spec,\n size_in_transitions=int(1e6),\n time_horizon=250)\n\n td3 = TD3(env_spec=env.spec,\n policy=policy,\n policy_lr=1e-4,\n qf_lr=1e-3,\n qf=qf,\n qf2=qf2,\n replay_buffer=replay_buffer,\n target_update_tau=1e-2,\n n_epoch_cycles=20,\n n_train_steps=1,\n smooth_return=False,\n discount=0.99,\n buffer_batch_size=100,\n min_buffer_size=1e4,\n exploration_strategy=action_noise,\n policy_optimizer=tf.train.AdamOptimizer,\n qf_optimizer=tf.train.AdamOptimizer)\n\n runner.setup(td3, env)\n runner.train(n_epochs=500, n_epoch_cycles=20, batch_size=250)", "def step(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))", "def _evaluate_model_parameters(self, session):\n logger.info('There are no model specific operation evaluation!')", "def execute_trajectory(\n trajectory: Trajectory,\n controller: Union['SimRobotController','RobotInterfaceBase'],\n speed: float = 1.0,\n smoothing: Optional[_SMOOTHING_OPTIONS2] = None,\n activeDofs: Optional[List[Union[int,str]]] = None\n ):\n if len(trajectory.times)==0: return #be tolerant of empty paths?\n if speed <= 0: raise ValueError(\"Speed must be positive\")\n from ..control.robotinterface import RobotInterfaceBase\n from ..robotsim import SimRobotController\n if isinstance(controller,SimRobotController):\n robot_model = controller.model()\n q0 = controller.getCommandedConfig()\n elif isinstance(controller,RobotInterfaceBase):\n robot_model = controller.klamptModel()\n cq0 = controller.commandedPosition()\n if cq0[0] is None:\n cq0 = 
controller.sensedPosition()\n q0 = controller.configToKlampt(cq0)\n else:\n raise ValueError(\"Invalid type of controller, must be SimRobotController or RobotInterfaceBase\")\n if activeDofs is not None:\n indices = [robot_model.link(d).getIndex for d in activeDofs]\n liftedMilestones = []\n assert not isinstance(trajectory,HermiteTrajectory),\"TODO: hermite trajectory lifting\"\n for m in trajectory.milestones:\n assert(len(m)==len(indices))\n q = q0[:]\n for i,v in zip(indices,m):\n q[i] = v\n liftedMilestones.append(q)\n tfull = trajectory.constructor()(trajectory.times,liftedMilestones)\n return execute_trajectory(tfull,controller,speed,smoothing)\n\n if isinstance(trajectory,HermiteTrajectory):\n assert smoothing == None,\"Smoothing cannot be applied to hermite trajectories\"\n ts = trajectory.startTime()\n n = len(q0)\n if isinstance(controller,SimRobotController):\n controller.setMilestone(trajectory.eval(ts),vectorops.mul(trajectory.deriv(ts),speed))\n n = len(trajectory.milestones[0])//2\n for i in range(1,len(trajectory.times)):\n q,v = trajectory.milestones[i][:n],trajectory.milestones[i][n:]\n controller.addCubic(q,vectorops.mul(v,speed),(trajectory.times[i]-trajectory.times[i-1])/speed)\n else:\n cv0 = controller.commandedVelocity()\n if cv0[0] is None:\n cv0 = controller.sensedVelocity()\n times,positions,velocities = [0],[controller.configFromKlampt(q0)],[cv0]\n start = 1 if trajectory.times[0]==0 else 0\n for i in range(start,len(trajectory.milestones)):\n times.append(trajectory.times[i]/speed)\n positions.append(controller.configFromKlampt(trajectory.milestones[i][:n]))\n velocities.append(controller.velocityFromKlampt(trajectory.milestones[i][n:]))\n controller.setPiecewiseCubic(times,positions,velocities)\n else:\n if smoothing == None:\n ts = trajectory.startTime()\n if isinstance(controller,SimRobotController):\n controller.setMilestone(trajectory.eval(ts))\n for i in range(1,len(trajectory.times)):\n q = trajectory.milestones[i]\n controller.addLinear(q,(trajectory.times[i]-trajectory.times[i-1])/speed)\n else:\n #TODO: move to start?\n times,positions = [0],[controller.configFromKlampt(q0)]\n start = 1 if 0==trajectory.times[0] else 0\n for i in range(start,len(trajectory.milestones)):\n times.append(trajectory.times[i]/speed)\n positions.append(controller.configFromKlampt(trajectory.milestones[i]))\n controller.setPiecewiseLinear(times,positions)\n elif smoothing == 'spline':\n t = HermiteTrajectory()\n t.makeSpline(trajectory)\n return execute_trajectory(t,controller)\n elif smoothing == 'pause':\n if isinstance(controller,SimRobotController):\n ts = trajectory.startTime()\n controller.setMilestone(trajectory.eval(ts))\n zero = [0.0]*len(trajectory.milestones[0])\n for i in range(1,len(trajectory.times)):\n q = trajectory.milestones[i]\n controller.addCubic(q,zero,(trajectory.times[i]-trajectory.times[i-1])/speed)\n else:\n #TODO: move to start?\n zero = [.0]*len(q0)\n t = HermiteTrajectory(trajectory.times,trajectory.milestones,[zero]*len(trajectory.milestones))\n return execute_trajectory(t,controller)\n else:\n raise ValueError(\"Invalid smoothing method specified\")", "def eval(self, t: float, endBehavior: str = 'halt') -> RigidTransform:\n res = GeodesicHermiteTrajectory.eval(self,t,endBehavior)\n return self.to_se3(res)", "def evaluate(self, prediction_fn):\n pass", "def simulate_analytically(\n self,\n y0: phase_space.PhaseSpace,\n t0: utils.FloatArray,\n t_eval: jnp.ndarray,\n params: utils.Params,\n **kwargs: Any\n ) -> Optional[phase_space.PhaseSpace]:", 
"def train(self)->None:", "def simulate_func(function, t): \n out = function.PLsig(t)\n \n return out, True", "def __call__(self, predictor_model) -> None:\n self.save_result(self.evaluate(predictor_model))", "def prismatic_trajectory(*args):\n return dyn_trajectory(*args)", "def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()", "def run(model_dir,\n schedule,\n problem_class=gin.REQUIRED,\n optimizer_class=gin.REQUIRED,\n dataset_name=gin.REQUIRED,\n batch_size=gin.REQUIRED,\n eval_batch_size=64,\n train_steps=gin.REQUIRED,\n eval_steps=gin.REQUIRED,\n base_optimizer_class=gin.REQUIRED,\n base_optimizer_conditioning_class=None,\n iterations_per_loop=gin.REQUIRED,\n eval_weights=None,\n training_params_class=gin.REQUIRED,\n training_params_conditioning_class=None,\n preprocess=\"\",\n preprocess_eval=\"\",\n save_checkpoints_steps=None,\n keep_checkpoint_max=0,\n eval_on_test=False):\n assert schedule in (\"train\", \"eval\")\n\n if save_checkpoints_steps:\n kwargs = {\"save_checkpoints_steps\": save_checkpoints_steps}\n else:\n kwargs = {\"save_checkpoints_secs\": 60*10} # Every 10 minutes.\n\n run_config = tf_estimator.tpu.RunConfig(\n keep_checkpoint_max=keep_checkpoint_max,\n master=FLAGS.master,\n evaluation_master=FLAGS.master,\n tpu_config=tf_estimator.tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop),\n **kwargs)\n # We use one estimator (potentially on TPU) for training and evaluation.\n problem = problem_class()\n model_fn = construct_model_fn(\n problem, optimizer_class, base_optimizer_class,\n eval_weights=eval_weights,\n base_optimizer_conditioning_class=base_optimizer_conditioning_class,\n training_params_class=training_params_class,\n training_params_conditioning_class=training_params_conditioning_class)\n tpu_estimator = tf_estimator.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n model_dir=model_dir,\n train_batch_size=batch_size,\n eval_batch_size=eval_batch_size,\n config=run_config)\n\n\n def input_fn_train(params):\n preprocess_fn = preprocessing.get_preprocess_fn(preprocess)\n return data.get_dataset(dataset_name, data.DatasetSplit.TRAIN,\n FLAGS.validation_percent, params[\"batch_size\"],\n preprocess_fn)\n\n def input_fn_eval(params, split):\n preprocess_fn = preprocessing.get_preprocess_fn(preprocess_eval)\n return data.get_dataset(dataset_name, split, FLAGS.validation_percent,\n params[\"batch_size\"], preprocess_fn).repeat()\n\n path_to_finished_file = os.path.join(model_dir, \"FINISHED\")\n if schedule == \"train\":\n gin_hook = gin.tf.GinConfigSaverHook(model_dir, summarize_config=True)\n tpu_estimator.train(input_fn=input_fn_train,\n hooks=[gin_hook],\n max_steps=train_steps)\n with tf.gfile.GFile(path_to_finished_file, \"w\") as finished_file:\n finished_file.write(\"1\")\n else:\n for checkpoint in iterate_checkpoints_until_file_exists(\n model_dir, path_to_finished_file):\n if eval_on_test:\n train_split = data.DatasetSplit.TRAIN_FULL\n test_split = data.DatasetSplit.TEST\n test_summary_name = \"test\"\n else:\n train_split = data.DatasetSplit.TRAIN\n test_split = data.DatasetSplit.VALID\n test_summary_name = \"valid\"\n\n eval_train = tpu_estimator.evaluate(\n input_fn=functools.partial(input_fn_eval, split=train_split),\n checkpoint_path=checkpoint,\n steps=eval_steps,\n name=\"train\")\n eval_test = tpu_estimator.evaluate(\n input_fn=functools.partial(input_fn_eval, split=test_split),\n 
checkpoint_path=checkpoint,\n steps=eval_steps,\n name=\"test\")\n\n current_step = eval_train[\"global_step\"]\n\n\n hub_modules_dir = os.path.join(model_dir, \"hub_modules\")\n if not tf.gfile.Exists(hub_modules_dir):\n tf.gfile.MkDir(hub_modules_dir)\n else:\n if not tf.gfile.IsDirectory(hub_modules_dir):\n raise ValueError(\"{0} exists and is not a directory\".format(\n hub_modules_dir))\n\n hub_module_path = os.path.join(hub_modules_dir,\n \"step-{:0>9}\".format(current_step))\n if not tf.gfile.Exists(hub_module_path):\n problem.module_spec.export(hub_module_path,\n checkpoint_path=checkpoint)\n else:\n logging.info(\"Not saving the hub module, since the path\"\n \" %s already exists\", hub_module_path)", "def entry(self):\n if not os.path.isfile('model'):\n train()\n schedule.every(0.01).seconds.do(predict, self)\n while True:\n schedule.run_pending()", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = 
control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def test_predict():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.predict(testing_features)\n assert False # Should be unreachable\n except ValueError:\n pass", "def eval(self, t: float, endBehavior: str = 'halt') -> RigidTransform:\n res = self.eval_state(t,endBehavior)\n return self.to_se3(res)" ]
[ "0.5927099", "0.59192216", "0.5839392", "0.57752514", "0.5674803", "0.56711715", "0.5602252", "0.5563227", "0.55145705", "0.54891074", "0.54664755", "0.54656124", "0.5425638", "0.54192406", "0.54156095", "0.53884786", "0.53753805", "0.53617346", "0.5361516", "0.5347182", "0.5335573", "0.5334666", "0.532083", "0.5299394", "0.52980113", "0.5269667", "0.52336496", "0.5228124", "0.5220369", "0.5219889" ]
0.63979477
0
Helper to handle None values in pypet parameters (used for the random number generator seed)
def _validatePypetParameters(self, runParams): # fix rng seed, which is saved as a string if None if "seed" in runParams: if runParams["seed"] == "None": runParams["seed"] = None return runParams
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_allow_none():\n value = None\n num_a = param.Integer(value=value, allow_None=True)\n assert num_a.value == value", "def test_rng_null(self):\n assert check_random_state(None) is np.random.mtrand._rand", "def noneType(value):\r\n return ''", "def test_get_with_None_value(self):\n self.assertEqual(self.config.get('none_types','some_value'),None)\n self.assertEqual(self.config.get('none_types','some_value','something'),'something')", "def test_creation_notallow_none():\n with pytest.raises(ValueError) as __:\n value = None\n __ = param.Integer(value=value, allow_None=False)", "def null() -> SetupVal:\n return NullVal()", "def nulltest():", "def guess_type_value_type (none = True) :\n return [ None, str, int, float ] if none else [ str, int, float ]", "def test_default_argument_null(self):\n @converters.wrap\n def inner_test(param: int = 14):\n \"\"\"Make sure the default was used.\"\"\"\n self.assertEqual(param, 14)\n inner_test(param=None)", "def test_mask_secret_null():\n assert utils.mask_secrets(\"\", None) == \"\"", "def get_default_value_type (ty, none = True) :\n if ty is None and none : return None\n elif ty == str : return \"\"\n elif ty == int : return 0\n elif ty == decimal.Decimal : return decimal.Decimal(0)\n elif ty == float : return 0.0\n else :\n raise PQHException (\"type expected in \" + str (guess_type_value_type ()))", "def test_none_input(self):\n eq_(None, output())", "def silent_none(value):\n if value is None:\n return ''\n return value", "def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")", "def get_none1(self):\n pass", "def test_creation_set_none_get_none():\n value = 11\n num_a = param.Integer(value=value)\n assert num_a.get_soft_bounds() == [None, None]", "def changenonetoNone(s):\r\n if s=='None':\r\n return None\r\n else:\r\n return s", "def type(cls):\n return 'None'", "def fill_missing_parameters(self):\n pseudo_id = self.get_param_by_type(PseudoID)\n pseudo_name = self.get_param_by_type(PseudoName)\n if pseudo_id is None and pseudo_name is not None:\n self.parameters.append(\n PseudoID(pseudo_name.value)\n ) # take name for both\n elif pseudo_name is None and pseudo_id is not None:\n self.parameters.append(\n PseudoName(pseudo_id.value)\n ) # take id for both", "def test_no_default_value(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def test_no_default_value(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def str_or_none(val):\n return str(val) if val is not None else None", "def is_none(obj):\n return obj is None", "def _check_random_state(random_state):\n if random_state is None or isinstance(random_state, int):\n return sci.random.RandomState(random_state)\n elif isinstance(random_state, sci.random.RandomState):\n return random_state\n else:\n raise TypeError('Seed should be None, int or np.random.RandomState')", "def Noneify(variable):\n if variable in (\"None\", \"\", None):\n return None\n if variable in (\"False\", \"0\"):\n return False\n if variable in (\"True\", \"1\"):\n return True\n return variable", "def return_none() -> None:\n pass", "def NoneOrType(type_):\n def coercer(value):\n if value is None:\n return value\n else:\n return type_(value)\n return coercer", "def optional():", "def check_argtype(val, type_, name, or_none=False):\n if not (isinstance(val, type_) or (or_none and val is None)):\n raise TypeError('{} should be of type {}, got {}'.format(\n name, type_, 
type(val)))", "def test_none_type(self):\n\n expected = TypeError\n input_ = None\n with self.assertRaises(expected):\n math.factorial(input_)" ]
[ "0.62218755", "0.602136", "0.60112995", "0.5922588", "0.58687824", "0.58633107", "0.5859123", "0.58180076", "0.5806624", "0.5767758", "0.56836206", "0.5663786", "0.56127363", "0.56008166", "0.5596845", "0.55941707", "0.55930674", "0.55622345", "0.55609566", "0.5521717", "0.5512443", "0.55054337", "0.5495281", "0.5472665", "0.54670256", "0.5445873", "0.5443218", "0.54411095", "0.5439196", "0.54356515" ]
0.6851421
0
Aggregate all results into the dfResults dataframe.
def aggregateResultsToDfResults(self, arrays=True, fillna=False): nan_value = np.nan # defines which variable types will be saved in the results dataframe SUPPORTED_TYPES = (float, int, np.ndarray, list) SCALAR_TYPES = (float, int) ARRAY_TYPES = (np.ndarray, list) logging.info("Aggregating results to `dfResults` ...") for runId, parameters in tqdm.tqdm(self.dfResults.iterrows(), total=len(self.dfResults)): # if the results were previously loaded into memory, use them if hasattr(self, "results"): # only if the length matches the number of results if len(self.results) == len(self.dfResults): result = self.results[runId] # else, load results individually from hdf file else: result = self.getRun(runId) # else, load results individually from hdf file else: result = self.getRun(runId) for key, value in result.items(): # only save floats, ints and arrays if isinstance(value, SUPPORTED_TYPES): # save 1-dim arrays if isinstance(value, ARRAY_TYPES) and arrays: # to save a numpy array, convert column to object type if key not in self.dfResults: self.dfResults[key] = None self.dfResults[key] = self.dfResults[key].astype(object) self.dfResults.at[runId, key] = value elif isinstance(value, SCALAR_TYPES): # save scalars self.dfResults.loc[runId, key] = value else: self.dfResults.loc[runId, key] = nan_value # drop nan columns self.dfResults = self.dfResults.dropna(axis="columns", how="all") if fillna: self.dfResults = self.dfResults.fillna(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregate_results(self):\n\n raise NotImplementedError", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def make_results_df(results):\n max_val = max(x[1] for x in results)\n\n df = []\n for i in range(max_val + 1):\n df.append([])\n for j in range(max_val + 1):\n df[-1].append(results.get((i, j), np.nan))\n return pd.DataFrame(df)", "def analyzeResults(self):\n results = [self.analyzeClusterPerformance(c) for c in self.clusterLabels]\n rDF = pd.DataFrame(results)\n self.resultList.append(rDF)", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output", "def aggregate_results(output_files, agg_filename):\n\n print(file_marker + \"STARTING AGGREGATION\")\n feather_files = output_files\n\n results = []\n for i in range(len(feather_files)):\n print(file_marker + str(i))\n x = pd.read_feather(feather_files[i])\n results.append(x)\n \n overall_results = pd.concat(results, ignore_index=True, sort=False)\n opt_diff_results = overall_results\n\n opt_diff_results.reset_index(inplace=True, drop=True) \n # drop=True: column 'index' gets removed\n\n opt_diff_results.to_feather(agg_filename)\n print(file_marker + \"Aggregated results saved to: \" + agg_filename)", "def result_to_dataframe(data):\n letters, statistics = zip(*data)\n dataframe = pd.DataFrame(data=list(statistics), index=letters, columns=['SUM', 'SUM_OF_SQUARES', 'MAX', 'MIN', 'COUNT']).sort_index()\n dataframe['MEAN'] = dataframe['SUM'] / dataframe['COUNT']\n dataframe['VARIANCE'] = dataframe['SUM_OF_SQUARES'] / dataframe['COUNT'] - dataframe['MEAN']**2\n dataframe['STANDARD_DEVIATION'] = dataframe['VARIANCE']**0.5\n logging.info(\"Total datapoints read: {}.\".format(dataframe['COUNT'].sum()))\n return dataframe", "def __save_aggregate_scores(self):\n results = pd.DataFrame(\n 
columns=self.results[list(self.results.keys())[0]],\n index=self.results.keys(),\n )\n for row_name, time_bins in self.results.items():\n for column_name, value in time_bins.items():\n results.loc[row_name, column_name] = value\n results.reindex(sorted(results.columns, reverse=True), axis=1).to_csv(\n self.df_save_path\n )", "def aggregate_results(self, results):\n result = dict()\n result['MAE'] = self.average_dict_items(results, 'MAE')\n result['MdAE'] = self.average_dict_items(results, 'MdAE')\n result['RMSE'] = self.average_dict_items(results, 'RMSE')\n result['SMAPE'] = self.average_dict_items(results, 'SMAPE')\n result['num_values'] = self.average_dict_items(results, 'num_values')\n return result", "def collect_results( results_dir = \"experiments\" ) :\n #%%\n import pandas as pd\n exps_fn = os.listdir( results_dir )\n dics = []\n for fname in exps_fn :\n with open( results_dir + \"/\" + fname, \"rt\", encoding=\"utf8\" ) as f_out :\n dics.append( json.load( f_out ) )\n\n results_df = pd.DataFrame( dics )\n #%%\n return results_df", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = [row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())", "def consolidate_results(path='./Data'):\n model_files = [load(os.path.join(path, f)) \n for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and f.startswith('model_')]\n df_final = pd.DataFrame(columns=['model_name','train_accuracy','test_accuracy',\n 'macro_avg_precision','macro_avg_recall',\n 'macro_avg_f1-score','weighted_avg_precision',\n 'weighted_avg_recall','weighted_avg_f1-score'])\n for model_file in model_files:\n results = model_file['model_results']\n class_report = classification_report(results.category, results.pred, output_dict=True)\n df_final = df_final.append({'model_name':model_file['model_name'],\n 'train_accuracy':'{0:.2f}'.format(model_file['model_CV'].best_score_),\n 'test_accuracy':'{0:.2f}'.format(class_report['accuracy']),\n 'macro_avg_precision':class_report['macro avg']['precision'],\n 'macro_avg_recall':class_report['macro avg']['recall'],\n 'macro_avg_f1-score':class_report['macro avg']['f1-score'],\n 'weighted_avg_precision':class_report['weighted avg']['precision'],\n 'weighted_avg_recall':class_report['weighted avg']['recall'],\n 'weighted_avg_f1-score':class_report['weighted avg']['f1-score']\n },ignore_index=True)\n return(df_final)", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def execute_query(self):\n query_sum = self.initialize_totals()\n data = []\n\n with tenant_context(self.tenant):\n query = self.query_table.objects.filter(self.query_filter)\n query_data = query.annotate(**self.annotations)\n group_by_value = self._get_group_by()\n\n query_group_by = [\"date\"] + group_by_value\n query_order_by = [\"-date\"]\n query_order_by.extend([self.order]) # add implicit ordering\n\n query_data = query_data.values(*query_group_by).annotate(**self.report_annotations)\n\n if self._limit and query_data:\n query_data = self._group_by_ranks(query, query_data)\n if not self.parameters.get(\"order_by\"):\n # override implicit ordering when using ranked ordering.\n query_order_by[-1] = \"rank\"\n\n # Populate the 'total' section of the API response\n if query.exists():\n aggregates = self._mapper.report_type_map.get(\"aggregates\")\n metric_sum = query.aggregate(**aggregates)\n query_sum = {key: metric_sum.get(key) for key in aggregates}\n\n 
query_data, total_capacity = self.get_cluster_capacity(query_data)\n if total_capacity:\n query_sum.update(total_capacity)\n\n if self._delta:\n query_data = self.add_deltas(query_data, query_sum)\n is_csv_output = self.parameters.accept_type and \"text/csv\" in self.parameters.accept_type\n\n query_data = self.order_by(query_data, query_order_by)\n\n if is_csv_output:\n if self._limit:\n data = self._ranked_list(list(query_data))\n else:\n data = list(query_data)\n else:\n # Pass in a copy of the group by without the added\n # tag column name prefix\n groups = copy.deepcopy(query_group_by)\n groups.remove(\"date\")\n data = self._apply_group_by(list(query_data), groups)\n data = self._transform_data(query_group_by, 0, data)\n\n sum_init = {\"cost_units\": self._mapper.cost_units_key}\n if self._mapper.usage_units_key:\n sum_init[\"usage_units\"] = self._mapper.usage_units_key\n query_sum.update(sum_init)\n\n ordered_total = {\n total_key: query_sum[total_key] for total_key in self.report_annotations.keys() if total_key in query_sum\n }\n ordered_total.update(query_sum)\n\n self.query_sum = ordered_total\n self.query_data = data\n return self._format_query_response()", "def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. 
This is more effecient than creating copies.\n self._mangle_loaded_res(res)", "def calc(self) -> pd.DataFrame:\n raise NotImplementedError", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def prepareDataframeForTable(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n\n if self._isIndexedDataframe(df):\n if df.size == 0:\n df[\"values\"] = np.nan\n elif len(df.columns) > 1:\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.stack()\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n current_columns_name = list(df.index.names)\n current_columns_name[len(current_columns_name)-1] = \"Measures\"\n df.index.names = current_columns_name\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def get_query_result_to_df(self, query):\r\n try:\r\n return pd.read_sql_query(query, self.conn)\r\n except pd.pandas.io.sql.DatabaseError:\r\n print('Execution failed. Database error')", "def score_aggregate(results):\n scores = []\n truth_count = detected_count = segment_count = 0\n\n for res in results:\n scores.append(res[\"scores\"])\n truth_count += len(res[\"labels\"])\n detected_count += len(res[\"detected\"])\n segment_count += len(res[\"scores\"][\"segments\"])\n\n ret = dict()\n ret[\"scores\"] = sum_scores(scores)\n ret[\"stats\"] = dict(truth_count=truth_count, detected_count=detected_count, segment_count=segment_count)\n return ret", "def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df", "def set_results(self, results, unique_keys):\n self._results = results\n self._compute_logic()\n\n for _, query in enumerate(self._results):\n\n flat = query.flatten_results(unique_keys)\n filename = 'flattened_{0}.csv'.format('_'.join(sorted(query.in_sets)))\n flat.to_csv(\n os.path.join(\n Configuration().csv.output_directory,\n '{0}'.format(filename)\n ),\n sep='\\t'\n )", "def get_stats_summary(self):\n perf_table = spark.table(self.performance_table)\\\n .where(\"yyyy_mm_dd between '{start_date}' and '{end_date}'\"\n .format(start_date = self.start_date, end_date = self.end_date))\\\n .where(\"clicks > 0\")\\\n .where(\"commission_expected_euro <= {max_rpb}\".format(max_rpb = 
self.max_rpb))\n\n if self.pos == ['All']:\n perf_table = perf_table.groupBy(*self.agg_on)\\\n .agg(f.sum(\"nits_bookings\").alias(\"nits_bookings\")\n ,f.sum(\"commission_expected_euro\").alias(\"nits_commission\")\n ,f.sum(\"bookings\").alias(\"gross_bookings\")\n ,f.sum(\"commission_amount_euro\").alias(\"gross_commission\")\n ,f.sum(\"cost_euro\").alias(\"cost\")\n ,f.sum(\"clicks\").alias(\"clicks\")\n ,f.sum(\"roomnights\").alias(\"roomnights\"))\\\n .withColumn(\"nits_profit\",f.expr(\"nits_commission-cost\"))\\\n .withColumn(\"gross_profit\", f.expr(\"gross_commission-cost\"))\n else:\n filtered_pos = spark.createDataFrame(pd.DataFrame(data = self.pos,\n columns = [\"pos\"]))\n\n perf_table = perf_table.join(filtered_pos, on = \"pos\", how = \"inner\")\\\n .groupBy(*self.agg_on)\\\n .agg(f.sum(\"nits_bookings\").alias(\"nits_bookings\")\n ,f.sum(\"commission_expected_euro\").alias(\"nits_commission\")\n ,f.sum(\"bookings\").alias(\"gross_bookings\")\n ,f.sum(\"commission_amount_euro\").alias(\"gross_commission\")\n ,f.sum(\"cost_euro\").alias(\"cost\")\n ,f.sum(\"clicks\").alias(\"clicks\")\n ,f.sum(\"roomnights\").alias(\"roomnights\"))\\\n .withColumn(\"nits_profit\",f.expr(\"nits_commission-cost\"))\\\n .withColumn(\"gross_profit\", f.expr(\"gross_commission-cost\"))\n\n return (perf_table)", "def get_results_frames(results_df, times):\n # coherence check:\n for t in times:\n assert results_df['t'].iloc[0] <= t <= results_df['t'].iloc[-1], \\\n 'time={} is outside the results_df range'.format(t)\n\n frames = pd.DataFrame(columns=results_df.columns)\n frames.loc[:, 't'] = times\n ignore_columns = {'t'}\n for col in results_df.columns:\n if col not in ignore_columns:\n vals_at_times = np.interp(times, results_df['t'], results_df[col])\n frames.loc[:, col] = vals_at_times\n return frames", "def _aggregate(self, method_name, *args, **kwargs):\n qc_result = self._call_qc_method(method_name, *args, **kwargs)\n return self._dataframe.__constructor__(query_compiler=qc_result)", "def get_flat_results(self):\n test_results, error_dict, framestats = self.get_results()\n test_results = self._merge_test_results(test_results, error_dict)\n\n results = copy.deepcopy(test_results)\n results.update(framestats)\n\n return results" ]
[ "0.69664747", "0.6740201", "0.6676644", "0.6570679", "0.64964044", "0.6288518", "0.6284019", "0.6193791", "0.61452377", "0.6103424", "0.608616", "0.60509396", "0.6011559", "0.59929734", "0.59141815", "0.58985555", "0.5862192", "0.5848644", "0.5842289", "0.5819599", "0.5788575", "0.5788575", "0.57706493", "0.5768687", "0.576399", "0.57519954", "0.57353634", "0.57338786", "0.572355", "0.5717158" ]
0.738837
0
Return `xr.Dataset` from the exploration results.
def xr(self, bold=False):\n    def _sanitize_nc_key(k):\n        return k.replace("*", "_").replace(".", "_").replace("|", "_")\n    assert self.results is not None, "Run `loadResults()` first to populate the results"\n    assert len(self.results) == len(self.dfResults)\n    # create intrisinsic dims for one run\n    timeDictKey, run_coords = self._getCoordsFromRun(self.results[0], bold=bold)\n    dataarrays = []\n    orig_search_coords = self.parameterSpace.get_parametrization()\n    for runId, run_result in self.results.items():\n        # take exploration coordinates for this run\n        expl_coords = {k: v[runId] for k, v in orig_search_coords.items()}\n        outputs = []\n        run_result = self._filterDictionaryBold(run_result, bold=bold)\n        for key, value in run_result.items():\n            if key == timeDictKey:\n                continue\n            outputs.append(value)\n        # create DataArray for run only - we need to add exploration coordinates\n        data_temp = xr.DataArray(\n            np.stack(outputs), dims=["output", "space", "time"], coords=run_coords, name="exploration"\n        )\n        expand_coords = {}\n        # iterate exploration coordinates\n        for k, v in expl_coords.items():\n            # sanitize keys in the case of stars etc\n            k = _sanitize_nc_key(k)\n            # if single values, just assign\n            if isinstance(v, (str, float, int)):\n                expand_coords[k] = [v]\n            # if arrays, check whether they can be squeezed into one value\n            elif isinstance(v, np.ndarray):\n                if np.unique(v).size == 1:\n                    # if yes, just assign that one value\n                    expand_coords[k] = [float(np.unique(v))]\n                else:\n                    # if no, sorry - coordinates cannot be array\n                    raise ValueError("Cannot squeeze coordinates")\n        # assing exploration coordinates to the DataArray\n        dataarrays.append(data_temp.expand_dims(expand_coords))\n    # finally, combine all arrays into one\n    if self.parameterSpace.kind == "sequence":\n        # when run in sequence, cannot combine to grid, so just concatenate along new dimension\n        combined = xr.concat(dataarrays, dim="run_no", coords="all")\n    else:\n        # sometimes combining xr.DataArrays does not work, see https://github.com/pydata/xarray/issues/3248#issuecomment-531511177\n        # resolved by casting them explicitely to xr.Dataset\n        combined = xr.combine_by_coords([da.to_dataset() for da in dataarrays])["exploration"]\n    if self.parameterSpace.star:\n        # if we explored over star params, unwrap them into attributes\n        combined.attrs = {\n            _sanitize_nc_key(k): list(self.model.params[k].keys()) for k in orig_search_coords.keys() if "*" in k\n        }\n    return combined
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def to_dataset(self):\n import xarray as xr\n ds = xr.Dataset(coords={'x': (['x', ], self.center_grid.x_coord),\n 'y': (['y', ], self.center_grid.y_coord)}\n )\n ds.attrs['pyproj_srs'] = self.proj.srs\n return ds", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def get_dataset():\n\n return db.store.all()", "def get_dataset(self, therm_frac=0., make_plots=False) -> (xr.Dataset):\n data_vars = {}\n for key, val in self.data.items():\n arr = np.array(val)\n steps = np.arange(len(arr))\n if therm_frac > 0:\n arr, steps = therm_arr(arr, therm_frac=therm_frac)\n if len(arr.shape) == 1:\n data_vars[key] = xr.DataArray(arr, dims=['draw'],\n coords=[steps])\n elif len(arr.shape) == 3:\n arr = arr.T\n num_chains, num_lf, _ = arr.shape\n dims = ['chain', 'leapfrog', 'draw']\n coords = [np.arange(num_chains), np.arange(num_lf), steps]\n data_vars[key] = xr.DataArray(arr, dims=dims, coords=coords)\n else:\n chains = np.arange(arr.shape[1])\n data_vars[key] = xr.DataArray(arr.T, dims=['chain', 'draw'],\n coords=[chains, steps])\n\n return xr.Dataset(data_vars)", "def open(self):\n return xr.open_dataset(self)", "def get_experiment_data(experiment_names):\n\n snapshots_query = db_utils.query(\n Experiment.git_hash,\\\n Trial.experiment, Trial.fuzzer, Trial.benchmark,\\\n Trial.time_started, Trial.time_ended,\\\n Snapshot.trial_id, Snapshot.time, Snapshot.edges_covered)\\\n .select_from(Experiment)\\\n .join(Trial)\\\n .join(Snapshot)\\\n .filter(Experiment.name.in_(experiment_names))\\\n .filter(Trial.preempted.is_(False))\n\n return pd.read_sql_query(snapshots_query.statement, db_utils.engine)", "def iris():\n return IrisDataset()", "def get_eval_data() -> GraphDataset:\n _load_data_if_needed()\n return eval_data", "def get_dataset(self):\n return", "def _create_dataset(source=''):\n return ExperimentalDataset()", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if 
args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def _generate_experimental_data(self):\n\n loginfo('Extracting dataset...')\n cfg = self.cfg_\n return ex.ExperimentalData(db=cfg.db,\n prediction_label=cfg.prediction_label,\n games=cfg.games,\n folds=cfg.training_rounds,\n fold_size=cfg.training_samples_per_round,\n grid_search_folds=cfg.grid_search_folds,\n grid_search_fold_size=\n cfg.grid_search_samples_per_fold,\n sampling=cfg.data_sampling,\n lognormal=cfg.lognormal,\n power_transform=cfg.power_transform,\n bin_ranges=cfg.bin_ranges,\n batch_size=self.batch_size_)", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n\n X = data.data\n y = data.target\n return X, y", "def data(self):\n return exp.load(strain = self.strain, dtype = self.dtype, wid = self.wid, stage = self.stage, label = self.label, \n valid_only = self.valid_only, replace_invalid = self.replace_invalid, memmap = None);", "def get_dataset(self):\n\n trainset = datasets.MNIST('datasets/MNIST/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.MNIST('datasets/MNIST/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def get_dataset(self):\n return self._X, self._y", "def get_data(result_id=-1, train=True, root_path='Data/'):\n if train:\n x = get_dataset(get_file_path('x_train_gr_smpl.csv', path=root_path))\n else:\n x = get_dataset(get_file_path('x_test_gr_smpl.csv', path=root_path))\n filePicker = result_file_selector(result_id, train)\n y = get_dataset(get_file_path(filePicker, path=root_path))\n y.columns = ['y']\n return x, y", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n 
x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def get_main_dataset(self) -> pd.DataFrame:\n pass", "def to_xarray(self, **kwargs):\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n xds = self.fetcher.to_xarray(**kwargs)\n xds = self.postproccessor(xds)\n return xds", "def to_dataset(self):\n if not self.type:\n raise aspecd.exceptions.MissingDatasetError\n dataset = aspecd.utils.object_from_class_name(self.type)\n dataset.id = self.id\n for history_record in self.history:\n history_record.replay(dataset)\n return dataset", "def get_dataset(name):\n if name == 'cityscapes':\n return Cityscapes", "def data_array(self) -> xr.Dataset:\n\n xr_data = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n parallel=True)\n\n if not all(x in list(xr_data.coords) for x in self.DIMS):\n xr_data = xr_data.rename({\n 'latitude': 'lat',\n 'longitude': 'lon',\n })\n\n if self.subset_dict is not None:\n print(f'Cutting data using {self.subset_dict}')\n xr_data = self.cut(xr_data)\n\n if self.season is not None:\n xr_data = xr_data.where(xr_data.time.dt.season == self.season,\n drop=True)\n\n if self.rescale_longitude is True:\n xr_data = xr_data.assign_coords(lon=(((xr_data.lon + 180) % 360) -\n 180)).sortby('lon')\n\n return xr_data", "def get_data(self):\n return self._results", "def _get_dataset(self):\n if self.mode == 'train':\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .shuffle(buffer_size=self.num_samples, reshuffle_each_iteration=True)\n .map(map_func=self.import_waveforms_fn_train, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )\n else:\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .map(map_func=self.import_waveforms_fn_val, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )", "def dataframe(self):\n return self.get_target().dataframe()", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)" ]
[ "0.62103707", "0.6039698", "0.6023591", "0.601906", "0.59815496", "0.59300596", "0.5848126", "0.581404", "0.5806816", "0.580024", "0.57943356", "0.5780146", "0.57669896", "0.57436913", "0.5633727", "0.5601604", "0.5577262", "0.5559064", "0.55575436", "0.55522215", "0.55362487", "0.55182534", "0.5485202", "0.54711866", "0.5456136", "0.54535276", "0.5447145", "0.5442712", "0.54269034", "0.54269034" ]
0.65473825
0
Create a new SRPAuthenticationPolicy from a settings dict.
def from_settings(cls, settings={}, prefix="srpauth.", **kwds):\n    # Grab out all the settings keys that start with our prefix.\n    auth_settings = {}\n    for name, value in settings.iteritems():\n        if not name.startswith(prefix):\n            continue\n        auth_settings[name[len(prefix):]] = value\n    # Update with any additional keyword arguments.\n    auth_settings.update(kwds)\n    # Now look for specific keys of interest.\n    maybe_resolve = DottedNameResolver(None).maybe_resolve\n    # You must specify a realm.\n    if "realm" not in auth_settings:\n        raise ValueError("pyramid_srpauth: you must specify the realm")\n    # NonceManager can be specified as class or instance name.\n    nonce_manager = maybe_resolve(auth_settings.get("nonce_manager"))\n    if callable(nonce_manager):\n        nonce_manager = nonce_manager()\n    auth_settings["nonce_manager"] = nonce_manager\n    # get_password can be dotted name of a callable\n    get_password = maybe_resolve(auth_settings.get("get_password"))\n    if get_password is not None:\n        assert callable(get_password)\n        auth_settings["get_password"] = get_password\n    # get_verifier can be dotted name of a callable\n    get_verifier = maybe_resolve(auth_settings.get("get_verifier"))\n    if get_verifier is not None:\n        assert callable(get_verifier)\n        auth_settings["get_verifier"] = get_verifier\n    # groupfinder can be dotted name of a callable\n    groupfinder = maybe_resolve(auth_settings.get("groupfinder"))\n    if groupfinder is not None:\n        assert callable(groupfinder)\n        auth_settings["groupfinder"] = groupfinder\n    # OK, the rest should just be keyword arguments.\n    return cls(**auth_settings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_settings(settings):", "def from_settings(cls, settings: SimulationSettingsModel):\n return cls(\n start=settings.project.simulation_range.start,\n end=settings.project.simulation_range.end,\n )", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n insurance_policy_type_velue = dictionary.get('insurancePolicyTypeVelue')\r\n fire_insurance_policy_extend_view = easybimehlanding.models.fire_insurance_policy_extend_view.FireInsurancePolicyExtendView.from_dictionary(dictionary.get('fireInsurancePolicyExtendView')) if dictionary.get('fireInsurancePolicyExtendView') else None\r\n fire_insurance_policy_filter = easybimehlanding.models.fire_insurance_policy_filter.FireInsurancePolicyFilter.from_dictionary(dictionary.get('fireInsurancePolicyFilter')) if dictionary.get('fireInsurancePolicyFilter') else None\r\n id = dictionary.get('id')\r\n selected_insurance_policy_has_been_changed = dictionary.get('selectedInsurancePolicyHasBeenChanged')\r\n is_paymented = dictionary.get('isPaymented')\r\n has_conflict_document = dictionary.get('hasConflictDocument')\r\n is_insurance_centre_admin = dictionary.get('isInsuranceCentreAdmin')\r\n insurance_policy_payment_documents = dictionary.get('insurancePolicyPaymentDocuments')\r\n payable = dictionary.get('payable')\r\n paymented = dictionary.get('paymented')\r\n conflict = dictionary.get('conflict')\r\n initial_price = dictionary.get('initialPrice')\r\n final_price = dictionary.get('finalPrice')\r\n insurance_company_name = dictionary.get('insuranceCompanyName')\r\n insurance_centre_name = dictionary.get('insuranceCentreName')\r\n insurance_policy_conflict = dictionary.get('insurancePolicyConflict')\r\n insurance_policy_condition = dictionary.get('insurancePolicyCondition')\r\n person = dictionary.get('person')\r\n insurance_policy = dictionary.get('insurancePolicy')\r\n shopping_card = dictionary.get('shoppingCard')\r\n shopping_card_postal_packet = dictionary.get('shoppingCardPostalPacket')\r\n\r\n # Return an object of this model\r\n return cls(insurance_policy_type_velue,\r\n fire_insurance_policy_extend_view,\r\n fire_insurance_policy_filter,\r\n id,\r\n selected_insurance_policy_has_been_changed,\r\n is_paymented,\r\n has_conflict_document,\r\n is_insurance_centre_admin,\r\n insurance_policy_payment_documents,\r\n payable,\r\n paymented,\r\n conflict,\r\n initial_price,\r\n final_price,\r\n insurance_company_name,\r\n insurance_centre_name,\r\n insurance_policy_conflict,\r\n insurance_policy_condition,\r\n person,\r\n insurance_policy,\r\n shopping_card,\r\n shopping_card_postal_packet)", "def __init__(self,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings = None, \r\n anon = False):\r\n\r\n self.username = None\r\n self.password = None\r\n self.certChain = None\r\n self.privateKey = None\r\n self.checker = None\r\n self.anon = anon\r\n\r\n #SRP Authentication\r\n if username and password and not \\\r\n (certChain or privateKey):\r\n self.username = username\r\n self.password = password\r\n\r\n #Certificate Chain Authentication\r\n elif certChain and privateKey and not \\\r\n (username or password):\r\n self.certChain = certChain\r\n self.privateKey = privateKey\r\n\r\n #No Authentication\r\n elif not password and not username and not \\\r\n certChain and not privateKey:\r\n pass\r\n\r\n else:\r\n raise ValueError(\"Bad parameters\")\r\n\r\n self.checker = checker\r\n self.settings = settings\r\n\r\n 
self.tlsSession = None", "def policy_create(request, **kwargs):\n body = {'policy': kwargs}\n policy = neutronclient(request).create_qos_policy(body=body).get('policy')\n return QoSPolicy(policy)", "def from_settings(cls, settings):\n server = connection.get_redis(settings.getdict(\"REDIS_CONFIG\"))\n # XXX: This creates one-time key. needed to support to use this\n # class as standalone dupefilter with scrapy's default scheduler\n # if scrapy passes spider on open() method this wouldn't be needed\n # TODO: Use SCRAPY_JOB env as default and fallback to timestamp.\n key = DEFAULT_DUPEFILTER_KEY % {'timestamp': int(time.time())}\n return cls(server, key=key)", "def from_settings(settings):\n\n connection_type = settings.get('RABBITMQ_CONNECTION_TYPE', RABBITMQ_CONNECTION_TYPE)\n queue_name = settings.get('RABBITMQ_QUEUE_NAME', RABBITMQ_QUEUE_NAME)\n connection_parameters = settings.get('RABBITMQ_CONNECTION_PARAMETERS', RABBITMQ_CONNECTION_PARAMETERS)\n connection_dsn = settings.get('RABBITMQ_DSN', RABBITMQ_DSN)\n\n connection_producer = {\n 'blocking': pika.BlockingConnection,\n 'select': pika.SelectConnection,\n 'tornado': pika.TornadoConnection,\n 'twisted': pika.TwistedConnection\n }[connection_type]\n\n if connection_dsn:\n connection = connection_producer(pika.URLParameters(connection_dsn))\n else:\n connection = connection_producer(pika.ConnectionParameters(**connection_parameters))\n\n channel = connection.channel()\n channel.queue_declare(queue=queue_name, durable=True)\n\n return channel", "def make_settings(pypirc):\n default_pypirc = \"\"\"\n [pypi]\n username:foo\n password:bar\n \"\"\"\n\n def _settings(pypirc_text=default_pypirc, **settings_kwargs):\n pypirc.write(textwrap.dedent(pypirc_text))\n\n settings_kwargs.setdefault(\"sign_with\", None)\n settings_kwargs.setdefault(\"config_file\", str(pypirc))\n\n return settings.Settings(**settings_kwargs)\n\n return _settings", "def from_xml_node(cls, xml_node):\n policy_control_name = get_xml_text_value(xml_node, xml_tags.Elements.POLICY_CONTROL_NAME)\n access_type = get_xml_text_value(xml_node, xml_tags.Elements.ACCESS_TYPE)\n from_zone = get_xml_text_value(xml_node, xml_tags.Elements.FROM_ZONE)\n to_zone = get_xml_text_value(xml_node, xml_tags.Elements.TO_ZONE)\n\n rule_properties_node = get_xml_node(xml_node, xml_tags.Elements.RULE_PROPERTIES, optional=True)\n if rule_properties_node is None:\n rule_properties = None\n else:\n rule_properties = RuleProperties.from_xml_node(rule_properties_node)\n\n flow_node = get_xml_node(xml_node, xml_tags.Elements.FLOW, optional=True)\n if flow_node is None:\n flow = None\n else:\n flow = Flow.from_xml_node(flow_node)\n\n allowed_services_node = get_xml_node(xml_node, xml_tags.Elements.ALLOWED_SERVICES, optional=True)\n if allowed_services_node is None:\n allowed_services = None\n else:\n allowed_services = AllowedServices.from_xml_node(allowed_services_node)\n\n blocked_services_node = get_xml_node(xml_node, xml_tags.Elements.BLOCKED_SERVICES, optional=True)\n if blocked_services_node is None:\n blocked_services = None\n else:\n blocked_services = BlockedServices.from_xml_node(blocked_services_node)\n\n return cls(policy_control_name, access_type, from_zone, to_zone, rule_properties, flow, allowed_services,\n blocked_services)", "def create_sp_profile(self,\n settings=None,\n headers=None,\n payload=None,\n active_validation=True,\n **request_parameters):\n check_type(headers, dict)\n check_type(payload, dict)\n if headers is not None:\n if 'X-Auth-Token' in headers:\n 
check_type(headers.get('X-Auth-Token'),\n basestring, may_be_none=False)\n\n _params = {\n }\n _params.update(request_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n }\n _payload = {\n 'settings':\n settings,\n }\n _payload.update(payload or {})\n _payload = dict_from_items_with_values(_payload)\n if active_validation:\n self._request_validator('jsd_ffa347eb411567a9c793696795250a5_v2_2_1')\\\n .validate(_payload)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n e_url = ('/dna/intent/api/v1/service-provider')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n json_data = self._session.post(endpoint_full_url, params=_params,\n json=_payload,\n headers=_headers)\n else:\n json_data = self._session.post(endpoint_full_url, params=_params,\n json=_payload)\n\n return self._object_factory('bpm_ffa347eb411567a9c793696795250a5_v2_2_1', json_data)", "def initialize_policies(self, policy_collection, options):", "def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }", "def GatherUserPolicySettings(self, settings, policies):\n for field in settings.DESCRIPTOR.fields:\n # |field| is the entry for a specific policy in the top-level\n # CloudPolicySettings proto.\n\n # Look for this policy's value in the mandatory or recommended dicts.\n if field.name in policies.get('mandatory', {}):\n mode = cp.PolicyOptions.MANDATORY\n value = policies['mandatory'][field.name]\n elif field.name in policies.get('recommended', {}):\n mode = cp.PolicyOptions.RECOMMENDED\n value = policies['recommended'][field.name]\n else:\n continue\n\n # Create protobuf message for this policy.\n policy_message = eval('cp.' 
+ field.message_type.name + '()')\n policy_message.policy_options.mode = mode\n field_descriptor = policy_message.DESCRIPTOR.fields_by_name['value']\n self.SetProtobufMessageField(policy_message, field_descriptor, value)\n settings.__getattribute__(field.name).CopyFrom(policy_message)", "def __init__(__self__, *,\n policy_id: pulumi.Input[str],\n policy_parameters: Optional[pulumi.Input['PolicyParametersArgs']] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def __init__(__self__, *,\n policy_id: pulumi.Input[str],\n policy_parameters: Optional[pulumi.Input['PolicyParametersArgs']] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def __init__(self, policy_id=None, policy_name=None, is_policy_enabled=None, policy_target_version=None, policy_deployment_method=None, software_title=None, software_title_configuration_id=None, pending=None, completed=None, deferred=None, failed=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._policy_id = None\n self._policy_name = None\n self._is_policy_enabled = None\n self._policy_target_version = None\n self._policy_deployment_method = None\n self._software_title = None\n self._software_title_configuration_id = None\n self._pending = None\n self._completed = None\n self._deferred = None\n self._failed = None\n self.discriminator = None\n\n if policy_id is not None:\n self.policy_id = policy_id\n if policy_name is not None:\n self.policy_name = policy_name\n if is_policy_enabled is not None:\n self.is_policy_enabled = is_policy_enabled\n if policy_target_version is not None:\n self.policy_target_version = policy_target_version\n if policy_deployment_method is not None:\n self.policy_deployment_method = policy_deployment_method\n if software_title is not None:\n self.software_title = software_title\n if software_title_configuration_id is not None:\n self.software_title_configuration_id = software_title_configuration_id\n if pending is not None:\n self.pending = pending\n if completed is not None:\n self.completed = completed\n if deferred is not None:\n self.deferred = deferred\n if failed is not None:\n self.failed = failed", "def __init__(__self__, *,\n policy_id: str,\n policy_version: str,\n policy_parameters: Optional['outputs.PolicyParametersResponse'] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n pulumi.set(__self__, \"policy_version\", policy_version)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def translate_policy(policy: dict):\n if 'PolicyName' in policy:\n # This is a normal policy that should not be expanded\n return policy\n template_name = next(iter(policy))\n template_parameters = policy[template_name]\n try:\n # 'convert' will return a list of policy statements\n policy_document = processor.convert(template_name, template_parameters)\n except InsufficientParameterValues as e:\n # Exception's message will give lot of specific details\n raise ValueError(str(e))\n except InvalidParameterValues:\n raise ValueError(\"Must specify valid parameter values for policy template '{}'\".format(template_name))\n return {\n \"PolicyName\": template_name + '-' + str(uuid.uuid4()),\n 
\"PolicyDocument\": policy_document\n }", "def create_policy(self, fn_inputs):\n\n # determine if the policy is already in place\n response, err_msg = self._get_policy_by_sha256(fn_inputs.get('reaqta_sha256'))\n if err_msg:\n return {}, err_msg\n\n policy_info = response.json()\n if policy_info.get('result'):\n return {}, 'A policy already exists for this file hash: {0}. <a href=\"{1}\" target=\"blank\">{1}</a>'.format(\n fn_inputs.get('reaqta_sha256'),\n self.make_linkback_url(policy_info['result'][0]['id'], POLICY_DETAILS))\n\n params = {\n \"sha256\": fn_inputs.get('reaqta_sha256'),\n \"title\": fn_inputs.get('reaqta_policy_title', ''),\n \"description\": fn_inputs.get('reaqta_policy_description', ''),\n \"disable\": not fn_inputs.get('reaqta_policy_enabled', True),\n \"block\": fn_inputs.get('reaqta_policy_block', False),\n \"enabledGroups\": [],\n \"disabledGroups\": []\n }\n\n # collect all the group names and find the groupIds\n if fn_inputs.get('reaqta_policy_included_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_included_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['enabledGroups'] = group_id_list\n\n if fn_inputs.get('reaqta_policy_excluded_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_excluded_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['disabledGroups'] = group_id_list\n\n LOG.debug(\"create_policy: %s\", params)\n url = urljoin(POLICY_URI, \"trigger-on-process-hash\")\n return self.api_call(\"POST\", url, params)", "def from_xml_node(cls, xml_node):\n policies = []\n for policy_node in xml_node.iter(tag=xml_tags.Elements.POLICY):\n policies.append(Policy.from_xml_node(policy_node))\n return cls(policies)", "def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy", "def from_dict(cls, _dict: Dict) -> 'UserSettings':\n args = {}\n if 'language' in _dict:\n args['language'] = _dict.get('language')\n if 'notification_language' in _dict:\n args['notification_language'] = _dict.get('notification_language')\n if 'allowed_ip_addresses' in _dict:\n args['allowed_ip_addresses'] = _dict.get('allowed_ip_addresses')\n if 'self_manage' in _dict:\n args['self_manage'] = _dict.get('self_manage')\n return cls(**args)", "def __init__(self, settings, valid, defaults=None):\n\n try:\n with open(settings, 'r') as settings_file:\n self._settings = json.load(settings_file)\n except TypeError:\n self._settings = dict(settings)\n self._settings = Settings._inject_defaults(self._settings, defaults)\n Settings._validity_check(self._settings, valid)", "def from_json(data: dict) -> \"Policy\":\n try:\n return PolicySchema().load(data)\n except ValidationError as err:\n raise PolicyCreateError(*err.args)", "def generate_object(self, **kwargs):\n from ranger_performance_tool import perf_globals\n service_type_mapping = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"service_type_mapping\")\n if \"id\" not in kwargs:\n kwargs[\"id\"] = self.random_generator.generate_int()\n if \"name\" not in kwargs:\n 
kwargs[\"name\"] = self.random_generator.generate_string()\n if \"service\" not in kwargs:\n enabled_service = random.choice(perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\"))\n service_type = service_type_mapping[enabled_service]\n kwargs[\"service\"] = enabled_service\n kwargs[\"serviceType\"] = service_type\n policy_object = RangerPolicy()\n for key, value in kwargs.items():\n if key not in dir(policy_object):\n raise Exception(\"Invalid key: \" + key)\n policy_object[key] = value\n service_type = policy_object.serviceType\n service_store = perf_globals.OBJECT_STORE.service_store[service_type]\n policy_object.resources = service_store.generate_resources()\n return policy_object", "def CreatePolicyForExternalPolicyData(self, policy_key):\n settings = ep.ExternalPolicyData()\n data = self.server.ReadPolicyDataFromDataDir(policy_key)\n if data:\n settings.download_url = urlparse.urljoin(\n self.server.GetBaseURL(), 'externalpolicydata?key=%s' % policy_key)\n settings.secure_hash = hashlib.sha256(data).digest()\n return settings.SerializeToString()\n else:\n return None", "def SecurityPolicyFromFile(input_file, messages, file_format):\n\n if file_format == 'yaml':\n parsed_security_policy = yaml.load(input_file)\n else:\n try:\n parsed_security_policy = json.load(input_file)\n except ValueError as e:\n raise exceptions.BadFileException('Error parsing JSON: {0}'.format(\n six.text_type(e)))\n\n security_policy = messages.SecurityPolicy()\n if 'description' in parsed_security_policy:\n security_policy.description = parsed_security_policy['description']\n if 'fingerprint' in parsed_security_policy:\n security_policy.fingerprint = base64.urlsafe_b64decode(\n parsed_security_policy['fingerprint'].encode('ascii'))\n if 'type' in parsed_security_policy:\n security_policy.type = (\n messages.SecurityPolicy.TypeValueValuesEnum(\n parsed_security_policy['type']))\n if 'cloudArmorConfig' in parsed_security_policy:\n security_policy.cloudArmorConfig = messages.SecurityPolicyCloudArmorConfig(\n enableMl=parsed_security_policy['cloudArmorConfig']['enableMl'])\n if 'adaptiveProtectionConfig' in parsed_security_policy:\n security_policy.adaptiveProtectionConfig = (\n messages.SecurityPolicyAdaptiveProtectionConfig(\n layer7DdosDefenseConfig=messages\n .SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(\n enable=parsed_security_policy['adaptiveProtectionConfig']\n ['layer7DdosDefenseConfig']['enable']),))\n if 'autoDeployConfig' in parsed_security_policy['adaptiveProtectionConfig']:\n security_policy.adaptiveProtectionConfig.autoDeployConfig = (\n messages.SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig())\n if 'loadThreshold' in parsed_security_policy['adaptiveProtectionConfig'][\n 'autoDeployConfig']:\n security_policy.adaptiveProtectionConfig.autoDeployConfig.loadThreshold = (\n parsed_security_policy['adaptiveProtectionConfig']\n ['autoDeployConfig']['loadThreshold'])\n if 'confidenceThreshold' in parsed_security_policy[\n 'adaptiveProtectionConfig']['autoDeployConfig']:\n security_policy.adaptiveProtectionConfig.autoDeployConfig.confidenceThreshold = (\n parsed_security_policy['adaptiveProtectionConfig']\n ['autoDeployConfig']['confidenceThreshold'])\n if 'impactedBaselineThreshold' in parsed_security_policy[\n 'adaptiveProtectionConfig']['autoDeployConfig']:\n security_policy.adaptiveProtectionConfig.autoDeployConfig.impactedBaselineThreshold = (\n parsed_security_policy['adaptiveProtectionConfig']\n ['autoDeployConfig']['impactedBaselineThreshold'])\n 
if 'expirationSec' in parsed_security_policy['adaptiveProtectionConfig'][\n 'autoDeployConfig']:\n security_policy.adaptiveProtectionConfig.autoDeployConfig.expirationSec = (\n parsed_security_policy['adaptiveProtectionConfig']\n ['autoDeployConfig']['expirationSec'])\n if 'ruleVisibility' in parsed_security_policy['adaptiveProtectionConfig'][\n 'layer7DdosDefenseConfig']:\n security_policy.adaptiveProtectionConfig.layer7DdosDefenseConfig.ruleVisibility = (\n messages.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig\n .RuleVisibilityValueValuesEnum(\n parsed_security_policy['adaptiveProtectionConfig']\n ['layer7DdosDefenseConfig']['ruleVisibility']))\n if 'advancedOptionsConfig' in parsed_security_policy:\n advanced_options_config = parsed_security_policy['advancedOptionsConfig']\n security_policy.advancedOptionsConfig = (\n messages.SecurityPolicyAdvancedOptionsConfig())\n if 'jsonParsing' in advanced_options_config:\n security_policy.advancedOptionsConfig.jsonParsing = (\n messages.SecurityPolicyAdvancedOptionsConfig\n .JsonParsingValueValuesEnum(\n advanced_options_config['jsonParsing']))\n if 'jsonCustomConfig' in advanced_options_config:\n security_policy.advancedOptionsConfig.jsonCustomConfig = (\n messages.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig(\n contentTypes=advanced_options_config\n ['jsonCustomConfig'].get('contentTypes', [])))\n if 'logLevel' in advanced_options_config:\n security_policy.advancedOptionsConfig.logLevel = (\n messages.SecurityPolicyAdvancedOptionsConfig.LogLevelValueValuesEnum(\n advanced_options_config['logLevel']))\n if 'userIpRequestHeaders' in advanced_options_config:\n security_policy.advancedOptionsConfig.userIpRequestHeaders = (\n advanced_options_config['userIpRequestHeaders'])\n if 'ddosProtectionConfig' in parsed_security_policy:\n security_policy.ddosProtectionConfig = (\n messages.SecurityPolicyDdosProtectionConfig(\n ddosProtection=messages.SecurityPolicyDdosProtectionConfig\n .DdosProtectionValueValuesEnum(\n parsed_security_policy['ddosProtectionConfig']\n ['ddosProtection'])))\n if 'recaptchaOptionsConfig' in parsed_security_policy:\n security_policy.recaptchaOptionsConfig = (\n messages.SecurityPolicyRecaptchaOptionsConfig())\n if 'redirectSiteKey' in parsed_security_policy['recaptchaOptionsConfig']:\n security_policy.recaptchaOptionsConfig.redirectSiteKey = (\n parsed_security_policy['recaptchaOptionsConfig']['redirectSiteKey'])\n\n if 'userDefinedFields' in parsed_security_policy:\n user_defined_fields = []\n for udf in parsed_security_policy['userDefinedFields']:\n user_defined_field = messages.SecurityPolicyUserDefinedField()\n user_defined_field.name = udf['name']\n user_defined_field.base = (\n messages.SecurityPolicyUserDefinedField.BaseValueValuesEnum(\n udf['base']\n )\n )\n user_defined_field.offset = udf['offset']\n user_defined_field.size = udf['size']\n if 'mask' in udf:\n user_defined_field.mask = udf['mask']\n user_defined_fields.append(user_defined_field)\n security_policy.userDefinedFields = user_defined_fields\n\n rules = []\n for rule in parsed_security_policy['rules']:\n security_policy_rule = messages.SecurityPolicyRule()\n security_policy_rule.action = rule['action']\n if 'description' in rule:\n security_policy_rule.description = rule['description']\n if 'match' in rule:\n match = messages.SecurityPolicyRuleMatcher()\n if 'versionedExpr' in rule['match']:\n match.versionedExpr = ConvertToEnum(\n rule['match']['versionedExpr'], messages\n )\n if 'expr' in rule['match']:\n match.expr = 
messages.Expr(\n expression=rule['match']['expr']['expression']\n )\n if 'exprOptions' in rule['match']:\n expr_options = messages.SecurityPolicyRuleMatcherExprOptions()\n if 'recaptchaOptions' in rule['match']['exprOptions']:\n expr_options.recaptchaOptions = (\n messages.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions(\n actionTokenSiteKeys=rule['match']['exprOptions'][\n 'recaptchaOptions'\n ].get('actionTokenSiteKeys', []),\n sessionTokenSiteKeys=rule['match']['exprOptions'][\n 'recaptchaOptions'\n ].get('sessionTokenSiteKeys', []),\n )\n )\n match.exprOptions = expr_options\n if 'config' in rule['match']:\n if 'srcIpRanges' in rule['match']['config']:\n match.config = messages.SecurityPolicyRuleMatcherConfig(\n srcIpRanges=rule['match']['config']['srcIpRanges']\n )\n security_policy_rule.match = match\n if 'networkMatch' in rule:\n network_match = messages.SecurityPolicyRuleNetworkMatcher()\n if 'userDefinedFields' in rule['networkMatch']:\n user_defined_fields = []\n for udf in rule['networkMatch']['userDefinedFields']:\n user_defined_field_match = (\n messages.SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch()\n )\n user_defined_field_match.name = udf['name']\n user_defined_field_match.values = udf['values']\n user_defined_fields.append(user_defined_field_match)\n network_match.userDefinedFields = user_defined_fields\n if 'srcIpRanges' in rule['networkMatch']:\n network_match.srcIpRanges = rule['networkMatch']['srcIpRanges']\n if 'destIpRanges' in rule['networkMatch']:\n network_match.destIpRanges = rule['networkMatch']['destIpRanges']\n if 'ipProtocols' in rule['networkMatch']:\n network_match.ipProtocols = rule['networkMatch']['ipProtocols']\n if 'srcPorts' in rule['networkMatch']:\n network_match.srcPorts = rule['networkMatch']['srcPorts']\n if 'destPorts' in rule['networkMatch']:\n network_match.destPorts = rule['networkMatch']['destPorts']\n if 'srcRegionCodes' in rule['networkMatch']:\n network_match.srcRegionCodes = rule['networkMatch']['srcRegionCodes']\n if 'srcAsns' in rule['networkMatch']:\n network_match.srcAsns = rule['networkMatch']['srcAsns']\n security_policy_rule.networkMatch = network_match\n security_policy_rule.priority = int(rule['priority'])\n if 'preview' in rule:\n security_policy_rule.preview = rule['preview']\n rules.append(security_policy_rule)\n if 'redirectTarget' in rule:\n security_policy_rule.redirectTarget = rule['redirectTarget']\n if 'ruleNumber' in rule:\n security_policy_rule.ruleNumber = int(rule['ruleNumber'])\n if 'redirectOptions' in rule:\n redirect_options = messages.SecurityPolicyRuleRedirectOptions()\n if 'type' in rule['redirectOptions']:\n redirect_options.type = (\n messages.SecurityPolicyRuleRedirectOptions.TypeValueValuesEnum(\n rule['redirectOptions']['type']))\n if 'target' in rule['redirectOptions']:\n redirect_options.target = rule['redirectOptions']['target']\n security_policy_rule.redirectOptions = redirect_options\n if 'headerAction' in rule:\n header_action = messages.SecurityPolicyRuleHttpHeaderAction()\n headers_in_rule = rule['headerAction'].get('requestHeadersToAdds', [])\n headers_to_add = []\n for header_to_add in headers_in_rule:\n headers_to_add.append(\n messages.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption(\n headerName=header_to_add['headerName'],\n headerValue=header_to_add['headerValue']))\n if headers_to_add:\n header_action.requestHeadersToAdds = headers_to_add\n security_policy_rule.headerAction = header_action\n if 'rateLimitOptions' in rule:\n rate_limit_options = rule['rateLimitOptions']\n 
security_policy_rule.rateLimitOptions = (\n messages.SecurityPolicyRuleRateLimitOptions(\n rateLimitThreshold=messages\n .SecurityPolicyRuleRateLimitOptionsThreshold(\n count=rate_limit_options['rateLimitThreshold']['count'],\n intervalSec=rate_limit_options['rateLimitThreshold']\n ['intervalSec']),\n conformAction=rate_limit_options['conformAction'],\n exceedAction=rate_limit_options['exceedAction']))\n if 'exceedActionRpcStatus' in rate_limit_options:\n exceed_action_rpc_status = (\n messages.SecurityPolicyRuleRateLimitOptionsRpcStatus()\n )\n if 'code' in rate_limit_options['exceedActionRpcStatus']:\n exceed_action_rpc_status.code = rate_limit_options[\n 'exceedActionRpcStatus']['code']\n if 'message' in rate_limit_options['exceedActionRpcStatus']:\n exceed_action_rpc_status.message = rate_limit_options[\n 'exceedActionRpcStatus']['message']\n security_policy_rule.rateLimitOptions.exceedActionRpcStatus = (\n exceed_action_rpc_status\n )\n if 'exceedRedirectOptions' in rate_limit_options:\n exceed_redirect_options = messages.SecurityPolicyRuleRedirectOptions()\n if 'type' in rate_limit_options['exceedRedirectOptions']:\n exceed_redirect_options.type = (\n messages.SecurityPolicyRuleRedirectOptions.TypeValueValuesEnum(\n rate_limit_options['exceedRedirectOptions']['type']))\n if 'target' in rate_limit_options['exceedRedirectOptions']:\n exceed_redirect_options.target = rate_limit_options[\n 'exceedRedirectOptions']['target']\n security_policy_rule.rateLimitOptions.exceedRedirectOptions = (\n exceed_redirect_options)\n if 'banThreshold' in rate_limit_options:\n security_policy_rule.rateLimitOptions.banThreshold = (\n messages.SecurityPolicyRuleRateLimitOptionsThreshold(\n count=rate_limit_options['banThreshold']['count'],\n intervalSec=rate_limit_options['banThreshold']['intervalSec']))\n if 'banDurationSec' in rate_limit_options:\n security_policy_rule.rateLimitOptions.banDurationSec = (\n rate_limit_options['banDurationSec'])\n if 'enforceOnKey' in rate_limit_options:\n security_policy_rule.rateLimitOptions.enforceOnKey = (\n messages.SecurityPolicyRuleRateLimitOptions\n .EnforceOnKeyValueValuesEnum(rate_limit_options['enforceOnKey']))\n if 'enforceOnKeyName' in rate_limit_options:\n security_policy_rule.rateLimitOptions.enforceOnKeyName = (\n rate_limit_options['enforceOnKeyName'])\n if 'preconfiguredWafConfig' in rule:\n preconfig_waf_config = messages.SecurityPolicyRulePreconfiguredWafConfig()\n for exclusion in rule['preconfiguredWafConfig'].get('exclusions', []):\n exclusion_to_add = (\n messages.SecurityPolicyRulePreconfiguredWafConfigExclusion())\n if 'targetRuleSet' in exclusion:\n exclusion_to_add.targetRuleSet = exclusion['targetRuleSet']\n for target_rule_id in exclusion.get('targetRuleIds', []):\n exclusion_to_add.targetRuleIds.append(target_rule_id)\n for request_header in exclusion.get('requestHeadersToExclude', []):\n exclusion_to_add.requestHeadersToExclude.append(\n ConvertPreconfigWafExclusionRequestField(request_header,\n messages))\n for request_cookie in exclusion.get('requestCookiesToExclude', []):\n exclusion_to_add.requestCookiesToExclude.append(\n ConvertPreconfigWafExclusionRequestField(request_cookie,\n messages))\n for request_query_param in exclusion.get('requestQueryParamsToExclude',\n []):\n exclusion_to_add.requestQueryParamsToExclude.append(\n ConvertPreconfigWafExclusionRequestField(request_query_param,\n messages))\n for request_uri in exclusion.get('requestUrisToExclude', []):\n exclusion_to_add.requestUrisToExclude.append(\n 
ConvertPreconfigWafExclusionRequestField(request_uri, messages))\n preconfig_waf_config.exclusions.append(exclusion_to_add)\n security_policy_rule.preconfiguredWafConfig = preconfig_waf_config\n\n security_policy.rules = rules\n\n return security_policy", "def _get_policy_object(\n platform,\n filters=None,\n pillar_key=\"acl\",\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n):\n policy = _Policy()\n policy_filters = []\n if not filters:\n filters = []\n for filter_ in filters:\n if not filter_ or not isinstance(filter_, dict):\n continue # go to the next filter\n filter_name, filter_config = next(iter(filter_.items()))\n header = capirca.lib.policy.Header() # same header everywhere\n target_opts = [platform, filter_name]\n filter_options = filter_config.pop(\"options\", None)\n if filter_options:\n filter_options = _make_it_list({}, filter_name, filter_options)\n # make sure the filter options are sent as list\n target_opts.extend(filter_options)\n target = capirca.lib.policy.Target(target_opts)\n header.AddObject(target)\n filter_terms = []\n for term_ in filter_config.get(\"terms\", []):\n if term_ and isinstance(term_, dict):\n term_name, term_fields = next(iter(term_.items()))\n term = _get_term_object(\n filter_name,\n term_name,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n **term_fields\n )\n filter_terms.append(term)\n policy_filters.append((header, filter_terms))\n policy.filters = policy_filters\n log.debug(\"Policy config:\")\n log.debug(str(policy))\n platform_generator = _import_platform_generator(platform)\n policy_config = platform_generator(policy, 2)\n log.debug(\"Generating policy config for %s:\", platform)\n log.debug(str(policy_config))\n return policy_config", "def create_policy(self, create_policy_details, **kwargs):\n resource_path = \"/policies\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_policy got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_policy_details,\n response_type=\"Policy\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_policy_details,\n response_type=\"Policy\")", "def __init__(self, settings):\n self._settings = settings" ]
[ "0.6121792", "0.5455022", "0.53785753", "0.5297272", "0.5285958", "0.5271405", "0.522127", "0.51688355", "0.5129926", "0.51216954", "0.50879085", "0.5074323", "0.50737494", "0.50673383", "0.50673383", "0.50573564", "0.5042459", "0.500692", "0.500038", "0.4994687", "0.4989003", "0.49742106", "0.4954014", "0.4950478", "0.49310994", "0.49153197", "0.48904705", "0.4887197", "0.48701125", "0.48620683" ]
0.70107263
0
Get the unauthenticated userid for this request. When using HTTPSRPHMACAuth, this involves looking in the HTTP Authorization header to find the reported username.
def unauthenticated_userid(self, request):\n    params = self._get_auth_params(request)\n    if params is None:\n        return None\n    return params.get("username")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unauthenticated_userid(self, request):\n authorization = request.headers.get('Authorization')\n if authorization is None:\n return None\n token_object = find_token_for_authorization(authorization)\n if token_object is None:\n raise httpexceptions.HTTPUnauthorized(headers={\n 'WWW-Authenticate': 'Bearer realm=\"Weasyl\" error=\"invalid_token\"',\n })\n return token_object.userid", "def unauthenticated_userid(self, request):\n credentials = self._get_credentials(request)\n if credentials:\n return credentials[0]", "def unauthenticated_userid(self, request):\n authorization = request.headers.get('Authorization', '')\n try:\n authmeth, token = authorization.split(' ', 1)\n except ValueError:\n return None\n if authmeth.lower() != 'bearer':\n return None\n\n user_id, client_name = self._verify_token(token, request)\n\n # Don't add suffix if authentication failed, or no specific client name is configured\n if client_name is None or client_name == 'default':\n return user_id\n\n return '{}-{}'.format(user_id, client_name)", "def unauthenticated_userid(self, request):\n # we verify and extract the token here, so that we can\n # inspect the claims in the callback\n # if verify fails, we return None\n # any claims in the request.env\n claims = self._validate_access_token(request)\n if not claims:\n return None\n oidc = self._get_utility(request)\n return claims.get(oidc.userid_claim)", "def authenticated_userid(self, request):\n params = self._get_auth_params(request)\n if params is None:\n return None\n if not self._authenticate(request, params):\n return None\n username = params[\"username\"]\n if self.groupfinder is not None:\n if self.groupfinder(username) is None:\n return None\n return username", "def get_userid(self):\n user_id = \"\"\n if self.is_valid():\n user_id = self.__httprequest.session[\"lti_user_id\"]\n return user_id", "def get_identifier(self, request):\r\n try:\r\n return request._authentication_backend.get_identifier(request)\r\n except AttributeError:\r\n return 'nouser'", "def get_identifier(self, request):\r\n username, api_key = self.extract_credentials(request)\r\n return username or 'nouser'", "def get_identifier(self, request):\r\n return request.META.get('REMOTE_USER', 'nouser')", "def get_userid():\n return _userid()", "def get_identifier(self, request):\r\n if hasattr(request, 'user'):\r\n try:\r\n return request.user.get_username()\r\n except AttributeError:\r\n pass\r\n return 'nouser'", "def get_user_id(self, details, response):\n return details['username']", "def get_identifier(self, request):\n return request.user.username", "def get_identifier(self, request):\r\n return request.user.username", "def userid(self):\n mtool = getToolByName(self.context, 'portal_membership')\n return mtool.getAuthenticatedMember().getId()", "async def authorized_userid(self, identity: str) -> Optional[str]:\n return identity if identity == \"jack\" else None", "def _getLoggedinUserId(self):\n securityManager = getSecurityManager()\n return securityManager.getUser()._login", "def get_user_id(self, details, response):\n return response['uid']", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")", "def get_userid(self):\n return util.kbase_env.user", "async def authorized_userid(self, identity):\r\n try:\r\n dct = json.loads(identity)\r\n async with self.db.execute(\r\n '''\r\n select count(*) from user WHERE username=? 
AND rowid=?\r\n ''', (dct['username'], dct['rowid'])\r\n ) as cursor:\r\n n = (await cursor.fetchone())[0]\r\n if n:\r\n return identity\r\n except Exception:\r\n pass\r\n return None", "def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")", "def _get_unknown_userid(self):\n cursor = self.conn.cursor()\n unknown_user_str = dbtypes.User.null\n cursor.execute(\"select id from users where uniqueid='%s'\" % unknown_user_str)\n return cursor.fetchone()[0]", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")", "def get_current_uid():\n # TODO: Find a better way to access the token\n return request.token['id']", "def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")" ]
[ "0.86458576", "0.86111814", "0.82587343", "0.81629455", "0.7881346", "0.71495134", "0.70761484", "0.6976793", "0.6847131", "0.6801054", "0.6793066", "0.67122", "0.6617604", "0.6589437", "0.6586149", "0.6583202", "0.65430385", "0.65210736", "0.6506743", "0.6506743", "0.6495544", "0.6478928", "0.64359576", "0.6428565", "0.64278436", "0.64278436", "0.64278436", "0.63903385", "0.6389679", "0.63287145" ]
0.8799287
0
Get the list of effective principals for this request.
def effective_principals(self, request):\n    principals = [Everyone]\n    params = self._get_auth_params(request)\n    if params is None:\n        return principals\n    if not self._authenticate(request, params):\n        return principals\n    username = params["username"]\n    if self.groupfinder is None:\n        groups = ()\n    else:\n        groups = self.groupfinder(username)\n    if groups is None:\n        return principals\n    principals.append(username)\n    principals.append(Authenticated)\n    principals.extend(groups)\n    return principals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def effective_principals(self, principal_id, request=None, context=None):", "def get_all_principals(self, principal_id):", "def get_cm_kerberos_principals(self):\n return self._get(endpoint='{}/cm/kerberosPrincipals'.format(self.api_version)).json()", "def _principals_for_authenticated_user(user):\n principals = []\n if user.is_superuser:\n principals.append(\"group:admins\")\n if user.is_moderator or user.is_superuser:\n principals.append(\"group:moderators\")\n if user.is_psf_staff or user.is_superuser:\n principals.append(\"group:psf_staff\")\n\n # user must have base admin access if any admin permission\n if principals:\n principals.append(\"group:with_admin_dashboard_access\")\n\n return principals", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def principal_validator(principals: List[str]) -> List[str]:\n return _base_principal_validator(principals)", "def get_principals(self, role_id):", "def propietarios(self):\n return self.expedientepersona_set.filter(propietario=True)", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def principalCollections(self):\n return ()", "def effective_roles(self):\n # type: (...) -> List[RoleAndOrigins]\n return self._effective_roles", "def getEssentialList(self):\n return self.essentials", "def __base_acl__(self) -> list:\n _acls = [\n (Allow, 'g:professionals', ['list', 'view', 'edit']),\n ]\n return _acls", "def list_principals_for_portfolio_single_page(self, **kwargs):\n return slurp(\n 'list_principals_for_portfolio',\n self.list_principals_for_portfolio,\n 'Principals',\n **kwargs\n )", "def accessControlList(self):\n return allACL", "def policies(self):\n return self._policies", "def get_challenges(self):\n return Challenge.objects.filter(id__in=GrandChallenge.objects.filter(Q(challenge__user_from__user__id=self.id)|Q(challenge__user_to__user__id=self.id)).order_by('round').values('challenge'))", "def securities(self) -> List[Security]:\n return self.session.get_securities(self.account_id)", "def authorizations(self):\r\n return users.Authorizations(self)", "def publishing_principles(self) -> object:\n return self._publishing_principles", "def policies(self):\n return self._data.get('policies')", "def policies(self, request):\n policies = OtterPolicies(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return policies.app.resource()", "def security_entries(self):\n return self._security_entries", "def authorizations(self):\r\n return authorizations.Authorizations(self)", "def claimlist(self):\n return list(\n set(\n list(self.caller.player_ob.db.claimed_scenelist or [])\n + list(self.requested_validation)\n )\n )", "def fulfillments(self):\n return [Fulfillment(x) for x in self._dict.get('fulfillments', [])]", "def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)", "def principal_or_all_authenticated_users_validator(principals: List[str]) -> List[str]:\n return _base_principal_validator(\n principals, special_vals={\"all_authenticated_users\"}\n )", "def get_protection_policies(cohesity_client):\n policy_list = cohesity_client.protection_policies.get_protection_policies()\n policy_list = policy_list if policy_list else []\n for policy in policy_list:\n exported_res_dict[\"Protection Policies\"].append(policy.name)\n return policy_list", "def get(self):\n\n return self.get_request_handler(request.headers).get_all_education_levels()" ]
[ "0.68408215", "0.6034431", "0.5508367", "0.5466632", "0.5356161", "0.5305189", "0.52793163", "0.52767515", "0.5255969", "0.51875716", "0.5126472", "0.5123606", "0.5084907", "0.5058053", "0.5046586", "0.50261647", "0.5022569", "0.5021784", "0.49984184", "0.49808112", "0.49696845", "0.49680707", "0.49568966", "0.49506888", "0.49283814", "0.489569", "0.48885027", "0.4867634", "0.48606402", "0.48511192" ]
0.66565347
1
View that challenges for credentials with a "401 Unauthorized". This method can be used as a pyramid "forbidden view" in order to challenge for auth credentials when necessary.
def challenge_view(self, request):
    headerlist = [("Content-Type", "text/plain")]
    headerlist.extend(self._get_challenge_headers(request))
    return Response("Unauthorized", status="401 Unauthorized", headerlist=headerlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_401(error):\n return render_template('/error401.html'), 401", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(render_template('index.html', auth=False), 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def unauthorized():\n return HttpError(401)", "def get_authenticated_denied(self):", "def unauthorized_handler(self):\n return flask.redirect(\"/login\")", "def _handle_authentication_error(self):\n response = make_response('Access Denied')\n response.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()\n response.status_code = 401\n return response", "def login_required_403(view):\n @wraps(view)\n def dec_view(request, *args, **kwargs):\n if not request.user.is_authenticated():\n return JsonResponse({\"detail\": \"You have to log in\"}, status=403)\n\n return view(request, *args, **kwargs)\n\n return dec_view", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your 
access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})" ]
[ "0.7490076", "0.73721296", "0.73475176", "0.734617", "0.7247712", "0.71779835", "0.71317047", "0.7059749", "0.7015688", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938", "0.6969938" ]
0.75270486
0
Get headers necessary for a fresh srphmacauth challenge. This method generates a new srphmacauth challenge for the given request, including a fresh nonce. If the environment is marked as having a stale nonce then this is indicated in the challenge.
def _get_challenge_headers(self, request, check_stale=True):
    params = {}
    params["realm"] = self.realm
    if self.domain is not None:
        params["domain"] = self.domain
    # Escape any special characters in those values, so we can send
    # them as quoted-strings. The extra values added below are under
    # our control so we know they don't contain quotes.
    for key, value in params.iteritems():
        params[key] = value.replace('"', '\\"')
    # Add a fresh set of challenge parameters.
    params.update(self._get_challenge_params(request))
    # Mark the nonce as stale if told so by the environment.
    if check_stale and request.environ.get(_ENVKEY_STALE_NONCE):
        params["stale"] = "TRUE"
    # Construct the final header as quoted-string k/v pairs.
    value = ", ".join('%s="%s"' % itm for itm in params.iteritems())
    return [("WWW-Authenticate", "SRP-HMAC " + value)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_headers(credentials, path, nonce, body):\n\n sig = '/api/' + path + nonce + body\n sig_hash = hmac.new(\n credentials['secret'].encode('utf-8'),\n sig.encode('utf-8'),\n hashlib.sha384\n ).hexdigest()\n\n headers = {\n 'bfx-nonce': nonce,\n 'bfx-apikey': credentials['key'],\n 'bfx-signature': sig_hash,\n 'content-type': 'application/json'\n }\n return headers", "def get_headers(self,\n method,\n url,\n params) -> Dict[str, Any]:\n payload = self.generate_payload(method, url, params)\n headers = {\n \"Authorization\": f\"HS256 {payload}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n return headers", "def compute_header_hmac_hash(context):\n\n return hmac.new(\n hashlib.sha512(\n b'\\xff' * 8 +\n hashlib.sha512(\n context.header.value.dynamic_header.master_seed.data +\n context.transformed_key +\n b'\\x01'\n ).digest()\n ).digest(),\n context.header.data,\n hashlib.sha256\n ).digest()", "def generateChallenge(self, request):\n client = request.getClientIP() or \"0.0.0.0\"\n seed = self._generateSeed()\n now = self._getTime()\n raw = self._challengeFmt.format(realm=self.realm, client=client,\n time=now, seed=seed, sep=self.sep)\n encoded = raw.encode(\"base64\").replace(\"\\n\", \"\")\n signed = self._sign(raw).encode(\"base64\").replace(\"\\n\", \"\")\n return self.sep.join((signed, encoded))", "def _get_request_header() -> Dict:\n metas, envs = get_full_version()\n\n header = {\n **{f'jinameta-{k}': str(v) for k, v in metas.items()},\n **envs,\n }\n return header", "def get_headers(self):\n headers = self.headers\n\n if self.jwt_secret:\n current = int(time.time())\n params = {'exp': current + self.jwt_token_length}\n token = jwt.encode(params, self.jwt_secret, algorithm='HS256')\n headers = {\n **headers,\n 'Authorization': 'Bearer {}'.format(token.decode('utf-8')),\n }\n\n return headers", "def _headers(helper):\n return {\n 'Authorization': 'Splunk {0}'.format(\n helper.context_meta['session_key'])}", "def missing_header_fields():\n auth_token = get_auth_token()\n\n headers = '{\"Host\": \"$host\",\"Date\": \"DATE\",'\n headers += '\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, token=auth_token)", "def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }", "def _get_challenge_params(self, request, params=None, nonce=None):\n # Parse the parameters from the incoming request.\n # We're only looking for username, so don't bother validating\n # any other parameters that may be present.\n if params is None:\n params = parse_authz_header(request)\n if params is None:\n return {}\n # If they didn't provide the username, they get a blank challenge.\n # This is the first request in the handshake.\n username = params.get(\"username\")\n if username is None:\n return {}\n # If they did provide the username, then they need to know the\n # salt, server-side key, etc. 
This is the second request.\n (algorithm, salt, verifier) = self._get_verifier(username)\n if verifier is None:\n return {}\n new_params = {}\n new_params[\"algorithm\"] = algorithm\n new_params[\"salt\"] = salt\n # Generate new nonce if needed\n if nonce is None:\n nonce = self.nonce_manager.generate_nonce(request)\n new_params[\"nonce\"] = nonce\n # Calculate the corresponding server public key.\n privkey = self._get_privkey(nonce)\n pubkey = calculate_server_pubkey(params, privkey, verifier)\n new_params[\"skey\"] = b64encode(int_to_bytes(pubkey))\n # That'll do it.\n return new_params", "def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}", "async def gen_headers(auth_string):\n return {\n \"Authorization\": f\"Basic {str(b64encode(bytearray(auth_string, 'utf8')), 'utf-8')}\"\n }", "def _build_mesh_authorization_header(\n self,\n nonce: str = None,\n noncecount: int = 0,\n ):\n if not nonce:\n nonce = str(uuid.uuid4())\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d%H%M\")\n\n # e.g. NHSMESH AMP01HC001:bd0e2bd5-218e-41d0-83a9-73fdec414803:0:202005041305\n hmac_msg = (\n f\"{self.mailbox}:{nonce}:{str(noncecount)}:\"\n + f\"{self.params[MeshMailbox.MAILBOX_PASSWORD]}:{timestamp}\"\n )\n\n hash_code = hmac.HMAC(\n self.params[MeshMailbox.MESH_SHARED_KEY].encode(),\n hmac_msg.encode(),\n sha256,\n ).hexdigest()\n return (\n f\"{self.AUTH_SCHEMA_NAME} {self.mailbox}:{nonce}:{str(noncecount)}:\"\n + f\"{timestamp}:{hash_code}\"\n )", "def forget(self, request):\n return self._get_challenge_headers(request, check_stale=False)", "def get_headers():\n headers = {\n \"Authorization\": \"Token {}\".format(get_token()),\n }\n\n return headers", "def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers", "def create_marconi_headers():\n auth_token = get_auth_token()\n\n headers = ('{\"Host\": \"$host\",\"User-Agent\": \"$user_agent\",\"Date\":\"DATE\",'\n '\"Accept\": \"application/json\",\"Accept-Encoding\": \"gzip\",'\n '\"X-Project-ID\": \"$project_id\",'\n '\"X-Auth-Token\": \"$token\",\"Client-ID\": \"$uuid\"}')\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, user_agent=CFG.user_agent,\n project_id=CFG.project_id,\n token=auth_token, uuid=CFG.uuid)", "def get_request_headers(self):\n\t\theaders = {\n\t\t\t'Cache-Control': 'no-cache no-store max-age=1',\n\t\t\t'Connection': 'cache-control',\n\t\t}\n\t\tif self.last_modified:\n\t\t\theaders['If-Modified-Since'] = self.last_modified\n\t\tif self.etag:\n\t\t\theaders['If-None-Match'] = self.etag\n\t\treturn headers", "def getHeaders():\n userid = rhev_settings.USERNAME\n passwd = rhev_settings.PASSWORD\n # base64.encodestring adds trailing \\n. 
\n auth = base64.encodestring(\"%s:%s\" % (userid, passwd)).rstrip(\"\\n\")\n headers = {\"Content-Type\": \"application/xml\",\n \"Accept\": \"application/xml\",\n \"Accept-Charset\": \"utf-8\",\n \"Authorization\" : (\"Basic %s\" % auth)}\n return headers", "def _build_headers(self):\n headers = {\n 'Authorization': 'Bearer {api_key}'.format(api_key=self._api_key),\n 'SplitSDKVersion': SDK_VERSION,\n 'Accept-Encoding': 'gzip'\n }\n\n if self._split_sdk_machine_name is not None:\n headers['SplitSDKMachineName'] = self._split_sdk_machine_name() \\\n if callable(self._split_sdk_machine_name) else self._split_sdk_machine_name\n\n if self._split_sdk_machine_ip is not None:\n headers['SplitSDKMachineIP'] = self._split_sdk_machine_ip() \\\n if callable(self._split_sdk_machine_ip) else self._split_sdk_machine_ip\n\n return headers", "def vaultRequestHeaderFromSessionId(sessionId, clientId):\n headers = {'Authorization': sessionId, 'clientId': clientId}\n return headers", "def _update_challenge(request: PipelineRequest, challenger: \"PipelineResponse\") -> HttpChallenge:\n\n challenge = HttpChallenge(\n request.http_request.url,\n challenger.http_response.headers.get(\"WWW-Authenticate\"),\n response_headers=challenger.http_response.headers,\n )\n ChallengeCache.set_challenge_for_url(request.http_request.url, challenge)\n return challenge", "def _get_auth_params(self, request):\n params = self._get_unvalidated_auth_params(request)\n if params is None:\n return None\n # Check that they're valid srp-hmac-auth parameters.\n if not validate_parameters(params, self.realm):\n return None\n # Check that the digest is applied to the correct URI.\n if not validate_uri(request, params):\n return None\n # Check that the provided nonce is valid.\n # If this looks like a stale request, mark it in the request\n # so we can include that information in the challenge.\n if not validate_nonce(self.nonce_manager, request, params):\n request.environ[_ENVKEY_STALE_NONCE] = True\n return None\n return params", "def get_headers(input_header):\n if input_header:\n header = input_header\n else:\n header = create_marconi_headers()\n\n return header", "def _make_headers() -> CaseInsensitiveDict:\n headers = CaseInsensitiveDict()\n headers['Authorization'] = f'Token {os.environ[\"TOKEN\"]}'\n headers['Content-type'] = 'application/json'\n return headers", "def generate_header() -> dict:\n\n origin = \"http://dark-world.ru\"\n accept_encoding = \"gzip, deflate\"\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)\" \\\n \"Chrome/66.0.3359.181 YaBrowser/18.6.0.2255 Yowser/2.5 Safari/537.36\"\n content_type = \"application/x-www-form-urlencoded\"\n accept = \"text/html\"\n referer = \"http://dark-world.ru\"\n x_requested_with = \"XMLHttpRequest\"\n connection = \"keep-alive\"\n\n return {\n \"Origin\": origin,\n \"Accept-Encoding\": accept_encoding,\n \"User-Agent\": user_agent,\n \"Content-Type\": content_type,\n \"Accept\": accept,\n \"Referer\": referer,\n \"X-Requested-With\": x_requested_with,\n \"Connection\": connection\n }", "def getSenSourceHeaders():\n os.getenv('SENSOURCE_ID')\n headers = {\"Content-type\": \"application/json\"}\n data = {\"grant_type\": \"client_credentials\", \"client_id\": os.getenv(\n 'SENSOURCE_ID'), \"client_secret\": os.getenv('SENSOURCE_SECRET')}\n req = requests.post(\"{0}/oauth/token\".format(senSourceURL),\n data=json.dumps(data), headers=headers)\n data = req.json()\n headers['Authorization'] = \"Bearer {0}\".format(data[\"access_token\"])\n 
return headers", "def __generate_msl_header(self, is_handshake=False, is_key_request=False, compressionalgo='GZIP', encrypt=True, esn=None):\n global esn_manifest\n self.current_message_id = self.rndm.randint(0, pow(2, 52))\n header_data = {'sender':esn_manifest, \n 'handshake':is_handshake, \n 'nonreplayable':False, \n 'capabilities':{'languages':[\n 'en-US'], \n 'compressionalgos':[], 'encoderformats':[\n 'JSON']}, \n 'recipient':'Netflix', \n 'renewable':True, \n 'messageid':self.current_message_id, \n 'timestamp':time.time()}\n if compressionalgo is not '':\n header_data['capabilities']['compressionalgos'].append(compressionalgo)\n else:\n if is_key_request:\n public_key = base64.standard_b64encode(self.rsa_key.publickey().exportKey(format='DER')).decode('utf-8')\n header_data['keyrequestdata'] = [\n {'scheme':'ASYMMETRIC_WRAPPED', \n 'keydata':{'publickey':public_key, \n 'mechanism':'JWK_RSA', \n 'keypairid':'superKeyPair'}}]\n else:\n if 'usertoken' in self.tokens:\n pass\n else:\n account = account_info\n header_data['userauthdata'] = {'scheme':'EMAIL_PASSWORD', \n 'authdata':{'email':account['email'], \n 'password':account['password']}}\n return json.dumps(header_data)", "def __updater_headers(self, path, req_from_updater):\n try:\n self.logger.info(\"Request from account-updater\")\n info = self.get_cont_stat(path, req_from_updater)\n if not isinstance(info, types.DictType):\n raise info()\n headers = HeaderKeyDict({\n 'X-Container-Object-Count': info['object_count'],\n 'X-Container-Bytes-Used': info['bytes_used'],\n 'X-DELETE-Timestamp': info['delete_timestamp'],\n 'X-PUT-Timestamp': info['put_timestamp'],\n 'X-Container' : info['container']\n })\n return headers\n except HTTPException as error:\n self.logger.exception(error)\n return error.status_int\n except Exception as err:\n self.logger.exception(err)\n return HTTP_INTERNAL_SERVER_ERROR", "def get_headers(self):\n # Creating headers.\n headers = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, sdch, br',\n 'accept-language': 'en-GB,en;q=0.8,en-US;q=0.6,ml;q=0.4',\n 'cache-control': 'max-age=0',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}\n return headers" ]
[ "0.61025995", "0.5901304", "0.5791251", "0.5778743", "0.56111044", "0.5487719", "0.5440298", "0.54147327", "0.53871363", "0.53820187", "0.536679", "0.5350175", "0.53226167", "0.53192484", "0.5319032", "0.52954966", "0.5290292", "0.52816975", "0.5227853", "0.5152028", "0.5132391", "0.512731", "0.5117923", "0.5099317", "0.50976795", "0.50915277", "0.50900763", "0.5083999", "0.50818247", "0.5080655" ]
0.74318314
0
Extract srphmacauth parameters from the request. This method extracts srphmacauth parameters from the Authorization header and returns them as a dict. If they are missing then None is returned.
def _get_unvalidated_auth_params(self, request):
    try:
        params = parse_authz_header(request)
    except ValueError:
        params = None
    if params is None:
        return None
    if params["scheme"].lower() != "srp-hmac":
        return None
    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_auth_params(self, request):\n params = self._get_unvalidated_auth_params(request)\n if params is None:\n return None\n # Check that they're valid srp-hmac-auth parameters.\n if not validate_parameters(params, self.realm):\n return None\n # Check that the digest is applied to the correct URI.\n if not validate_uri(request, params):\n return None\n # Check that the provided nonce is valid.\n # If this looks like a stale request, mark it in the request\n # so we can include that information in the challenge.\n if not validate_nonce(self.nonce_manager, request, params):\n request.environ[_ENVKEY_STALE_NONCE] = True\n return None\n return params", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def _identify_mac(self, request):\n params = parse_authz_header(request, None)\n if params is None:\n return None\n if params.get(\"scheme\") != \"MAC\":\n return None\n # Check that various parameters are as expected.\n token = params.get(\"id\")\n if token is None:\n msg = \"missing MAC id\"\n return self._respond_unauthorized(request, msg)\n # Check the timestamp and nonce for freshness or reuse.\n # TODO: the spec requires us to adjust for per-client clock skew.\n try:\n timestamp = int(params[\"ts\"])\n except (KeyError, ValueError):\n msg = \"missing or malformed MAC timestamp\"\n return self._respond_unauthorized(request, msg)\n nonce = params.get(\"nonce\")\n if nonce is None:\n msg = \"missing MAC nonce\"\n return self._respond_unauthorized(request, msg)\n if not self.nonce_manager.is_fresh(token, timestamp, nonce):\n msg = \"MAC has stale token or nonce\"\n return self._respond_unauthorized(request, msg)\n # OK, they seem like sensible MAC paramters.\n return params", "def parse_authorization_header(header):\n \n re_header = re.compile('UsernameToken\\s+Username=\"(.*)\",\\s+PasswordDigest=\"(.*)\",\\s+Created=\"(.*)\",\\s+Nonce=\"(.*)\"')\n \n match = re_header.match(header)\n if match:\n return match.groups()\n return None", "def parse_auth(header):\r\n try:\r\n method, data = header.split(None, 1)\r\n if method.lower() == 'basic':\r\n #TODO: Add 2to3 save base64[encode/decode] functions.\r\n user, pwd = touni(base64.b64decode(tob(data))).split(':',1)\r\n return user, pwd\r\n except (KeyError, ValueError):\r\n return None", "def extract_basic_auth(auth_header):\n parts = auth_header.split(\" \")\n if parts[0] != \"Basic\" or len(parts) < 2:\n return None, None\n\n auth_parts = base64.b64decode(parts[1]).split(b\":\")\n if len(auth_parts) < 2:\n return None, None\n return auth_parts[0].decode(), auth_parts[1].decode()", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def extract_basic_auth(auth_header):\n parts = auth_header.split(' ')\n if parts[0] != 'Basic' or len(parts) < 2:\n return None, None\n\n auth_parts = base64.b64decode(parts[1]).split(b':')\n if len(auth_parts) < 2:\n return None, None\n return auth_parts[0].decode(), auth_parts[1].decode()", "def get_auth_headers(self) -> Dict:\n if self.__access_token:\n return {\n 'Authorization': self.__access_token,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n elif self.__license_key and self.__email and self.__password:\n return 
{\n 'Authorization': self.__calculate_basic_auth_value(),\n 'License-Key': self.__license_key,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n else:\n raise ValueError('Credentials are not configured')", "def parse_basic_auth_header(self, request: Request) -> Tuple[str, str]:\n token = self.get_auth_token(request, \"Basic\")\n try:\n username, password = base64.b64decode(token.encode('ascii')).decode('utf-8').split(':')\n except Exception:\n raise TokenInvalidException\n return username, password", "def get_headers(self, session, **kwargs):\n token = self.get_token(session)\n\n if not token:\n return None\n\n return {IDENTITY_AUTH_HEADER_NAME: token}", "def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}", "def process_header_request(self, request, http_s_obj):\n response_dict = {}\n data = request.split(\"\\r\\n\\r\\n\")\n header_info = data[0].split(\"\\r\\n\")\n headers = self.updateheader(header_info, http_s_obj)\n response_dict.update({'type': header_info[0].split()[0]})\n response_dict.update({'headers': headers})\n body = data[1]\n response_dict.update({'data': body})\n path = header_info[0].split()[1]\n if path.find('?') != -1:\n split_sym = '?'\n if path.find('&') != -1:\n split_sym = '&'\n try:\n req = path.split(split_sym)\n path = req[0]\n query = req[1]\n except Exception as e:\n query = ''\n response_dict.update({'path': path})\n response_dict.update({'query': query})\n\n return response_dict", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def decode_auth_headers(request: Request) -> Tuple[str, str]:\n authorization = request.headers.get(\"Authorization\", \"\")\n\n headers = CaseInsensitiveDict({\"WWW-Authenticate\": \"Basic\"})\n\n scheme, param = get_authorization_scheme_param(authorization)\n if not authorization or scheme.lower() != \"basic\":\n raise InvalidClientError(request=request, headers=headers)\n\n try:\n data = b64decode(param).decode(\"ascii\")\n except (ValueError, UnicodeDecodeError, binascii.Error):\n raise InvalidClientError(request=request, headers=headers)\n\n client_id, separator, client_secret = data.partition(\":\")\n\n if not separator:\n raise InvalidClientError(request=request, headers=headers)\n\n return client_id, client_secret", "def get_headers(self,\n method,\n url,\n params) -> Dict[str, Any]:\n payload = self.generate_payload(method, url, params)\n headers = {\n \"Authorization\": f\"HS256 {payload}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n return headers", "def _headers(helper):\n return {\n 'Authorization': 'Splunk {0}'.format(\n helper.context_meta['session_key'])}", "def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }", "def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}", "def parse_authz_header(request, *default):\n # This outer try-except catches ValueError and\n # turns it into return-default if necessary.\n try:\n # Grab the auth header from the request, if any.\n 
authz = request.environ.get(\"HTTP_AUTHORIZATION\")\n if authz is None:\n raise ValueError(\"Missing auth parameters\")\n scheme, kvpairs_str = authz.split(None, 1)\n # Split the parameters string into individual key=value pairs.\n # In the simple case we can just split by commas to get each pair.\n # Unfortunately this will break if one of the values contains a comma.\n # So if we find a component that isn't a well-formed key=value pair,\n # then we stitch bits back onto the end of it until it is.\n kvpairs = []\n if kvpairs_str:\n for kvpair in kvpairs_str.split(\",\"):\n if not kvpairs or _AUTH_PARAM_RE.match(kvpairs[-1]):\n kvpairs.append(kvpair)\n else:\n kvpairs[-1] = kvpairs[-1] + \",\" + kvpair\n if not _AUTH_PARAM_RE.match(kvpairs[-1]):\n raise ValueError('Malformed auth parameters')\n # Now we can just split by the equal-sign to get each key and value.\n params = {\"scheme\": scheme}\n for kvpair in kvpairs:\n (key, value) = kvpair.strip().split(\"=\", 1)\n # For quoted strings, remove quotes and backslash-escapes.\n if value.startswith('\"'):\n value = value[1:-1]\n if _UNESC_QUOTE_RE.search(value):\n raise ValueError(\"Unescaped quote in quoted-string\")\n value = _ESCAPED_CHAR.sub(lambda m: m.group(0)[1], value)\n params[key] = value\n return params\n except ValueError:\n if default:\n return default[0]\n raise", "def get_headers(self) -> Dict[str, str]:\n header_dict = self.generate_auth_dict()\n\n return {\n \"Authorization\": \"Basic \" + header_dict[\"signature\"],\n \"Content-Type\": 'application/json',\n }", "def getHeaders():\n userid = rhev_settings.USERNAME\n passwd = rhev_settings.PASSWORD\n # base64.encodestring adds trailing \\n. \n auth = base64.encodestring(\"%s:%s\" % (userid, passwd)).rstrip(\"\\n\")\n headers = {\"Content-Type\": \"application/xml\",\n \"Accept\": \"application/xml\",\n \"Accept-Charset\": \"utf-8\",\n \"Authorization\" : (\"Basic %s\" % auth)}\n return headers", "def get_headers(req):\n user = req.headers.get('X-User-ID', None)\n tenant = req.headers.get('X-Tenant-ID', None)\n return user, tenant", "def getBasicAuthorization(self):\n header = self.request.getHeader(\"Authorization\")\n if header:\n try:\n if header.lower().startswith(\"basic \"):\n authstr = base64.b64decode(header[6:]).split(\":\")\n if len(authstr) == 1:\n return None\n return authstr[0], \":\".join(authstr[1:])\n except:\n return None\n return None", "def test_prepare_mac_header(self):\n self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)\n self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)\n self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)", "def _parse_challenge(header):\n # type: (str) -> Dict[str, str]\n ret = {}\n if header.startswith(BEARER):\n challenge_params = header[len(BEARER) + 1 :]\n\n matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)\n _clean(matches)\n ret = {}\n for i in range(0, len(matches), 2):\n ret[matches[i]] = matches[i + 1]\n\n return ret", "def get_auth_headers(key):\n return {\n 'Content-Type': 'Application/JSON',\n 'Authorization': key\n }", "def get_auth_headers():\n\n auth_type = \"Basic\"\n if request.headers.get('UseXBasic'):\n auth_type = \"XBasic\"\n\n return {\n 'WWW-Authenticate': '%s realm=\"Login Required\"' % auth_type\n }", "def get_auth_headers(self):\n # type: () -> AnyHeadersContainer\n headers = {}\n if self.request and self.request.auth_headers:\n headers = self.request.auth_headers.copy()\n return CaseInsensitiveDict(headers)" ]
[ "0.6592601", "0.62808114", "0.6254532", "0.6068169", "0.6067721", "0.60282284", "0.5955428", "0.5917826", "0.5900995", "0.58761734", "0.5870564", "0.58591247", "0.58245647", "0.5797018", "0.5796605", "0.57675487", "0.5755379", "0.57153034", "0.56880015", "0.5687705", "0.5673088", "0.5665712", "0.5641", "0.56308883", "0.5598953", "0.5566945", "0.5508163", "0.55015343", "0.54980767", "0.548812" ]
0.7045831
0
Extract srphmacauth parameters from the request. This method extracts srphmacauth parameters from the Authorization header and returns them as a dict. If they are missing then None is returned.
def _get_auth_params(self, request):
    params = self._get_unvalidated_auth_params(request)
    if params is None:
        return None
    # Check that they're valid srp-hmac-auth parameters.
    if not validate_parameters(params, self.realm):
        return None
    # Check that the digest is applied to the correct URI.
    if not validate_uri(request, params):
        return None
    # Check that the provided nonce is valid.
    # If this looks like a stale request, mark it in the request
    # so we can include that information in the challenge.
    if not validate_nonce(self.nonce_manager, request, params):
        request.environ[_ENVKEY_STALE_NONCE] = True
        return None
    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_unvalidated_auth_params(self, request):\n try:\n params = parse_authz_header(request)\n except ValueError:\n params = None\n if params is None:\n return None\n if params[\"scheme\"].lower() != \"srp-hmac\":\n return None\n return params", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def parse_authorization_header(header):\n \n re_header = re.compile('UsernameToken\\s+Username=\"(.*)\",\\s+PasswordDigest=\"(.*)\",\\s+Created=\"(.*)\",\\s+Nonce=\"(.*)\"')\n \n match = re_header.match(header)\n if match:\n return match.groups()\n return None", "def _identify_mac(self, request):\n params = parse_authz_header(request, None)\n if params is None:\n return None\n if params.get(\"scheme\") != \"MAC\":\n return None\n # Check that various parameters are as expected.\n token = params.get(\"id\")\n if token is None:\n msg = \"missing MAC id\"\n return self._respond_unauthorized(request, msg)\n # Check the timestamp and nonce for freshness or reuse.\n # TODO: the spec requires us to adjust for per-client clock skew.\n try:\n timestamp = int(params[\"ts\"])\n except (KeyError, ValueError):\n msg = \"missing or malformed MAC timestamp\"\n return self._respond_unauthorized(request, msg)\n nonce = params.get(\"nonce\")\n if nonce is None:\n msg = \"missing MAC nonce\"\n return self._respond_unauthorized(request, msg)\n if not self.nonce_manager.is_fresh(token, timestamp, nonce):\n msg = \"MAC has stale token or nonce\"\n return self._respond_unauthorized(request, msg)\n # OK, they seem like sensible MAC paramters.\n return params", "def parse_auth(header):\r\n try:\r\n method, data = header.split(None, 1)\r\n if method.lower() == 'basic':\r\n #TODO: Add 2to3 save base64[encode/decode] functions.\r\n user, pwd = touni(base64.b64decode(tob(data))).split(':',1)\r\n return user, pwd\r\n except (KeyError, ValueError):\r\n return None", "def extract_basic_auth(auth_header):\n parts = auth_header.split(\" \")\n if parts[0] != \"Basic\" or len(parts) < 2:\n return None, None\n\n auth_parts = base64.b64decode(parts[1]).split(b\":\")\n if len(auth_parts) < 2:\n return None, None\n return auth_parts[0].decode(), auth_parts[1].decode()", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def extract_basic_auth(auth_header):\n parts = auth_header.split(' ')\n if parts[0] != 'Basic' or len(parts) < 2:\n return None, None\n\n auth_parts = base64.b64decode(parts[1]).split(b':')\n if len(auth_parts) < 2:\n return None, None\n return auth_parts[0].decode(), auth_parts[1].decode()", "def get_auth_headers(self) -> Dict:\n if self.__access_token:\n return {\n 'Authorization': self.__access_token,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n elif self.__license_key and self.__email and self.__password:\n return {\n 'Authorization': self.__calculate_basic_auth_value(),\n 'License-Key': self.__license_key,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n else:\n raise ValueError('Credentials are not configured')", "def parse_basic_auth_header(self, request: Request) -> Tuple[str, str]:\n token = 
self.get_auth_token(request, \"Basic\")\n try:\n username, password = base64.b64decode(token.encode('ascii')).decode('utf-8').split(':')\n except Exception:\n raise TokenInvalidException\n return username, password", "def get_headers(self, session, **kwargs):\n token = self.get_token(session)\n\n if not token:\n return None\n\n return {IDENTITY_AUTH_HEADER_NAME: token}", "def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}", "def process_header_request(self, request, http_s_obj):\n response_dict = {}\n data = request.split(\"\\r\\n\\r\\n\")\n header_info = data[0].split(\"\\r\\n\")\n headers = self.updateheader(header_info, http_s_obj)\n response_dict.update({'type': header_info[0].split()[0]})\n response_dict.update({'headers': headers})\n body = data[1]\n response_dict.update({'data': body})\n path = header_info[0].split()[1]\n if path.find('?') != -1:\n split_sym = '?'\n if path.find('&') != -1:\n split_sym = '&'\n try:\n req = path.split(split_sym)\n path = req[0]\n query = req[1]\n except Exception as e:\n query = ''\n response_dict.update({'path': path})\n response_dict.update({'query': query})\n\n return response_dict", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def decode_auth_headers(request: Request) -> Tuple[str, str]:\n authorization = request.headers.get(\"Authorization\", \"\")\n\n headers = CaseInsensitiveDict({\"WWW-Authenticate\": \"Basic\"})\n\n scheme, param = get_authorization_scheme_param(authorization)\n if not authorization or scheme.lower() != \"basic\":\n raise InvalidClientError(request=request, headers=headers)\n\n try:\n data = b64decode(param).decode(\"ascii\")\n except (ValueError, UnicodeDecodeError, binascii.Error):\n raise InvalidClientError(request=request, headers=headers)\n\n client_id, separator, client_secret = data.partition(\":\")\n\n if not separator:\n raise InvalidClientError(request=request, headers=headers)\n\n return client_id, client_secret", "def get_headers(self,\n method,\n url,\n params) -> Dict[str, Any]:\n payload = self.generate_payload(method, url, params)\n headers = {\n \"Authorization\": f\"HS256 {payload}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n return headers", "def _headers(helper):\n return {\n 'Authorization': 'Splunk {0}'.format(\n helper.context_meta['session_key'])}", "def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}", "def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }", "def parse_authz_header(request, *default):\n # This outer try-except catches ValueError and\n # turns it into return-default if necessary.\n try:\n # Grab the auth header from the request, if any.\n authz = request.environ.get(\"HTTP_AUTHORIZATION\")\n if authz is None:\n raise ValueError(\"Missing auth parameters\")\n scheme, kvpairs_str = authz.split(None, 1)\n # Split the parameters string into individual key=value pairs.\n # In the simple case we can just split by commas to get each pair.\n # Unfortunately this will break if one of the values contains a comma.\n # So if we find 
a component that isn't a well-formed key=value pair,\n # then we stitch bits back onto the end of it until it is.\n kvpairs = []\n if kvpairs_str:\n for kvpair in kvpairs_str.split(\",\"):\n if not kvpairs or _AUTH_PARAM_RE.match(kvpairs[-1]):\n kvpairs.append(kvpair)\n else:\n kvpairs[-1] = kvpairs[-1] + \",\" + kvpair\n if not _AUTH_PARAM_RE.match(kvpairs[-1]):\n raise ValueError('Malformed auth parameters')\n # Now we can just split by the equal-sign to get each key and value.\n params = {\"scheme\": scheme}\n for kvpair in kvpairs:\n (key, value) = kvpair.strip().split(\"=\", 1)\n # For quoted strings, remove quotes and backslash-escapes.\n if value.startswith('\"'):\n value = value[1:-1]\n if _UNESC_QUOTE_RE.search(value):\n raise ValueError(\"Unescaped quote in quoted-string\")\n value = _ESCAPED_CHAR.sub(lambda m: m.group(0)[1], value)\n params[key] = value\n return params\n except ValueError:\n if default:\n return default[0]\n raise", "def get_headers(self) -> Dict[str, str]:\n header_dict = self.generate_auth_dict()\n\n return {\n \"Authorization\": \"Basic \" + header_dict[\"signature\"],\n \"Content-Type\": 'application/json',\n }", "def getHeaders():\n userid = rhev_settings.USERNAME\n passwd = rhev_settings.PASSWORD\n # base64.encodestring adds trailing \\n. \n auth = base64.encodestring(\"%s:%s\" % (userid, passwd)).rstrip(\"\\n\")\n headers = {\"Content-Type\": \"application/xml\",\n \"Accept\": \"application/xml\",\n \"Accept-Charset\": \"utf-8\",\n \"Authorization\" : (\"Basic %s\" % auth)}\n return headers", "def get_headers(req):\n user = req.headers.get('X-User-ID', None)\n tenant = req.headers.get('X-Tenant-ID', None)\n return user, tenant", "def getBasicAuthorization(self):\n header = self.request.getHeader(\"Authorization\")\n if header:\n try:\n if header.lower().startswith(\"basic \"):\n authstr = base64.b64decode(header[6:]).split(\":\")\n if len(authstr) == 1:\n return None\n return authstr[0], \":\".join(authstr[1:])\n except:\n return None\n return None", "def test_prepare_mac_header(self):\n self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)\n self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)\n self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)", "def _parse_challenge(header):\n # type: (str) -> Dict[str, str]\n ret = {}\n if header.startswith(BEARER):\n challenge_params = header[len(BEARER) + 1 :]\n\n matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)\n _clean(matches)\n ret = {}\n for i in range(0, len(matches), 2):\n ret[matches[i]] = matches[i + 1]\n\n return ret", "def get_auth_headers(key):\n return {\n 'Content-Type': 'Application/JSON',\n 'Authorization': key\n }", "def get_auth_headers():\n\n auth_type = \"Basic\"\n if request.headers.get('UseXBasic'):\n auth_type = \"XBasic\"\n\n return {\n 'WWW-Authenticate': '%s realm=\"Login Required\"' % auth_type\n }", "def get_auth_headers(self):\n # type: () -> AnyHeadersContainer\n headers = {}\n if self.request and self.request.auth_headers:\n headers = self.request.auth_headers.copy()\n return CaseInsensitiveDict(headers)" ]
[ "0.70454544", "0.62824553", "0.625724", "0.6068481", "0.6067896", "0.60297483", "0.5957361", "0.5918057", "0.5902953", "0.5877564", "0.5871777", "0.58604544", "0.58254915", "0.57967126", "0.57958364", "0.57685727", "0.57565445", "0.5715781", "0.5688506", "0.5688009", "0.56737", "0.56668675", "0.5642092", "0.5630442", "0.56010336", "0.55672556", "0.5508239", "0.55021536", "0.5499112", "0.5489368" ]
0.6592623
1
Obtain the password verifier data to use for the given user. This method returns a tuple (algorithm, salt, verifier) giving the necessary information to verify the user's password. If no information is available for the user then a tuple of Nones is returned.
def _get_verifier(self, username):
    # If we have a get_verifier callback, use it directly.
    if self.get_verifier is not None:
        verifier = self.get_verifier(username)
        if verifier is not None and verifier[0] is not None:
            return verifier
    # Otherwise, we need to calculate it from the password.
    if self.get_password is not None:
        password = self.get_password(username)
        if password is not None:
            algorithm = "SRP-1024-SHA1"
            salt = hashlib.sha1(username + self.realm).hexdigest()[:8]
            verifier = calculate_verifier({
                "username": username,
                "algorithm": algorithm,
                "salt": salt,
            }, password)
            return (algorithm, salt, verifier)
    # If that didn't work out, they have no verifier.
    return (None, None, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_verified_password(self):\n raise NotImplementedError('get_verified_password')", "def get_verified_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password-again')", "def check_password() -> tuple:\n user_data: dict = request.get_json()\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n password: str = user_data.get(\"password\")\n\n return user_view.check_password(organization_id=organization_id, uid=uid, password=password)", "def makeVerifier(username, password, bits):\r\n usernameBytes = bytearray(username, \"utf-8\")\r\n passwordBytes = bytearray(password, \"utf-8\")\r\n return mathtls.makeVerifier(usernameBytes, passwordBytes, bits)", "def verify_login(user, password):\n q = f\"SELECT password_hash, salt, UserID FROM Users WHERE Users.username = (%s) \"\n result = select_query(q, (user))\n if not result:\n return 'no_user'\n new_hash = create_hash(password, result[0][1])\n print(new_hash)\n if new_hash['hash'] == result[0][0]:\n return ['valid', result[0][2]]\n else:\n return 'invalid'", "def check_password(self, user, password):\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n if not self.check_prereqs():\n return None\n\n query=self.create_query(self.sql_all_users_query+\" WHERE %(username_field)s='%(username)s'\",{'password_field':self.sql_password_field,'username_field':self.sql_username_field,'username':user})\n self.log.debug(\"sqlflexibleauthstore: check_password: %s\" % (query,))\n cursor.execute(query)\n\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n self.log.debug(\"sqlflexibleauthstore: check_password: retrieved hash from the database\")\n dictrow=dict(zip(desc,row))\n hash=dictrow[self.sql_password_field]\n return self.hash_method.check_hash(user,password, hash)\n return None", "def password(cls):\n return User.CryptComparator(cls.password_hashed)", "def get_verification_code(self, user_id, verify_type, secret):\n user = self.get(user_id, raise_error=True)\n code_hash = hmac.new(secret)\n code_hash.update(str(user_id))\n code_hash.update(str(user.user_name))\n code_hash.update(str(verify_type))\n return code_hash.hexdigest()", "def get_password_from_db(user_info):\n collection = get_collection(\"user\")\n user_from_db = collection.find_one({'name': user_info['name']})\n return user_from_db.get('password')", "def get_passwd(self):\n if self.__password:\n aes_cipher = AESCipher()\n return aes_cipher.decrypt(self.__password, self.__aes_key)", "def getUserInfo(self, user):\n return pwd.getpwnam(user)[2:4]", "def get_verifier():\n return get_current_registry().getUtility(IBrowserIdVerifier)", "def get_static_user_data():\r\n import os\r\n\r\n import yaml\r\n from legion_test.profiler_loader import CREDENTIAL_SECRETS_ENVIRONMENT_KEY\r\n secrets = os.getenv(CREDENTIAL_SECRETS_ENVIRONMENT_KEY)\r\n if not secrets:\r\n raise Exception(\r\n 'Cannot get secrets - {} env variable is not set'.format(CREDENTIAL_SECRETS_ENVIRONMENT_KEY))\r\n\r\n if not os.path.exists(secrets):\r\n raise Exception('Cannot get secrets - file not found {}'.format(secrets))\r\n\r\n with open(secrets, 'r') as stream:\r\n data = yaml.load(stream)\r\n\r\n static_user = data['dex']['config']['staticPasswords'][0]\r\n return {\"login\": static_user['email'], \"password\": static_user['password']}", "def get_password_hash(self, username):\n raise NotImplementedError()", "def _get_user_password(self):\n return self.__user_password", "def check_pass(self, user, app):\r\n infos = self.curs.execute(f\"SELECT login, 
password FROM {user} WHERE application = '{app}'\")\r\n return list(infos)[0]", "def username_password(posted_data) -> tuple:\n username = posted_data[USERNAME]\n password = posted_data[PASSWORD]\n return username, password", "def get_authentication_data():\n\n sigrhe_login = config_parser.get(\"sigrhe\", \"login\")\n sigrhe_password = config_parser.get(\"sigrhe\", \"password\")\n\n return sigrhe_login, sigrhe_password", "def SecondPart():\n return passwordChecker(data)", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def verify_password(provided_password, stored_password):\n secret_key = current_app.config.get('SECRET_KEY')\n return sha256_crypt.verify(provided_password+secret_key, stored_password)", "def verify_password(stored_passwd, provided_passwd):\n salt = stored_passwd[:64]\n stored_password = stored_passwd[64:]\n pwdhash = hashlib.pbkdf2_hmac(\n 'sha512', provided_passwd.encode('utf-8'), salt.encode('ascii'), 100000\n )\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def decode_username_and_password():\n try:\n # cherrypy.log.error(\"decoding username and password\")\n user_name = str(base64_decode(cherrypy.request.json[\"user_name\"]).decode())\n password = str(base64_decode(cherrypy.request.json[\"password\"]).decode())\n except Exception as e:\n cherrypy.log.error(str(e))\n # cherrypy.log.error(\"username and password could not be decoded\")\n cherrypy.log.error(\"slycat-standard-authentication.py authenticate\", \"cherrypy.HTTPError 400\")\n raise cherrypy.HTTPError(400)\n return user_name, password", "def __hash_new_password(password: str) -> Tuple[bytes, bytes]:\n salt = os.urandom(16)\n pw_hash = hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n return salt, pw_hash", "def authenticate_userpass():\n return _userpwd_auth(current_app.config.get('VAULT_AUTH_PATH', 'userpass'))", "def get_password_hash(password):\n\n return pwd_context.hash(password)", "def get_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password')", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def test_user_hash_with_salt(self):\n self.assertEqual(get_user_hash(\"johndoe\", salt=\"jane\").hex()[:6], \"fb0bf4\")", "def verify_password(stored_password, provided_password):\n #print(provided_password)\n salt = stored_password[:64]\n stored_password = stored_password[64:]\n pwdhash = hashlib.pbkdf2_hmac('sha512', \n provided_password.encode('utf-8'), \n salt.encode('ascii'), \n 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n #print(pwdhash)\n return pwdhash == stored_password" ]
[ "0.6216303", "0.61528856", "0.59387445", "0.5666319", "0.548512", "0.54241", "0.5317666", "0.5284316", "0.52637374", "0.52235824", "0.5173144", "0.5169702", "0.51583606", "0.51457644", "0.5130701", "0.511049", "0.5103841", "0.50647473", "0.5032563", "0.5018214", "0.50169563", "0.49701485", "0.49355972", "0.49354175", "0.4935058", "0.4929833", "0.49163437", "0.49008703", "0.4891657", "0.48869383" ]
0.72366863
0
Get the serverside private key for a given nonce.
def _get_privkey(self, nonce):
    privkey = self.nonce_manager.get_prandom_bytes(nonce, 32)
    return int_from_bytes(privkey)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def find_private_key(self):\n\t\tp, q = self.find_hidden_primes()\n\t\tself.private_key = self.calculate_private_key(p, q)\n\t\treturn self.private_key", "def private_key(self):\n return self.__get_option('private_key')", "def private_key(self, seed: str) -> str:\n return nanopy.deterministic_key(seed, self.account_index)[0]", "def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")", "def get_private_key(self):\n return self._private_key", "def get_private_key(self) -> str:\n\t\treturn self._privateKey", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def get_private_key(self, address58: str) -> 'EllipticCurvePrivateKey':\n return self.keys[address58]", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def get_private_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/private_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']", "def computeSessionKey(client_pub, server_secret, p):\r\n return expMod(client_pub, server_secret, p)", "def private_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"private_key\")", "def poly1305_key_gen(key: bytes, nonce: bytes) -> bytes:\n\n poly = ChaCha(key, nonce)\n return poly.encrypt(bytes(32))", "def _GetServerKey(self, peer_id):\n return hashlib.sha224(peer_id + self.network_id).hexdigest()", "def ecdsaPrivkey(self):\n return SigningKey.from_string(\n string=self.rawPrivkey(), curve=SECP256k1)", "def private_key(self):", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return self.load_private_key(SigningKey.generate(curve=self.curve))", "def get_private_key(self):\n# _log.debug(\"get_private_key: node_name={}\".format(self.node_name))\n with open(os.path.join(self.runtime_dir, \"private\", \"private.key\"), 'rb') as f:\n return f.read()", "def nonceRFC6979(privKey, inHash):\n # Truncate private key if too long.\n if len(privKey) > 32:\n privKey = privKey[:32]\n\n q = Curve.N\n x = privKey\n\n qlen = q.bit_length()\n holen = SHA256_SIZE\n rolen = (qlen + 7) >> 3\n bx = int2octets(x, rolen) + bits2octets(inHash, rolen)\n\n # Step B\n v = ByteArray(bytearray([1] * holen))\n\n # Step C (Go zeroes the all allocated memory)\n k = ByteArray(0, length=holen)\n\n # Step D\n k = mac(k, v + ByteArray(0x00, length=1) + bx)\n\n # Step E\n v = mac(k, v)\n\n # Step F\n k = mac(k, v + 0x01 + bx)\n\n # Step G\n v = mac(k, v)\n\n # Step H\n while True:\n # Step H1\n t = ByteArray(b\"\")\n\n # Step H2\n while len(t) * 8 < qlen:\n v = mac(k, v)\n t += v\n\n # Step H3\n secret = hashToInt(t)\n if secret >= 1 and secret < q:\n return secret\n\n k = mac(k, v + 0x00)\n v = mac(k, v)", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)" ]
[ "0.60576475", "0.5962076", "0.594449", "0.5929952", "0.5911263", "0.5911263", "0.58971477", "0.5884221", "0.58803326", "0.586463", "0.58440894", "0.5813109", "0.5810623", "0.575331", "0.575331", "0.5733055", "0.5732046", "0.5714373", "0.56808704", "0.56776005", "0.56552625", "0.56501645", "0.56481034", "0.5610307", "0.5610307", "0.5610307", "0.5610307", "0.5610307", "0.5610307", "0.5575878" ]
0.74581945
0
Include default srpauth settings into a pyramid config. This function provides a hook for pyramid to include the default settings
def includeme(config):
    # Grab the pyramid-wide settings, to look for any auth config.
    settings = config.get_settings().copy()
    # Use the settings to construct an AuthenticationPolicy.
    authn_policy = SRPAuthenticationPolicy.from_settings(settings)
    config.set_authentication_policy(authn_policy)
    # Hook up a default AuthorizationPolicy.
    # You can't have one without the other, and ACLAuthorizationPolicy is
    # usually what you want. If the app configures one explicitly then this
    # will get overridden.
    authz_policy = ACLAuthorizationPolicy()
    config.set_authorization_policy(authz_policy)
    # Add forbidden view to challenge for auth credentials.
    config.add_view(authn_policy.challenge_view,
                    context="pyramid.exceptions.Forbidden")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')", "def setup_settings():\n # pylint: disable=import-outside-toplevel\n from django.conf import settings\n import tiny_erp.settings as defaults\n\n for name in dir(defaults):\n if name.isupper() and not hasattr(settings, name):\n setattr(settings, name, getattr(defaults, name))", "def configure_who_defaults(config):\n settings = config.registry.settings\n BACKENDAUTH_DEFAULTS = {\n \"use\": \"mozsvc.user.whoauth:BackendAuthPlugin\"\n }\n for key, value in BACKENDAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.backend.\" + key, value)\n BASICAUTH_DEFAULTS = {\n \"use\": \"repoze.who.plugins.basicauth:make_plugin\",\n \"realm\": \"Sync\",\n }\n for key, value in BASICAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.basicauth.\" + key, value)\n MACAUTH_DEFAULTS = {\n \"use\": \"mozsvc.user.whoauth:SagradaMACAuthPlugin\",\n }\n for key, value in MACAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.macauth.\" + key, value)\n # If there is an auth backend, enable basicauth by default.\n # Enable macauth by default regardless, since it doesn't need a backend.\n if config.registry.get(\"auth\") is not None:\n settings.setdefault(\"who.authenticators.plugins\", \"backend macauth\")\n settings.setdefault(\"who.identifiers.plugins\", \"basicauth macauth\")\n settings.setdefault(\"who.challengers.plugins\", \"basicauth macauth\")\n else:\n settings.setdefault(\"who.authenticators.plugins\", \"macauth\")\n settings.setdefault(\"who.identifiers.plugins\", \"macauth\")\n settings.setdefault(\"who.challengers.plugins\", \"macauth\")", "def defaultConf():\n from config import lwbdUrl, userAndPass\n baseUrl = lwbdUrl\n lucidAuth = userAndPass\n return LucidSdaConfiguration(baseUrl,\n lucidAuth)", "def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION))", "def auth_config(self):\n\t\treturn {\n\t\t\t'login_url': self.uri_for('login'),\n\t\t\t'logout_url': self.uri_for('logout')\n\t\t}", "def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()", "def config():\n config_django()\n config_svisor()", "def auth_config(self):\n return {\n 'login_url': self.uri_for('login'),\n 'logout_url': self.uri_for('logout')\n }", "def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }", "def add_default_settings_config(self):\n config = {\n mconst.DEF_SETTINGNAME_default_logfilename: mconst.DEF_SETTINGVAL_default_logfilename_defaultvalue,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)", "def __init__(self):\n for name, default in self.defaults.items():\n value = getattr(django.conf.settings, name, default)\n setattr(self, name, value)", "def main(global_config, **settings):\n 
authn_policy = AuthTktAuthenticationPolicy('mysecret', timeout=30000, callback=group_finder)\n authz_policy = ACLAuthorizationPolicy()\n\n config = Configurator(settings=settings,\n root_factory = RootFactory,\n authentication_policy=authn_policy,\n authorization_policy=authz_policy)\n\n config.include('pyramid_jinja2')\n\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('login', '/login')\n config.add_route('logout', '/logout')\n\n config.scan()\n return config.make_wsgi_app()", "def config(settings):\n\n #T = current.T\n\n # Pre-Populate\n settings.base.prepopulate.append(\"locations/TH\")\n\n # Uncomment to restrict to specific country/countries\n settings.gis.countries.append(\"TH\")\n\n # -------------------------------------------------------------------------\n # L10n (Localization) settings\n settings.L10n.languages[\"th\"] = \"Thai\"\n # Default Language (put this in custom template if-required)\n #settings.L10n.default_language = \"th\"\n # Default timezone for users\n settings.L10n.timezone = \"Asia/Bangkok\"\n # Default Country Code for telephone numbers\n settings.L10n.default_country_code = 66\n\n settings.fin.currencies[\"THB\"] = \"Baht\"\n settings.fin.currency_default = \"THB\"", "def add_earlydefault_settings(self):\n self.add_default_settings_config()\n self.add_default_settings_aliases()", "def config(settings):\n\n #T = current.T\n\n # PrePopulate data\n settings.base.prepopulate += (\"SHARE/LK\",)\n settings.base.prepopulate_demo += (\"SHARE/Demo\",)\n\n # Finance settings\n settings.fin.currencies = {\n #\"EUR\" : \"Euros\",\n #\"GBP\" : \"Great British Pounds\",\n \"LKR\" : \"Sri Lanka Rupees\",\n \"USD\" : \"United States Dollars\",\n }\n settings.fin.currency_default = \"USD\"", "def set_lib_defaults():\n\n set_middleware_defaults()\n\n # TODO(gmann): Remove setting the default value of config policy_file\n # once oslo_policy change the default value to 'policy.yaml'.\n # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49\n DEFAULT_POLICY_FILE = 'policy.yaml'\n policy_opts.set_defaults(CONF, DEFAULT_POLICY_FILE)", "def includeme(config):\n # authentication\n auth_secret = os.environ.get('AUTH_SECRET', '')\n auth_policy = AuthTktAuthenticationPolicy(\n secret=auth_secret,\n hashalg='sha512'\n )\n config.set_authentication_policy(auth_policy)\n # authorization\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n config.set_root_factory(MyRoot)\n\n session_secret = os.environ.get('SESSION_SECRET', '')\n session_factory = SignedCookieSessionFactory(session_secret)\n config.set_session_factory(session_factory)\n config.set_default_csrf_options(require_csrf=True)", "def apply_settings(auth_info, django_settings):\r\n provider_names = auth_info.keys()\r\n provider.Registry.configure_once(provider_names)\r\n enabled_providers = provider.Registry.enabled()\r\n _set_global_settings(django_settings)\r\n _set_provider_settings(django_settings, enabled_providers, auth_info)", "def main(global_config, **settings):\n\n authn_policy = AuthTktAuthenticationPolicy('sosecreeet', callback=groupfinder, hashalg='sha512')\n authz_policy = ACLAuthorizationPolicy()\n\n config = Configurator(settings=settings, root_factory=RootFactory)\n config.set_authentication_policy(authn_policy)\n config.set_authorization_policy(authz_policy)\n\n config.include('pyramid_jinja2')\n\n config.add_static_view('static', 'static', 
cache_max_age=3600)\n\n config.add_route('home', '/') #todo: login\n\n config.add_route('paste', '/paste') #paste form\n\n config.add_route('view', '/view/{id}') #See the resulting paste\n\n config.scan()\n\n return config.make_wsgi_app()", "def patched_settings():\n settings.ENABLE_EMAIL_SUBSCRIPTIONS = False\n settings.BCRYPT_LOG_ROUNDS = 1", "def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? [{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output", "def main(global_config, **settings):\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.bind = engine\n \n authentication_policy = AuthTktAuthenticationPolicy('com.bangj.SecRet', callback=groupfinder)\n authorization_policy = ACLAuthorizationPolicy()\n\n config = Configurator(settings=settings, root_factory='hypweb:models.RootFactory')\n config.set_authentication_policy(authentication_policy)\n config.set_authorization_policy(authorization_policy)\n config.include('pyramid_beaker')\n\n session_factory = session_factory_from_settings(settings)\n config.set_session_factory(session_factory)\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('login', '/login')\n config.add_route('invite', '/invite/{id}')\n config.add_route('accounts', '/accounts')\n config.add_route('profile', '/accounts/edit')\n config.add_route('add_realm_form', '/add_realm_form')\n config.add_route('regenerate', '/accounts/regenerate')\n config.add_route('remove_from_realm', '/accounts/remove_from_realm/{id}')\n config.add_route('signout', '/signout')\n config.add_route('register', '/register')\n config.add_route('remove_account', '/accounts/remove/{id}')\n\n config.add_route('summary', '/'),\n config.add_route('interfaces', '/interfaces'),\n config.add_route('browse_domains', '/browse_domains/'),\n config.add_route('queries', '/queries'),\n config.add_route('service', '/services/{ifIndex}'),\n config.add_route('services', '/services/'),\n config.add_route('domains', '/domains/'),\n config.add_route('debug', '/debug'),\n config.add_route('feedback', '/'),\n\n config.scan()\n return config.make_wsgi_app()", "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_chameleon')\n\n config.include('velruse.providers.google_oauth2')\n config.add_google_oauth2_login_from_settings()\n config.add_subscriber(before_render, BeforeRender)\n\n my_session_factory = session_factory_from_settings(settings)\n config.set_session_factory(my_session_factory)\n\n authentication_policy = AuthTktAuthenticationPolicy('seekrit',\n callback=None, hashalg='sha512')\n authorization_policy = ACLAuthorizationPolicy()\n\n config.set_authentication_policy(authentication_policy)\n config.set_authorization_policy(authorization_policy)\n\n mongo = MongoClient(settings['db_uri'])\n db = mongo[settings['db_name']]\n 
config.registry.db_mongo = db\n config.registry.admin_list = settings['admin'].split(',')\n config.registry.upload_path = settings['upload_path']\n config.registry.news_path = settings['news_path']\n config.registry.admin_path = settings['admin_path']\n config.registry.public_path = settings['public_path']\n config.registry.dataset_path = settings['dataset_path']\n config.registry.script_path = settings['script_path']\n config.registry.download_path = settings['download_path']\n config.registry.studies_path = settings['studies_path']\n config.registry.jbrowse_path = settings['jbrowse_path']\n config.registry.base_url = settings['base_url']\n\n\n # by default we don't sniff, ever\n #config.registry.es = Elasticsearch( [settings['elastic_host']])\n #config.registry.es_db = settings['elastic_db']\n #config.registry.es.indices.create(index=settings['elastic_db'], ignore=400)\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_static_view('app', 'rgv:webapp/app')\n config.add_route('autocomplete', '/autocomplete')\n config.add_route('checkgene', '/checkgene')\n config.add_route('browser_stat','/browser_stat')\n config.add_route('d_getter', '/data_frame')\n config.add_route('file_dataset','/dataset_download/{dir}/{file}')\n config.add_route('genelevel', '/genelevel')\n config.add_route('home', '/')\n config.add_route('hmtData', '/hmtData')\n config.add_route('login', '/user/login')\n config.add_route('logged', '/user/logged')\n config.add_route('newsfeed', '/newsfeed')\n config.add_route('scData', '/scData')\n config.add_route('scDataGenes', '/scDataGenes')\n config.add_route('studyfeed', '/studyfeed')\n config.add_route('user', '/user')\n config.add_route('user_register', '/user/register')\n config.add_route('user_recover', '/user/recover')\n config.add_route('user_confirm_recover', '/user/confirm_recover')\n config.add_route('user_confirm_email', '/user/confirm_email')\n config.add_route('user_validate', '/user/validate')\n config.add_route('user_delete', '/user/delete')\n config.add_route('user_info', '/user/{id}')\n config.add_route('read_file','/browser_genelevel_init')\n config.add_route('search', '/search')\n\n\n config.scan()\n\n # automatically serialize bson ObjectId and datetime to Mongo extended JSON\n json_renderer = JSON()\n def pymongo_adapter(obj, request):\n return json_util.default(obj)\n json_renderer.add_adapter(ObjectId, pymongo_adapter)\n json_renderer.add_adapter(datetime.datetime, pymongo_adapter)\n\n config.add_renderer('json', json_renderer)\n\n return config.make_wsgi_app()", "def get_client_settings_env(**_):\r\n username = os.environ.get('SL_USERNAME')\r\n api_key = os.environ.get('SL_API_KEY')\r\n proxy = os.environ.get('https_proxy')\r\n\r\n config = {'proxy': proxy}\r\n if username and api_key:\r\n config['auth'] = BasicAuthentication(username, api_key)\r\n return config", "def set_config_defaults(config):\n new_config = config.copy()\n\n new_config.setdefault(\"window_title\", \"Materials Cloud Tool\")\n new_config.setdefault(\n \"page_title\",\n \"<PLEASE SPECIFY A PAGE_TITLE AND A WINDOW_TITLE IN THE CONFIG FILE>\",\n )\n\n new_config.setdefault(\"custom_css_files\", {})\n new_config.setdefault(\"custom_js_files\", {})\n new_config.setdefault(\"templates\", {})\n\n return new_config", "def app(request):\n settings_override = {\n 'TESTING': True,\n }\n yield settings_override", "def configure_ext_login(app):\n lm.init_app(app)\n\n @lm.user_loader\n def load_user(userid):\n \"\"\"\n Needed for flask-login.\n \"\"\"\n return 
models.User.query.get(int(userid))\n\n @app.before_request\n def set_g_user():\n g.user = current_user", "def _maybeSetDefaultAuthDomain(self):\n auth_domain = os.environ.get(\"AUTH_DOMAIN\")\n if not auth_domain:\n os.environ['AUTH_DOMAIN'] = \"appscale.com\"" ]
[ "0.6572887", "0.62719035", "0.617056", "0.608547", "0.6069364", "0.60461533", "0.6034325", "0.601316", "0.586339", "0.58631706", "0.57857096", "0.5770519", "0.571746", "0.5687391", "0.56611305", "0.56506854", "0.5604428", "0.5598433", "0.55896086", "0.5553104", "0.553698", "0.5523538", "0.550868", "0.55067945", "0.5490416", "0.5482418", "0.54702574", "0.5469005", "0.5438611", "0.54376286" ]
0.64770824
1
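(Illustrative usage sketch, not part of the dataset record above: it shows how the includeme() hook in the positive document might be pulled into an application. The distribution name "pyramid_srpauth" and the settings key are assumptions; only Configurator.include() and make_wsgi_app() are standard Pyramid API.)

    from pyramid.config import Configurator

    def main(global_config, **settings):
        # Hypothetical srpauth-related setting; the real keys are whatever
        # SRPAuthenticationPolicy.from_settings() reads in the document above.
        settings.setdefault("srpauth.realm", "example.org")

        config = Configurator(settings=settings)
        # Runs the includeme() shown above: installs the SRP authentication
        # policy, an ACLAuthorizationPolicy, and the forbidden-view challenge.
        config.include("pyramid_srpauth")   # assumed distribution name
        return config.make_wsgi_app()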
Creates a dictionary-like object representing the filesystem structure starting at the given root directory.
def get_fs_dict (
    initial_root, create_item=None, dict_cls=dict,
    dirname_filter=None, filename_filter=None,
    include_root=False, toplevel_files=True, prune_empty=False,
    file_key=None,
):
    # TODO(could-do): max_depth=N
    fsdict = dict_cls()
    get_file_key = ( lambda x: x ) if file_key is None else file_key

    for root, dict_relpath, dirnames, filenames in walk_relpath (
        initial_root, include_root=include_root, prune_empty=prune_empty,
        dirname_filter=dirname_filter, filename_filter=filename_filter
    ):
        if dict_relpath:
            dictpath = dict_relpath.split ( os.sep )
            parent = functools.reduce ( dict_cls.get, dictpath[:-1], fsdict )

            if create_item is None:
                parent [dictpath[-1]] = dict_cls.fromkeys (
                    map ( get_file_key, filenames )
                )
            else:
                parent [dictpath[-1]] = dict_cls (
                    (
                        get_file_key ( fname ),
                        create_item ( ( root + os.sep + fname ), fname, root )
                    )
                    for fname in filenames
                )

        elif not toplevel_files:
            pass

        elif create_item is None:
            for fname in filenames:
                fsdict [get_file_key(fname)] = None

        else:
            for fname in filenames:
                fsdict [get_file_key(fname)] = create_item (
                    ( root + os.sep + fname ), fname, root
                )
    # -- end for

    return fsdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, dirname, defmode='r'):\n self.name = dirname\n self.defmode = defmode\n\n self.items = []\n\n for i in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, i)):\n self.items.append(Tree(os.path.join(dirname, i), defmode))\n\n else:\n self.items.append(open(os.path.join(dirname, i), defmode))\n\n self._dict = self.to_dict()", "def get_directory_structure(rootdir):\n rootdir = rootdir.rstrip(os.sep)\n start = rootdir.rfind(os.sep) + 1\n dir= {\"containers\": [rootdir]} \n for path, dirs, files in os.walk(rootdir):\n folders = path[start:].split(os.sep)\n\n subdir = dict.fromkeys(files)\n parent = functools.reduce(dict.get, folders[:-1], dir)\n \n config = get_container_config(path, folders, subdir)\n \n parent[folders[-1]] = {'containers': dirs}\n parent[folders[-1]].update(config)\n \n return dir", "def __create_dir_structure_file__(self):\n # | - __create_dir_structure_file__\n\n dir_structure_data = {}\n dir_structure_data[\"tree_level_labels\"] = self.tree_level_labels\n dir_structure_data[\"level_entries_dict\"] = self.level_entries_list\n # TEMP\n dir_structure_data[\"skip_dirs\"] = self.skip_dirs_lst\n\n fle_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/dir_structure.json\",\n )\n\n with open(fle_name, \"w\") as fle:\n json.dump(dir_structure_data, fle, indent=2)\n # __|", "def getDictRootedAt(self, relpath = \"\", root = \"\"):\n def getDictRootedAt(path):\n retval = {}\n opts = self.getOptionsDict(path)\n secs = self.getSections(path)\n for k in opts:\n retval[k] = opts[k]\n for i in secs:\n retval[i] = getDictRootedAt(path + \"/\" + i)\n return retval\n\n return getDictRootedAt(root + \"/\" + relpath)", "def getDictOfRoot(tree, fromNode=None):\r\n if fromNode == None:\r\n fromNode = tree.root\r\n Dict = {fromNode.name:{\"__files__\":fromNode.files}}\r\n Dict = tree.getChildren(fromNode, Dict)\r\n return Dict", "def get_dir(root_dir):\n\n dir_dict = {}\n\n for item in os.scandir(root_dir):\n item_type = \"\"\n\n if item.is_file():\n item_type = \"[FILE]\"\n elif item.is_dir():\n item_type = \"[DIR]\"\n\n dir_dict[item.name] = item_type\n\n return dir_dict", "def Init(self):\n # First iteration over all the files in root searching for symlinks and\n # non-regular files.\n seen_inodes = {}\n for basepath, _, filenames in sorted(os.walk(self._root)):\n for filename in sorted(filenames):\n full_path = os.path.join(basepath, filename)\n rel_path = full_path[len(self._root):]\n st = os.lstat(full_path)\n\n file_data = {\n 'size': st.st_size,\n }\n self._files[rel_path] = file_data\n\n # Track symlinks.\n if stat.S_ISLNK(st.st_mode):\n link_path = os.readlink(full_path)\n # lddtree's normpath handles a little more cases than the os.path\n # version. 
In particular, it handles the '//' case.\n self._symlinks[rel_path] = (\n link_path.lstrip('/') if link_path and link_path[0] == '/' else\n lddtree.normpath(os.path.join(os.path.dirname(rel_path),\n link_path)))\n file_data['deps'] = {\n 'symlink': [self._symlinks[rel_path]]\n }\n\n # Track hardlinks.\n if st.st_ino in seen_inodes:\n self._hardlinks[rel_path] = seen_inodes[st.st_ino]\n continue\n seen_inodes[st.st_ino] = rel_path", "def build_tree(path: str, ignore_dirs: Optional[Sequence[str]] = None) -> dict:\n if ignore_dirs is None:\n ignore_dirs = []\n if is_module(path):\n key = uuid.uuid4().hex\n name = os.path.splitext(os.path.basename(path))[0]\n item = {key: {\n \"name\": name,\n \"path\": os.path.abspath(path),\n \"components\": [name],\n \"type\": \"module\",\n }}\n return item\n if is_shared_object(path):\n key = uuid.uuid4().hex\n name = os.path.basename(path).partition(\".\")[0]\n return {key: {\n \"name\": name,\n \"path\": os.path.abspath(path),\n \"components\": [name],\n \"type\": \"shared_object\"\n }}\n if is_file(path):\n key = uuid.uuid4().hex\n return {key: {\n \"name\": None,\n \"path\": os.path.abspath(path),\n \"components\": [None],\n \"type\": \"file\"\n }}\n if is_directory(path):\n key = uuid.uuid4().hex\n name = os.path.basename(path)\n item = {key: {\n \"name\": name if is_package(path) else None,\n \"path\": os.path.abspath(path),\n \"components\": [name] if is_package(path) else [None],\n \"type\": \"package\" if is_package(path) else \"directory\",\n \"children\": {}\n }}\n for child in os.listdir(path):\n if child not in ignore_dirs:\n child_path = os.path.join(path, child)\n info = build_tree(child_path, ignore_dirs)\n if info:\n if \"children\" in item[key]:\n apply_tree(info, lambda x: x[\"components\"].insert(0, item[key][\"name\"]))\n item[key][\"children\"].update(info)\n return item\n return {}", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace(\"\\n\", os.linesep).encode())", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace('\\n', os.linesep))", "def __init__(self, directory):\n self.nodes = {}\n self.leaves = {}\n self.root = None\n self.directory = pathlib.Path(directory).absolute()\n self.directory.mkdir(exist_ok=True)", "def __init__(self, root, branches=None):\n self.tree_dict = {}\n self.directory = Path(root)\n self.start = str(self.directory).rfind(os.sep) + 1\n self.branches = branches\n self.get()", "def read_root():\n return {\"Hello\":\"World!\"}", "def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' 
not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict", "def default_file_hierarchy_dict():\n return {\n directory(\"include\"): {\n directory(\"with spaces\"): {\n file(\"with spaces.hpp\"): {\n namespace(\"with_spaces\"): {\n function(\"int\", \"value\"): parameters()\n }\n }\n }\n }\n }", "def process_folder(root, path=\"\"):\n myDict = {}\n if path:\n if root.cd(path):\n for key in ROOT.gDirectory.GetListOfKeys():\n filterKey(root, key, path, myDict, \"__List\")\n else:\n for key in ROOT.gDirectory.GetListOfKeys():\n mypath = ROOT.gDirectory.GetPathStatic()\n filterKey(root, key, mypath, myDict, \"\")\n ROOT.gDirectory.cd(mypath)\n return myDict", "def create_tree_hash_dict(cls, current_dir, file_path, dirs, files, ref_table):\n\n # we sort just to ensure there are no arrangement issues that could affect the hash outcome\n file_hashs = sorted([ref_table['%s/%s' % (file_path, file)]['hash'] for file in files])\n dir_hashs = sorted([ref_table['%s/%s' % (file_path, dir_name)]['hash'] for dir_name in dirs])\n\n tree_info = {}\n tree_info['path'] = file_path\n tree_info['content'], tree_info['hash'] = cls.get_tree_contents(file_path, dirs, files, ref_table)\n tree_info['type'] = 'tree'\n tree_info['name'] = current_dir\n tree_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return tree_info", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self, root_path):\n self._root = root_path\n if not os.path.exists(self._root):\n os.makedirs(self._root)", "def get_filesystem(fpath):\n filesystem = {\n \"filename\": os.path.basename(fpath),\n \"path\": fpath,\n \"size\": os.stat(fpath).st_size,\n \"symlinks\": _geospatial.get_symlinks(fpath)\n }\n\n return filesystem", "def _build_file_tree(self):\n # Build file tree with packmode and weigth info (# of file in the packmode)\n root = {\"packmode\": None, \"weight\": None, \"children\": {}}\n for filepath, packmode in self.override_packmode_map.items():\n node = root\n for part in filepath:\n node = node[\"children\"].setdefault(\n part, {\"packmode\": None, \"weight\": None, \"children\": {}}\n )\n node[\"weight\"] = 1\n node[\"packmode\"] = packmode\n return root", "def mkdicttree(self, dictH):\n for key in dictH.keys():\n if os.path.isfile(key):\n sys.exit(\"Key: {key} cannot be a file. 
\"\n \"Directories only\".format(key=key))\n elif os.path.isabs(key) and not os.path.isdir(key):\n os.makedirs(key)\n\n if not os.path.isabs(key):\n sys.exit('Got: \"{f}\", expected absolute path'.format(f=key))\n elif os.path.isabs(key) and isinstance(dictH[key], dict):\n self.traverse_dict_and_add(rootDir=key, dictH=dictH[key])", "def __init__(self, file_root):\n self.root = file_root", "def make_tree_from_dict(jsonFile, rootDir=None):\n with open(jsonFile, \"r\") as f:\n dictH = json.load(f)\n\n rootDict = engine.RootDict()\n if rootDir is not None:\n for key in dictH.keys():\n if key == '{root}':\n dictH[rootDir] = dictH[key]\n dictH.pop(\"{root}\", None)\n rootDict.mkdicttree(dictH)\n # check if rootDir isn't none and, if the dict root key is parsable, replace the key with the folderpath given in rootDir", "def __init__(self, root_path):\r\n self.root_path = root_path\r\n if not os.path.exists(root_path):\r\n os.makedirs(root_path)", "def directory_tree(self, root=None, print_value=None):\n files = (\n self.drive.files()\n .list(\n q=\"mimeType = 'application/vnd.google-apps.folder' and trashed = false\",\n corpora=\"drive\",\n spaces=\"drive\",\n fields=\"files(id, name, parents)\",\n includeItemsFromAllDrives=True,\n supportsAllDrives=self.shared_drive[0],\n driveId=self.shared_drive[1],\n )\n .execute()\n )\n file_dict = {}\n file_names = {}\n for file in files[\"files\"]:\n if file[\"parents\"][0] not in file_dict:\n file_dict[file[\"parents\"][0]] = {}\n file_dict[file[\"parents\"][0]][file[\"id\"]] = file[\"name\"]\n file_names[file[\"id\"]] = file[\"name\"]\n tree = {}\n results = []\n\n def recurse(parent_id, tree_pos):\n if len(file_dict) == 0:\n return\n if parent_id in file_dict:\n parent = file_dict[parent_id]\n for folder in parent.keys():\n tree_pos[folder] = {}\n results.append(folder)\n if len(tree_pos) > 0:\n for folder in tree_pos.keys():\n recurse(folder, tree_pos[folder])\n\n if root is not None:\n results.append(root[\"id\"])\n recurse(root[\"id\"], tree)\n elif self.shared_drive[0]:\n results.append(self.shared_drive[1])\n recurse(self.shared_drive[1], tree)\n else:\n results.append(\"root\")\n recurse(\"root\", tree)\n\n def tree_name(tree_pos, space):\n if len(tree_pos) == 0:\n return\n for id, folder in tree_pos.items():\n print(f\"{' '*space}{file_names[id]} [{id}]\")\n if len(folder) > 0:\n tree_name(tree_pos[id], space + 4)\n\n if print_value is not None:\n root_title = self.get(results[0])\n print(f\"{root_title['name']} [{root_title['id']}]\")\n tree_name(tree, 4)\n\n if root is not None:\n return results\n else:\n return tree" ]
[ "0.6521569", "0.65162444", "0.6484279", "0.64037526", "0.6266439", "0.62654215", "0.6254696", "0.6253443", "0.62238693", "0.6207021", "0.6196134", "0.6184652", "0.6175318", "0.6164307", "0.6069025", "0.6048882", "0.6022305", "0.60057586", "0.60057586", "0.60057586", "0.60057586", "0.60057586", "0.5952005", "0.5927178", "0.5909663", "0.5908858", "0.58855385", "0.5858566", "0.5843471", "0.5778058" ]
0.7043223
0
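(Illustrative usage sketch, not part of the dataset record above: it calls the get_fs_dict() shown in the positive document, assuming that function and its walk_relpath() helper are importable; the path and the create_item callback are made up.)

    import os

    def file_size(fpath, fname, root):
        # Value stored at each file leaf of the resulting dict.
        return os.path.getsize(fpath)

    tree = get_fs_dict("/etc", create_item=file_size)
    # With create_item=None every file maps to None instead; here e.g.
    # tree["ssh"]["sshd_config"] would hold that file's size in bytes.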
Generator that iterates over the content of a filesystem tree starting at source and compares it to the filesystem tree starting at dest (which doesn't have to exist). The subdir_root flag can be used to control whether source should be a subdir of dest or not (which means that walk_copy_tree (source, dest, subdir_root=True) is identical to walk_copy_tree (source, dest + os.sep + os.path.basename(source), subdir_root=False)). The items are 6-tuples (absolute path to the source directory, absolute path to the dest dir, dir path relative to the source root, list of directories, list of files, list of dirnames). The dirnames list can be modified (slice assignment) in order to affect the directories visited by os.walk(). The directories/files lists are lists of 2x2-tuples ( (abspath in source, stat in source), (abspath in dest, stat in dest) ).
def walk_copy_tree ( source, dest, subdir_root=False, **walk_kwargs ):
    source_path = os.path.abspath ( source )
    dest_path = os.path.abspath ( dest )

    get_entry = lambda path: (
        path, os.lstat ( path ) if os.path.lexists ( path ) else None
    )
    get_stat_list = lambda s, d, names: (
        [ ( get_entry ( s + name ), get_entry ( d + name ) ) for name in names ]
    )

    for root, root_rel, dirnames, filenames in walk_relpath (
        source_path, include_root=subdir_root, **walk_kwargs
    ):
        root_dest = ( dest + os.sep + root_rel if root_rel else dest )

        dirs = get_stat_list ( root + os.sep, root_dest + os.sep, dirnames )
        files = get_stat_list ( root + os.sep, root_dest + os.sep, filenames )

        yield root, root_dest, root_rel, dirs, files, dirnames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assertFileTree(self, source, tree):\n dirs_tree = [e for e in tree if not isinstance(e, str)]\n\n dirs, files = self.storage.listdir(source)\n expected_dirs = [e[0] for e in dirs_tree]\n expected_files = [e for e in tree if isinstance(e, str)]\n self.assertCountEqual(dirs, expected_dirs)\n self.assertCountEqual(files, expected_files)\n\n for folder, files in dirs_tree:\n self.assertFileTree(self.storage.join(source, folder), files)", "def copy_tree ( self,\n source_root, dest_root, overwrite=True, followlinks=False\n ):\n dodir = self.dodir\n copy_file = self.copy_file\n\n if overwrite:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )\n else:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n if dest_stat is None:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if dest_stat is None:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )", "def copytree(src, dst, overwrite=False, changed_only=True):\n assert os.path.isdir(src), \\\n (\"Source path `%s` does not name an existing directory\" % src)\n errors = []\n if not os.path.exists(dst):\n os.makedirs(dst)\n for name in os.listdir(src):\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if os.path.isdir(srcname):\n errors.extend(\n copytree(srcname, dstname, overwrite, changed_only))\n else:\n copyfile(srcname, dstname)\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, why))\n return errors", "def walk_files():\n\n # TODO: not check twice the same dir or file\n for path in config.targets:\n abs_path = os.path.join(cwd, path)\n\n if not os.path.islink(abs_path) and os.path.isfile(abs_path):\n walked.append(abs_path)\n yield abs_path\n #process_file(abs_path)\n\n if os.path.isdir(abs_path):\n walked.append(abs_path)\n for root, dirs, files in os.walk(abs_path):\n for fname in files:\n if isbackup(fname):\n continue\n abs_path = os.path.join(root, fname)\n walked.append(abs_path)\n if not os.path.islink(abs_path) and\\\n os.path.isfile(abs_path):\n base, name = os.path.split(abs_path)\n XXX, ext = os.path.splitext(name)\n\n ignored = False\n for pattern in IGNORE_FILES:\n if pattern.search(fname):\n ignored = True\n break\n\n # maybe should be merged with IGNORE_FILES?\n for regexp in config.exclude_list:\n if regexp.search(fname):\n ignored = True\n break\n\n if not ignored:\n for test_ext in config.disallow_exts:\n if test_ext == ext:\n ignored = True\n break\n\n if not ignored:\n if config.allow_exts:\n ignored = True\n for test_ext in config.allow_exts:\n if test_ext == ext:\n ignored = False\n break\n\n if not ignored:\n yield abs_path\n #process_file(abs_path)\n\n for dir in dirs[:]:\n if dir in IGNORE_DIRS:\n dirs.remove(dir)\n if dir in dirs:\n dirs.remove(dir)\n # mayb be should be merged with IGNORE_DIRS?\n else:\n for regexp in config.exclude_list:\n if regexp.search(dir):\n # This check is required\n # because several different patterns\n # could match one file 
name\n if dir in dirs:\n dirs.remove(dir)\n\n for dir in dirs:\n abs_path = os.path.join(root, dir)\n walked.append(abs_path)", "def files_from_root(root, accept):\n for (dir_path, _dir_names, file_names) in os.walk(root, followlinks=True):\n for file_name in file_names:\n if accept(file_name):\n path = os.path.join(dir_path, file_name)\n yield path", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. This directory matches one of the prune patterns.\n continue\n # We have a keeper! 
Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def _duplicate_as_linked_tree(self, source_root):\n logging.debug(\"Started traversing %s \\'s tree for file linkage and directory duplication.\" % self.directory)\n # Create the containing directory that resides within the share\n within_share_dir_path = os.path.join(self.directory, os.path.basename(source_root))\n self._makedir(within_share_dir_path)\n for root, subdirectories, files in os.walk(source_root, followlinks=True):\n share_root = root.replace(str(source_root), within_share_dir_path, 1)\n for subdir in subdirectories:\n target = os.path.join(share_root, subdir)\n self._makedir(target)\n for file in files:\n source = os.path.join(root, file)\n target = os.path.join(share_root, file)\n self._link_files(source, target)", "def _collect_entries(rootdir: str, basedir: str):\n\n files = []\n dirs = []\n\n for entry in os.listdir(os.path.join(rootdir, basedir)):\n rel_path = os.path.join(basedir, entry)\n full_path = os.path.join(rootdir, rel_path)\n isdir = os.path.isdir(full_path)\n if isdir and (rel_path in ('./.git', './.pytest_cache') or entry == '__pycache__'):\n continue\n\n st = os.stat(full_path, follow_symlinks=False)\n\n (dirs if isdir else files).append((rel_path, dict(isdir=isdir, path=rel_path, size=(0 if isdir else st.st_size),\n mode=st.st_mode, omode=f'{st.st_mode:04o}',\n mtime=int(st.st_mtime))))\n\n for rel_path, entry in sorted(dirs):\n yield entry\n yield from _collect_entries(rootdir, rel_path)\n\n for _, entry in sorted(files):\n yield entry", "def file_src_dest(self):\n yielded_dests = []\n for mgr_file in reversed(self.manager.contents):\n path = Path(mgr_file)\n for from_path in self.maybe_add_one_path(path):\n stem = from_path.relative_to(path) if path.is_dir() else path.name\n to_path = self.output_files_dir / stem\n resolved = str(to_path.resolve())\n if resolved in yielded_dests: # pragma: no cover\n self.log.debug(\"Already populated\", resolved)\n continue\n yielded_dests += [resolved]\n yield from_path, to_path", "def copy_tree(src, dst):\n if not os.path.isdir(src):\n raise Exception, \\\n \"cannot copy tree '%s': not a directory\" % src\n try:\n names = os.listdir(src)\n except os.error, (_, errstr):\n raise Exception, \\\n \"error listing files in '%s': %s\" % (src, errstr)\n\n makedirs(dst)\n\n outputs = []\n\n for n in names:\n src_name = os.path.join(src, n)\n dst_name = os.path.join(dst, n)\n\n if os.path.islink(src_name):\n link_dest = os.readlink(src_name)\n \n os.symlink(link_dest, dst_name)\n outputs.append(dst_name)\n\n elif os.path.isdir(src_name):\n outputs.extend(copy_tree(src_name, dst_name))\n else:\n copyfile(src_name, dst_name)\n \n outputs.append(dst_name)\n\n return outputs", "def files_matching(self, dt=None):\n #Use os.walk. If descend is False, only continue for matching\n #the re to this point. 
If True, compare branch to entire re but\n #walk everything\n for d in self.directories:\n for (dirpath, dirnames, filenames) in \\\n os.walk(d, topdown=True, followlinks=True):\n #dirpath is FULL DIRECTORY to this point\n relpath = dirpath[len(d) + 1:]\n if not self.descend:\n if relpath and not \\\n self.file_fmt.match(relpath, dt, 'start'):\n continue\n for i in range(-len(dirnames), 0):\n if not self.file_fmt.match(os.path.join(\n relpath, dirnames[i]), dt, 'start'):\n del dirnames[i]\n for f in filenames:\n if self.file_fmt.match(os.path.join(relpath, f), dt,\n 'end' if self.descend else None):\n yield os.path.join(dirpath, f)", "def test_filecompare(self):\n cmp = filecmp.dircmp(self.root_gold, self.root_target, ignore=[])\n self.recursive_dircmp(cmp)", "def copytree(self, name, source, dest, symlinks=False):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n args = ['--symlinks'] if symlinks else []\n self._run(name, ['copytree'] + args + [source, dest])\n self.m.path.mock_copy_paths(source, dest)", "def iter_tree(root):\n\tfor file_rel in _iter_tree_next(os.path.abspath(root), '', {}):\n\t\tyield file_rel", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def movetree(src, dst, overwrite=False, changed_only=True):\n assert os.path.isdir(src), \\\n (\"Source path `%s` does not name an existing directory\" % src)\n errors = []\n if not os.path.exists(dst):\n os.makedirs(dst)\n for name in os.listdir(src):\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if os.path.isdir(srcname):\n errors.extend(\n movetree(srcname, dstname, overwrite, changed_only))\n else:\n movefile(srcname, dstname)\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, why))\n return errors", "def 
_iter_tree_next(root_full, dir_rel, memo):\n\tdir_full = os.path.join(root_full, dir_rel)\n\tdir_real = os.path.realpath(dir_full)\n\n\t# Remember each encountered ancestor directory and its canonical\n\t# (real) path. If a canonical path is encountered more than once,\n\t# recursion has occurred.\n\tif dir_real not in memo:\n\t\tmemo[dir_real] = dir_rel\n\telse:\n\t\traise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)\n\n\tfor node in os.listdir(dir_full):\n\t\tnode_rel = os.path.join(dir_rel, node)\n\t\tnode_full = os.path.join(root_full, node_rel)\n\t\tnode_stat = os.stat(node_full)\n\n\t\tif stat.S_ISDIR(node_stat.st_mode):\n\t\t\t# Child node is a directory, recurse into it and yield its\n\t\t\t# decendant files.\n\t\t\tfor file_rel in _iter_tree_next(root_full, node_rel, memo):\n\t\t\t\tyield file_rel\n\n\t\telif stat.S_ISREG(node_stat.st_mode):\n\t\t\t# Child node is a file, yield it.\n\t\t\tyield node_rel\n\n\t# NOTE: Make sure to remove the canonical (real) path of the directory\n\t# from the ancestors memo once we are done with it. This allows the\n\t# same directory to appear multiple times. If this is not done, the\n\t# second occurance of the directory will be incorrectly interpreted as\n\t# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.\n\tdel memo[dir_real]", "def command_copytree(args):\n for srcdir in args.srcdirs:\n basename = os.path.basename(srcdir)\n destdir2 = os.path.normpath(os.path.join(args.destdir, basename))\n if os.path.exists(destdir2):\n shutil.rmtree(destdir2)\n sys.stdout.write(\"copytree: %s => %s\\n\" % (srcdir, destdir2))\n shutil.copytree(srcdir, destdir2)\n return 0", "def files_and_folders(self, depth):\n for directory in self.rarc._directories[self.directory_index:][:self.directory_count]:\n yield depth, directory\n if isinstance(directory, Folder):\n if directory.data_offset < len(self.rarc._nodes):\n node = self.rarc._nodes[directory.data_offset]\n if directory.name == \".\" or directory.name == \"..\":\n continue\n yield from node.files_and_folders(depth + 1)", "def get_files(self, include=[], exclude=[]):\r\n for (basepath, dpaths, fpaths) in os.walk(self.path, topdown=True):\r\n for subpath in dpaths + fpaths:\r\n path = os.path.join(self.chroot_path(basepath), subpath)\r\n if filter_path(path, include, exclude):\r\n yield path", "def walk(top):\n yield top\n for name in os.listdir(top):\n name = os.path.join(top, name)\n if os.path.isdir(name) and not os.path.islink(name):\n for directory in walk(name):\n yield directory", "def walk(self):\n if os.path.exists(self.folder):\n for root_path, _, f_files in os.walk(self.folder):\n yield root_path, f_files\n if not self.recursive:\n break\n else:\n print(f\"[!e] Passed folder doesn't exist. Path: {self.folder}\",\n file=sys.stdout)\n exit(0)", "def FindSources(env, dest, source, suffixes=None):\n for source_entry in env.Flatten(source):\n if type(source_entry) == str:\n # Search for matches for each source entry\n source_nodes = env.Glob(source_entry)\n else:\n # Source entry is already a file or directory node; no need to glob it\n source_nodes = [source_entry]\n for s in source_nodes:\n if str(s.__class__) == 'SCons.Node.FS.Dir':\n # Recursively search subdir. 
Since glob('*') doesn't match dot files,\n # also glob('.*').\n FindSources(env, dest, [s.abspath + '/*', s.abspath + '/.*'],\n suffixes)\n elif suffixes and s.suffix in suffixes:\n dest.add(s)", "def copytree(source, filesfilter=[\"*\"], dirs=[\"web\", \"cache\"]):\n\n destiny = hashlib.md5(flatname(source).encode(\"utf-8\")).hexdigest()\n destiny_path = os.path.join(HOME, *dirs, destiny)\n\n if os.path.exists(destiny_path):\n shutil.rmtree(destiny_path)\n shutil.copytree(source, destiny_path, ignore=allow_patterns(*filesfilter))\n\n return destiny_path", "def walk(self):\n for _root, _dirs, files in os.walk(self.root):\n for filename in files:\n if self.is_key(filename):\n yield filename", "def oh_folders(src, dest=dest):\n copytree(src, dest, ignore=ignore_patterns(*ignore_list), dirs_exist_ok=True)", "def copytree2(src, dst, symlinks=False, ignore=None):\n\tnames = os.listdir(src)\n\tcpy_err={'flag':True, 'error':None}\n\tif ignore is not None:\n\t\tignored_names = ignore(src, names)\n\telse:\n\t\tignored_names = set()\n\n\tif not os.path.exists(dst):\n\t\ttry:\n\t\t\tos.makedirs(dst)\n\t\texcept:\n\t\t\tcpy_err= {'flag':False, 'error':\"Impossible to create a directory, PLATO is accountered a problem, contact the administrator\"}\n\t\t\treturn cpy_err\n\t\n\tfor name in names:\n\t\tif name in ignored_names:\n\t\t\tcontinue\n\t\tsrcname = os.path.join(src, name)\n\t\tdstname = os.path.join(dst, name)\n\t\ttry:\n\t\t\tif symlinks and os.path.islink(srcname):\n\t\t\t\tlinkto = os.readlink(srcname)\n\t\t\t\tos.symlink(linkto, dstname)\n\t\t\telif os.path.isdir(srcname):\n\t\t\t\tcopytree(srcname, dstname, symlinks, ignore)\n\t\t\telse:\n\t\t\t\tcopy2(srcname, dstname)\n\t\t\t# XXX What about devices, sockets etc.?\n\t\texcept (IOError, os.error) as why:\n\t # errors.append((srcname, dstname, str(why)))\n\t\t\tcpy_err={'flag':False,'error':str(why)}\n\t\t\treturn cpy_err\n\t\t# catch the Error from the recursive copytree so that we can\n\t\t# continue with other files\n\t\texcept Error as err:\n\t\t\tcpy_err={'flag':False,'error':str(err.args[0])}\n\t\t\treturn cpy_err\n\t\t # errors.extend(err.args[0])\n\ttry:\n\t\tcopystat(src, dst)\n\texcept OSError as why:\n\t\t# errors.extend((src, dst, str(why)))\n\t\tcpy_err={'flag':False,'error':str(why)}\n\t\treturn cpy_err\n\t# if errors:\n\t# raise Error(errors)\n\t\n\treturn cpy_err", "def copy_directory(source, dest):\n for path, dirs, files in walk(source):\n relative_src_path = path.replace(source, \"\").lstrip(\"/\")\n abs_dest_path = join(dest, relative_src_path)\n if not exists(abs_dest_path):\n makedirs(abs_dest_path)\n for tdir in dirs:\n dest_dir = join(abs_dest_path, tdir)\n if not exists(dest_dir):\n makedirs(dest_dir)\n for tfile in files:\n src_file = join(path, tfile)\n dest_file = join(abs_dest_path, tfile)\n if islink(src_file):\n linkto = readlink(src_file)\n symlink(linkto, dest_file)\n continue\n else:\n process_file(src_file, dest_file)", "def copy_tree_to_path(src_dir, dest_dir):\n names = os.listdir(src_dir)\n\n for name in names:\n srcname = os.path.join(src_dir, name)\n destname = os.path.join(dest_dir, name)\n\n if os.path.isdir(srcname):\n shutil.copytree(srcname, destname)\n else:\n shutil.copy(srcname, destname)", "def nestedXcopy(namePatterns, sourceDir, targetDir, renameTo=None, flags=None):\n\tfor aDir in dirR.listNestedDirContainsOneOfFilesM(sourceDir, namePatterns, flags):\n\t\txcopy(namePatterns, aDir, os.path.join(targetDir, dirR._relativePathString(sourceDir, aDir)), renameTo, flags)" ]
[ "0.6292492", "0.5832197", "0.58168596", "0.5799975", "0.5763929", "0.57484084", "0.5722568", "0.56811893", "0.567231", "0.5659945", "0.56529665", "0.5623519", "0.5603917", "0.56004435", "0.5587346", "0.55472785", "0.5534416", "0.55120814", "0.54979503", "0.5481419", "0.5479654", "0.54780024", "0.5471861", "0.5458029", "0.5444585", "0.5438223", "0.54313004", "0.542351", "0.5399541", "0.5387801" ]
0.7667128
0
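(Illustrative usage sketch, not part of the dataset record above: it consumes the walk_copy_tree() generator from the positive document, assuming it and walk_relpath() are importable; the source/dest paths are placeholders.)

    import os, shutil

    for src_root, dst_root, rel, dirs, files, dirnames in walk_copy_tree("src", "backup"):
        for (src_dir, _), (dst_dir, dst_stat) in dirs:
            if dst_stat is None:            # directory missing on the dest side
                os.makedirs(dst_dir)
        for (src_file, _), (dst_file, dst_stat) in files:
            if dst_stat is None:            # file missing on the dest side
                shutil.copy2(src_file, dst_file)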
Compares the given mode with a list of r/w/x bits and creates an RWX object for it.
def from_bitmask ( cls, mode, rwx_bits ):
    return cls (
        mode & rwx_bits[0],
        mode & rwx_bits[1],
        mode & rwx_bits[2],
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_stat_mode ( cls, stat_mode ):\n return cls (\n RWX.from_bitmask ( stat_mode, cls.USR_BITS ),\n RWX.from_bitmask ( stat_mode, cls.GRP_BITS ),\n RWX.from_bitmask ( stat_mode, cls.OTH_BITS ),\n )", "def resMode(mode): \n if mode==0:\n makeMesh(r0x, r0y)\n elif mode==1:\n makeMesh(r1x, r1y)\n elif (mode==2):\n makeMesh(r2x, r2y)", "def get_bitmask ( self, rwx_bits ):\n ret = 0\n if self.readable:\n ret |= rwx_bits[0]\n\n if self.writable:\n ret |= rwx_bits[1]\n\n if self.executable:\n ret |= rwx_bits[2]\n\n return ret", "def _iswritemode(self, mode):\n\n # Currently only used to test the bz2 files.\n _writemodes = (\"w\", \"+\")\n for c in mode:\n if c in _writemodes:\n return True\n return False", "def _create_rwl(self, bb, pred_bb_rwls):\n rwl = BBReadWriteList(self, bb, *pred_bb_rwls)\n for inst in bb.insts:\n # get read & write sets of an instruction\n ird_set = inst.read_set.copy()\n iwrt_set = inst.write_set.copy()\n # data flow for simple movement instructions\n if inst.itype == InstKind.simple_mov:\n # add rw for simple move instructions\n for rd, wrt in inst.redef_list:\n rwl.add_rw(rd, wrt, inst)\n # remove redef ones from the the read/write set\n redef_rd_wrt_list = list(zip(*inst.redef_list))\n ird_set -= set(redef_rd_wrt_list[0])\n iwrt_set -= set(redef_rd_wrt_list[1])\n # add ird_set and iwrt_set to the rwl\n for rd in ird_set:\n rwl.add_rw(rd, \"\", inst)\n for wrt in iwrt_set:\n rwl.add_rw(\"#u\", wrt, inst)\n return rwl", "def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]", "def set_weight(mode, rms):\n \n if mode == 'rms':\n mrms = rms\n if mode == 'rms2':\n mrms = np.power(rms, 2)\n if mode == '1/rms':\n mrms = 1./rms\n if mode == '1/rms2':\n mrms = 1./np.power(rms, 2)\n \n return mrms", "def wrap_rw(gcalc, gobs):\n rw, scale = get_rw(gobs, gcalc, weight=None)\n return rw, scale", "def mode2wave(modes):\n # True Wavelengths\n freq = modes * rep_rate + lfc_offset # true wavelength\n waves = c.value / freq * 1e10 # magic\n return np.array(waves)", "def binning():\n def r(x):\n return 1 << (x & 7)\n\n def w(x):\n return 0x11 * (x >> 1)\n return r, w", "def create_mode_buttons(master: Widget, mode_var: IntVar) -> None:\r\n\r\n add = Radiobutton(master, text='Add', font=self.FONT_NORMAL,\r\n variable=mode_var, value=0)\r\n remove = Radiobutton(master, text='Remove', font=self.FONT_NORMAL,\r\n variable=mode_var, value=1)\r\n toggle = Radiobutton(master, text='Toggle', font=self.FONT_NORMAL,\r\n variable=mode_var, value=2)\r\n\r\n add.pack(anchor=W, padx=self.WIDGET_PAD, pady=(self.WIDGET_PAD,0))\r\n remove.pack(anchor=W, padx=self.WIDGET_PAD, pady=(self.WIDGET_PAD,0))\r\n toggle.pack(anchor=W, padx=self.WIDGET_PAD, pady=self.WIDGET_PAD)", "def merge_binary_rois(roi1, roi2):\n if (roi1.pixelSizeX 
!= roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY):\n raise ValueError('The pixel sizes of the two WeightedROI objects should match!')\n\n if roi1.pixelSizeUnit != roi2.pixelSizeUnit:\n raise ValueError('The pixel size units of the two WeightedROI objects should match!')\n\n mask1 = roi1.get_binary_mask(); mask2 = roi2.get_binary_mask(); mask3 = np.logical_or(mask1, mask2).astype(np.int8)\n\n return ROI(mask3, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit)", "def create_bit_r_format(self, instr_lst, op_func, func_code):\n rs = 0\n rt = 0\n shamt = 0\n rd = 0\n\n # if instruction has shift value\n if func_code == \"000000\" or func_code == \"000010\":\n try:\n rd = int(instr_lst[self.ops].get_nm().split('R')[1])\n rt = int(instr_lst[self.ops + 1].get_nm().split('R')[1])\n shamt = instr_lst[self.ops + 2].get_val()\n except Exception:\n pass\n\n # if function does not use rd\n elif func_code == \"011000\" or func_code == \"011010\":\n try:\n rs = int(instr_lst[self.ops].get_nm().split('R')[1])\n rt = int(instr_lst[self.ops + 1].get_nm().split('R')[1])\n except Exception:\n pass\n\n # if function only uses rd\n elif func_code == \"010010\" or func_code == \"010000\":\n try:\n rd = int(instr_lst[self.ops].get_nm().split('R')[1])\n except Exception:\n pass\n\n # if function only uses rs\n elif func_code == \"001000\":\n try:\n rs = int(instr_lst[self.ops].get_nm().split('R')[1])\n except Exception:\n pass\n\n # other arithmetic, logic functions\n else:\n try:\n rs = int(instr_lst[self.ops + 2].get_nm().split('R')[1])\n rt = int(instr_lst[self.ops + 1].get_nm().split('R')[1])\n rd = int(instr_lst[self.ops].get_nm().split('R')[1])\n except Exception:\n pass\n # format the rs, rt, rd, shamt values into 5 bits\n rs = format(rs, '#07b').split('b')[1]\n rt = format(rt, '#07b').split('b')[1]\n rd = format(rd, '#07b').split('b')[1]\n shamt = format(shamt, '#07b').split('b')[1]\n code_lst = [op_func, rs, rt, rd, shamt, func_code, \"\\n\"]\n return \" \".join(code_lst)", "def get_supported_binning_modes(self):\n all_modes=lib.is_SetBinning(self.hcam,0x8001)\n supp={\"v\":{1},\"h\":{1}}\n for mask,(d,s) in self._subsampling_modes.items():\n if all_modes&mask:\n supp[d].add(s)\n return sorted(supp[\"v\"]),sorted(supp[\"h\"])", "def cleaned_multiple_modes(mu, xv_min, xv_max, n_modes=10, ratio=0.25):\n modes = multiple_modes(mu, xv_min, xv_max, n_modes=n_modes)\n return clean_multiple_modes(mu, modes, ratio=ratio)", "def test_modes_for_course_multiple(self):\r\n mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None)\r\n mode2 = Mode(u'verified', u'Verified Certificate', 0, '', 'usd', None)\r\n set_modes = [mode1, mode2]\r\n for mode in set_modes:\r\n self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices)\r\n\r\n modes = CourseMode.modes_for_course(self.course_key)\r\n self.assertEqual(modes, set_modes)\r\n self.assertEqual(mode1, CourseMode.mode_for_course(self.course_key, u'honor'))\r\n self.assertEqual(mode2, CourseMode.mode_for_course(self.course_key, u'verified'))\r\n self.assertIsNone(CourseMode.mode_for_course(self.course_key, 'DNE'))", "def __wlst_mode_matches(self, attr_mode_string):\n return attr_mode_string == 'both' or attr_mode_string == WlstModes.from_value(self._wlst_mode).lower()", "def _random_mode(self):\n modes = ((1, 2, 3, 4, 5),\n (1, 2, 4, 5),\n (2, 3, 4),\n (1, 3, 5),\n (1, 5),\n (2, 4),\n (3,),\n ())\n return random.choice(modes)", "def special_game_modes_memory_patterns(obs, player_x, player_y):\n def 
environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n # if game mode is not normal\n if obs['game_mode'] != GameMode.Normal:\n return True\n return False\n \n def get_memory_patterns(obs, player_x, player_y):\n \"\"\" get list of memory patterns \"\"\"\n memory_patterns = [\n corner,\n free_kick,\n goal_kick,\n kick_off,\n penalty,\n throw_in,\n idle\n ]\n return memory_patterns\n \n return {\"environment_fits\": environment_fits, \"get_memory_patterns\": get_memory_patterns}", "def renderWindows(XWindow, YWindow, occurrencyWindow, windowModel = False):\n\t\tdef renderDoors(XDoor, YDoor, occurrencyDoor, doorModel = False):\n\t\t\t\"\"\"\n\t\t\trenderWindows accept the door's cells and the occurrency, and optionally a door generating function \n\t\t\t\"\"\"\n\t\t\tdef renderRoof(vertices, pitchAngle, height):\n\t\t\t\t\"\"\"\n\t\t\t\trenderRoof accept the vertices of the base roof, a pitch angle and the desired height \n\t\t\t\tof the roof\n\t\t\t\t\"\"\"\n\t\t\t\tdef renderLadder(ladderHeight, interStep, riser):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\trenderLadder is the inner function used to assembly all together, it takes the \n\t\t\t\t\tdesired height of the ladder, an interstep between two step and a riser for the single\n\t\t\t\t\tstep.\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t#building the ladder model and the ladder box\n\t\t\t\t\tladderModel = ladder.make_ladder(ladderHeight, interStep, riser)\n\t\t\t\t\twith open(\"lines/ladder.lines\", \"rb\") as ladderFile:\n\t\t\t\t\t\treader = csv.reader(ladderFile, delimiter=\",\")\n\t\t\t\t\t\trow = next(reader)\n\t\t\t\t\t\tladderModel = T([1,2])([float(row[0])*xfactor, float(row[1])*yfactor])(ladderModel)\n\t\t\t\t\tladderBOX = CUBOID([SIZE([1])(ladderModel)[0]/xfactor,SIZE([2])(ladderModel)[0]/yfactor, SIZE([3])(ladderModel)[0]/zfactor])\n\t\t\t\t\tladderBOX = T([1,2])([float(row[0])-SIZE([1])(ladderBOX)[0]/2., float(row[1])-SIZE([2])(ladderBOX)[0]/2.])(ladderBOX)\n\n\t\t\t\t\t#building roof model\n\t\t\t\t\tif isinstance(vertices, basestring):\n\t\t\t\t\t\twith open(\"lines/\" + vertices + \".lines\", \"rb\") as file:\n\t\t\t\t\t\t\treader = csv.reader(file, delimiter=\",\")\n\t\t\t\t\t\t\tnewVertices = []\n\t\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\t\tnewVertices.append([float(row[0]), float(row[1])])\n\t\t\t\t\tif newVertices:\n\t\t\t\t\t\troofModel = roof.roofBuilder(newVertices, pitchAngle, height)\n\t\t\t\t\telse:\n\t\t\t\t\t\troofModel = roof.roofBuilder(vertices, pitchAngle, height)\n\t\t\t\t\troofModel = T([3])([nStorey*3/zfactor])(roofModel)\n\t\t\t\t\troofModel = S([1,2,3])([xfactor*1.09,yfactor*1.09,zfactor])(roofModel)\n\t\t\t\t\troofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05,-SIZE([2])(roofModel)[0]*0.05]) (roofModel)\n\n\t\t\t\t\t#building full house model with windows and doors\n\t\t\t\t\tfullHouse = []\n\t\t\t\t\tfor story in range(nStorey):\n\t\t\t\t\t\thouseModel = house.build_house(story, windowModel, doorModel, ladderBOX)\n\t\t\t\t\t\tfullHouse.append(houseModel)\n\t\t\t\t\t\tfullHouse.append(T([3])([3]))\n\t\t\t\t\tfullHouse = STRUCT(fullHouse)\n\n\t\t\t\t\t#returning the result\n\t\t\t\t\treturn STRUCT([roofModel, ladderModel, fullHouse])\n\n\t\t\t\treturn renderLadder\n\n\t\t\treturn renderRoof\n\n\t\treturn renderDoors", "def create_rw_two_window(args, n_agents=1):\n\n T = args.rw_time\n DISC_UNIFORM_WIDTH = args.rw_width\n WINDOWS = args.rw_windows\n # first simulate a random walk\n\n POSSIBLE_STEPS, STEP_PROBS, DIMENSIONS = UNBIASED_RW\n\n rw = RandomWalk(DIMENSIONS,\n STEP_PROBS,\n 
POSSIBLE_STEPS,\n n_agents=n_agents,\n T=T,\n prior_distribution=MultiWindowDiscreteUniform(DIMENSIONS, WINDOWS, seed=args.rw_seed+2),\n seed=args.rw_seed+1)\n rw.reset()\n analytic = MultiWindowTwoStepRandomwWalkPosterior(WINDOWS, 0.5, T)\n return rw, analytic", "def append_modes(self,modes):\n self.modes = np.append(self.modes,np.array(modes,dtype=modetype))", "def BuildStatusBits():\n\n StatusList = np.zeros(192) # default all 0\n\n print (\"Select a impelementation of status bits:\" + '\\n'\n + '1: Minimum' + '\\n'\n + '2: Stantard' + '\\n'\n + '3: Enhanced')\n mode = int(input())\n print(\"If the impelementation chosen it's needed of extra information \"\n + \"it will be asked byte by byte like input\")\n\n if mode >= 2:\n for bit in range(3):\n print(\"Write the \" + str(bit) + \" status byte (i.e. 01110010)\")\n ZeroByte = input()\n StatusList = InsertInStatusList(StatusList, ZeroByte, bit)\n if mode == 3:\n for bit in range(3,23):\n if bit != 5: # Byte 5 must be set to logic 0\n print(\"Write the \" + str(bit) + \" status byte (i.e. 01110010)\")\n StatusByte = input()\n StatusList = InsertInStatusList(StatusList, StatusByte, bit)\n\n return StatusList", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def multiple_modes(mu, xv_min, xv_max, n_modes=10):\n # Make sure these are arrays\n xv_min = np.array(xv_min)\n xv_max = np.array(xv_max)\n\n # Generate a list of starting points\n n_dims, = xv_min.shape\n standard_range = np.random.rand(n_modes, n_dims)\n xv_start_list = (xv_max - xv_min) * standard_range + xv_min\n\n # Compute the modes\n # Size: (n_modes, d_x)\n return np.array([mode(mu, xv_start, xv_min, xv_max)\n for xv_start in xv_start_list])", "def wave2mode(waves):\n freq = c.value * 1e10 / waves\n modes = np.round((freq - lfc_offset) / rep_rate)\n return np.array(modes).astype(int)", "def viz_windows(self, score_img, mode):\n if mode == 'filtered':\n lw_img = window_image(self.windows_left, 'x_filtered', color=(0, 255, 0))\n rw_img = window_image(self.windows_right, 'x_filtered', color=(0, 255, 0))\n elif mode == 'raw':\n color = (255, 0, 0)\n win_left_detected, arg = filter_window_list(self.windows_left, False, False, remove_undetected=True)\n win_right_detected, arg = filter_window_list(self.windows_right, False, False, remove_undetected=True)\n lw_img = window_image(win_left_detected, 'x_measured', color, color, color)\n rw_img = window_image(win_right_detected, 'x_measured', color, color, color)\n else:\n raise Exception('mode is not valid')\n combined = lw_img + rw_img\n return cv2.addWeighted(score_img, 1, combined, 0.5, 0)", "def register_mode(name, mode):\r\n if name in predefined_modes:\r\n raise ValueError('Mode name already taken: %s' % name)\r\n predefined_modes[name] = mode", "def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...", "def _check_rw_flag(self, rw_flag):\n\n rw_flag = rw_flag.lower()\n if rw_flag == \"r\":\n pass\n elif rw_flag == \"w\":\n pass\n else:\n raise ValueError(\"rw_flag must be 'r' or 'w'\")\n return rw_flag" ]
[ "0.6624755", "0.5387723", "0.52551746", "0.5182285", "0.50885785", "0.49726188", "0.4918288", "0.47915792", "0.4790591", "0.47765884", "0.46680427", "0.4661446", "0.46470672", "0.45956075", "0.45906985", "0.45378727", "0.4529674", "0.45049015", "0.4488689", "0.4481426", "0.4464955", "0.44343388", "0.44339734", "0.44290128", "0.44179517", "0.44168958", "0.44024053", "0.43745336", "0.43732834", "0.4371878" ]
0.6783992
0
Returns an integer representing the rwx mode for the given rwx bits.
def get_bitmask ( self, rwx_bits ):
    ret = 0
    if self.readable:
        ret |= rwx_bits[0]

    if self.writable:
        ret |= rwx_bits[1]

    if self.executable:
        ret |= rwx_bits[2]

    return ret
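For context, a minimal standalone sketch of the same flag-to-bitmask idea in runnable form; the name rwx_to_bitmask and the USR_BITS default are assumptions made only for this illustration, not part of the entry above.

import stat

# Illustrative stand-in: fold boolean rwx flags into an integer mode using a
# (read, write, execute) bit triple, as the method above does with self.* flags.
USR_BITS = (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR)  # assumed triple for the demo

def rwx_to_bitmask(readable, writable, executable, rwx_bits=USR_BITS):
    ret = 0
    if readable:
        ret |= rwx_bits[0]
    if writable:
        ret |= rwx_bits[1]
    if executable:
        ret |= rwx_bits[2]
    return ret

print(oct(rwx_to_bitmask(True, True, False)))  # 0o600 for the user triple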
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_bitmask ( cls, mode, rwx_bits ):\n return cls (\n mode & rwx_bits[0], mode & rwx_bits[1], mode & rwx_bits[2],\n )", "def get_lsb(self, x, w=32):\n mask = (1 << w) - 1\n return int(x & mask)", "def get_stat_mode ( self ):\n return (\n self.user.get_bitmask ( self.USR_BITS ) |\n self.group.get_bitmask ( self.GRP_BITS ) |\n self.others.get_bitmask ( self.OTH_BITS )\n )", "def get_octal_from_file_permission(rwx: str) -> str:\n octals = {'r': 4, 'w': 2, 'x': 1, '-': 0}\n chars = []\n temp = []\n \n for i in range(len(rwx)):\n val = octals.get(rwx[i])\n temp.append(val)\n if len(temp) == 3:\n chars.append(list(t for t in temp))\n temp.clear()\n \n return ''.join(str(sum(item)) for item in chars)", "def ROL(byte, count):\n return ((byte << count) | (byte >> (32 - count))) & 0xffffffff", "def operation_mode(self) -> int:\n op_mode = self._read_u8(_REG_OP_MODE)\n return (op_mode >> 2) & 0b111", "def get_num_shift_reg(self):\n return self.num_registers", "def from_stat_mode ( cls, stat_mode ):\n return cls (\n RWX.from_bitmask ( stat_mode, cls.USR_BITS ),\n RWX.from_bitmask ( stat_mode, cls.GRP_BITS ),\n RWX.from_bitmask ( stat_mode, cls.OTH_BITS ),\n )", "def roi_x_size():\n def r(x):\n return x & 0xFFF\n\n def w(x):\n return min(x, 0xFFF)\n return r, w", "def rotl(x, count):\n ret = 0\n for i in range(64):\n bit = (x >> i) & 1\n ret |= bit << ((i + count) % 64)\n return ret", "def view_channel(_) -> int:\n return 1 << 10", "def view_channel(_) -> int:\n return 1 << 10", "def get_stat_mode ( mode_str ):\n return FsPermissions.from_str ( mode_str ).get_stat_mode()", "def mode(self) -> int:", "def get_level(raw_data, bits):\n level = 0\n for i in range(13, -1, -1):\n level <<= 1\n b, o = (bits[i] / 8) + 1, bits[i] % 8\n level |= (ord(raw_data[b]) >> o) & 1\n return level", "def _check_rw_flag(self, rw_flag):\n\n rw_flag = rw_flag.lower()\n if rw_flag == \"r\":\n pass\n elif rw_flag == \"w\":\n pass\n else:\n raise ValueError(\"rw_flag must be 'r' or 'w'\")\n return rw_flag", "def get_bit(reg,n_bit):\n return reg >> n_bit & 1", "def mode(self) -> int:\n return self._mode", "def python_int_bitwidth():\r\n # 'l' denotes a C long int, and the size is expressed in bytes.\r\n return struct.calcsize('l') * 8", "def _FixNtfsMode(mode: int) -> int:\n # TSK with NTFS reports the following permissions:\n # r-xr-xr-x for hidden files\n # -wx-wx-wx for read-only files\n # We want to report the reversed mapping, because it makes more sense.\n\n permissions = mode & 0o777\n\n if permissions == 0o333:\n return (mode & ~0o777) | 0o555\n elif permissions == 0o555:\n return (mode & ~0o777) | 0o333\n else:\n return mode", "def __int__(self):\n\n return self.bitflags", "def shift(bits: int, dir_: str, times: int = 1) -> int:\n dir_ = dir_.lower()\n res = bits\n if \"n\" in dir_:\n res <<= 8 * times\n if \"e\" in dir_:\n # wall is a bit mask that ensures bits aren't shifted beyond where\n # they are meant to go\n wall = not_(reduce(op.or_, (W_EDGE >> shift_ for shift_ in range(times)), 0))\n res >>= 1 * times\n res &= wall\n if \"s\" in dir_:\n res >>= 8 * times\n if \"w\" in dir_:\n # wall is a bit mask that ensures bits aren't shifted beyond where\n # they are meant to go\n wall = not_(reduce(op.or_, (E_EDGE << shift_ for shift_ in range(times)), 0))\n res <<= 1 * times\n res &= wall\n return res & ALL_", "def get_bitmask(self):\r\n return self.__bitmask__", "def bitness():\n # see https://docs.python.org/2/library/platform.html#platform.architecture\n return '64-bit' if sys.maxsize > 2**32 else '32-bit'", 
"def getModeString(fullPath, stats):\n bits = \"rwx\"\n modes = \"\"\n permissions = (\n S_IRUSR,\n S_IWUSR,\n S_IXUSR,\n S_IRGRP,\n S_IWGRP,\n S_IXGRP,\n S_IROTH,\n S_IWOTH,\n S_IXOTH,\n )\n\n fileType = getFileType(fullPath)\n\n if fileType is \"ln\" or fileType is \"or\":\n filePermissions = stats[ST_MODE]\n modes += 'l'\n else:\n filePermissions = stats[ST_MODE]\n if fileType is 'di':\n modes += 'd'\n else:\n modes += '-'\n\n for i, perm in enumerate(permissions):\n if filePermissions & perm:\n modes += bits[i % 3]\n else:\n modes += \"-\"\n\n return modes", "def _get_pixel_mode_settings(self, mode=None):\n if mode is None:\n mode=self.get_color_mode()\n mode=self._color_modes.get(mode,mode)\n mode&=0x7F\n return self._mode_bpps[mode],self._mode_channels[mode]", "def permissions(self):\n return int(self.options.get_str('octal_permissions'), 8)", "def convert_mode(string: str) -> int:\n pattern = re.compile(\n r\"\"\"^ # ensure length\n (?P<o400>r|-) # Use groups as octal values.\n (?P<o200>w|-)\n (?P<o100>x|-)\n (?P<o040>r|-)\n (?P<o020>w|-)\n (?P<o010>x|-)\n (?P<o004>r|-)\n (?P<o002>w|-)\n (?P<o001>x|-)\n $ # ensure length\"\"\",\n re.VERBOSE,\n )\n match = pattern.match(string)\n if match:\n return sum(\n int(f\"0{key}\", base=8)\n for key, value in match.groupdict().items()\n if value != \"-\"\n )\n else:\n # No match, treat it as wrong syntax\n raise SyntaxError(\n \"Mode-string should be between `---------` and `rwxrwxrwx`.\"\n )", "def manage_channels(_) -> int:\n return 1 << 4", "def manage_channels(_) -> int:\n return 1 << 4" ]
[ "0.6308568", "0.5609232", "0.5549503", "0.5435947", "0.5271582", "0.51879895", "0.5128324", "0.5119145", "0.51129645", "0.5092444", "0.50841266", "0.50841266", "0.50642216", "0.5061078", "0.50094485", "0.4977763", "0.4973743", "0.4972096", "0.496862", "0.49660695", "0.4963623", "0.4957433", "0.49560574", "0.49439907", "0.49290946", "0.49148715", "0.4882706", "0.48713338", "0.48658738", "0.48658738" ]
0.76721585
0
Returns a new permissions object for the given string.
def from_str ( cls, s, strict=False ):
    rwx_user = RWX.from_str ( s[0:3], strict=strict )
    rwx_group = RWX.from_str ( s[3:6], strict=strict )
    rwx_others = RWX.from_str ( s[6:9], strict=strict )
    return cls ( rwx_user, rwx_group, rwx_others )
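A hedged standalone sketch of the same parsing idea, splitting a nine-character permission string into user, group and others triples; mode_from_str and _BIT_TABLE are names assumed for the example only.

import stat

# Illustrative parser: map "rwxr-xr--" style strings to an octal mode by walking
# the three 3-character slices exactly as from_str slices s[0:3], s[3:6], s[6:9].
_BIT_TABLE = (
    (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR),
    (stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP),
    (stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH),
)

def mode_from_str(s):
    mode = 0
    for triple, bits in zip((s[0:3], s[3:6], s[6:9]), _BIT_TABLE):
        for char, bit in zip(triple, bits):
            if char != '-':
                mode |= bit
    return mode

print(oct(mode_from_str("rwxr-xr--")))  # 0o754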
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_string(cls, permission):\n p_read = 'r' in permission\n p_add = 'a' in permission\n p_update = 'u' in permission\n p_process = 'p' in permission\n\n parsed = cls(p_read, p_add, p_update, p_process)\n\n return parsed", "def create_from_arg_string(cls, arg_string):\n return cls()", "def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)", "def from_str(cls, string):", "def from_string (cls, string, access=DEFAULT_ACCESS, accept_value=True):\n hKey, moniker, value = cls._from_string (string, access, accept_value)\n if value is None:\n return cls (moniker, access)\n else:\n return cls (moniker, access).get_value (value)", "def from_str(cls, name: str):\n return cls.__members__[name]", "def from_str ( cls, s, strict=False ):\n readable, writable, executable = False, False, False\n\n if strict:\n _s = s.lower()\n readable = _s[0] == 'r'\n writable = _s[1] == 'w'\n executable = _s[2] == 'x'\n\n elif s:\n for char in s.lower():\n if char == 'r':\n readable = True\n elif char == 'w':\n writable = True\n elif char == 'x':\n executable = True\n # -- end for\n # -- end if\n\n return cls ( readable, writable, executable )", "def from_string(cls, str_value):\n for m in cls:\n if m.value == str_value:\n return m\n else:\n return None", "def from_type_string(cls, type_str):\n type_info = cls.is_my_type(type_str)\n if type_info:\n return cls(type_info)", "def from_string(cls, role: str) -> \"ProjectRole\":\n role = role.lower()\n for r in cls:\n if role == r.name.lower():\n return r\n raise ValueError('No project role matching \"{}\"'.format(role))", "def from_str(cls, s):\n raise NotImplementedError", "def fromString(cls, string):\n raise NotImplementedError(\n 'fromString is not implemented on %r' % (cls.__name__,))", "def _from_string (cls, string, access=DEFAULT_ACCESS, accept_value=True):\n computer, root, path, value = _parse_moniker (string, accept_value=accept_value)\n moniker = REGISTRY_HIVE.name_from_value (root) + ((sep + path) if path else \"\")\n if computer:\n hRoot = wrapped (win32api.RegConnectRegistry, None if computer == \".\" else computer, root)\n else:\n hRoot = root\n\n try:\n if computer:\n moniker = r\"\\\\%s\\%s\" % (computer, moniker)\n return wrapped (win32api.RegOpenKeyEx, hRoot, path, 0, cls._access (access)), moniker, value\n except exc.x_not_found:\n return None, moniker, value", "def semantic_capability_interface_from_string(string, file_name='<string>'):\n return semantic_capability_interface_from_dict(yaml.load(string), file_name)", "def construct_from_string(cls, string):\n # Remove fletcher specific naming from the arrow type string.\n if string.startswith(\"fletcher[\"):\n string = string[9:-1]\n\n if string == \"list<item: string>\":\n return cls(pa.list_(pa.string()))\n\n try:\n type_for_alias = pa.type_for_alias(string)\n except (ValueError, KeyError):\n # pandas API expects a TypeError\n raise TypeError(string)\n\n return cls(type_for_alias)", "def from_string(cls, name):\n if hasattr(cls,name):\n return cls.__getattribute__(name)\n else:\n return None", "def from_str(cls, encstr: str) -> 'Restriction':\n encstr = re.sub(r'\\s+', '', encstr)\n ret, remainder = cls.decode(encstr)\n if len(remainder) != 0:\n raise ValueError(\"Restriction had extrs characters at end: {}\"\n .format(remainder))\n return ret", "def from_string(cls, name: str) -> Enum:", "def from_string(cls, dlstr):\n\n NotImplementedError(\"Should be implemented by subclass\")", "def from_string(self, regex_str: str):\n 
return RegexReader(regex_str)", "def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def FromHumanReadable(cls, string: Text):\n precondition.AssertType(string, Text)\n return _GetFactory(cls).FromHumanReadable(string)", "def __init__(self, string):\n Rule.__init__(self)\n self.__string = string", "def create_from_string(cls, text):\n parts = text.split('::')\n pcount = len(parts)\n if pcount == 4:\n name = parts[0]\n u_path = parts[1]\n ds_name = parts[2]\n dir_struc = None\n for _ in DirStruc:\n if _.name == ds_name:\n dir_struc = _\n break\n else:\n raise DvczError(\n \"Not the name of a valid dir_struc name: '%s'\" % ds_name)\n\n # 'item access'\n hashtype = HashTypes[parts[3]]\n return Store(name, u_path, dir_struc, hashtype)\n else:\n raise DvczError(\"Invalid Store descriptor: '%s'\" % text)", "def __new__(cls, val):\n # Don't repeat if already a UID class then may get the name that\n # str(uid) gives rather than the dotted number\n if isinstance(val, UID):\n return val\n\n if isinstance(val, compat.string_types):\n return super(UID, cls).__new__(cls, val.strip())\n\n raise TypeError(\"UID must be a string\")", "def create_from_substring(\n cls,\n what_parsed,\n original_string: str,\n new_string: str,\n start_offset: int\n ):\n if new_string is None:\n return ParseDelegationReturnMetadata.make_failing()\n if new_string != \"\":\n remaining_right_starti = original_string.rfind(new_string)\n else:\n remaining_right_starti = len(original_string)\n if remaining_right_starti == -1:\n raise ValueError(\"New string must be substring of original string\")\n if original_string[remaining_right_starti:] != new_string:\n raise ValueError(f\"new string must appear at the end of the origional string.\"\n f\"original string is '{original_string}' new string is '{new_string}'\"\n f\"remaining_right_starti is {remaining_right_starti}\")\n return cls(True, original_string, start_offset, what_parsed, remaining_right_starti)", "def interpret_requirement(string):\n string_list = split(string, sep=' ')\n \n requirement = Requirement(points, degree, majors, levels, max_non_degree)\n return requirement", "def fromString(cls, string):\n # From SAM specification v1.5, slightly adapted for single-token parsing\n pattern = r\"^[0-9]+[MIDNSHPX=]\" \n string = string.strip()\n if string == '*':\n return CIGAR.fromList(['*'])\n parsed = []\n s = string\n # Parse string token (e.g. 14M) by token, re.findall is not enough,\n # because non-matching subsequences between (e.g. \"14Mblabla3D4M\") would\n # go unnoticed! Also it would be good to abort as early as possible if\n # an invalid string is found to avoid parsing possibly very long strings\n while s != '':\n r = re.match(pattern, s)\n if not r:\n raise ValueError('Invalid CIGAR string: \"'+string+'\"')\n g = r.group(0)\n parsed.append(g)\n s = s[len(g):]\n \n parsed = [(int(p[:-1]), p[-1:]) for p in parsed]\n\n return CIGAR.fromList(parsed)", "def fromStr(cls, s):\n assert isinstance(s, str), 'incorrect type of arg s: should be type str, is type {}'.format(type(s))\n s = [ int(n) for n in s.split('.') ]\n return cls(*s)", "def __create_new_permission(self, codename, **kwargs) -> None:\n permission = Permission(codename=codename, **kwargs)\n permission.save()" ]
[ "0.69934595", "0.5844637", "0.5794634", "0.5684017", "0.562672", "0.56258774", "0.5491144", "0.54451984", "0.5422989", "0.5404011", "0.5355926", "0.5328249", "0.5311455", "0.5287373", "0.5220261", "0.5157838", "0.5156501", "0.50684", "0.5037294", "0.5022736", "0.50016254", "0.4977543", "0.49746856", "0.49575195", "0.49570024", "0.493968", "0.4938288", "0.49250185", "0.4903258", "0.49028882" ]
0.58623594
1
Creates a permissions object for the given stat mode.
def from_stat_mode ( cls, stat_mode ):
    return cls (
        RWX.from_bitmask ( stat_mode, cls.USR_BITS ),
        RWX.from_bitmask ( stat_mode, cls.GRP_BITS ),
        RWX.from_bitmask ( stat_mode, cls.OTH_BITS ),
    )
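For illustration, a self-contained decomposition of a stat mode into per-class rwx flags; the decompose helper is an assumed name and uses the standard stat constants rather than the class-level *_BITS attributes referenced above.

import stat

# Illustrative decomposition: split one stat mode into (r, w, x) booleans for
# user, group and others, mirroring the three from_bitmask calls above.
def decompose(stat_mode):
    classes = {
        "user": (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR),
        "group": (stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP),
        "others": (stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH),
    }
    return {
        who: tuple(bool(stat_mode & bit) for bit in bits)
        for who, bits in classes.items()
    }

print(decompose(0o754))
# {'user': (True, True, True), 'group': (True, False, True), 'others': (True, False, False)}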
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def _getstatdict_forcreate(self, mode):\n now = getnow()\n stat = {\n 'st_mode': mode,\n 'st_nlink': 1,\n 'st_uid': os.getuid(),\n 'st_gid': os.getgid(),\n 'st_size': 0,\n 'st_atime': now,\n 'st_mtime': now,\n 'st_ctime': now,\n }\n return stat", "def get_stat_mode ( mode_str ):\n return FsPermissions.from_str ( mode_str ).get_stat_mode()", "def create_file_attributes(permissions, time, size):\n\n return {\n 'st_mode': (stat.S_IFREG | permissions),\n 'st_ctime': time,\n 'st_mtime': time,\n 'st_atime': time,\n 'st_size': size,\n 'st_uid': os.getuid(),\n 'st_gid': os.getgid(),\n 'st_nlink': 1\n }", "def permissions(self, account_id):\n from pureport_client.commands.accounts.permissions import Command\n return Command(self.client, account_id)", "def chmod_chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def set_permissions(self, permissions):\n\n\t\tif Platform.PLATFORM_POSIX == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('([-r][-w][-xsStT]){3,3}', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRUSR\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWUSR\n\t\t\t\tif permissions[2] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXUSR\n\t\t\t\tif permissions[3] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRGRP\n\t\t\t\tif permissions[4] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWGRP\n\t\t\t\tif permissions[5] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXGRP\n\t\t\t\tif permissions[6] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IROTH\n\t\t\t\tif permissions[7] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWOTH\n\t\t\t\tif permissions[8] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXOTH\n\t\t\t\t\t\n\t\t\telif permissions.__class__ == str and re.match('(0)?[0-7]{3,3}', permissions):\n\t\t\t\tif len(permissions) == 3:\n\t\t\t\t\tpermissions = '0' + permissions\n\t\t\t\tself.__permissions = octstr_to_int(permissions)\n\t\t\t\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\t\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError()\n\n\t\telif Platform.PLATFORM_WINDOWS == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('[-r][-w]', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IREAD\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWRITE\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError() \n\t\telse:\n\t\t\traise PlatformNotSupportedError()", "def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0066)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEquals(mode, 0444)\n else:\n self.assertEquals(mode, 0066)", "def chmod(self, mode, rec=0):\n if not isinstance(mode, int):\n raise TypeError(f\"mode 
{mode!r} must be an integer\")\n if rec:\n for x in self.visit(rec=rec):\n error.checked_call(os.chmod, str(x), mode)\n error.checked_call(os.chmod, self.strpath, mode)", "def chmod(path, mode):\n try:\n st = os.stat(path)\n except OSError:\n return None, None\n\n origMode = fMode = stat.S_IMODE(st.st_mode)\n if isinstance(mode, str):\n parts = [s.strip() for s in mode.split(\",\")]\n for s in parts:\n m = _rModePart.match(s)\n if not m:\n return origMode, -2\n\n role, op, flags = m.groups()\n\n bits = 0\n for f in flags:\n bits |= _bitMap[role+f]\n\n if op == \"+\":\n fMode |= bits\n elif op == \"-\":\n fMode &= ~bits\n else:\n fMode = (fMode & _bitMap[role]) | bits\n else:\n fMode = mode\n\n try:\n os.chmod(path, fMode)\n except OSError:\n return origMode, -1\n\n return origMode, 0", "def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)\n self.addCleanup(log1.close)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEqual(mode, 0o444)\n else:\n self.assertEqual(mode, 0o066)", "def sort_permissions(fl):\n\n if oct(os.stat(fl).st_mode)[4:] != '666':\n os.chmod(fl, 0o666)", "def add_bits_info(perms, filemode):\n bit_vals = [\n (stat.S_ISUID, 2, \"s\", \"s\"),\n (stat.S_ISGID, 5, \"s\", \"s\"),\n (stat.S_ISVTX, 8, \"t\", \"T\"),\n ]\n for bit, i, xval, yval in bit_vals:\n if filemode & bit:\n if perms[i] == \"x\":\n perms[i] = xval\n else:\n perms[i] = yval\n return perms", "def get_stat_mode ( self ):\n return (\n self.user.get_bitmask ( self.USR_BITS ) |\n self.group.get_bitmask ( self.GRP_BITS ) |\n self.others.get_bitmask ( self.OTH_BITS )\n )", "def get_perms(filemode):\n perm_vals = [\n (stat.S_IRUSR, \"r\"),\n (stat.S_IWUSR, \"w\"),\n (stat.S_IXUSR, \"x\"),\n (stat.S_IRGRP, \"r\"),\n (stat.S_IWGRP, \"w\"),\n (stat.S_IXGRP, \"x\"),\n (stat.S_IROTH, \"r\"),\n (stat.S_IWOTH, \"w\"),\n (stat.S_IXOTH, \"x\"),\n ]\n perms = (perm if filemode & val else \"-\" for val, perm in perm_vals)\n return \"\".join(perms)", "def assign_perm(self, permission, user, obj, ctype=None):\n if getattr(obj, 'pk', None) is None:\n raise ObjectNotPersisted(\"Object %s needs to be persisted first\" % obj)\n\n if not ctype:\n ctype = ContentType.objects.get_for_model(obj)\n\n if not isinstance(permission, Permission):\n permission = Permission.objects.get(content_type=ctype, codename=permission)\n\n obj_perm, created = self.get_or_create(\n content_type=ctype,\n permission=permission,\n object_pk=obj.pk,\n user=user)\n return obj_perm", "def getModeString(fullPath, stats):\n bits = \"rwx\"\n modes = \"\"\n permissions = (\n S_IRUSR,\n S_IWUSR,\n S_IXUSR,\n S_IRGRP,\n S_IWGRP,\n S_IXGRP,\n S_IROTH,\n S_IWOTH,\n S_IXOTH,\n )\n\n fileType = getFileType(fullPath)\n\n if fileType is \"ln\" or fileType is \"or\":\n filePermissions = stats[ST_MODE]\n modes += 'l'\n else:\n filePermissions = stats[ST_MODE]\n if fileType is 'di':\n modes += 'd'\n else:\n modes += '-'\n\n for i, perm in enumerate(permissions):\n if filePermissions & perm:\n modes += bits[i % 3]\n else:\n modes += \"-\"\n\n return modes", "def permission(guild_id: int, permissions: list):\n\n def wrapper(cmd):\n if not getattr(cmd, \"__permissions__\", None):\n cmd.__permissions__ = {}\n cmd.__permissions__[guild_id] = permissions\n return cmd\n\n return wrapper", "def stat_mode_to_index_mode(mode: int) -> int:\n if S_ISLNK(mode): # symlinks\n return S_IFLNK\n if S_ISDIR(mode) or S_IFMT(mode) == 
S_IFGITLINK: # submodules\n return S_IFGITLINK\n return S_IFREG | (mode & S_IXUSR and 0o755 or 0o644) # blobs with or without executable bit", "def get_perm(name):\n stat_info = os.lstat(name)\n mode = stat_info.st_mode\n\n uid = stat_info.st_uid\n gid = stat_info.st_gid\n user = pwd.getpwuid(uid)[0]\n group = grp.getgrgid(gid)[0]\n\n perms = {}\n mask = []\n for who,who_is in ((\"usr\", user), (\"grp\", group), (\"oth\", None)):\n perms[who] = {\"is\" : who_is, \"perms\" : {}}\n for what in \"r\", \"w\", \"x\":\n has_perm = bool(mode & getattr(stat,\"S_I\"+what.upper()+who.upper()))\n perms[who][\"perms\"][what] = has_perm\n mask.append(what.lower() if has_perm else \"-\")\n mask = \"\".join(mask)\n perms[\"mask\"] = mask\n return (user, group, mask, perms)", "def __create_new_permission(self, codename, **kwargs) -> None:\n permission = Permission(codename=codename, **kwargs)\n permission.save()", "def create_permission( # pylint: disable=inconsistent-return-statements\n self,\n share_permission: IO,\n timeout: Optional[int] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> None:", "def open_and_force_mkdir(path, mode):\n force_mkdir(os.path.dirname(path))\n return open(path, mode)", "def make_executable(p):\n st = os.stat(p)\n os.chmod(p, st.st_mode | 0o111)", "def set_object_permissions(self, bucket_name, object_name, mode):\n\n return h3lib.set_object_permissions(self._handle, bucket_name, object_name, mode, self._user_id)", "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def file_mode_checker(\n filename: str,\n mode: int = 0o600,\n quiet: bool = False,\n create: bool = False\n):\n try:\n st_mode = os.stat(filename).st_mode\n except OSError: # file does not exist\n if not create:\n raise\n os.close(os.open(filename, os.O_CREAT | os.O_EXCL, mode))\n return\n\n warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'\n if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode):\n os.chmod(filename, mode)\n # re-read and check changes\n if os.stat(filename).st_mode != st_mode and not quiet:\n warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode))", "def format_filemode(filemode):\n filetype = get_filetype(filemode)\n permissions = add_bits_info(get_perms(filemode), filemode)\n return filetype + permissions", "def test_modePreservation(self):\n open(self.path, \"w\").close()\n os.chmod(self.path, 0o707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n log.rotate()\n self.assertEqual(mode, os.stat(self.path)[stat.ST_MODE])", "def create_permission(permission, event):\n setDefaultRoles(permission.title, ('Manager',))" ]
[ "0.6227134", "0.6162068", "0.57799256", "0.57596844", "0.5685234", "0.5639629", "0.5500084", "0.54408145", "0.53968257", "0.53407454", "0.53266263", "0.5314504", "0.5312102", "0.52856654", "0.52711236", "0.5270964", "0.5242508", "0.5240414", "0.519975", "0.519469", "0.5187937", "0.5181617", "0.51756865", "0.51624334", "0.51276827", "0.5106919", "0.50831896", "0.507481", "0.50720817", "0.5071475" ]
0.71357036
0
Calls chmod(fspath) and chown(fspath) after creating an intermediate ChownChmod instance.
def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):
    return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )
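The ChownChmod class itself is not shown in this entry; as a hedged sketch, a plain function with the same one-call convenience shape could look like the following (apply_chown_chmod and its defaults are assumptions for the example).

import os

# Illustrative one-shot helper: optionally change owner and mode of a path,
# with a pretend flag for dry runs (uid/gid of -1 leave that field unchanged).
def apply_chown_chmod(fspath, uid=-1, gid=-1, mode=None, pretend=False):
    if pretend:
        print("would chown %s to %d:%d and chmod to %s"
              % (fspath, uid, gid, oct(mode) if mode is not None else None))
        return
    os.chown(fspath, uid, gid)
    if mode is not None:
        os.chmod(fspath, mode)

apply_chown_chmod("/tmp/example", mode=0o644, pretend=True)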
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )", "def chown_dir ( self, fspath ):\n return", "def chmod_chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod_dir ( self, fspath ):\n return", "def chown_file ( self, fspath ):\n return", "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chmod ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chmod_file ( self, fspath ):\n return", "def chmod_chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def Chown(self):\n cmd = 'chmod -R 0775 %s' % self.procdir\n self.ExecCmd(cmd)", "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def _setup_permissions(self, chown, chmod):\n if chown is not None:\n if isinstance(chown, str):\n user, group = chown, None\n\n else:\n try:\n # Try to extract tuple.\n user, group = chown\n\n except ValueError:\n # If length of iterable is not 2, then allow 1.\n assert len(chown) == 1, 'chown must be user or tuple'\n user, group = chown[0], None\n\n except TypeError:\n # If not iterable, use given value as user.\n user, group = chown, None\n\n # Lookup user id.\n if isinstance(user, str):\n user_info = pwd.getpwnam(user)\n user = user_info.pw_uid\n\n # Lookup group id, or use -1 (do not change group)\n if isinstance(group, str):\n group = grp.getgrnam(group).pw_gid\n\n elif group is None:\n group = -1\n\n # Return tuple usable by os.chown().\n chown = (user, group)\n\n # Ensure chmod is numeric if given.\n if chmod is not None:\n assert isinstance(chmod, numbers.Number), 'chmod must be a number'\n\n return chown, chmod", "def update_chmod(self):\n pass", "def _set_rw_permissions_for_all(self, nms, path):\n nms.appliance.execute('chmod ugo+rw %s' % path)", "def chgrp_perms( path, group='climatew', permissions=None ):\n global std_file_perms, std_dir_perms\n if group is None:\n _group = -1 # means don't change the group\n elif not isinstance(group, int):\n _group = grp.getgrnam(group)[2]\n if permissions is None:\n if os.path.isdir(path):\n permissions = std_file_perms\n else:\n permissions = std_dir_perms\n os.chown( path, -1, _group )\n os.chmod( path, permissions )", "def chmod(self, path, mode):\n str_mode = (\"%o\" % mode)[-4:]\n if str_mode not in [\"0755\", \"0644\"]:\n raise FuseOSError(errno.EINVAL)\n\n result = super(CurrentView, self).chmod(path, mode)\n\n if os.path.isdir(self.repo._full_path(path)):\n return result\n\n message = \"Chmod to {} on {}\".format(str_mode, path)\n self._stage(add=path, message=message)\n\n log.debug(\"CurrentView: Change %s mode to %s\", path, (\"0%o\" % mode)[-4:])\n return result", "def file_perms( fname, permissions, remote=None ):\n if remote == None:\n if perms.i_own( fname ):\n if type(permissions) == type(''):\n perms.apply_chmod( fname, 
permissions )\n else:\n # assume 'permissions' is a tuple or list\n perms.apply_chmod( fname, *permissions )\n else:\n if remote.x_i_own( fname ):\n if type(permissions) == type(''):\n remote.x_apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n remote.x_apply_chmod( fname, *permissions )", "def chmod(self, path, mod):\n self._call(\"SETPERMISSION\", method=\"put\", path=path, permission=mod)", "def testChAttrs(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertTrue(results[0].startswith(b'-rw-r--r--'))\n self.assertEqual(results[1], b'')\n self.assertTrue(results[2].startswith(b'----------'), results[2])\n self.assertEqual(results[3], b'')\n\n d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',\n 'ls -l testfile1', 'chmod 644 testfile1')\n return d.addCallback(_check)\n # XXX test chgrp/own", "def set_file_permissions(host, fqpath, perms):\n command = \"chmod %s %s\" % (perms, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chmod failed: %s' % rerr)\n return False", "def _set_chmod(self, chmod=None):\n if not chmod:\n chmod = self.chmod\n if os.chmod(self.path, self.chmod):\n self.chmod = chmod\n return True\n return False", "def chown(self, path, owner=None, group=None):\n kwargs = {}\n if owner is not None:\n kwargs[\"owner\"] = owner\n if group is not None:\n kwargs[\"group\"] = group\n self._call(\"SETOWNER\", method=\"put\", path=path, **kwargs)", "def _make_writeable(filename):\n import stat\n if sys.platform.startswith('java'):\n # On Jython there is no os.access()\n return\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)", "def chmodRecursive(self, p, mod):\n for root, dirs, fnames in p:\n for f in fnames:\n try:\n os.chmod(os.path.join(root, f), mod)\n except Exception, inst:\n logging.error('Unable to set permissions %s for file %s, error: %s' % (mod, f, inst))", "def set_file_owner_perm(path, permission, user, group):\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n current_perm = get_permissions(path)\n try:\n logger.debug('Current permission: {0}, changing to {1}'.format(current_perm, oct(permission)))\n os.chmod(path, permission)\n os.chown(path, uid, gid)\n except Exception as e:\n logger.warning('Unable to change permissions on {0}: {1}'.format(path, e))", "def chmod_plus_w(path):\r\n path_mode = os.stat(path).st_mode\r\n path_mode &= int('777', 8)\r\n path_mode |= stat.S_IWRITE\r\n os.chmod(path, path_mode)", "def _make_writeable(filename):\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)", "def chmod(self, mode, rec=0):\n if not isinstance(mode, int):\n raise TypeError(f\"mode {mode!r} must be an integer\")\n if rec:\n for x in self.visit(rec=rec):\n error.checked_call(os.chmod, str(x), mode)\n error.checked_call(os.chmod, self.strpath, mode)", "def change_permissions(path, permission='777'):\r\n if os.path.exists(path):\r\n subprocess.call('chmod -R %s %s'%(permission,path),shell=True)\r\n else:\r\n raise NameError('invalid path %s'% path)" ]
[ "0.81457466", "0.7512426", "0.75111514", "0.7507602", "0.7474258", "0.7366395", "0.734391", "0.7337309", "0.7254689", "0.7141006", "0.70009094", "0.68603367", "0.68487173", "0.68058246", "0.66050404", "0.65938056", "0.6492462", "0.6466497", "0.63773435", "0.6361034", "0.63574886", "0.63001686", "0.62370056", "0.6183611", "0.61630887", "0.61500967", "0.61165035", "0.611366", "0.6099981", "0.6089158" ]
0.805532
1
Similar to "touch ".
def do_touch ( self, fspath ): return
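The do_touch stub above is a no-op in this entry; as an assumption about what such a hook typically does, a common minimal implementation of touch semantics is:

import os

# Typical "touch" behaviour: update timestamps if the file exists,
# otherwise create an empty file.
def touch(fspath):
    if os.path.exists(fspath):
        os.utime(fspath, None)
    else:
        open(fspath, "a").close()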
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def touch(name):\r\n with file(name, 'a'):\r\n os.utime(name, None)", "def touch(path):\n fd = open(path, 'a')\n fd.close()", "def touch_file(file_name):\n os.utime(file_name, None)", "def _touch(path):\n open(path, \"w\").close()", "def touch(path):\n if not os.path.isfile(path):\n with open(path, 'a'):\n os.utime(path, None)", "def touch(file_path: str) -> None:\n try:\n os.utime(file_path, None)\n except Exception:\n open(file_path, 'a').close()", "def touch_p(filepath, times=None, mkdir=True):\n return touch(filepath=filepath, times=times, mkdir=mkdir)", "def touch(path):\n with open(path, 'wt') as f:\n pass", "def touch(path: str) -> None:\n Stat.forget(path)\n os.utime(path)", "def touch(filename):\n try:\n if os.path.exists(filename):\n os.utime(filename, None)\n else:\n open(filename, \"w\").close()\n except IOError as e:\n if e.errno != 13:\n raise\n else:\n return False\n except OSError as e:\n if e.errno != 13:\n raise\n else:\n return False\n return True", "def _touch(fname, times=None):\n with file(fname, 'a'):\n os.utime(fname, times)", "def touch(path, mtime, test=False):\n if test: return\n os.utime(path, (mtime, mtime))", "def touch(file_name):\n open(file_name, 'a').close()", "def touch(*paths):\n\n for path in paths:\n if os.path.exists(path):\n os.utime(path, None)\n else:\n open(path, 'a').close()\n LOG.debug('touch {!r}'.format(path))", "def touch(self, filename):\n call(['touch', os.path.join(SAMPLE_PROJECT, filename)])", "def touch(filepath, times=None, mkdir=False):\n filepath = expand_path(filepath)\n if mkdir:\n mkdir_p(os.path.dirname(filepath))\n with open(filepath, 'a'):\n if times or times is None:\n os.utime(filepath, times)\n return filepath", "def touch(fname, times=None):\n with open(fname, 'a'):\n os.utime(fname, times)", "def _touch_file(self, fname):\n if os.path.exists(fname):\n os.utime(fname, None)\n else:\n open(fname, 'a').close()", "def touch(path, mtime, test=False):\n if test: return\n os.utime(path, (mtime, mtime), follow_symlinks=False)", "def touch(op):\n if not os.path.exists(op):\n if is_verbose():\n print(\"Creating nonexistent file '%s'.\" % (op))\n fd = open(op, \"w\")\n fd.close()\n elif not os.path.isfile(op):\n raise RuntimeError(\"'%s' exists but is not a normal file\" % (op))", "def touch(path, times=None):\n if os.path.isdir(path):\n os.utime(path, times)\n else:\n with open(path, \"ab\"):\n os.utime(path, times)", "def Touch(path, makedirs=False):\n if makedirs:\n SafeMakedirs(os.path.dirname(path))\n\n # Create the file if nonexistant.\n open(path, 'a').close()\n # Update timestamp to right now.\n os.utime(path, None)", "def touch(path):\n open(path, 'wb').close()", "def touch(file, times=None):\r\n if times:\r\n if len(times) > 2:\r\n raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '\r\n 'to use for both.')\r\n\r\n if len(times) == 1:\r\n times = (times, times)\r\n\r\n with safe_open(file, 'a'):\r\n os.utime(file, times)", "def touch_file(name):\n if not os.path.exists(name):\n print(f'File \\'{name}\\' not found. 
Creating it.')\n with open(name, 'w'):\n pass\n return False\n return True", "def touch(self, node):\n pass", "def touch(path, atime=None, mtime=None):\n assert ((atime is None) == (mtime is None)), 'atime and mtime are exclusive'\n if atime is None:\n times = None\n else:\n times = (atime, mtime)\n with open(path, 'ab+'):\n # Note: there is a race condition here.\n os.utime(path, times=times)", "def _touch_file(self, file_id):\n if file_id in self.touch_list:\n self.touch_list.remove(file_id)\n self.touch_list.append(file_id)", "def __touch_file(self, filename):\n with open(filename, \"w\") as fd:\n fd.write(\"\")", "def touch(self, dst, label=None):\r\n self.write('', dst, label, mode='a')" ]
[ "0.72858804", "0.7187371", "0.71445245", "0.7061986", "0.703371", "0.7024376", "0.6991309", "0.6952427", "0.6919515", "0.6912042", "0.6906273", "0.69005775", "0.6891966", "0.6804933", "0.6791455", "0.6778573", "0.6766807", "0.672045", "0.66853195", "0.6679039", "0.6630036", "0.650876", "0.64875203", "0.64329195", "0.641999", "0.63944983", "0.6374483", "0.62377155", "0.62247366", "0.61726123" ]
0.7773865
0
Calls chown_dir(fspath) or chown_file(fspath), depending on whether fspath is a directory or not.
def chown ( self, fspath ):
    if os.path.isdir ( fspath ):
        return self.chown_dir ( fspath )
    else:
        return self.chown_file ( fspath )
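A minimal standalone sketch of the same dispatch pattern; dispatch_chown and the two handler callables are illustrative stand-ins for the instance methods used above.

import os

# Illustrative dispatcher: route to a directory- or file-specific handler
# based on os.path.isdir, exactly as chown() does with its instance methods.
def dispatch_chown(fspath, on_dir, on_file):
    if os.path.isdir(fspath):
        return on_dir(fspath)
    return on_file(fspath)

dispatch_chown(".", lambda p: print("dir handler:", p), lambda p: print("file handler:", p))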
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chown_dir ( self, fspath ):\n return", "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chown_file ( self, fspath ):\n return", "def chmod_chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )", "def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )", "def chmod_chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod_dir ( self, fspath ):\n return", "def chown_recursive(path, username):\n user = getpwnam(username)\n uid, gid = user.pw_uid, user.pw_gid\n chown(path, uid, gid)\n for _, _, files in walk(path):\n for f in files:\n try:\n chown(f, uid, gid)\n except OSError: # Files are yanked from under our feet\n pass", "def chown(self, path, owner=None, group=None):\n kwargs = {}\n if owner is not None:\n kwargs[\"owner\"] = owner\n if group is not None:\n kwargs[\"group\"] = group\n self._call(\"SETOWNER\", method=\"put\", path=path, **kwargs)", "def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success", "def chown(config):\n\n path = config.device_path()\n\n args = [\"sudo\", \"chown\", \"%d:%d\" % (os.getuid(), os.getgid()), path]\n iotests.log(\" \".join(args), filters=[iotests.filter_chown])\n proc = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n msg = proc.communicate()[0]\n\n if proc.returncode != 0:\n raise Exception(msg)", "def chmod ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def set_file_owner(host, fqpath, user):\n command = \"chown %s %s\" % (user, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chown failed: %s' % rerr)\n return False", "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def Chown(self):\n cmd = 'chmod -R 
0775 %s' % self.procdir\n self.ExecCmd(cmd)", "def chgrp_perms( path, group='climatew', permissions=None ):\n global std_file_perms, std_dir_perms\n if group is None:\n _group = -1 # means don't change the group\n elif not isinstance(group, int):\n _group = grp.getgrnam(group)[2]\n if permissions is None:\n if os.path.isdir(path):\n permissions = std_file_perms\n else:\n permissions = std_dir_perms\n os.chown( path, -1, _group )\n os.chmod( path, permissions )", "def dodir ( self, dirpath, mkdir_p=True, chown=True, chmod=True ):\n\n if self._dodir ( dirpath, mkdir_p=mkdir_p ):\n if chmod:\n self.chmod_dir ( dirpath )\n if chown:\n self.chown_dir ( dirpath )\n\n return True\n else:\n return False", "def chown(self, user, group, rec=0):\n uid = getuserid(user)\n gid = getgroupid(group)\n if rec:\n for x in self.visit(rec=lambda x: x.check(link=0)):\n if x.check(link=0):\n error.checked_call(os.chown, str(x), uid, gid)\n error.checked_call(os.chown, str(self), uid, gid)", "def _setup_permissions(self, chown, chmod):\n if chown is not None:\n if isinstance(chown, str):\n user, group = chown, None\n\n else:\n try:\n # Try to extract tuple.\n user, group = chown\n\n except ValueError:\n # If length of iterable is not 2, then allow 1.\n assert len(chown) == 1, 'chown must be user or tuple'\n user, group = chown[0], None\n\n except TypeError:\n # If not iterable, use given value as user.\n user, group = chown, None\n\n # Lookup user id.\n if isinstance(user, str):\n user_info = pwd.getpwnam(user)\n user = user_info.pw_uid\n\n # Lookup group id, or use -1 (do not change group)\n if isinstance(group, str):\n group = grp.getgrnam(group).pw_gid\n\n elif group is None:\n group = -1\n\n # Return tuple usable by os.chown().\n chown = (user, group)\n\n # Ensure chmod is numeric if given.\n if chmod is not None:\n assert isinstance(chmod, numbers.Number), 'chmod must be a number'\n\n return chown, chmod", "def chown_file(filename, file_owner, sudo=True):\n LOG.info(\"Changing the user that owns {}\".format(filename))\n cmd = \"chown {} {}\".format(file_owner, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def changeOwn():\n os.system('sudo chown -R test:users /etc/resolv.conf')\n os.system('sudo chown -R test:named /etc/named.conf')", "def chmod_file ( self, fspath ):\n return", "def chown_mode(self) -> Optional[pulumi.Input[Union[str, 'ChownMode']]]:\n return pulumi.get(self, \"chown_mode\")", "def testChAttrs(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertTrue(results[0].startswith(b'-rw-r--r--'))\n self.assertEqual(results[1], b'')\n self.assertTrue(results[2].startswith(b'----------'), results[2])\n self.assertEqual(results[3], b'')\n\n d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',\n 'ls -l testfile1', 'chmod 644 testfile1')\n return d.addCallback(_check)\n # XXX test chgrp/own", "def chdir(self, path):\n # temporarily join the specified directory to see if we have\n # permissions to do so\n basedir = os.getcwd()\n try:\n os.chdir(path)\n except os.error:\n raise\n else:\n os.chdir(basedir)\n self.cwd = self.fs2ftp(path)", "def makedirs(path, mode=None, uid=None, gid=None):\n try:\n mode = mode or 0o755\n os.makedirs(path, mode)\n except OSError as exc:\n if exc.errno == 17:\n pass\n else:\n raise\n uid = uid if uid is not None else -1\n gid = gid if gid is not None else -1\n os.chown(path, uid, gid)", "def adjust_permission_base_dir(base_dir, destination):\n\n if destination==\"tegner-login-1\":\n #Change group and set permissions for PDC Stockholm\n 
user_group = DATA_USER_PDC + \":\" + DATA_GROUP_PDC\n \n subprocess.Popen( [\"chown\", \"-R\", user_group, base_dir],\n stdout=subprocess.PIPE )\n \n\n subprocess.Popen( [\"setfacl\", \"-R\", \"-M\", \"/cfs/klemming/projects/xenon/misc/basic\", base_dir],\n stdout=subprocess.PIPE )", "def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def _set_rw_permissions_for_all(self, nms, path):\n nms.appliance.execute('chmod ugo+rw %s' % path)" ]
[ "0.8948717", "0.84335893", "0.84035283", "0.770454", "0.7691118", "0.74401116", "0.7235852", "0.6874833", "0.6546262", "0.6537897", "0.6497793", "0.6452218", "0.63476413", "0.6302093", "0.6228736", "0.6176845", "0.61583006", "0.60229385", "0.5985563", "0.59134465", "0.5885709", "0.5855531", "0.5847197", "0.582242", "0.5771268", "0.5738549", "0.5618006", "0.5602222", "0.5593151", "0.5483183" ]
0.8772791
1
Similar to chown(fspath), but checks the given mode in order to decide whether fspath is a dir.
def chown_stat ( self, fspath, mode ):
    if stat.S_ISDIR ( mode ):
        return self.chown_dir ( fspath )
    else:
        return self.chown_file ( fspath )
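As a sketch, the same decision can be made from a mode that was already obtained via os.stat or os.lstat, which avoids a second filesystem check; the snippet below only illustrates the branch.

import os
import stat

# Illustrative use of an already-retrieved stat mode to pick the handler,
# mirroring the stat.S_ISDIR check above.
mode = os.lstat(".").st_mode
if stat.S_ISDIR(mode):
    print("would use the directory handler")
else:
    print("would use the file handler")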
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chmod_chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chown_dir ( self, fspath ):\n return", "def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )", "def dodir ( self, dirpath, mkdir_p=True, chown=True, chmod=True ):\n\n if self._dodir ( dirpath, mkdir_p=mkdir_p ):\n if chmod:\n self.chmod_dir ( dirpath )\n if chown:\n self.chown_dir ( dirpath )\n\n return True\n else:\n return False", "def chmod_dir ( self, fspath ):\n return", "def chmod_chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def set_isdir(self):\n self.st_mode |= stat.S_IFDIR", "def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success", "def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )", "def is_dir(self, path: PathLike):", "def test_file(path, mode, exception=RuntimeError, isdir=False):\n what = (\"directory\" if isdir else \"file\")\n if not os.access(path, os.F_OK):\n raise exception(\"Cannot access %s '%s'.\" % (what, path))\n if isdir and not os.path.isdir(path):\n raise exception(\n \"Expected '%s' to be a directory, but it's not.\" % path)\n if (mode & os.R_OK) and not os.access(path, os.R_OK):\n raise exception(\"Cannot read %s '%s'.\" % (what, path))\n if (mode & os.W_OK) and not os.access(path, os.W_OK):\n raise exception(\"Cannot write to %s '%s'.\" % (what, path))\n if (mode & os.X_OK) and not os.access(path, os.X_OK):\n if isdir:\n raise exception(\"Cannot traverse directory '%s':\"\n \" lacks 'x' permission.\" % path)\n else:\n raise exception(\"File '%s' lacks execute ('x') permission.\" % path)\n return True", "def isdir(path: str) -> bool:\n result = Stat._result(path, throw=False)\n return not isinstance(result, BaseException) and S_ISDIR(result.st_mode)", "def ensuredir(dpath, mode=0o1777):\n if 
isinstance(dpath, (list, tuple)): # nocover\n dpath = join(*dpath)\n if not exists(dpath):\n try:\n os.makedirs(normpath(dpath), mode=mode)\n except OSError: # nocover\n raise\n return dpath", "def is_fs_dir(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.isdir(pathname)", "def chown_mode(self) -> Optional[pulumi.Input[Union[str, 'ChownMode']]]:\n return pulumi.get(self, \"chown_mode\")", "def chmod ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def chmod(self, path, mode):\n str_mode = (\"%o\" % mode)[-4:]\n if str_mode not in [\"0755\", \"0644\"]:\n raise FuseOSError(errno.EINVAL)\n\n result = super(CurrentView, self).chmod(path, mode)\n\n if os.path.isdir(self.repo._full_path(path)):\n return result\n\n message = \"Chmod to {} on {}\".format(str_mode, path)\n self._stage(add=path, message=message)\n\n log.debug(\"CurrentView: Change %s mode to %s\", path, (\"0%o\" % mode)[-4:])\n return result", "def chown_file ( self, fspath ):\n return", "def is_dir(self, path):", "def ensure_directory(self, name, dest, mode=0777):\n self.m.path.assert_absolute(dest)\n self._run(\n name, ['ensure-directory', '--mode', oct(mode), dest])\n self.m.path.mock_add_paths(dest)", "def validateDirectory(dir, mode=0755, noExceptionRaise=False):\n\n if os.path.isdir(dir):\n if os.access(dir, 7): return 1\n else: return None\n else:\n try:\n os.makedirs(dir, mode)\n os.chmod(dir, mode)\n except:\n if noExceptionRaise: pass\n else: raise\n return 1", "def makedirs(path, mode=None, uid=None, gid=None):\n try:\n mode = mode or 0o755\n os.makedirs(path, mode)\n except OSError as exc:\n if exc.errno == 17:\n pass\n else:\n raise\n uid = uid if uid is not None else -1\n gid = gid if gid is not None else -1\n os.chown(path, uid, gid)", "def is_dir(argstr):\n arg = Path(argstr)\n return arg.exists() and arg.is_dir()", "def mkdir_safe(path, mode=0o777):\n try:\n os.makedirs(path, mode=mode)\n return True\n except OSError as err:\n # If dir already exists, no problem. 
Otherwise raise\n if err.errno == errno.EEXIST and os.path.isdir(path):\n return False\n else:\n raise", "def _isdir(dirname):\n if sys.platform[:3] == 'win' and dirname[:2] == r'\\\\':\n if os.path.exists(dirname):\n return os.path.isdir(dirname)\n try:\n os.listdir(dirname)\n except WindowsError:\n return 0\n else:\n return os.path.ismount(dirname)\n else:\n return os.path.isdir(dirname)", "def is_dir(path):\n try:\n return os.stat(path)[0] & 61440 == 16384\n except OSError as e:\n if e.args[0] == 2:\n return False\n else:\n raise e", "def check_dir_perms(path, dir_perm=stat.S_IWOTH, file_perm=stat.S_IWOTH, users=('root',), groups=('root',), recurse=True):\n directories = ((path, (), ()),) if not recurse else os.walk(path)\n for dir_name, sub_dirs, files in directories:\n attrib = os.stat(dir_name)\n if attrib.st_uid not in [pwd.getpwnam(user).pw_uid for user in users]:\n err_msg = 'Directory: \"{0}\" is owned by {1} which is not in the list of allowed users: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, pwd.getpwuid(attrib.st_uid).pw_name, users))\n\n if attrib.st_gid not in [grp.getgrnam(group).gr_gid for group in groups]:\n err_msg = 'The group for directory: \"{0}\" is {1} which is not in the list of allowed groups: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, grp.getgrgid(attrib.st_gid).gr_name, groups))\n\n if check_permission(attrib.st_mode, dir_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on directory: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, oct(attrib.st_mode), oct(dir_perm)))\n\n for f in files:\n file_attrib = os.stat(os.path.join(dir_name, f))\n if check_permission(file_attrib.st_mode, file_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on file: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(os.path.join(dir_name, f), oct(file_attrib.st_mode), oct(file_perm)))", "def SafeMakedirs(path, mode=0o775, sudo=False):\n if sudo:\n if os.path.isdir(path):\n return False\n cros_build_lib.SudoRunCommand(\n ['mkdir', '-p', '--mode', oct(mode), path], print_cmd=False,\n redirect_stderr=True, redirect_stdout=True)\n return True\n\n try:\n os.makedirs(path, mode)\n return True\n except EnvironmentError as e:\n if e.errno != errno.EEXIST or not os.path.isdir(path):\n raise\n\n return False" ]
[ "0.74052227", "0.72158754", "0.6905035", "0.6810875", "0.67720324", "0.6705877", "0.64470017", "0.6443986", "0.6299984", "0.6216668", "0.6183951", "0.61443883", "0.6120498", "0.61022836", "0.6096778", "0.6015092", "0.60054046", "0.60038394", "0.5932468", "0.5914445", "0.5905771", "0.59037775", "0.5903506", "0.5899561", "0.5843854", "0.57954353", "0.57857186", "0.57845986", "0.57649064", "0.5764237" ]
0.807419
0
Changes the owner of a directory.
def chown_dir ( self, fspath ): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chown(self, path, owner=None, group=None):\n kwargs = {}\n if owner is not None:\n kwargs[\"owner\"] = owner\n if group is not None:\n kwargs[\"group\"] = group\n self._call(\"SETOWNER\", method=\"put\", path=path, **kwargs)", "def chown_file(filename, file_owner, sudo=True):\n LOG.info(\"Changing the user that owns {}\".format(filename))\n cmd = \"chown {} {}\".format(file_owner, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def changeDirectory( self, directory ):\n if directory[0] == '/':\n directory = directory.lstrip( '/' )\n self.cwd = '%s/%s' % ( self.cwd, directory )", "def chdir(self, directory):\n self.eval(\"cd('{0}')\".format(directory))", "def set_file_owner_perm(path, permission, user, group):\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n current_perm = get_permissions(path)\n try:\n logger.debug('Current permission: {0}, changing to {1}'.format(current_perm, oct(permission)))\n os.chmod(path, permission)\n os.chown(path, uid, gid)\n except Exception as e:\n logger.warning('Unable to change permissions on {0}: {1}'.format(path, e))", "def create_directory(self, directory: str, owner: str) -> None:\n self.run(\"/\", \"root\", [\"install\", \"-d\", \"-o\", owner, directory])", "def changeOwn():\n os.system('sudo chown -R test:users /etc/resolv.conf')\n os.system('sudo chown -R test:named /etc/named.conf')", "def set_ownership(self):\n\n os.chmod(os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id), 0600)", "def ChangeDir(self, path: str) -> None:\n ...", "def changeDirectory(self, directory):\n self._cwd = directory", "def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def set_owner(self, owner):\n self.settings[\"owner\"] = owner", "def set_file_owner(host, fqpath, user):\n command = \"chown %s %s\" % (user, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chown failed: %s' % rerr)\n return False", "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def change_ownership(obj, userid):\n assert isinstance(userid, string_types)\n old_owner = obj.creators[0]\n if userid == old_owner:\n return\n #Remove Owner group from old owner\n obj.local_roles.remove(old_owner, ROLE_OWNER)\n #Add new owner\n obj.local_roles.add(userid, ROLE_OWNER)\n #Set new owner in creators attr - this will also trigger reindex catalog event so keep it last!\n obj.set_field_appstruct({'creators': (userid,)})\n return userid", "def set_owner(self, owner):\n self.__owner = owner", "def adjust_permission_base_dir(base_dir, destination):\n\n if destination==\"tegner-login-1\":\n #Change group and set permissions for PDC Stockholm\n user_group = DATA_USER_PDC + \":\" + DATA_GROUP_PDC\n \n subprocess.Popen( [\"chown\", \"-R\", user_group, base_dir],\n stdout=subprocess.PIPE )\n \n\n subprocess.Popen( [\"setfacl\", \"-R\", \"-M\", \"/cfs/klemming/projects/xenon/misc/basic\", base_dir],\n stdout=subprocess.PIPE )", "def chown_file ( self, fspath ):\n return", "def changeOwnership(self, document):\n document.changeOwnership(getSecurityManager().getUser(), False)", "def mkdir_for(dir, owner=\"root:root\"):\n return sudo(\"mkdir -p %s && sudo chown -R %s %s\" % (dir, owner, dir))", "def chown(config):\n\n path = config.device_path()\n\n args = [\"sudo\", \"chown\", \"%d:%d\" % (os.getuid(), os.getgid()), path]\n iotests.log(\" 
\".join(args), filters=[iotests.filter_chown])\n proc = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n msg = proc.communicate()[0]\n\n if proc.returncode != 0:\n raise Exception(msg)", "def transferOwnership(_newOwner: address):\n assert msg.sender == self.owner, \"Access is denied.\"\n assert _newOwner != ZERO_ADDRESS, \"Invalid owner supplied.\"\n\n log.OwnershipTransferred(msg.sender, _newOwner)\n self.owner = _newOwner", "def set_object_owner(self, bucket_name, object_name, uid, gid):\n\n return h3lib.set_object_owner(self._handle, bucket_name, object_name, uid, gid, self._user_id)", "def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )", "def change_dir(filename):", "def change_dir(self, src: str = None, dest: str = None):\n\n if not is_empty(src):\n self._srcDir = src\n\n if not is_empty(dest):\n self._destDir = dest", "def test_owner_after_setuid(file, modules, tmp_path, state_file_account):\n\n # Desired configuration.\n desired_file = tmp_path / \"file_with_setuid\"\n mode = \"4750\"\n\n # Run the state.\n ret = file.managed(\n name=str(desired_file),\n user=state_file_account.username,\n group=state_file_account.group.name,\n mode=mode,\n )\n assert ret.result is True\n # Check result.\n user_check = modules.file.get_user(str(desired_file))\n assert user_check == state_file_account.username\n group_check = modules.file.get_group(str(desired_file))\n assert group_check == state_file_account.group.name\n mode_check = modules.file.get_mode(str(desired_file))\n assert salt.utils.files.normalize_mode(mode_check) == mode", "def set_directory(self, directory):\n\t\tself.edit.set_text(directory)", "def changeDirectory(self, directory):\n self.pushMode(CLI_MODES.shell)\n output = self.sendCmd(\"cd %s\" % directory)\n self.popMode()\n if \"No such file or directory\" in output:\n logger.error (\"No such file or directory exist : %s\" %directory)\n return output", "def chdir(where):\n from twill import commands\n \n cwd = os.getcwd()\n _dirstack.append(cwd)\n print(cwd)\n\n os.chdir(where)\n print('changed directory to \"%s\"' % (where,), file=commands.OUT)\n\n commands.setglobal('__dir__', where)" ]
[ "0.7278033", "0.62889427", "0.61836296", "0.6171118", "0.60855687", "0.6067038", "0.60461086", "0.6039696", "0.60394585", "0.60348403", "0.60155183", "0.59926784", "0.59820396", "0.5909075", "0.5907682", "0.5847199", "0.5833868", "0.58306533", "0.57877916", "0.5778257", "0.5686343", "0.56662035", "0.5637307", "0.5585992", "0.5565728", "0.5551696", "0.55464137", "0.550384", "0.54747057", "0.5468665" ]
0.6625209
1
Calls chmod_dir(fspath) or chmod_file(fspath), depending on whether fspath is a directory or not.
def chmod ( self, fspath ): if os.path.isdir ( fspath ): return self.chmod_dir ( fspath ) else: return self.chmod_file ( fspath )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chmod_dir ( self, fspath ):\n return", "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def chmod_chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod_chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod_file ( self, fspath ):\n return", "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )", "def __shutil_fix(func, path, exc):\n # If the function is rmdir, remove or unlink and is an access error\n if func in (os.rmdir, os.remove, os.unlink) and exc[1].errno == errno.EACCES:\n # Set 777 as the permissions and call the function again\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n # Otherwise, just raise the exception again\n else:\n raise", "def chown_dir ( self, fspath ):\n return", "def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success", "def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )", "def set_file_permissions(host, fqpath, perms):\n command = \"chmod %s %s\" % (perms, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chmod failed: %s' % rerr)\n return False", "def chmodRecursive(self, p, mod):\n for root, dirs, fnames in p:\n for f in fnames:\n try:\n os.chmod(os.path.join(root, f), mod)\n except Exception, inst:\n logging.error('Unable to set permissions %s for file %s, error: %s' % (mod, f, inst))", "def rchmod(d, mode=0755):\n os.chmod(d, mode)\n for root, dirs, files in os.walk(d):\n for item in dirs:\n os.chmod(path.join(root, item), mode)\n for item in files:\n os.chmod(path.join(root, item), mode)", "def check_dir_perms(path, dir_perm=stat.S_IWOTH, file_perm=stat.S_IWOTH, 
users=('root',), groups=('root',), recurse=True):\n directories = ((path, (), ()),) if not recurse else os.walk(path)\n for dir_name, sub_dirs, files in directories:\n attrib = os.stat(dir_name)\n if attrib.st_uid not in [pwd.getpwnam(user).pw_uid for user in users]:\n err_msg = 'Directory: \"{0}\" is owned by {1} which is not in the list of allowed users: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, pwd.getpwuid(attrib.st_uid).pw_name, users))\n\n if attrib.st_gid not in [grp.getgrnam(group).gr_gid for group in groups]:\n err_msg = 'The group for directory: \"{0}\" is {1} which is not in the list of allowed groups: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, grp.getgrgid(attrib.st_gid).gr_name, groups))\n\n if check_permission(attrib.st_mode, dir_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on directory: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, oct(attrib.st_mode), oct(dir_perm)))\n\n for f in files:\n file_attrib = os.stat(os.path.join(dir_name, f))\n if check_permission(file_attrib.st_mode, file_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on file: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(os.path.join(dir_name, f), oct(file_attrib.st_mode), oct(file_perm)))", "def chmod(path, mode):\n try:\n st = os.stat(path)\n except OSError:\n return None, None\n\n origMode = fMode = stat.S_IMODE(st.st_mode)\n if isinstance(mode, str):\n parts = [s.strip() for s in mode.split(\",\")]\n for s in parts:\n m = _rModePart.match(s)\n if not m:\n return origMode, -2\n\n role, op, flags = m.groups()\n\n bits = 0\n for f in flags:\n bits |= _bitMap[role+f]\n\n if op == \"+\":\n fMode |= bits\n elif op == \"-\":\n fMode &= ~bits\n else:\n fMode = (fMode & _bitMap[role]) | bits\n else:\n fMode = mode\n\n try:\n os.chmod(path, fMode)\n except OSError:\n return origMode, -1\n\n return origMode, 0", "def file_perms( fname, permissions, remote=None ):\n if remote == None:\n if perms.i_own( fname ):\n if type(permissions) == type(''):\n perms.apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n perms.apply_chmod( fname, *permissions )\n else:\n if remote.x_i_own( fname ):\n if type(permissions) == type(''):\n remote.x_apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n remote.x_apply_chmod( fname, *permissions )", "def change_permissions(path, permission='777'):\r\n if os.path.exists(path):\r\n subprocess.call('chmod -R %s %s'%(permission,path),shell=True)\r\n else:\r\n raise NameError('invalid path %s'% path)", "def dodir ( self, dirpath, mkdir_p=True, chown=True, chmod=True ):\n\n if self._dodir ( dirpath, mkdir_p=mkdir_p ):\n if chmod:\n self.chmod_dir ( dirpath )\n if chown:\n self.chown_dir ( dirpath )\n\n return True\n else:\n return False", "def chown_file ( self, fspath ):\n return", "def permissions_check(\n basedir='.',\n verbose_level=0,\n):\n # File permissions on Cygwin/Windows filesystems don't work the\n # same way as Linux. 
Don't try to change them.\n # TODO(dittrich): Is there a Better way to handle perms on Windows?\n fs_type = get_fs_type(basedir)\n if fs_type in ['NTFS', 'FAT', 'FAT32']:\n msg = (\n f\"[-] {basedir} has file system type '{fs_type}': \"\n \"skipping permissions check\"\n )\n logger.info(msg)\n return\n any_other_perms = stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH\n for root, dirs, files in os.walk(basedir, topdown=True):\n for name in files:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n f\"[!] file '{path}' is mode {oct(perms)}\",\n file=sys.stderr\n )\n except OSError:\n pass\n for name in dirs:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n (\n f\"[!] directory '{path}' is mode \"\n f\"{oct(perms)}\"\n ),\n file=sys.stderr\n )\n except OSError:\n pass", "def fix_permissions(self, directory_permissions=SSH_DIR_PERMS, file_permissions=SSH_FILE_PERMS):\n ssh_dir = os.path.expanduser('~/.ssh')\n dperm = int(directory_permissions, 8)\n fperm = int(file_permissions, 8)\n\n if not os.path.isdir(ssh_dir):\n self.log.debug('No such directory: {}'.format(ssh_dir))\n return\n\n for (root, _dirs, files) in os.walk(ssh_dir):\n if stat.S_IMODE(os.stat(root).st_mode) != dperm:\n self.log.debug('Fixing permissions for directory {}'.format(root))\n os.chmod(root, dperm)\n\n for f in [os.path.join(root, f) for f in files]:\n if stat.S_IMODE(os.stat(f).st_mode) != fperm:\n self.log.debug('Fixing permissions for file {}'.format(f))\n os.chmod(f, fperm)", "def _set_chmod(self, chmod=None):\n if not chmod:\n chmod = self.chmod\n if os.chmod(self.path, self.chmod):\n self.chmod = chmod\n return True\n return False", "def onerror(func, path, exc_info):\n import stat\n\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "def onerror(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "def onerror(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "def on_error(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "def fix_py37_win_tempdir_permissions(dirpath: Union[str, Path]) -> None:\n if not WINDOWS or sys.version_info >= (3, 8):\n return\n for root, dirs, files in os.walk(dirpath):\n for name in dirs + files:\n path = os.path.join(root, name)\n try:\n os.chflags(path, 0) # type: ignore[attr-defined]\n except AttributeError:\n pass\n os.chmod(path, 0o700)", "def sort_permissions(fl):\n\n if oct(os.stat(fl).st_mode)[4:] != '666':\n os.chmod(fl, 0o666)" ]
[ "0.8261613", "0.80634874", "0.7444107", "0.74340725", "0.72950804", "0.68876374", "0.6578346", "0.65769285", "0.65414566", "0.64435726", "0.6389558", "0.63817346", "0.63671714", "0.63211554", "0.6107828", "0.60001636", "0.59810525", "0.5977845", "0.58882743", "0.58740795", "0.5813945", "0.5756249", "0.57510877", "0.57306725", "0.5696493", "0.56943786", "0.56943786", "0.56696635", "0.56579614", "0.5631619" ]
0.83173656
0
Similar to chmod(fspath), but checks the given mode in order to decide whether fspath is a dir.
def chmod_stat ( self, fspath, mode ): if stat.S_ISDIR ( mode ): return self.chmod_dir ( fspath ) else: return self.chmod_file ( fspath )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chmod_dir ( self, fspath ):\n return", "def chmod_chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def is_dir(self, path: PathLike):", "def test_file(path, mode, exception=RuntimeError, isdir=False):\n what = (\"directory\" if isdir else \"file\")\n if not os.access(path, os.F_OK):\n raise exception(\"Cannot access %s '%s'.\" % (what, path))\n if isdir and not os.path.isdir(path):\n raise exception(\n \"Expected '%s' to be a directory, but it's not.\" % path)\n if (mode & os.R_OK) and not os.access(path, os.R_OK):\n raise exception(\"Cannot read %s '%s'.\" % (what, path))\n if (mode & os.W_OK) and not os.access(path, os.W_OK):\n raise exception(\"Cannot write to %s '%s'.\" % (what, path))\n if (mode & os.X_OK) and not os.access(path, os.X_OK):\n if isdir:\n raise exception(\"Cannot traverse directory '%s':\"\n \" lacks 'x' permission.\" % path)\n else:\n raise exception(\"File '%s' lacks execute ('x') permission.\" % path)\n return True", "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def isdir(path: str) -> bool:\n result = Stat._result(path, throw=False)\n return not isinstance(result, BaseException) and S_ISDIR(result.st_mode)", "def is_dir(argstr):\n arg = Path(argstr)\n return arg.exists() and arg.is_dir()", "def dodir ( self, dirpath, mkdir_p=True, chown=True, chmod=True ):\n\n if self._dodir ( dirpath, mkdir_p=mkdir_p ):\n if chmod:\n self.chmod_dir ( dirpath )\n if chown:\n self.chown_dir ( dirpath )\n\n return True\n else:\n return False", "def is_dir(self, path):", "def is_dir(path: str) -> bool:\n return _fs().is_dir(path)", "def is_fs_dir(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.isdir(pathname)", "def chmod(path, mode):\n try:\n st = os.stat(path)\n except OSError:\n return None, None\n\n origMode = fMode = stat.S_IMODE(st.st_mode)\n if isinstance(mode, str):\n parts = [s.strip() for s in mode.split(\",\")]\n for s in parts:\n m = _rModePart.match(s)\n if not m:\n return origMode, -2\n\n role, op, flags = m.groups()\n\n bits = 0\n for f in flags:\n bits |= _bitMap[role+f]\n\n if op == \"+\":\n fMode |= bits\n elif op == \"-\":\n fMode &= ~bits\n else:\n fMode = (fMode & _bitMap[role]) | bits\n else:\n fMode = mode\n\n try:\n os.chmod(path, fMode)\n except OSError:\n return origMode, -1\n\n return origMode, 0", "def ensuredir(dpath, mode=0o1777):\n if isinstance(dpath, (list, tuple)): # nocover\n dpath = join(*dpath)\n if not exists(dpath):\n try:\n os.makedirs(normpath(dpath), mode=mode)\n except OSError: # nocover\n raise\n return dpath", "def isdir(path):\n if not os.path.isdir(path):\n if os.path.isfile(path):\n msg = \"{0} is a file.\".format(path)\n else:\n msg = \"{0} does not exist.\".format(path)\n raise argparse.ArgumentTypeError(msg)\n return path", "def isdir(path):\n if not os.path.isdir(path):\n if os.path.isfile(path):\n msg = \"{0} is a file.\".format(path)\n else:\n msg = \"{0} does not exist.\".format(path)\n raise argparse.ArgumentTypeError(msg)\n return path", "def check_dir_perms(path, dir_perm=stat.S_IWOTH, file_perm=stat.S_IWOTH, users=('root',), groups=('root',), recurse=True):\n directories = ((path, (), ()),) if not recurse 
else os.walk(path)\n for dir_name, sub_dirs, files in directories:\n attrib = os.stat(dir_name)\n if attrib.st_uid not in [pwd.getpwnam(user).pw_uid for user in users]:\n err_msg = 'Directory: \"{0}\" is owned by {1} which is not in the list of allowed users: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, pwd.getpwuid(attrib.st_uid).pw_name, users))\n\n if attrib.st_gid not in [grp.getgrnam(group).gr_gid for group in groups]:\n err_msg = 'The group for directory: \"{0}\" is {1} which is not in the list of allowed groups: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, grp.getgrgid(attrib.st_gid).gr_name, groups))\n\n if check_permission(attrib.st_mode, dir_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on directory: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, oct(attrib.st_mode), oct(dir_perm)))\n\n for f in files:\n file_attrib = os.stat(os.path.join(dir_name, f))\n if check_permission(file_attrib.st_mode, file_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on file: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(os.path.join(dir_name, f), oct(file_attrib.st_mode), oct(file_perm)))", "def is_dir(path):\n try:\n return os.stat(path)[0] & 61440 == 16384\n except OSError as e:\n if e.args[0] == 2:\n return False\n else:\n raise e", "def isdir(path):\n system = get_instance(path)\n\n # User may use directory path without trailing '/'\n # like on standard file systems\n return system.isdir(system.ensure_dir_path(path))", "def is_dir(path):\n if not os.path.isdir(path):\n msg = \"{0} is not a directory\".format(path)\n raise argparse.ArgumentTypeError(msg)\n logging.info(msg)\n else:\n return path", "def _is_dir(path: str)->bool:\n if _is_s3(path):\n return path.endswith(\"/\")\n else:\n return os.path.isdir(os.path.abspath(path))", "def validateDirectory(dir, mode=0755, noExceptionRaise=False):\n\n if os.path.isdir(dir):\n if os.access(dir, 7): return 1\n else: return None\n else:\n try:\n os.makedirs(dir, mode)\n os.chmod(dir, mode)\n except:\n if noExceptionRaise: pass\n else: raise\n return 1", "def is_dir(path):\n if not os.path.isdir(path):\n msg = '{0} is not a directory'.format(path)\n raise argparse.ArgumentTypeError(msg)\n else:\n return path", "def isdir (self, path):\r\n pass", "def is_dir(filename):\n return os.path.isdir(filename)", "def set_isdir(self):\n self.st_mode |= stat.S_IFDIR", "def __is_dir(path):\n if path[-2:] == \"..\":\n return False\n try:\n os.listdir(path)\n return True\n except OSError:\n return False", "def check_path(path, isfile=False, isdir=False):\n \n return os.path.isfile(path) if isfile else os.path.isdir(path)", "def chmod(self, path, mode):\n str_mode = (\"%o\" % mode)[-4:]\n if str_mode not in [\"0755\", \"0644\"]:\n raise FuseOSError(errno.EINVAL)\n\n result = super(CurrentView, self).chmod(path, mode)\n\n if os.path.isdir(self.repo._full_path(path)):\n return result\n\n message = \"Chmod to {} on {}\".format(str_mode, path)\n self._stage(add=path, message=message)\n\n log.debug(\"CurrentView: Change %s mode to %s\", path, (\"0%o\" % mode)[-4:])\n return result", "def __isdir(p):\n p_part = osp.split(osp.abspath(p))[1]\n if p_part[0] == '.':\n return False\n return osp.isdir(p)" ]
[ "0.69052184", "0.6780001", "0.6638909", "0.6612683", "0.65377444", "0.6522304", "0.6480368", "0.6379503", "0.63446534", "0.627109", "0.6259275", "0.6235954", "0.6234129", "0.618081", "0.6163878", "0.6163878", "0.6055755", "0.5984406", "0.5971258", "0.5969658", "0.595803", "0.59454805", "0.59241396", "0.58954716", "0.5893838", "0.5883766", "0.57574886", "0.57557654", "0.57553685", "0.57528615" ]
0.7568901
0
Similar to chmod_chown(), but checks mode in order to decide whether fspath is a dir.
def chmod_chown_stat ( self, fspath, mode ): if stat.S_ISDIR ( mode ): return ( self.chmod_dir ( fspath ), self.chown_dir ( fspath ) ) else: return ( self.chmod_file ( fspath ), self.chown_file ( fspath ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chown_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chmod_chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return (\n self.chmod_dir ( fspath ), self.chown_dir ( fspath )\n )\n else:\n return (\n self.chmod_file ( fspath ), self.chown_file ( fspath )\n )", "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def chmod_dir ( self, fspath ):\n return", "def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )", "def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success", "def dodir ( self, dirpath, mkdir_p=True, chown=True, chmod=True ):\n\n if self._dodir ( dirpath, mkdir_p=mkdir_p ):\n if chmod:\n self.chmod_dir ( dirpath )\n if chown:\n self.chown_dir ( dirpath )\n\n return True\n else:\n return False", "def chown_dir ( self, fspath ):\n return", "def chmod ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def is_dir(self, path: PathLike):", "def check_dir_perms(path, dir_perm=stat.S_IWOTH, file_perm=stat.S_IWOTH, users=('root',), groups=('root',), recurse=True):\n directories = ((path, (), ()),) if not recurse else os.walk(path)\n for dir_name, sub_dirs, files in directories:\n attrib = os.stat(dir_name)\n if attrib.st_uid not in [pwd.getpwnam(user).pw_uid for user in users]:\n err_msg = 'Directory: \"{0}\" is owned by {1} which is not in the list of allowed users: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, pwd.getpwuid(attrib.st_uid).pw_name, users))\n\n if attrib.st_gid not in [grp.getgrnam(group).gr_gid for group in groups]:\n err_msg = 'The group for directory: \"{0}\" is {1} which is not in the list of allowed groups: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, grp.getgrgid(attrib.st_gid).gr_name, groups))\n\n if check_permission(attrib.st_mode, dir_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on directory: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, oct(attrib.st_mode), oct(dir_perm)))\n\n for f in files:\n file_attrib = 
os.stat(os.path.join(dir_name, f))\n if check_permission(file_attrib.st_mode, file_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on file: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(os.path.join(dir_name, f), oct(file_attrib.st_mode), oct(file_perm)))", "def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )", "def is_dir(self, path):", "def ensuredir(dpath, mode=0o1777):\n if isinstance(dpath, (list, tuple)): # nocover\n dpath = join(*dpath)\n if not exists(dpath):\n try:\n os.makedirs(normpath(dpath), mode=mode)\n except OSError: # nocover\n raise\n return dpath", "def isdir(path: str) -> bool:\n result = Stat._result(path, throw=False)\n return not isinstance(result, BaseException) and S_ISDIR(result.st_mode)", "def makedirs(path, mode=None, uid=None, gid=None):\n try:\n mode = mode or 0o755\n os.makedirs(path, mode)\n except OSError as exc:\n if exc.errno == 17:\n pass\n else:\n raise\n uid = uid if uid is not None else -1\n gid = gid if gid is not None else -1\n os.chown(path, uid, gid)", "def check_sane(self):\n st = os.stat(self.path)\n if st.st_uid != os.getuid():\n raise Exception('Auth dir %s not owned by user %d.' % (\n self.path, os.getuid()))\n # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)\n # In other words, a directory with mode bits rwx------\n if st.st_mode != 16832:\n raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)", "def is_fs_dir(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.isdir(pathname)", "def is_dir(path: str) -> bool:\n return _fs().is_dir(path)", "def isdir (self, path):\r\n pass", "def SafeMakedirs(path, mode=0o775, sudo=False):\n if sudo:\n if os.path.isdir(path):\n return False\n cros_build_lib.SudoRunCommand(\n ['mkdir', '-p', '--mode', oct(mode), path], print_cmd=False,\n redirect_stderr=True, redirect_stdout=True)\n return True\n\n try:\n os.makedirs(path, mode)\n return True\n except EnvironmentError as e:\n if e.errno != errno.EEXIST or not os.path.isdir(path):\n raise\n\n return False", "def isdir(path):\n if not os.path.isdir(path):\n if os.path.isfile(path):\n msg = \"{0} is a file.\".format(path)\n else:\n msg = \"{0} does not exist.\".format(path)\n raise argparse.ArgumentTypeError(msg)\n return path", "def isdir(path):\n if not os.path.isdir(path):\n if os.path.isfile(path):\n msg = \"{0} is a file.\".format(path)\n else:\n msg = \"{0} does not exist.\".format(path)\n raise argparse.ArgumentTypeError(msg)\n return path", "def is_dir(argstr):\n arg = Path(argstr)\n return arg.exists() and arg.is_dir()", "def validateDirectory(dir, mode=0755, noExceptionRaise=False):\n\n if os.path.isdir(dir):\n if os.access(dir, 7): return 1\n else: return None\n else:\n try:\n os.makedirs(dir, mode)\n os.chmod(dir, mode)\n except:\n if noExceptionRaise: pass\n else: raise\n return 1", "def hisdir(file_path: str) -> bool:\n return os.path.isdir(file_path)", "def isdir(path):\n system = get_instance(path)\n\n # User may use directory path without trailing '/'\n # like on standard file systems\n return system.isdir(system.ensure_dir_path(path))", "def set_isdir(self):\n self.st_mode |= stat.S_IFDIR", "def permissions_check(\n basedir='.',\n verbose_level=0,\n):\n # File permissions on Cygwin/Windows filesystems don't work the\n # same way as Linux. 
Don't try to change them.\n # TODO(dittrich): Is there a Better way to handle perms on Windows?\n fs_type = get_fs_type(basedir)\n if fs_type in ['NTFS', 'FAT', 'FAT32']:\n msg = (\n f\"[-] {basedir} has file system type '{fs_type}': \"\n \"skipping permissions check\"\n )\n logger.info(msg)\n return\n any_other_perms = stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH\n for root, dirs, files in os.walk(basedir, topdown=True):\n for name in files:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n f\"[!] file '{path}' is mode {oct(perms)}\",\n file=sys.stderr\n )\n except OSError:\n pass\n for name in dirs:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n (\n f\"[!] directory '{path}' is mode \"\n f\"{oct(perms)}\"\n ),\n file=sys.stderr\n )\n except OSError:\n pass" ]
[ "0.76696944", "0.7318846", "0.7116121", "0.70041734", "0.70023066", "0.67857933", "0.6681127", "0.665088", "0.65759367", "0.6541759", "0.6481282", "0.6316214", "0.6304739", "0.62244767", "0.60552204", "0.60432374", "0.597863", "0.5969846", "0.5941203", "0.59405357", "0.5939309", "0.5938363", "0.59023464", "0.59023464", "0.5899241", "0.58707535", "0.58656347", "0.5860757", "0.5847294", "0.583364" ]
0.76531845
1
Copies a file from source to dest.
def _copy_file ( self, source, dest ): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def copyFile(srcPath, destPath):\n shutil.copy(srcPath, destPath)", "def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)", "def copy_file(source_file_name, dest_file_name):\n print(\"Copying \" + source_file_name + \" to \" + dest_file_name)\n shutil.copy2(source_file_name, dest_file_name)\n print(\"Copying done.\")", "def copy_file(file: str, dest: str) -> None:\n\tuux.show_debug(\"Copying \" + str(file) + \" => \" + str(dest))\n\tshutil.copy2(file, dest)", "def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())", "def copyFile(src, dest):\n try:\n shutil.copy(src,dest)\n except shutil.Error as e:\n print(\"Error: \" + str(e))\n except IOError as e:\n print(\"Error: \" + e.strerror)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def _copy_file(src, dest):\n\n if src is None or dest is None:\n raise ValueError(\"src and dest must not be None\", src, dest)\n\n if not os.path.isfile(src):\n raise ValueError(\"src file does not appear to exist\", src)\n\n # if error on copy, subprocess will raise CalledProcessError\n try:\n subprocess.run(\n [\"/usr/bin/ditto\", src, dest], check=True, stderr=subprocess.PIPE\n )\n except subprocess.CalledProcessError as e:\n logging.critical(\n f\"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}\"\n )\n raise e", "def copy_file(source_file, target_file):\n\t# print('\\n\\nCopying [{}] to [{}].\\n\\n'.format(source_file, target_file))\n\trun_rsync([source_file, target_file])", "def copyfile(source, dest, buffer_size=1024*1024):\n if not hasattr(source, 'read'):\n source = open(source, 'rb')\n if not hasattr(dest, 'write'):\n dest = open(dest, 'wb')\n while 1:\n copy_buffer = source.read(buffer_size)\n if copy_buffer:\n dest.write(copy_buffer)\n else:\n break\n source.close()\n dest.close()\n return True", "def copy_single_file(src, dest, log):\n if op.exists(src) and op.isfile(src):\n shellutil.copy(src, dest, log=log)\n return True", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def copy_file(fs, inpath, outpath):\n fs.copy(inpath, outpath)", "def copy(self, src_path: str, tgt_path: str) -> None:", "def copyFile(filename, sourceDir, targetDir, renameTo=None, silent=True):\n\tif renameTo == None: renameTo = filename\n\tfullname_source = os.path.join(sourceDir, filename)\n\tfullname_target = os.path.join(targetDir, renameTo)\n\tshutil.copy(fullname_source, fullname_target)\n\tif silent==False:\n\t\tprint(\"File \"+fullname_source+\" copied to \"+source_dir)", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n 
self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def act_copy_file(self, file_source, file_target):\n try:\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy2(file_source, file_target)\n self.logger.debug('%s: Action: <copy> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file copy: %s -> %s', file_source, file_target)", "def file_copy_from_local(self, path, dest):\n if not j.sal.fs.exists(path):\n raise j.exceptions.Base(\"{} doesn't exist on local file system\".format(path))\n\n with open(path, \"rb\") as f:\n self.file_write(dest, f, append=False, create=True)\n return", "def copyfile(self, destination, **kwargs):\n assert _os.path.isfile(self.__str__()) == True\n _shutil.copyfile(self.__str__(), destination, **kwargs)", "def cp(src, dest):\n _shutil.copy2(native(src), native(dest))", "def file_copy(self, path, dest, override=False):\n # first check if path exists on the file system\n if self.exists(dest) and not override:\n return\n if j.sal.fs.exists(path):\n return self.file_copy_from_local(path, dest)\n else:\n return self.file_copy_form_bcdbfs(path, dest)", "def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))", "def copy_file(source, destination):\n\n try:\n shutil.copy(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True", "def copy_file(src_file,dst_folder):\n from shutil import copyfile\n from os.path import split\n copyfile(src_file, dst_folder+split(src_file)[1])\n return", "def copy_file(self, dst, tmpdir=None):\n if tmpdir is None:\n tmpfn = sameDir\n else:\n tmpfn = lambda _: tmpdir._path\n assert isinstance(dst, Path)\n with open(self._path, 'rb') as src_fd:\n with safeopen(dst._path, 'wb', useDir=tmpfn) as dst_fd:\n copyfileobj(src_fd, dst_fd)", "def file_copy(\n self,\n src: str,\n dest: Optional[str] = None,\n file_system: Optional[str] = None,\n peer: Optional[bool] = False,\n ) -> None:\n if dest is None:\n dest = os.path.basename(src)\n\n if file_system is None:\n file_system = self._get_file_system()\n\n # netmiko's enable_scp\n self.enable_scp()\n self._file_copy(src, dest, file_system)\n if peer:\n self.peer_device._file_copy(src, dest, file_system) # pylint: disable=protected-access\n\n # logging removed because it messes up unit test mock_basename.assert_not_called()\n # for tests test_file_copy_no_peer_pass_args, test_file_copy_include_peer\n # log.info(\"Host %s: File %s transferred successfully.\")", "def __copyfile(source, destination):\n logger.info(\"copyfile: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile: %s -> %s failed! Error: %s\", source, destination, e\n )\n return False" ]
[ "0.8451881", "0.8364238", "0.83361626", "0.82699907", "0.8111486", "0.79564", "0.78758574", "0.7702679", "0.7702679", "0.7702679", "0.7593211", "0.75324965", "0.74742824", "0.7454095", "0.744795", "0.73798084", "0.73748046", "0.73684984", "0.7365559", "0.73600304", "0.735375", "0.73521227", "0.73436517", "0.7305033", "0.7299017", "0.7286231", "0.7276195", "0.72761077", "0.72724354", "0.7173522" ]
0.84352726
1
Copies a file from source to dest and calls chmod(), chown() afterwards.
def copy_file ( self, source, dest, chown=True, chmod=True ): if self._copy_file ( source, dest ): if chmod: self.chmod_file ( dest ) if chown: self.chown_file ( dest ) return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_file ( self, source, dest ):\n return", "def copyFileWithPermissions(source_path, dest_path, dist_dir):\n\n if os.path.islink(source_path) and not isWin32Windows():\n link_source_abs = os.path.abspath(source_path)\n link_target_abs = os.path.abspath(\n os.path.join(os.path.dirname(source_path), os.readlink(source_path))\n )\n\n link_target_rel = relpath(link_target_abs, os.path.dirname(link_source_abs))\n\n if isFilenameBelowPath(\n path=dist_dir,\n filename=os.path.join(os.path.dirname(dest_path), link_target_rel),\n ):\n os.symlink(link_target_rel, dest_path)\n return\n\n try:\n shutil.copy2(\n source_path,\n dest_path,\n )\n except PermissionError as e:\n if e.errno != errno.EACCES:\n raise\n\n source_mode = os.stat(source_path).st_mode\n shutil.copy(source_path, dest_path)\n os.chmod(dest_path, source_mode)", "def copy_file(src, dest):\n logger.debug(\"Copying %s to %s\", src, dest)\n try:\n shutil.copy(src, dest)\n except (OSError, IOError) as exc:\n logger.debug('Installation error, trying sudo.')\n try:\n check_call(['sudo', 'cp', src, dest])\n except HelperError:\n # That failed too - re-raise the original exception\n raise exc\n return True", "def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))", "def copyFile(source_path, dest_path):\n\n while 1:\n try:\n shutil.copyfile(source_path, dest_path)\n except PermissionError as e:\n if e.errno != errno.EACCES:\n raise\n\n general.warning(\"Problem copying file %s:\" % e)\n\n if (\n queryUser(\n \"Retry?\",\n choices=(\"yes\", \"no\"),\n default=\"yes\",\n default_non_interactive=\"no\",\n )\n == \"yes\"\n ):\n continue\n\n raise\n\n break", "def copyFile(src, dest):\n try:\n shutil.copy(src,dest)\n except shutil.Error as e:\n print(\"Error: \" + str(e))\n except IOError as e:\n print(\"Error: \" + e.strerror)", "def copy_file(source_file, target_file):\n\t# print('\\n\\nCopying [{}] to [{}].\\n\\n'.format(source_file, target_file))\n\trun_rsync([source_file, target_file])", "def copy_file(source_file, dest_file, sudo=True, preserve=True, cleanup=None):\n LOG.info(\"Copy file {} preserve attributes\".format('and' if preserve\n else 'without'))\n preserve_str = '--preserve=all ' if preserve else ''\n cmd = \"cp {} {}{}\".format(source_file, preserve_str, dest_file)\n _exec_cmd(cmd, sudo=sudo, fail_ok=False)\n\n if cleanup:\n file_path = source_file if cleanup == 'source' else dest_file\n files_to_delete.append(file_path)", "def act_copy_file(self, file_source, file_target):\n try:\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy2(file_source, file_target)\n self.logger.debug('%s: Action: <copy> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file copy: %s -> %s', file_source, file_target)", "def copymode(src, dest):\n import shutil\n\n shutil.copymode(src, dest)", "def _copy_file(src, dest):\n\n if src is None or dest is None:\n raise ValueError(\"src and dest must not be None\", src, dest)\n\n if not os.path.isfile(src):\n raise ValueError(\"src file does not appear to exist\", src)\n\n # if error on copy, subprocess will raise CalledProcessError\n try:\n 
subprocess.run(\n [\"/usr/bin/ditto\", src, dest], check=True, stderr=subprocess.PIPE\n )\n except subprocess.CalledProcessError as e:\n logging.critical(\n f\"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}\"\n )\n raise e", "def __copyfile(source, destination):\n logger.info(\"copyfile: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile: %s -> %s failed! Error: %s\", source, destination, e\n )\n return False", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def move_file(source_file, dest_file, sudo=True):\n LOG.info(\"Copy file and preserve attributes\")\n cmd = \"mv {} {}\".format(source_file, dest_file)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def put_and_chown(localfile, remotefile, mode=\"0644\", owner=\"root\", group=\"root\", overwrite=True):\n # Configure the exists() check and chown differently depending on whether\n # we're copying over a file or a directory.\n with hide(\"everything\"), settings(warn_only=True):\n if isinstance(localfile, basestring) and \\\n local(\"test -d \"+localfile).succeeded:\n target = remotefile+\"/\"+os.path.basename(localfile)\n chown_cmd = \"chown -R\"\n else:\n target = remotefile\n chown_cmd = \"chown\"\n\n # Only copy things that are not already there\n if not exists(target) or overwrite:\n put(localfile, remotefile, use_sudo=True, mode=mode)\n sudo(chown_cmd+\" \"+owner+\":\"+group+\" \"+remotefile)", "def copy_file(source_file_name, dest_file_name):\n print(\"Copying \" + source_file_name + \" to \" + dest_file_name)\n shutil.copy2(source_file_name, dest_file_name)\n print(\"Copying done.\")", "def copyFile(srcPath, destPath):\n shutil.copy(srcPath, destPath)", "def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)", "def cp(src, dest):\n _shutil.copy2(native(src), native(dest))", "def copystat(src, dest):\n import shutil\n\n shutil.copystat(str(src), str(dest))", "def force_copy(src, dest):\r\n if os.path.isfile(dest):\r\n os.remove(dest)\r\n if os.path.isdir(dest):\r\n dest = os.path.join(dest, os.path.basename(src))\r\n shutil.copyfile(src, dest)\r\n return dest", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def copy_file(source, destination):\n\n try:\n shutil.copy(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True", "def copy(self, src_path: str, tgt_path: str) -> None:", "def file_copy(\n self,\n src: str,\n dest: Optional[str] = 
None,\n file_system: Optional[str] = None,\n peer: Optional[bool] = False,\n ) -> None:\n if dest is None:\n dest = os.path.basename(src)\n\n if file_system is None:\n file_system = self._get_file_system()\n\n # netmiko's enable_scp\n self.enable_scp()\n self._file_copy(src, dest, file_system)\n if peer:\n self.peer_device._file_copy(src, dest, file_system) # pylint: disable=protected-access\n\n # logging removed because it messes up unit test mock_basename.assert_not_called()\n # for tests test_file_copy_no_peer_pass_args, test_file_copy_include_peer\n # log.info(\"Host %s: File %s transferred successfully.\")", "def copy(source, target):\n\tshutil.copy(source, target)", "def copy_file(file: str, dest: str) -> None:\n\tuux.show_debug(\"Copying \" + str(file) + \" => \" + str(dest))\n\tshutil.copy2(file, dest)", "def copyFile(filename, sourceDir, targetDir, renameTo=None, silent=True):\n\tif renameTo == None: renameTo = filename\n\tfullname_source = os.path.join(sourceDir, filename)\n\tfullname_target = os.path.join(targetDir, renameTo)\n\tshutil.copy(fullname_source, fullname_target)\n\tif silent==False:\n\t\tprint(\"File \"+fullname_source+\" copied to \"+source_dir)" ]
[ "0.75960183", "0.75810474", "0.73564", "0.7140633", "0.70787334", "0.7058556", "0.70468634", "0.70138454", "0.69947135", "0.69778115", "0.69608563", "0.695502", "0.6950139", "0.69486654", "0.6940283", "0.69282556", "0.6926681", "0.69047016", "0.68762404", "0.68417186", "0.6830198", "0.6808796", "0.68080896", "0.68076724", "0.67613226", "0.6750782", "0.6721477", "0.67040646", "0.6701122", "0.66985536" ]
0.830945
0
Calls dodir(dir) for each dir in dirs.
def dodirs ( self, *dirs, **kwargs ): for dirpath in dirs: self.dodir ( dirpath, **kwargs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _dodir ( self, dirpath, mkdir_p ):\n return", "def walk_dir(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n self.ppath_prefix_idx += 1\n merged_path = os.path.join(prefix, dir)\n for root, dirs, files in self.fswalk_base(merged_path):\n yield merged_path, dirs, files\n else:\n yield self.fswalk_base(dir)", "def list_dir(\n dir_,\n):\n return map(\n lambda fi: path_join(dir_, fi),\n os.listdir(dir_),\n )", "def parse_dir(args, dirname, names):\n for name in names:\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n parse_file_from_directory(path, args)", "def _load_dirs(self):\n rootdirs = self._docset.get_compounds(xml.Directory,\n lambda x: x.get_parent() is None)\n for dirdoc in rootdirs:\n self._load_dir(dirdoc, None)", "def update_dirs(dirs):\n index = len(dirs) - 1\n for i, d in enumerate(reversed(dirs)):\n if d in dir_ignore:\n del dirs[index - i]", "def dirsIter(self):\n url = urlparse(self.baseurl)\n basepath = url2pathname(url.path)\n if self.tld is not None:\n yield self.tld, self.getTLDPathsTuple(basepath)\n for dir in self.dirs:\n yield dir, (basepath, dir)", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):\n print('Converting directories in {}'.format(base_dir))\n\n dirs = glob.glob(os.path.join(base_dir, '*'))\n dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}\n if not dirs:\n raise RuntimeError('No direcotries found matching known data.')\n\n store = pd.HDFStore(\n hdf_name, mode='w', complevel=complevel, complib=complib)\n\n for dirpath in dirs:\n dirname = os.path.basename(dirpath)\n\n print(dirname)\n df = cache_to_df(dirpath)\n\n if dirname == 'travel_data':\n keys = ['from_zone_id', 'to_zone_id']\n elif dirname == 'annual_employment_control_totals':\n keys = ['sector_id', 'year', 'home_based_status']\n elif dirname == 'annual_job_relocation_rates':\n keys = ['sector_id']\n elif dirname == 'annual_household_control_totals':\n keys = ['year']\n elif dirname == 'annual_household_relocation_rates':\n keys = ['age_of_head_max', 'age_of_head_min',\n 'income_min', 'income_max']\n elif dirname == 'building_sqft_per_job':\n keys = ['zone_id', 'building_type_id']\n elif dirname == 'counties':\n keys = ['county_id']\n elif dirname == 'development_event_history':\n keys = ['building_id']\n elif dirname == 'target_vacancies':\n keys = ['building_type_id', 'year']\n else:\n keys = [dirname[:-1] + '_id']\n\n if dirname != 'annual_household_relocation_rates':\n df = df.set_index(keys)\n\n for colname in df.columns:\n if df[colname].dtype == np.float64:\n df[colname] = df[colname].astype(np.float32)\n elif df[colname].dtype == np.int64:\n df[colname] = df[colname].astype(np.int32)\n else:\n df[colname] = df[colname]\n\n df.info()\n print(os.linesep)\n store.put(dirname, df)\n\n store.close()", "def process_dir(pool, topdir):\n for root, dirs, files in os.walk(topdir):\n # Not really needed, but makes things consistent.\n dirs.sort()\n files.sort()\n\n for path in files:\n process_file(pool, os.path.join(root, path))", "def ls_dir(d):\n return [d for d in [os.path.join(d, f) for f in os.listdir(d)] if os.path.isdir(d)]", "def 
analyze_dir(self, dirname):\n if self.exceeded_max():\n return\n\n for (dirpath, dirnames, filenames) in os.walk(dir_name):\n for filename in filenames:\n self.analyze_file(dirname + \"/\" + filename)", "def update_dirs(self, dirs: dict):\n if self.dirs is None:\n self.dirs = AttrDict(**dirs)\n else:\n for key, val in dirs.items():\n self.dirs.update({key: val})", "def parse_dir_replace(args, dirname, names):\n for name in names:\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n parse_file_replace(path, args)", "def list_dirs(site_name, doctype=''):\n siteid = _get_site_id(site_name)\n if siteid is None:\n raise FileNotFoundError('no_site')\n\n ## probable inefficient approach\n ## for dir_id in dirids:\n ## docs = _get_docs_in_dir(dir_id)\n ## for docname in docs:\n ## docids = _get_doc_ids(dir_id, docname)\n ## if docids[1] is not None:\n ## diridlist.append(dir_id)\n querystring = 'select id, dirname from {} where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (siteid,))\n dirmap = {row['id']: row['dirname'] for row in result}\n dirids = [x for x in dirmap]\n if doctype in ('', 'src'):\n pass\n elif doctype == 'dest':\n querystring = 'select dir_id, target_docid from {} where dir_id = any(%s);'\n result = execute_query(querystring.format(TABLES[3]), (dirids,))\n dirids = set()\n for row in result:\n if row['target_docid'] is not None:\n dirids.add(row['dir_id'])\n else:\n raise RuntimeError('wrong doctype for list_dirs')\n dirlist = []\n for id in dirids:\n test = dirmap[id]\n if test != '/':\n dirlist.append(test)\n return dirlist # returns all dirs that have documents of the given type", "def add_distdirs ( self, distdirs ):\n def gen_repos():\n for d in distdirs:\n repo = BasicRepo (\n name=os.path.basename ( d ),\n directory=d,\n distroot=self.distroot\n )\n self.logger.debug ( 'New entry, ' + str ( repo ) )\n yield repo\n # --- end of gen_repos() ---\n self.repos.extend ( gen_repos() )", "def walk(dir, callback):\n\n dir = abspath(dir)\n for file in listdir(dir):\n nfile = join(dir, file)\n if isdir(nfile):\n walk(nfile, callback)\n else:\n callback(nfile)", "def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? 
Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)", "def getImmediateSubdirectories(dir):", "def iter_dir(tree, path):\n for f in os.listdir(path):\n if os.path.isfile(path + '/' + f + '/__init__.py'):\n tree[f] = None\n elif os.path.isdir(path + '/' + f):\n tree[f] = {}\n SnakeWM.iter_dir(tree[f], path + '/' + f)", "def walk(dirname):\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def makedirs(*ds):\n for d in ds:\n if not os.path.isdir(d):\n cmd = ['mkdir', '-p', d]\n run_safe(cmd, silent=True)", "def list_dir(self, path):", "def walk(dirname): \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def DirEnumerator(args, path):\n for d in os.scandir(path):\n try:\n if d.name == '.' or d.name == '..':\n pass\n elif d.is_symlink() and args.skiplinks:\n pass\n elif d.is_file():\n yield d.path\n elif d.is_dir() and args.recurse:\n for f in DirEnumerator(args, d.path):\n yield f\n except Exception as e:\n print(\"EXCEPTION %s accessing %s/%s\" % (e, path, d.name))", "def _iter_plugin_files(dirs):\n for plugin_dir in dirs:\n plugin_dir = Path(plugin_dir).expanduser()\n if not plugin_dir.exists(): # pragma: no cover\n continue\n for subdir, dirs, files in os.walk(plugin_dir, followlinks=True):\n subdir = Path(subdir)\n # Skip test folders.\n base = subdir.name\n if 'test' in base or '__' in base or '.git' in str(subdir): # pragma: no cover\n continue\n logger.debug(\"Scanning `%s`.\", subdir)\n for filename in files:\n if (filename.startswith('__') or not filename.endswith('.py')):\n continue # pragma: no cover\n logger.debug(\"Found plugin module `%s`.\", filename)\n yield subdir / filename", "def walk_through_dir(dir_path):\n for dirpath, dirnames, filenames in os.walk(dir_path):\n print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")", "def delFilesFromDirs(self, files, dirs):\n\n\t\tself.__delActualDirs(dirs)\n\t\tTagging.delTagsFromElements(self, dirs, files)", "def give_dirs(self, dirdefs):\n dirlist = next(os.walk('.'))[1]\n return [dir for dir in dirlist\n if self.check_all_rules(dirdefs, dir)]" ]
[ "0.67373455", "0.6056506", "0.5995078", "0.5957818", "0.59263694", "0.5912053", "0.5904246", "0.5838539", "0.5792521", "0.57878745", "0.57871544", "0.57282823", "0.5726129", "0.5694284", "0.56921166", "0.5691485", "0.5685755", "0.56559676", "0.56476575", "0.5639799", "0.5628069", "0.5606861", "0.5596786", "0.5569225", "0.5567806", "0.5557462", "0.552", "0.55191547", "0.54849863", "0.5451038" ]
0.8401167
0
Removes fspath if it is an empty directory or a file (or link).
def wipe ( self, fspath ): return self.rmdir ( fspath ) or self.unlink ( fspath )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_path(file_path):\n\n pass", "def strip_path(fpath):\n if not fpath:\n return fpath\n try:\n file_path, file_name = os.path.split(fpath)\n except Exception:\n file_name = fpath\n return file_name", "def _cleanup_path(path):\n return string.join(filter(None, string.split(path, '/')), '/')", "def unlink ( self, fspath ):\n return", "def clean_file_path(path):\r\n\r\n return path.split(\"/\")[-1]", "def clean_path(path):\n return resolved_path(path)", "def _trim_path(path):\n if path.endswith(\"/\"):\n path = path[:-1] # remove / at the end\n \n return path", "def remove_trailing_slash(path):\n if len(path) > 0:\n if path[len(path) - 1] == \"/\":\n return path[0:-1]\n else:\n return path\n else:\n return path", "def strip_path(self):\n return self.path.replace('/', '')", "def remove(path):\n # thanks https://stackoverflow.com/a/41789397\n if os.path.isfile(path) or os.path.islink(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n raise ValueError(\"file {} is not a file or dir.\".format(path))", "def remove(path):", "def recursive_remove(fs, path):\n\n if fs.is_dir(path=path) and not fs.is_link(path=path):\n for child in fs.children(path=path):\n recursive_remove(fs=fs, path=child)\n fs.remove_empty_directory(str(path))\n else:\n fs.remove_file(str(path))", "def remove(path):\n if os.path.isfile(path) or os.path.islink(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n raise ValueError(\"file {} is not a file or dir.\".format(path))", "def remove(path):\n if os.path.isfile(path) or os.path.islink(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n raise ValueError(\"file {} is not a file or dir.\".format(path))", "def rmrf(path: str):\n if os.path.isdir(path) and not os.path.islink(path):\n shutil.rmtree(path)\n else:\n try:\n os.remove(path)\n except OSError:\n pass", "def RemovePath(*path):\n file_path = os.path.join(*path)\n if os.path.exists(file_path):\n if os.path.isdir(file_path):\n RemoveDirectory(file_path)\n else:\n RemoveFile(file_path)", "def normdirpath(path):\n if not path.endswith('/') and path != '':\n path += '/'\n return path", "def _unlink(path):\n if os.path.isdir(path):\n os.rmdir(path)\n else:\n os.remove(path)", "def rm_rf(path):\n try:\n if islink(path) or isfile(path):\n # Note that we have to check if the destination is a link because\n # exists('/path/to/dead-link') will return False, although\n # islink('/path/to/dead-link') is True.\n os.unlink(path)\n elif isdir(path):\n shutil.rmtree(path)\n except (OSError, IOError):\n pass", "def clean_path(path):\n path = path.replace(\"~\", str(Path.home()))\n if path[-1] != \"/\":\n path += \"/\"\n return path", "def spilt_path(unclean_path_to_file):\n if os.path.exists(unclean_path_to_file) == True:\n return os.path.split(unclean_path_to_file)\n else:\n return None, unclean_path_to_file", "def clean_path(self, path):\n if('.flaccuesplit.' in path):\n path, flaccue_details = path.split('.flaccuesplit.')\n if(path.startswith(self.mount)):\n # Strip off the mount point.\n path = path[len(self.mount):]\n return path", "def _remove_path_head(path, head):\n # Bugfix 13 Oct 2017: path.replace(head,'') will remove head from everywhere in the path. This\n # is especially problematic if the user gives the local dir as \".\" (i.e. 
the current directory)\n # because it will remove periods from filenames\n\n # Find the head at the beginning of the path only. Escape any characters in head that have special\n # meaning in a regular expression (e.g. \".\" means \"any character\")\n head_regex = '^{}'.format(re.escape(head))\n path = re.sub(head_regex, '', path)\n if path.startswith('/'):\n path = path[1:]\n\n return path", "def noTrailingSlash(path):\n return path.split('/')[0]", "def remove(path):\n if os.path.isfile(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n print(\" - file {} is not a file or dir.\".format(path))", "def test_fpath():\n\n assert fpath(None, 'data.json') == 'data.json'\n assert fpath('/path/', 'data.json') == '/path/data.json'\n assert fpath(Path('/path/'), 'data.json') == '/path/data.json'", "def remove_dir_without_error(a_path):\r\n if not os.path.isdir(a_path):\r\n return\r\n for foo in os.listdir(a_path):\r\n abs_foo = os.path.join(a_path, foo)\r\n if os.path.isfile(abs_foo):\r\n try:\r\n os.remove(abs_foo)\r\n except Exception:\r\n continue\r\n else:\r\n remove_dir_without_error(abs_foo)\r\n try:\r\n shutil.rmtree(a_path)\r\n except Exception:\r\n return", "def remove_dir_without_error(a_path):\r\n if not os.path.isdir(a_path):\r\n return\r\n for foo in os.listdir(a_path):\r\n abs_foo = os.path.join(a_path, foo)\r\n if os.path.isfile(abs_foo):\r\n try:\r\n os.remove(abs_foo)\r\n except Exception:\r\n continue\r\n else:\r\n remove_dir_without_error(abs_foo)\r\n try:\r\n shutil.rmtree(a_path)\r\n except Exception:\r\n return", "def remove_upper_level_references(path):\n return os.path.normpath(\"/\" + path).lstrip(\"/\")", "def strip_path(path):\n name_re = re.compile(\"[^/]*\\.([a-z]+)$\")\n return name_re.search(path).group(0)" ]
[ "0.6562152", "0.63822734", "0.63800764", "0.63273704", "0.6302005", "0.62554234", "0.6191784", "0.6151708", "0.61012214", "0.6026597", "0.5998513", "0.599304", "0.59807295", "0.59807295", "0.59583515", "0.59484076", "0.5933473", "0.58946943", "0.5864218", "0.5861272", "0.582187", "0.58182085", "0.58028936", "0.5782308", "0.57766396", "0.5716672", "0.5713394", "0.5713394", "0.57008106", "0.56865644" ]
0.6647596
0
Recursively copies files from source_root to dest_root (while keeping its directory structure). Ownership and permissions are not preserved; instead, copied files and created dirs will have the permissions set during initialization of this object.
def copy_tree ( self,
   source_root, dest_root, overwrite=True, followlinks=False
):
   dodir = self.dodir
   copy_file = self.copy_file

   if overwrite:
      for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (
         source_root, dest_root, followlinks=followlinks
      ):
         for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:
            dodir ( dest_dir )

         for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:
            if followlinks and stat.S_ISLINK ( source_stat ):
               dodir ( dest_file )
            else:
               copy_file ( source_file, dest_file )
   else:
      for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (
         source_root, dest_root, followlinks=followlinks
      ):
         for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:
            if dest_stat is None:
               dodir ( dest_dir )

         for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:
            if dest_stat is None:
               if followlinks and stat.S_ISLINK ( source_stat ):
                  dodir ( dest_file )
               else:
                  copy_file ( source_file, dest_file )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __copyFiles(self):\n if os.path.isdir(self.__sourcePath):\n shutil.copytree(self.__sourcePath, self.__targetPath)\n else:\n shutil.copy2(self.__sourcePath, self.__targetPath)", "def walk_copy_tree ( source, dest, subdir_root=False, **walk_kwargs ):\n source_path = os.path.abspath ( source )\n dest_path = os.path.abspath ( dest )\n\n get_entry = lambda path: (\n path, os.lstat ( path ) if os.path.lexists ( path ) else None\n )\n get_stat_list = lambda s, d, names: (\n [ ( get_entry ( s + name ), get_entry ( d + name ) ) for name in names ]\n )\n\n for root, root_rel, dirnames, filenames in walk_relpath (\n source_path, include_root=subdir_root, **walk_kwargs\n ):\n root_dest = ( dest + os.sep + root_rel if root_rel else dest )\n\n dirs = get_stat_list ( root + os.sep, root_dest + os.sep, dirnames )\n files = get_stat_list ( root + os.sep, root_dest + os.sep, filenames )\n\n yield root, root_dest, root_rel, dirs, files, dirnames", "def copydir(source, dest):\n dest_par = os.path.dirname(dest)\n for root, dirs, files in os.walk(source):\n if not os.path.isdir(root):\n os.makedirs(root)\n\n for mdir in dirs:\n try:\n dest_path = os.path.join(dest_par, root, mdir)\n if not os.path.isdir(dest_path):\n os.makedirs(dest_path)\n except:\n pass\n for file in files:\n rel_path = root.replace(source, '').lstrip(os.sep)\n dest_path = os.path.join(dest, rel_path)\n if not os.path.isdir(dest_path):\n os.makedirs(dest_path)\n cpy_src = os.path.join(root, file)\n cpy_dest = os.path.join(dest_path, file)\n shutil.copyfile(cpy_src, cpy_dest)\n shutil.copymode(cpy_src, cpy_dest)", "def copyChildren(root, target):\n global numCopied, exitStatus\n\n if not os.path.exists(target):\n mkdirRecursive(target)\n childList = os.listdir(root)\n for entry in childList:\n source = root + os.sep + entry\n if os.path.isfile(source) and entry.endswith(\".py\"):\n doCopy = True\n if os.path.exists(target + os.sep + entry):\n srcStat = os.stat(source)\n targetStat = os.stat(target + os.sep + entry)\n if srcStat[stat.ST_MTIME] <= targetStat[stat.ST_MTIME]:\n doCopy = False # target is same or newer\n else:\n os.remove(target + os.sep + entry)\n if doCopy:\n shutil.copy2(source, target)\n shutil.copymode(source, target + os.sep + entry)\n numCopied = numCopied + 1\n elif os.path.isdir(source):\n # make the child directory in the target tree,\n # if it doesn't already exist\n if not os.path.exists(target + os.sep + entry):\n mkdirRecursive(target + os.sep + entry)\n # and then copy all of its children\n copyChildren(root + os.sep + entry, target + \"/\" + entry)", "def copy_directory(source, dest):\n for path, dirs, files in walk(source):\n relative_src_path = path.replace(source, \"\").lstrip(\"/\")\n abs_dest_path = join(dest, relative_src_path)\n if not exists(abs_dest_path):\n makedirs(abs_dest_path)\n for tdir in dirs:\n dest_dir = join(abs_dest_path, tdir)\n if not exists(dest_dir):\n makedirs(dest_dir)\n for tfile in files:\n src_file = join(path, tfile)\n dest_file = join(abs_dest_path, tfile)\n if islink(src_file):\n linkto = readlink(src_file)\n symlink(linkto, dest_file)\n continue\n else:\n process_file(src_file, dest_file)", "def copyTree(source_path, dest_path):\n if python_version >= 0x380:\n # Python 3.8+ has dirs_exist_ok\n return shutil.copytree(source_path, dest_path, dirs_exist_ok=True)\n\n from distutils.dir_util import copy_tree\n\n return copy_tree(source_path, dest_path)", "def copy_subtree(src, dst):\n for src_f in os.listdir(src):\n src_path = os.path.join(src, src_f)\n if 
os.path.isdir(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copytree(src_path, dst_path)\n else:\n ProcessJson.copy_subtree(src_path, dst_path)\n elif os.path.isfile(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copy(src_path, dst_path)", "def copytree(src, dest):\n shutil.copytree(src, dest)\n restorecon(dest, recursive=True)", "def copy_dirlink_tree ( self,\n source_root, dest_root, overwrite=False, followlinks=False\n ):\n\n unlink = self.unlink\n symlink = self.symlink\n\n source, dest, relpath, dirs, files, dirnames = next (\n walk_copy_tree ( source_root, dest_root, followlinks=followlinks )\n )\n\n self.dodir ( dest_root )\n\n if overwrite:\n for ( my_source, my_source_stat ), ( my_dest, my_dest_stat ) in (\n itertools.chain ( dirs, files )\n ):\n if my_dest_stat is not None:\n unlink ( my_dest )\n symlink ( my_source, my_dest )\n else:\n for ( my_source, my_source_stat ), ( my_dest, my_dest_stat ) in (\n itertools.chain ( dirs, files )\n ):\n if my_dest_stat is None:\n symlink ( my_source, my_dest )", "def copyDir(srcPath, destPath):\n shutil.copytree(srcPath, destPath)", "def dir_copy_from_bcdbfs(self, path, dest, recursive=True):\n if path == j.sal.fs.getParent(dest):\n raise j.exceptions.Base(\"{} can not copy directory into itself\".format(path))\n dir_source = self._dir_model.get_by_name(name=path)[0]\n source_files = dir_source.files\n for file_id in source_files:\n file = self._file_model.get(file_id)\n basename = j.sal.fs.getBaseName(file.name)\n self.file_copy_form_bcdbfs(file.name, j.sal.fs.joinPaths(dest, basename))\n if recursive:\n source_dirs = dir_source.dirs\n for dir_id in source_dirs:\n dir = self._dir_model.get(dir_id)\n self.dir_create(dir.name)\n basename = j.sal.fs.getBaseName(dir.name)\n self.dir_copy_from_bcdbfs(dir.name, j.sal.fs.joinPaths(dest, basename))", "def copy_recursively(src, dst, overwrite=False, changed_only=True):\n if os.path.isdir(src):\n copytree(src, dst, overwrite, changed_only)\n else:\n copyfile(src, dst, overwrite, changed_only)", "def dir_copy(self, path, dest, recursive=True):\n if j.sal.fs.exists(path):\n self.dir_copy_from_local(path, dest, recursive=recursive)\n else:\n self.dir_copy_from_bcdbfs(path, dest, recursive=recursive)", "def copy(self, src, dest, recursive=False, update=False):\n self.makedir(posixpath.dirname(dest))\n command = CommandBuilder.copy(src, dest, recursive, update)\n return self.execute_command(command)", "def copy(self, destination):\n destination = Path(destination)\n src_base = str(self.directory)\n if self.flatten:\n dst_base = destination\n else:\n dst_base = Path(destination.joinpath(self.directory.stem))\n\n for src in self.locations_to_copy:\n if src.is_dir():\n for dir_path, dir_names, file_names in os.walk(str(src)):\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(dir_path.replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n for file in file_names:\n shutil.copy2(os.path.join(dir_path, file), str(dst_dir))\n else:\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(str(src.parent).replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n shutil.copy2(str(src), str(dst_dir))", "def copy(source, destination):\r\n\r\n source_ = os.path.abspath(os.path.expanduser(source))\r\n destination_ = os.path.abspath(os.path.expanduser(destination))\r\n\r\n if not os.path.exists(destination_) and not 
os.path.isfile(source_):\r\n os.makedirs(destination_)\r\n\r\n def recurse(source, destination):\r\n for entry in os.listdir(source):\r\n entry_path = os.path.join(source, entry)\r\n if os.path.isdir(entry_path):\r\n entry_dest = os.path.join(destination, entry)\r\n if os.path.exists(entry_dest):\r\n if not os.path.isdir(entry_dest):\r\n raise IOError('Failed to copy {0} a directory.'\r\n .format(entry_dest))\r\n recurse(entry_path, entry_dest)\r\n else:\r\n shutil.copytree(entry_path, entry_dest)\r\n else:\r\n shutil.copy2(entry_path, destination)\r\n\r\n\r\n if os.path.isdir(source_):\r\n recurse(source_, destination_)\r\n\r\n elif os.path.isfile(source_):\r\n dest_dir = os.path.dirname(destination_)\r\n if not os.path.exists(dest_dir):\r\n os.makedirs(dest_dir)\r\n shutil.copy2(source_, destination_)\r\n logger.info('copying %s to %s' % (source_, destination_))\r\n else:\r\n logger.warning('skipped copy %s to %s' % (source_, destination_))", "def dir_copy_from_local(self, path, dest, recursive=True):\n source_files = j.sal.fs.listFilesInDir(path)\n for file in source_files:\n basename = j.sal.getBaseName(file)\n self.file_copy_from_local(file, j.sal.fs.joinPaths(path, basename))\n if recursive:\n source_dirs = j.sal.fs.listDirsInDir(path)\n for dir in source_dirs:\n self.dir_create(dir)\n basename = j.sal.fs.getBaseName(dir)\n self.dir_copy_from_local(dir, j.sal.fs.joinPaths(dest, basename))", "def copy_files(self, source, target):\n\n if source == target and is_local(self.borrowed_ctx.host):\n logger.warning(\"IGNORE self-node: {}\".format(self.borrowed_ctx.host))\n return\n\n try:\n for item in os.listdir(source):\n if os.path.isfile(os.path.join(source, item)):\n logger.debug(\n \"processing {} --> {}\".format(\n os.path.join(source, item), self.borrowed_ctx.host\n )\n )\n self._sftp_channel.put(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n else:\n self.mkdir(\"%s/%s\" % (target, item), ignore_existing=True)\n self.copy_files(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n except Exception as e:\n logger.warning(\n \"Error of processing target = ({}:{}), for reason: {}\".format(\n self.borrowed_ctx.host, self.borrowed_ctx.port, e,\n )\n )\n exit(0)", "def copy_one(self, src, dest):\n if self.manager.no_sourcemaps and self.is_ignored_sourcemap(src.name):\n return\n\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n copytree_kwargs = {}\n\n if self.manager.no_sourcemaps:\n copytree_kwargs[\"ignore\"] = SOURCEMAP_IGNORE_PATTERNS\n\n if src.is_dir():\n shutil.copytree(src, dest, **copytree_kwargs)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)", "def copy_filelink_tree ( self,\n source_root, dest_root, overwrite=False, followlinks=False\n ):\n dodir = self.dodir\n unlink = self.unlink\n symlink = self.symlink\n\n if overwrite:\n for source, dest, relpath, dirs, files, dirnames in (\n walk_copy_tree ( source_root, dest_root, followlinks=followlinks )\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n if dest_stat is not None:\n unlink ( dest_file )\n symlink ( source_file, dest_file )\n else:\n for source, dest, relpath, dirs, files, dirnames in (\n 
walk_copy_tree ( source_root, dest_root, followlinks=followlinks )\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n if dest_stat is None:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if dest_stat is None:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n symlink ( source_file, dest_file )", "def copytree(self, name, source, dest, symlinks=False):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n args = ['--symlinks'] if symlinks else []\n self._run(name, ['copytree'] + args + [source, dest])\n self.m.path.mock_copy_paths(source, dest)", "def copy_one(self, src, dest):\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n if src.is_dir():\n shutil.copytree(src, dest)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)", "def _duplicate_as_linked_tree(self, source_root):\n logging.debug(\"Started traversing %s \\'s tree for file linkage and directory duplication.\" % self.directory)\n # Create the containing directory that resides within the share\n within_share_dir_path = os.path.join(self.directory, os.path.basename(source_root))\n self._makedir(within_share_dir_path)\n for root, subdirectories, files in os.walk(source_root, followlinks=True):\n share_root = root.replace(str(source_root), within_share_dir_path, 1)\n for subdir in subdirectories:\n target = os.path.join(share_root, subdir)\n self._makedir(target)\n for file in files:\n source = os.path.join(root, file)\n target = os.path.join(share_root, file)\n self._link_files(source, target)", "def fresh_copy_dir(source_path, target_path):\n os.mkdir(target_path)\n for item in os.listdir(source_path):\n s = os.path.join(source_path, item)\n t = os.path.join(target_path, item)\n if os.path.isdir(s):\n fresh_copy_dir(s, t)\n else:\n shutil.copyfile(s, t)", "def copy(src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise", "def copy(src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise", "def copy_tree_to_path(src_dir, dest_dir):\n names = os.listdir(src_dir)\n\n for name in names:\n srcname = os.path.join(src_dir, name)\n destname = os.path.join(dest_dir, name)\n\n if os.path.isdir(srcname):\n shutil.copytree(srcname, destname)\n else:\n shutil.copy(srcname, destname)", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest", "def copydir(source, dest, ignore=None):\n shutil.copytree(source, dest, ignore_dangling_symlinks=True,\n ignore=shutil.ignore_patterns(*ignore) if ignore else None)", "def copy(source, destination):\n if os.path.isdir(source):\n return __copytree(source, destination)\n else:\n return __copyfile2(source, destination)" ]
[ "0.69874936", "0.6962603", "0.6929658", "0.6741736", "0.6719468", "0.6689175", "0.6660653", "0.6599278", "0.6598707", "0.6578675", "0.6560733", "0.655713", "0.65497947", "0.65477866", "0.65471786", "0.65068245", "0.648274", "0.643485", "0.63942957", "0.63851553", "0.63709056", "0.63305813", "0.63278604", "0.6324605", "0.63092965", "0.63092965", "0.6289155", "0.6276932", "0.6274228", "0.62540644" ]
0.7179521
0
Creates symlinks to source_root's content in dest_root.
def copy_dirlink_tree ( self,
   source_root, dest_root, overwrite=False, followlinks=False
):
   unlink = self.unlink
   symlink = self.symlink

   source, dest, relpath, dirs, files, dirnames = next (
      walk_copy_tree ( source_root, dest_root, followlinks=followlinks )
   )

   self.dodir ( dest_root )

   if overwrite:
      for ( my_source, my_source_stat ), ( my_dest, my_dest_stat ) in (
         itertools.chain ( dirs, files )
      ):
         if my_dest_stat is not None:
            unlink ( my_dest )
         symlink ( my_source, my_dest )
   else:
      for ( my_source, my_source_stat ), ( my_dest, my_dest_stat ) in (
         itertools.chain ( dirs, files )
      ):
         if my_dest_stat is None:
            symlink ( my_source, my_dest )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_filelink_tree ( self,\n source_root, dest_root, overwrite=False, followlinks=False\n ):\n dodir = self.dodir\n unlink = self.unlink\n symlink = self.symlink\n\n if overwrite:\n for source, dest, relpath, dirs, files, dirnames in (\n walk_copy_tree ( source_root, dest_root, followlinks=followlinks )\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n if dest_stat is not None:\n unlink ( dest_file )\n symlink ( source_file, dest_file )\n else:\n for source, dest, relpath, dirs, files, dirnames in (\n walk_copy_tree ( source_root, dest_root, followlinks=followlinks )\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n if dest_stat is None:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if dest_stat is None:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n symlink ( source_file, dest_file )", "def link(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n abs_src = self._rootjoin(src)\n abs_dst = os.path.join(self.chroot, dst)\n try:\n os.link(abs_src, abs_dst)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # File already exists, skip\n pass\n elif e.errno == errno.EXDEV:\n # Hard link across devices, fall back on copying\n shutil.copyfile(abs_src, abs_dst)\n else:\n raise", "def create_symlink(src, dest):\n sudo('ln -s {} {}'.format(src, dest))", "def link(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n abs_src = self._rootjoin(src)\r\n abs_dst = os.path.join(self.chroot, dst)\r\n try:\r\n os.link(abs_src, abs_dst)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n # File already exists, skip\r\n pass\r\n elif e.errno == errno.EXDEV:\r\n # Hard link across devices, fall back on copying\r\n shutil.copyfile(abs_src, abs_dst)\r\n else:\r\n raise", "def link(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n abs_src = self._rootjoin(src)\r\n abs_dst = os.path.join(self.chroot, dst)\r\n try:\r\n os.link(abs_src, abs_dst)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n # File already exists, skip\r\n pass\r\n elif e.errno == errno.EXDEV:\r\n # Hard link across devices, fall back on copying\r\n shutil.copyfile(abs_src, abs_dst)\r\n else:\r\n raise", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def _duplicate_as_linked_tree(self, source_root):\n logging.debug(\"Started traversing %s \\'s tree for file linkage and directory duplication.\" % self.directory)\n # Create the containing directory that resides within the share\n within_share_dir_path = os.path.join(self.directory, os.path.basename(source_root))\n self._makedir(within_share_dir_path)\n for root, subdirectories, files in os.walk(source_root, followlinks=True):\n share_root = root.replace(str(source_root), within_share_dir_path, 1)\n for subdir in subdirectories:\n target = os.path.join(share_root, subdir)\n self._makedir(target)\n for file in files:\n source = os.path.join(root, file)\n target = os.path.join(share_root, file)\n self._link_files(source, target)", "def cpsym(src,dest):\n \n src = os.path.normpath(src)\n dest = os.path.normpath(dest)\n \n if not os.path.exists(src):\n return\n \n for dirpath,dirnames,filenames in 
os.walk(src):\n rel_dirpath = os.path.relpath(dirpath,src)\n dest_dirpath = os.path.join(dest,rel_dirpath)\n mkdir(dest_dirpath,isfull=True)\n \n for filename in filenames:\n src_filename = os.path.join(dirpath,filename)\n rel_filename = os.path.relpath(src_filename,src)\n \n dest_filename = os.path.join(dest,rel_filename)\n try:\n os.symlink(src_filename,dest_filename)\n except OSError:\n pass", "def copy_tree ( self,\n source_root, dest_root, overwrite=True, followlinks=False\n ):\n dodir = self.dodir\n copy_file = self.copy_file\n\n if overwrite:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )\n else:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n if dest_stat is None:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if dest_stat is None:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )", "def copy_directory(source, dest):\n for path, dirs, files in walk(source):\n relative_src_path = path.replace(source, \"\").lstrip(\"/\")\n abs_dest_path = join(dest, relative_src_path)\n if not exists(abs_dest_path):\n makedirs(abs_dest_path)\n for tdir in dirs:\n dest_dir = join(abs_dest_path, tdir)\n if not exists(dest_dir):\n makedirs(dest_dir)\n for tfile in files:\n src_file = join(path, tfile)\n dest_file = join(abs_dest_path, tfile)\n if islink(src_file):\n linkto = readlink(src_file)\n symlink(linkto, dest_file)\n continue\n else:\n process_file(src_file, dest_file)", "def copytree(self, name, source, dest, symlinks=False):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n args = ['--symlinks'] if symlinks else []\n self._run(name, ['copytree'] + args + [source, dest])\n self.m.path.mock_copy_paths(source, dest)", "def copy_files(self, dest_dir: str, symlink: bool = True):\n\n # Convert dir to pathlib.Path\n dest_dir = pathlib.Path(dest_dir)\n\n # Make directory if it does not exist.\n if not dest_dir.is_dir():\n dest_dir.mkdir(parents=True)\n\n # Symlink/copy in exe\n from_file = self.wrf_hydro_exe\n to_file = dest_dir.joinpath(from_file.name)\n if symlink:\n to_file.symlink_to(from_file)\n else:\n shutil.copy(str(from_file), str(to_file))", "def _create_symlink(self, source_path, main):\n main_file = os.path.realpath(os.path.join(source_path, main))\n if not os.path.isfile(main_file):\n main_file += '.js'\n if not os.path.isfile(main_file):\n print('\\tWARNING: Could not create symlink for {}, no such file.'.format(main_file))\n return\n main_file_name = os.path.basename(main_file)\n with change_working_directory(os.path.realpath(self.symlink_dir)) as cd:\n file_path = os.path.join(cd, main_file_name)\n self.created(file_path)\n if os.path.islink(file_path):\n os.remove(file_path)\n symlink(main_file, main_file_name)", "def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: \"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) 
)\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )", "def ln(src, dst):\n os.symlink(src, dst)", "def copy_or_link(src, dest):\n if os.name == 'nt':\n qisys.sh.install(src, dest)\n else:\n qisys.sh.rm(dest)\n os.symlink(src, dest)", "def create_symlink(source_file, dest_file, sudo=True):\n LOG.info(\"Creating symlink to {} called {}\".format(source_file, dest_file))\n cmd = \"ln -sf {} {}\".format(source_file, dest_file)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def create_symlink_dir(src_dir, src_list, dst):\n if not src_list:\n return\n message = \"creating symlink directory at {dst} with files {src_list}\".format(\n dst=dst,\n src_list=pformat(src_list))\n logging.info(message)\n if not os.path.exists(dst):\n os.makedirs(dst)\n for src_file in src_list:\n if not src_file:\n continue\n source = os.path.join(src_dir, src_file)\n destination = os.path.join(dst, src_file)\n if os.path.lexists(destination):\n continue\n try:\n os.symlink(source, destination)\n except Exception as e:\n msg = format_debug(e)\n logging.error(e)", "def __copytree(source, destination, symlinks=False):\n logger.info(\"copytree: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copytree(source, destination, symlinks)\n return True\n except Exception as e:\n logger.exception(\n \"copytree: %s -> %s failed! Error: %s\", source, destination, e\n )\n return False", "def build_folders(source, destination_temp, standard, root):\n\n source_fs = OSFS(source)\n\n print \"Processing %s ... \" % standard['id']\n standard_fs = source_fs.opendir(standard['id'])\n\n # list all artifacts of a standard\n artifacts = standard_fs.listdir(dirs_only=True)\n if '.git' in artifacts: artifacts.remove(\".git\")\n\n for artifact in artifacts:\n # check whether artifact folder exists in destination_temp \n if root.exists('%s/%s' % (destination_temp, artifact)) == False:\n root.makedir('%s/%s' % (destination_temp, artifact))\n\n # copy standard folders from source to destination_temp in desired structure\n root.copydir('%s/%s/%s' % (source, standard['id'], artifact), '%s/%s/%s' % (destination_temp, artifact, standard['id']))\n\n html = create_standard_webpage(standard, artifacts)\n\n # check whether register/standard exists\n if root.exists('%s/%s' % (destination_temp, standard['id'])) == False:\n root.makedir('%s/%s' % (destination_temp, standard['id']))\n \n # write standard HTML page to register/standard/index.html\n with codecs.open('%s/%s/index.html' % (destination_temp, standard['id']), 'w', encoding='utf8') as f:\n f.write(html)\n\n # copy web assets\n root.copydir('web/assets', '%s/r' % destination_temp, overwrite=True)", "def make_franny_symlinks(src_dirs, out_dir):\n\n for path, dirs, files in chain.from_iterable(os.walk(path)\n for path in src_dirs):\n print('Looking in %s' % path)\n for sta in ['NS12', 'NS13', 'NS14']:\n for filename in fnmatch.filter(files, '*.%s*' % sta):\n net = filename.split('.')[-7]\n chan = filename.split('.')[-4]\n if chan[-1] == 'N':\n new_chan = 'EH1'\n elif chan[-1] == 'E':\n new_chan = 'EH2'\n else:\n continue\n mseed_nm = filename.split('/')[-1]\n new_mseed = string.replace(mseed_nm, chan, new_chan)\n old_path = os.path.join(path, filename)\n new_path = '%s/%s/%s/%s.D/%s' % (out_dir, net,\n sta, new_chan, new_mseed)\n\n print('Creating symlink for file %s at %s'\n % (old_path, new_path))\n spwd = '*blackmore89'\n cmnd = 'sudo 
-S ln %s %s' % (old_path, new_path)\n os.system('echo %s | %s' % (spwd, cmnd))\n return", "def copy_files(self):\n for (source_name, target_name) in self.FILES_TO_LINK:\n src = os.path.expanduser(source_name)\n tgt = os.path.expanduser(target_name)\n cmd = 'cp -rf {src} {tgt}'.format(src=src, tgt=tgt)\n\n print(cmd)\n if not self.dry_run:\n run(cmd)", "def create_symlinks(target_dir: os.PathLike, symlinks_to_create: List[os.PathLike]):\n for src_path in symlinks_to_create:\n trg_path = os.path.join(target_dir, os.path.basename(src_path))\n\n if os.path.islink(src_path):\n # Let's not create symlinks to symlinks\n # Since dropping the current symlink will break the experiment\n os.symlink(os.readlink(src_path), trg_path)\n else:\n print(f'Creating a symlink to {src_path}, so try not to delete it occasionally!')\n os.symlink(src_path, trg_path)", "def link(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n full_destination_path = os.path.join(\n os.path.expandvars(self.path_destination), self.name\n )\n\n try:\n if self.sudo:\n spawn.process(\n f'ln -sfv \"{full_source_path}\" \"{full_destination_path}\"',\n sudo=True,\n )\n else:\n os.symlink(full_source_path, full_destination_path)\n except FileExistsError:\n message.error(\n \"Can't symlink, file already exists at destination. Attempting fix.\"\n )\n os.remove(full_destination_path)\n message.info(f\"Removed: '{full_destination_path}'\")\n os.symlink(full_source_path, full_destination_path)\n finally:\n message.info(\n f\"Symlink created: '{full_source_path}' <--> '{full_destination_path}'\"\n )\n else:\n message.error(\n f\"'{self.name}' has no source from which to create a link from.\"\n )", "def fix_git_symlinked(src, dst):\n # if running from WC there should be a 'doc' dir sibling to nikola package\n if not should_fix_git_symlinked():\n return\n # probabbly in a WC, so symlinks should be fixed\n for root, dirs, files in os.walk(dst):\n for name in files:\n filename = os.path.join(root, name)\n\n # detect if symlinked\n try:\n if not (2 < os.path.getsize(filename) < 500):\n continue\n # which encoding uses a git symlink marker ? 
betting on default\n with open(filename, 'r') as f:\n text = f.read()\n if text[0] != '.':\n # de facto hint to skip binary files and exclude.meta\n continue\n except Exception:\n # probably encoding: content binary or encoding not defalt,\n # also in py2.6 it can be path encoding\n continue\n dst_dir_relpath = os.path.dirname(os.path.relpath(filename, dst))\n path = os.path.normpath(os.path.join(src, dst_dir_relpath, text))\n if not os.path.exists(path):\n continue\n # most probably it is a git symlinked file\n\n # copy original content to filename\n shutil.copy(path, filename)", "def _clone_defaults(self, source, dest, context):\n\n for base, dirs, files in os.walk(source):\n relative = os.path.relpath(base, source)\n\n for d in dirs:\n os.makedirs(os.path.join(dest, relative, d))\n\n for filename in files:\n\n if not filename.endswith(self.valid_extensions):\n continue\n\n with open(os.path.join(base, filename), 'r') as f:\n data = f.read()\n\n with open(os.path.join(dest, relative, filename), 'w') as f:\n data = jinja2.Template(data).render(**context)\n f.write(data)", "def make_symlink(dst, src, silently_move=False):\n dst_dir = os.path.dirname(dst.rstrip(os.path.sep))\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n\n # get a temporary directory\n if os.path.exists(dst):\n if silently_move or (((os.path.isfile(dst) or (os.path.isdir(dst)) and\n query_yes_no('Move NSLS-II from userpackages?')))):\n import tempfile\n temp_dir = tempfile.mkdtemp()\n shutil.move(dst, temp_dir)\n print('Previous NSLS-II folder moved to {0}'.format(temp_dir))\n else:\n print('NSLS-II already exists in userpackages. Please move or delete it'\n 'and then re-run setup.py')\n return False\n\n # this symlink does not get removed when pip uninstall vttools is run...\n # todo figure out how to make pip uninstall remove this symlink\n try:\n # symlink the NSLS-II folder into userpackages\n os.symlink(src, dst)\n except AttributeError:\n # you must be on Windows!\n call(['mklink', '/j', dst, src], shell=True)\n\n return True", "def convert_relative_symlinks(template_dir, out_dir):\n for root, dirs, files in os.walk(out_dir):\n for filename in files:\n filepath = os.path.join(root, filename)\n if os.path.islink(filepath):\n linkto = os.readlink(filepath)\n if linkto.startswith('.'):\n os.remove(filepath)\n start_dir = os.path.relpath(root, out_dir)\n os.symlink(os.path.join(template_dir, start_dir, filename), filepath)", "def mergecopytree(src, dst, symlinks=False, ignore=None):\n if not os.path.exists(dst):\n os.makedirs(dst)\n shutil.copystat(src, dst)\n lst = os.listdir(src)\n if ignore:\n excl = ignore(src, lst)\n lst = [x for x in lst if x not in excl]\n for item in lst:\n src_item = os.path.join(src, item)\n dst_item = os.path.join(dst, item)\n if symlinks and os.path.islink(src_item):\n if os.path.lexists(dst_item):\n os.remove(dst_item)\n os.symlink(os.readlink(src_item), dst_item)\n elif os.path.isdir(src_item):\n mergecopytree(src_item, dst_item, symlinks, ignore)\n else:\n shutil.copy2(src_item, dst_item)", "def copy(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))" ]
[ "0.68658274", "0.65962565", "0.6584691", "0.65420985", "0.65420985", "0.6446246", "0.6419486", "0.63529956", "0.633968", "0.6323208", "0.6282044", "0.6085427", "0.60373276", "0.6013182", "0.5988649", "0.5983457", "0.5963935", "0.59410805", "0.5917402", "0.5914621", "0.58708274", "0.5848524", "0.5830169", "0.5820486", "0.575667", "0.5744613", "0.57129383", "0.5674513", "0.5656964", "0.5648473" ]
0.69057864
0
A trivial test of the StatsModelsWrapper
def test_stats_models_wrapper():
    X = np.array([[1], [2], [3]])
    y = np.array([1.1, 2, 3])
    glm_gaussian = functools.partial(sm.GLM, family=sm.families.Gaussian())
    sm_est = StatsModelsWrapper(glm_gaussian)
    assert sm_est.fit(X, y) is sm_est, "fit did not return self"
    assert sm_est.predict(X).shape == (3,)
    assert 0.0 <= sm_est.score(X, y) <= 1.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testGetModelsData(self):\n models = models_logic._getModelsData()\n self.assertTrue(models)", "def test_model():\n pass", "def test_get_stats(self):\n pass", "def test_model_info_basic():\n model = ModelInfo('test description', {'f1': 0.9},\n BaseLocation('protoc://something:8080/thingy'))\n assert 'test description' in model.items['description']\n assert model.items['metrics']['f1'] == 0.9\n assert model.items['location'].get_host() == 'something:8080'\n assert model.items['hash'] is not None", "def test_predictor():", "def test_model_obj():\n # Get the ambient profile data\n profile = get_profile()\n\n # Initialize a Model object\n sbm = single_bubble_model.Model(profile)\n\n # Check the model attributes\n assert_approx_equal(sbm.p.rho_r, 1031.035855535142, significant=6)\n (T, S, P) = profile.get_values(1000., ['temperature', 'salinity',\n 'pressure'])\n (Tp, Sp, Pp) = sbm.profile.get_values(1000., ['temperature', 'salinity',\n 'pressure'])\n assert Tp == T\n assert Sp == S\n assert Pp == P", "def test_model_found(arguments):\n ...", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"", "def test_rr_basic(model):\n results = model.fit()\n assert isinstance(results, RCRResults)", "def test_fitModel():\n\n # check type\n assert isinstance(algo_svd, surprise.prediction_algorithms.matrix_factorization.SVD)", "def testGetReigsteredModel(self):\n from soc.models.student import Student\n model = models_logic.getModel('soc.models.student.Student')\n self.assertEqual(model, Student)", "def test_called_model(mocked_models):\n # given\n mocked_instance = mocked_models.BattleSchema.return_value\n\n # when\n handlers.new_battle()\n\n # then\n assert mocked_models.BattleSchema.call_count == 1\n assert mocked_instance.dumps.call_count == 1", "def test_model_initialization():\n MyModel(\"model\", SkillContext())", "def testModel( self, classTest, classPred):", "def test_get_summary_with_model(self):\n\t\t\n\t\tdescription = self.watcher.describe(model=self.model)\n\t\tself.assertEqual(11, len(description))\n\t\t\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.second_layer])\n\t\treturned_summary = self.watcher.get_summary(details)\n\t\t\n\t\tprint(returned_summary)\n\t\t\n\t\tsaved_summary = self.watcher.get_summary()\n\t\tself.assertEqual(returned_summary, saved_summary)", "def test_custom_models(model):\n atom = ATOMRegressor(X_reg, y_reg, random_state=1)\n atom.run(models=model, n_calls=2, n_initial_points=1)\n assert atom.rfr.fullname == \"RandomForestRegressor\"\n assert atom.rfr.estimator.get_params()[\"random_state\"] == 1", "def testGetCachedModelsData(self):\n models = models_logic.getModelsData()\n models2 = models_logic._getModelsData()\n self.assertEqual(models, models2)", "def test_stats_init(self):\n stats_store = StatsStore()\n\n gen_stats = stats_store.general_stats_get()\n for cntr in [stats_store.General.NUM_APPS_MOVES, stats_store.General.NUM_ERR]:\n assert cntr in gen_stats\n assert gen_stats[cntr] == 0", "def test_test_regression_model(self):\n 
model = RegressionTestModel()\n example = {'x_1': 3, 'x_2': 2}\n pred = list(model.predict([example]))[0]\n self.assertEqual(pred['score'], 20)", "def test_get_model(self) -> None:\n get_model()", "def test_plot_ess_no_sample_stats(models):\n idata = models.model_1\n with pytest.raises(ValueError, match=\"must contain sample_stats\"):\n plot_ess(idata.posterior, rug=True)", "def test_models_regression(model):\n atom = ATOMRegressor(X_reg, y_reg, test_size=0.24, random_state=1)\n atom.run(\n models=model,\n metric=\"neg_mean_absolute_error\",\n n_calls=2,\n n_initial_points=1,\n bo_params={\"base_estimator\": \"gbrt\", \"cv\": 1},\n )\n assert not atom.errors\n assert hasattr(atom, model)", "def test_stats_class_initialisation(self):\n self.assertIsInstance(self.stats,cardutils.Stats)", "def test_base_stats():\n # type is required\n config = { 'file_name' : 'dummy_file' }\n with np.testing.assert_raises(ValueError):\n stats = piff.Stats.process(config)\n\n # ... for all stats in list.\n config = [ { 'type': 'TwoDHist', 'file_name': 'f1' },\n { 'type': 'Whisker', 'file_name': 'f2', },\n { 'type': 'Rho', 'file_name': 'f3' },\n { 'file_name' : 'dummy_file' },\n ]\n with np.testing.assert_raises(ValueError):\n stats = piff.Stats.process(config)\n\n # Can't do much with a base Stats class\n stats = piff.Stats()\n np.testing.assert_raises(NotImplementedError, stats.compute, None, None)\n np.testing.assert_raises(NotImplementedError, stats.plot)", "def test_model_runs(self):\n\n for m in self.models:\n self.assertTrue(m is not None)\n self.assertTrue(isinstance(m, topic_model.TopicModel))", "def test_fit_model(fitted_model):\n assert fitted_model.history['loss'] is not None", "def test_settingmodel_init():\n SettingsModel()", "def test_build_model(arguments):\n ...", "def test_keras_model_with_no_bias(self):\n\t\t\n\t\tfrom keras.models import Sequential\n\t\tfrom keras.layers import Dense, Activation\n\t\t\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(100, input_shape=(190,), use_bias=False))\n\t\tmodel.add(Activation(\"relu\"))\n\t\tmodel.add(Dense(10, use_bias=False))\n\t\tmodel.add(Activation('sigmoid'))\n\t\t\n\t\twatcher = ww.WeightWatcher(model=model)\n\t\tdetails = watcher.describe()\n\t\tprint(details)\n\t\tself.assertTrue(len(details)==2)\n\t\t\n\t\tdetails = watcher.analyze()\n\t\tprint(details)\n\t\tself.assertTrue(len(details)==2)\n\t\t\n\t\t\n\t\tdetails = watcher.analyze(min_evals=20)\n\t\tprint(details[['layer_id', 'M', 'num_evals']])\n\t\tself.assertTrue(len(details)==1)" ]
[ "0.6844442", "0.6665745", "0.6491076", "0.6237574", "0.6218416", "0.6198978", "0.6190901", "0.6182809", "0.6175669", "0.61716205", "0.6156747", "0.6148331", "0.6138285", "0.61118513", "0.6085253", "0.60259545", "0.6008298", "0.59566724", "0.593223", "0.592766", "0.59270495", "0.5916134", "0.5890754", "0.5887706", "0.5881624", "0.5834123", "0.5827549", "0.5820992", "0.5817654", "0.5811236" ]
0.78905725
0
Function that returns the API key for the Hypixel API.
def get_hypixel_key(self):
    key = self.bot_data_file["apiKeys"]["hypixel"]
    if self.check_empty_key(key):
        return key
    else:
        print("ERROR GETTING THE HYPIXEL KEY (get yours from https://api.hypixel.net/) - ABORTING")
        quit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def api_key(self):\n # type () -> str\n return self._api_key", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def get_api_key(site):\n\n # Assumes the configuration is available via a config module\n return config.get_key(site)", "def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')", "def api_key(self):\n return self.__creds.api_key_v2", "def get_api_key(api_key):\n api.get(api_key)", "def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")", "def api_key(self):\n return self._api_key", "def api_key(cls):\n cls.load()\n return cls._api_key", "def _apikey():\n return __opts__.get(\"bamboohr\", {}).get(\"apikey\", None)", "def API_KEY(self):\n return 8", "def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']", "def _get_api_key():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('api_key')", "def API_KEY(self):\n return 2", "def _get_api_key():\n api_key_directory = os.getenv(\"KOKORO_GFILE_DIR\")\n api_key_file = os.path.join(api_key_directory, \"resultstore_api_key\")\n assert os.path.isfile(api_key_file), (\n \"Must add --api_key arg if not on \"\n \"Kokoro or Kokoro environment is not set up properly.\"\n )\n with open(api_key_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")", "def api_key(self) -> Optional[str]: # noqa: D401\n return self._api_key", "def API_KEY(self):\n return 9", "def API_KEY(self):\n return 11", "def API_KEY(self):\n return 12", "def gen_api_key():\r\n m = hashlib.sha256()\r\n m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def API_KEY(self):\n return 14", "def get_tool_shed_api_key(self, trans, **kwd):\n return self.authentication_service.get_api_key(trans.environ, trans.request)", "def API_KEY(self):\n return 3", "def trello_api_key():\n return TRELLO_API_KEY", "def API_KEY(self):\n return 13" ]
[ "0.7882624", "0.7581468", "0.7505369", "0.7505369", "0.7505369", "0.7505369", "0.74702185", "0.7435706", "0.74057823", "0.7401957", "0.73333293", "0.73333293", "0.7320179", "0.7299802", "0.72601944", "0.7181773", "0.7115222", "0.711507", "0.71141195", "0.70895034", "0.70852727", "0.70707107", "0.7057186", "0.70450556", "0.70115536", "0.70057744", "0.7005759", "0.7004855", "0.7003365", "0.6995857" ]
0.7944425
0
Function that returns the API key for the GIF API.
def get_gif_key(self):
    key = self.bot_data_file["apiKeys"]["gif"]
    if self.check_empty_key(key):
        return key
    else:
        print("ERROR GETTING THE GIF KEY (get yours from http://api.giphy.com/) - ABORTING")
        quit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_api_key(api_key):\n api.get(api_key)", "def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def api_key(self):\n # type () -> str\n return self._api_key", "def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')", "def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self):\n return self._api_key", "def apikey(serv):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '{0}.key'.format(serv))\n key = open(path, \"r\").read().rstrip()\n return key", "def _get_api_key():\n api_key_directory = os.getenv(\"KOKORO_GFILE_DIR\")\n api_key_file = os.path.join(api_key_directory, \"resultstore_api_key\")\n assert os.path.isfile(api_key_file), (\n \"Must add --api_key arg if not on \"\n \"Kokoro or Kokoro environment is not set up properly.\"\n )\n with open(api_key_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")", "def api_key(cls):\n cls.load()\n return cls._api_key", "def _get_api_key():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('api_key')", "def api_key(self):\n return self.__creds.api_key_v2", "def get_api_key(site):\n\n # Assumes the configuration is available via a config module\n return config.get_key(site)", "def API_KEY(self):\n return 2", "def API_KEY(self):\n return 8", "def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")", "def API_KEY(self):\n return 3", "def trello_api_key():\n return TRELLO_API_KEY", "def API_KEY(self):\n return 9", "def _apikey():\n return __opts__.get(\"bamboohr\", {}).get(\"apikey\", None)", "def API_KEY(self):\n return 14", "def API_KEY(self):\n return 1", "def API_KEY(self):\n return 12", "def API_KEY(self):\n return 0", "def api_key(self) -> Optional[str]: # noqa: D401\n return self._api_key", "def get_api_key(instance):\n\n # TODO make this work with environment variables or else\n # by getting the api-key from ~/.config/flywheel/user.json\n # if the KEY_FILE is not present but that doesn't honor the\n # \"instance\" argument to this method\n\n with open(KEY_FILE) as json_file:\n keys = json.load(json_file)\n the_user = keys[\"default\"]\n for key, val in keys[\"ids\"][the_user].items():\n if instance.startswith(key):\n api_key = val\n if not api_key:\n print(f\"{CR}Could not find instance '{instance}'{C0}\")\n return api_key" ]
[ "0.7271017", "0.7184897", "0.70157254", "0.69424164", "0.6854634", "0.6826018", "0.6826018", "0.6826018", "0.6826018", "0.67205614", "0.67190504", "0.67133266", "0.6693411", "0.6674918", "0.6624762", "0.6581685", "0.65432197", "0.6537641", "0.65328526", "0.65328526", "0.65228236", "0.6516149", "0.6484012", "0.64585656", "0.64288473", "0.64247257", "0.6406715", "0.63983566", "0.63941354", "0.6389442" ]
0.78639907
0
Function that returns the default weather country to use.
def get_weather_country(self): return self.bot_data_file["weather"]["default_country"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def country() -> str:", "def get_default_country(self):\n proxy = self.env['ir.config_parameter']\n default_country = proxy.sudo().get_param('default_country')\n if not default_country:\n raise UserError('Please use Default Country as US in config parameters.')\n return default_country.strip()", "def get_default_country(self):\n proxy = self.env['ir.config_parameter']\n default_country = proxy.sudo().get_param('default_country')\n if not default_country:\n raise UserError('Please use Default Country as US in config parameters.')\n return default_country.strip()", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")", "def country(self) -> str:\n return pulumi.get(self, \"country\")", "def get_weather_language(self):\n return self.bot_data_file[\"weather\"][\"default_language\"]", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def get_country(self, field_name='COUNTRY'):\n default = self.get_default(field_name)\n if default != '' and default != None:\n return '%s' % str(default).split(',')[-1].strip()\n return ''", "def country(self):\n if \"country\" in self._prop_dict:\n return self._prop_dict[\"country\"]\n else:\n return None", "def country(self):\n if \"country\" in self._prop_dict:\n return self._prop_dict[\"country\"]\n else:\n return None", "def country(name):\n return location_db().find(name=name)[\"country\"]", "def country_or_region(self) -> str:\n return pulumi.get(self, \"country_or_region\")", "def get_city_country(city, country, population=''):\n if population:\n city_country = f\"{city}, {country} - population {population}\"\n else:\n city_country = f\"{city}, {country}\"\n return city_country.title()", "def default():\n return DefaultGeothermal.default()", "def get_country(ip):\r\n return geoip.country_code_by_addr(ip)", "def country(self):\n return self.status.place['country']", "def get_geo_location(country: str) -> Optional[str]:\n country_to_geo_location_mapping = {\n \"Portugal\": \"Europe\",\n \"Spain\": \"Europe\",\n \"France\": \"Europe\",\n \"Italy\": \"Europe\",\n \"Malta\": \"Europe\",\n \"Switzerland\": \"Europe\",\n \"Austria\": \"Europe\",\n \"Slovenia\": \"Europe\",\n \"Croatia\": \"Europe\",\n \"Greece\": \"Europe\",\n \"Turkey\": \"Europe\",\n \"North Macedonia\": \"Europe\",\n \"Poland\": \"Europe\",\n \"Germany\": \"Europe\",\n \"Netherlands\": \"Europe\",\n \"Denmark\": \"Europe\",\n \"Sweden\": \"Europe\",\n \"Norway\": \"Europe\",\n \"Finland\": \"Europe\",\n \"Latvia\": \"Europe\",\n \"Russia\": \"Europe\",\n \"Belgium\": \"Europe\",\n \"Ireland\": \"Europe\",\n \"United Kingdom\": \"Europe\",\n \"Iceland\": \"Europe\",\n \"Canada\": \"North America\",\n \"Mexico\": \"North America\",\n \"United States\": \"North America\",\n }\n\n return country_to_geo_location_mapping.get(country, None)", "def convert_country(country):\n if (country and 'China' in country) or \\\n country == 'Chin' or country == 'CHINA':\n country = 'China'\n elif country and 'Brazil' in country or \\\n country == 'Brasil' or \\\n country == 'ITA - Instituto Tecnologico de Aeronautica (':\n country = 'Brazil'\n elif country and 'Argentina' in country:\n country = 'Argentina'\n elif country == 'Czechia':\n country = 'Czech Republic'\n elif 'Norwegian' in country:\n country = 'Norway'\n elif country and 
'United Kingdom' in country:\n country = 'United Kingdom'\n elif country and 'Hong Kong' in country:\n country = 'Hong Kong'\n elif country == 'Cameroun':\n country = 'Cameroon'\n elif (country and 'Chile' in country) or country == 'CHILE':\n country = 'Chile'\n elif (country and 'United States of America' in \\\n country) or country == 'United States' or country \\\n == 'USA' or 'Florida' in country or \\\n 'California' in country or\\\n country == 'National Reference Centre for' or \\\n country == 'United State of America' or \\\n country == 'U.S.A.' or \\\n country == 'Virginia':\n country = 'United States of America'\n elif country=='Republic of Panamá' or country=='Panamá' or 'Panama' in country:\n country = 'Panama'\n elif 'Canada' in country:\n country = 'Canada'\n elif 'Colombia' in country:\n country = 'Colombia'\n elif 'Spain' in country or country=='España':\n country = 'Spain'\n elif 'Iran' in country:\n country = 'Iran'\n elif 'Saudi Arabia' in country:\n country = 'Saudi Arabia'\n elif 'Italy' in country:\n country = 'Italy'\n elif 'Japan' in country:\n country = 'Japan'\n elif 'Germany' in country:\n country = 'Germany'\n elif 'Luxembourg' in country:\n country = 'Luxembourg'\n elif ('France' in country) or country == 'Marseille':\n country = 'France'\n elif country == 'ROC' or country == 'R. O. C':\n country = 'Taiwan'\n elif country == 'Brasil':\n country = 'Brazil'\n elif country == 'México' or 'Mexico' in country:\n country = 'Mexico'\n elif 'Slowakia' in country:\n country = 'Slowakia'\n elif country == 'Korea' or 'Republic of Korea' in country:\n country = 'South Korea'\n elif country == 'United Kindgom':\n country = 'United Kingdom'\n elif country and 'Netherlands' in country:\n country = 'Netherlands'\n elif country == 'Commonwealth of Australia' or 'Australia' in country:\n country = 'Australia'\n elif 'Singapore' in country:\n country = 'Singapore'\n elif country and (country[0].isdigit() or country[0] == '+'):\n country = 'N/A'\n return country", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. 
Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None", "def get_user_country(user_location):\n geo_locator = geopy.Nominatim(user_agent=\"User Location\", timeout=10)\n location = geo_locator.reverse(user_location, language='en')\n location = str(location).split(', ')\n country = location[-1]\n\n if country == 'United States of America':\n country = 'USA'\n elif country == 'United Kingdom':\n country = 'UK'\n\n return country", "def get_inflation_country():\n print(\">> Downloading WORLD BANK inflation / country data...\")\n url = source_config.inflation_data_url['latest']\n output_file = source_config.inflation_data_files['raw']['latest']\n download_insee_excel(url, output_file, check=False)", "def default():\n return DefaultWindPower.default()", "def country(self):\n # type: () -> string_types\n return self._country", "def get_country(self, country):\n if country == \"United Kingdom\": return \"en\"\n if country == \"Portugal\": return \"pt\"\n\n result = self.session.get(\"https://en.ogame.gameforge.com\")\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n code_list = soup.find(\"ul\", {\"id\": \"mmoList1\"})\n countries = {}\n for tag in code_list.find_all(\"li\"):\n link = tag.find(\"a\")[\"href\"]\n name = tag.string.strip() # name of the country\n code = link.split(\".\")[0].replace(\"//\", \"\")\n countries[name] = code # save to the dict\n\n # check if input was ok\n if not country in countries.keys():\n self.crash(\"Country\", country, \"was not found on the list.\")\n if len(countries[country]) != 2:\n self.crash(\"Can't fetch code for country\", country)\n\n return countries[country]", "def getDefaultCurrency():", "def get_city_country(city, country, population=''):\n\tif population:\n\t\tcity_country = city.title() + ', ' + country.title() + \" - population \" + str(population)\n\telse:\n\t\tcity_country = city.title() + ', ' + country.title()\n\treturn city_country", "def get_leagues_country(wd):\n try:\n country = wd.find_element_by_tag_name(\"img\").get_attribute(\"alt\")\n return country\n except:\n return \"N/A Country\"", "def city_country(city, country):\n return f\"{city.title()}, {country.title()}\"", "def city_country(city, country):\n return f\"{city.title()}, {country.title()}\"" ]
[ "0.7248471", "0.72482795", "0.72482795", "0.7153096", "0.7153096", "0.6838989", "0.6754464", "0.6725625", "0.66886955", "0.6680285", "0.6680285", "0.6672574", "0.6584849", "0.6470137", "0.64690846", "0.6460345", "0.6444273", "0.6421123", "0.6391796", "0.6377553", "0.6376989", "0.6334231", "0.6319703", "0.63006735", "0.6264556", "0.6261244", "0.62561363", "0.6224264", "0.62145025", "0.62145025" ]
0.84455776
0
Function that return the default weather language for the results
def get_weather_language(self): return self.bot_data_file["weather"]["default_language"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_locale():\n return \"he\"", "def Language(self, default=None):\n return self.data.get('language', default)", "def get_weather_country(self):\n return self.bot_data_file[\"weather\"][\"default_country\"]", "def trans_weather(string):\r\n\treturn cn2en.WEATHER[string]", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def get_default_language():\n utility = queryUtility(ILanguageAvailability)\n if utility is not None:\n return utility.getDefaultLanguage()\n return DEFAULT_LANGUAGE", "def gpwDefaultLanguage(self):\n parent = self.getFolderWhenPortalFactory()\n if hasattr(parent, 'getRawLanguage') and parent.getRawLanguage():\n return parent.getRawLanguage()\n tool = getToolByName(self, 'portal_languages', None)\n if tool is not None:\n return tool.getDefaultLanguage()\n return config.LANGUAGE_DEFAULT", "def get_user_language() -> str:\n languages = {\n \"arabic\": \"arb\",\n \"chinese\": \"cmn-CN\",\n \"danish\": \"da-DK\",\n \"english\": \"en-GB\",\n \"french\": \"fr-FR\",\n \"german\": \"de-DE\",\n \"portuguese\": \"pl-PT\",\n \"spanish\": \"es-ES\"\n }\n textlang = input(\"What language do you want to hear?\")\n try:\n return languages[textlang.lower()]\n except KeyError as e:\n print(\"Enter a valid language.\")\n sys.exit(1)", "def temps(lieu):\r\n\r\n key = '5a72ceae1feda40543d5844b2e04a205'\r\n localisation = \"http://api.openweathermap.org/data/2.5/weather?q={0},fr&appid={1}\"\r\n localisation = localisation.format(lieu, key)\r\n request_html = requests.get(localisation)\r\n data = request_html.json()\r\n\r\n weather = data['weather'][0]['main']\r\n\r\n if weather == \"Clear\":\r\n weather = \"Beau\"\r\n\r\n elif weather == \"Clouds\":\r\n weather = \"Nuageux\"\r\n return weather", "def default_language(self) -> str:\n return self.raw_config.get(\"default_language\", \"en\")", "def test_default_translations(self):\n\t\t\n\t\tself.assertTrue(data.get_default_translation('Catholicism', 3) == 'DRA')\n\t\tself.assertTrue(data.get_default_translation('Christianity', 3) == 'ESV')", "def get_language(self, text):\n try:\n post_lang = detect(text)\n except:\n post_lang = 'N/A'\n return post_lang", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def default_language(self):\n return self._default_language", "def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()", "def t(eng, chinese):\n return chinese if 'zh' in get_info().user_language else eng", "def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'", "def getWikiLanguageName():\r\n return \"wikidpad_mini_1_0\"", "def wikiLanguages():\n return languages", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = 
setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def use_en(self):\n pass", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def weather_helper():\n\n weather = get_weather('Chicago')\n conditions = weather['weather'][0]['description']\n temperature = weather['main']['temp']\n location = weather['name']\n\n curr_weather = 'It is currently %s degrees with %s in %s' % (temperature, conditions, location)\n return curr_weather", "def default():\n return DefaultWindPower.default()", "def get_localization(self, language: str) -> Localization:\n ...", "def get_localization(self):\n return self._request_data(\"/lokarria/localization\")", "def translate(self, language=None):", "def get_language_of_horizon_url(self) -> str:\n if 'horizons-mag' in self.url_h:\n self.language = 'en'\n elif 'horizonte-magazin' in self.url_h:\n self.language = 'de'\n elif 'revue-horizons' in self.url_h:\n self.language = 'fr'" ]
[ "0.6565924", "0.65229905", "0.6455026", "0.64302605", "0.64252025", "0.6408285", "0.639051", "0.6347569", "0.6344524", "0.632163", "0.63124716", "0.6244405", "0.6179648", "0.6177513", "0.61695105", "0.61636174", "0.61391145", "0.61023325", "0.6052343", "0.60194623", "0.598831", "0.59782004", "0.59771055", "0.5967522", "0.59665537", "0.59499496", "0.59443986", "0.593902", "0.5904153", "0.5901601" ]
0.8097329
0
Function that return the default rocket league platform to use
def get_rocket_league_platform(self): return self.bot_data_file["rocketleague"]["default_platform"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_platform(self) -> Platform:\n _args: list[Arg] = []\n _ctx = self._select(\"defaultPlatform\", _args)\n return _ctx.execute_sync(Platform)", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def platform():\n return \"micaz\"", "def getPlatform(self):\n\t\treturn None", "def get_platform():\n platforms = [\n \"Android\",\n \"Linux.RaspberryPi\",\n \"Linux\",\n \"XBOX\",\n \"Windows\",\n \"ATV2\",\n \"IOS\",\n \"OSX\",\n \"Darwin\",\n ]\n\n for platform in platforms:\n if xbmc.getCondVisibility('System.Platform.'+platform):\n return platform\n return \"Unknown\"", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n \"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def get_platform(self, name):\n if name in self.platforms:\n return name\n else:\n try:\n p = self.platforms['name'] = Platform.load(self, name)\n return p\n except IOError as e:\n print('Failed loading platform: {0}'.format(str(e)))\n return None", "def get_computer_play():\r\n return random.choice(['Ailurophile', 'Assemblage', 'Becoming', 'Beleaguer', \r\n 'Brood', 'Bucolic', 'Bungalow', 'Chatoyant', 'Comely', \r\n 'Conflate', 'Cynosure', 'Dalliance', 'Demesne', 'Demure', \r\n 'Denouement', 'Desuetude', 'Desultory', 'Diaphanous', \r\n 'Dissemble', 'Dulcet', 'Ebullience', 'Effervescent', \r\n 'Efflorescence', 'Elision', 'Elixir', 'Eloquence', \r\n 'Embrocation', 'Emollient', 'Ephemeral', 'Epiphany', \r\n 'Erstwhile', 'Ethereal', 'Evanescent', 'Evocative', \r\n 'Fetching', 'Felicity', 'Forbearance', 'Fugacious', \r\n 'Furtive', 'Gambol', 'Glamour', 'Gossamer', 'Halcyon', \r\n 'Harbinger', 'Imbrication', 'Imbroglio', 'Imbue', \r\n 'Incipient', 'Ineffable', 'Ingenue', 'Inglenook', \r\n 'Insouciance', 'Inure', 'Kayak', 'Labyrinthine', \r\n 'Lagniappe', 'Lagoon', 'Languor', 'Lassitude', 'Leisure', \r\n 'Lilt', 'Lissome', 'Lithe', 'Love', 'Mellifluous', \r\n 'Moiety', 'Mondegreen', 'Murmurous', 'Nemesis', 'Numbered',\r\n 'Offing', 'Onomatopoeia', 'Opulent', 'Palimpsest', \r\n 'Panacea', 'Panoply', 'Pastiche', 'Penumbra', 'Petrichor', \r\n 'Plethora', 'Propinquity', 'Pyrrhic', 'Python', \r\n 'Quintessential', 'Ratatouille', 'Ravel', 'Redolent', \r\n 'Riparian', 'Ripple', 'Scintilla', 'Sempiternal', 'Seraglio', \r\n 'Serendipity', 'Summery', 'Sumptuous', 'Surreptitious', \r\n 'Susquehanna', 'Susurrous', 'Talisman', 'Tintinnabulation', \r\n 'Umbrella', 'Untoward', 'Vestigial', 'Wafture', \r\n 'Wherewithal', 'Woebegone'])", "def get_platform():\n try:\n import RPi.GPIO\n return PI\n except ImportError:\n pass\n\n if platform.system() == 'Linux':\n return LINUX\n else:\n return UNKNOWN", "def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")", "def default():\n return DefaultPvWattsv8.default()", "def Platforms():\n return platforms", "def getPlatform(self, name):\r\n if self.platforms.has_key(name):\r\n return self.platforms[name]\r\n else:\r\n self.platforms[name] = Platform(name)\r\n return self.platforms[name]", "def getDefault():", "def load_default_game():\n global width, height, dungeon_map # DO NOT REMOVE\n width = 5\n height = 3\n dungeon_map = [list(\"&.@:=\"), list(\" \"), list(\"OYO k\")]\n return (\n 2, # 
player x\n 1, # player y\n '>', # player symbol\n {'+': 1}, # inventory\n 0, # werewolf x\n 1, # werewolf y\n 1, # werewolf health\n 0, # werewolf stun count\n )", "def getPlatform(self):\n self.platform=util.get_platform()\n if not(self.platform.find('linux')==-1): self.platform='Unix' # i suppose, that in all unix systems are paths similiar\n if self.platform=='win32': self.platform='Win32' # this should be done automatically", "def default():\n return DefaultPvWattsv5.default()", "def platform():\n return ['linux']", "def GetCurrentPlatform():\n if sys.platform == 'darwin':\n return 'mac'\n if sys.platform == 'win32':\n return 'win'\n if sys.platform == 'linux2':\n return 'linux'\n raise RuntimeError('Unknown platform')", "def get_platform():\n global _PLATFORM\n if _PLATFORM is None:\n _PLATFORM = _sysconfig.get_platform()\n return _PLATFORM", "def machine():\n mach = platform.machine()\n if mach.startswith('arm'):\n return 'arm'\n else:\n # Assume x86/x86_64 machine.\n return None", "def platform():\n if 'OS' in gyp_defines():\n if 'android' in gyp_defines()['OS']:\n return 'android'\n else:\n return gyp_defines()['OS']\n elif IsWindows():\n return 'win'\n elif IsLinux():\n return 'linux'\n else:\n return 'mac'", "def get_platform(build_rules_list, verbose=False, platform=None):\n\n # Make sure the input is PlatformTypes\n if platform:\n platform = PlatformTypes.lookup(platform)\n\n # If it's not, search the build_rules for it\n if not isinstance(platform, PlatformTypes):\n\n # Check build_rules.py\n platform = getattr_build_rules_list(\n build_rules_list, \"PROJECT_PLATFORM\", None)\n\n # Is it not a PlatformTypes?\n if not isinstance(platform, PlatformTypes):\n item = PlatformTypes.lookup(platform)\n if not isinstance(item, PlatformTypes):\n print(\n \"Platform Type \\\"{}\\\" is not supported, using a default.\".format(platform))\n platform = PlatformTypes.default()\n else:\n platform = item\n\n # Print if needed.\n if verbose:\n print(\"Platform name {}\".format(platform))\n return platform", "def get_rocket_league_key(self):\n key = self.bot_data_file[\"rocketleague\"][\"key\"]\n if self.check_empty_key(key):\n return key\n else:\n print(\n \"ERROR GETTING THE ROCKET LEAGUE KEY (check bot documentation) - ABORTING\")\n quit(1)", "def who_goes_first(self):\n if random.randint(0, 1) == 0:\n return 'computer'\n return 'player'", "def get_platforms(self):\n if self.platform == 'All':\n return PLATFORMS\n else:\n return self.platform.split(':')", "def default():\n return DefaultWindPower.default()", "def get_sequencing_platform(self):\n platform = self.data[\"platform\"]\n if platform == \"miseq\":\n platform = \"MiSeq\"\n elif platform == \"hiseq4000\":\n platform == \"HiSeq4000\"\n elif platform == \"hiseq2000\":\n platform == \"HiSeq2000\"\n else:\n raise Exception(\"Unknown platform {platform} for sequencing run {run}\".format(platform=platform,run=self.run))\n return platform", "def DRIVER():\n return \"podman\"", "def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)" ]
[ "0.6455419", "0.6322307", "0.62479967", "0.6134072", "0.60681105", "0.6051301", "0.5905534", "0.5828498", "0.5817532", "0.5813724", "0.57934755", "0.5779312", "0.5738541", "0.57173306", "0.5632414", "0.5601634", "0.5525579", "0.5488002", "0.54671514", "0.54515594", "0.5449795", "0.54451686", "0.5418922", "0.54130244", "0.5391391", "0.5385227", "0.5378629", "0.5367841", "0.5365362", "0.5364647" ]
0.7961892
0
Function that return the api key for the youtube api.
def get_youtube_api_key(self): key = self.bot_data_file["youtube"]["key"] if self.check_empty_key(key): return key else: print( "ERROR GETTING THE YOUTUBE KEY (check bot documentation) - ABORTING") quit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_youtube_dev_key():\r\n return getenv('YOUTUBE_DEV_KEY')", "def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def api_key(self):\n return self.__creds.api_key_v2", "def trello_api_key():\n return TRELLO_API_KEY", "def api_key(self):\n # type () -> str\n return self._api_key", "def api_key(self):\n return self._api_key", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")", "def _get_api_key():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('api_key')", "def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')", "def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")", "def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")", "def get_api_key(api_key):\n api.get(api_key)", "def api_key(cls):\n cls.load()\n return cls._api_key", "def get_api_key(site):\n\n # Assumes the configuration is available via a config module\n return config.get_key(site)", "def api_key(self) -> Optional[str]: # noqa: D401\n return self._api_key", "def API_KEY(self):\n return 2", "def _apikey():\n return __opts__.get(\"bamboohr\", {}).get(\"apikey\", None)", "def _get_api_key():\n api_key_directory = os.getenv(\"KOKORO_GFILE_DIR\")\n api_key_file = os.path.join(api_key_directory, \"resultstore_api_key\")\n assert os.path.isfile(api_key_file), (\n \"Must add --api_key arg if not on \"\n \"Kokoro or Kokoro environment is not set up properly.\"\n )\n with open(api_key_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")", "def API_KEY(self):\n return 9", "def API_KEY(self):\n return 12", "def API_KEY(self):\n return 3", "def API_KEY(self):\n return 8", "def get_youtube_client():\r\n # Disable OAuthlib's HTTPS verification when running locally.\r\n # *DO NOT* leave this option enabled in production.\r\n os.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\r\n\r\n api_service_name = \"youtube\"\r\n api_version = \"v3\"\r\n client_secrets_file = \"client_secret.json\"\r\n\r\n # Get credentials and create an API client\r\n scopes = [\"https://www.googleapis.com/auth/youtube.readonly\"]\r\n flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(\r\n client_secrets_file, scopes)\r\n credentials = flow.run_console()\r\n\r\n # from the Youtube DATA API\r\n youtube_client = googleapiclient.discovery.build(\r\n api_service_name, api_version, credentials=credentials)\r\n\r\n return youtube_client", "def api_secret_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_secret_key\")", "def get_api_key() -> dict:\r\n with open('config.json', 'r') as config_file:\r\n api_keys = json.load(config_file)\r\n return api_keys['newsapi']['api']", "def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']", "def API_KEY(self):\n return 11" ]
[ "0.76500094", "0.73951364", "0.7117241", "0.70795554", "0.7049423", "0.6914638", "0.69049734", "0.69049734", "0.69049734", "0.69049734", "0.6876005", "0.68502235", "0.6762104", "0.6762104", "0.6760866", "0.6752559", "0.6700647", "0.6627661", "0.65667534", "0.6539867", "0.6539679", "0.65279496", "0.65100706", "0.6495266", "0.6456945", "0.6437682", "0.64311886", "0.64064825", "0.6385671", "0.6379005" ]
0.83354473
0
Function that return the list of the youtube channels to check, with all the details about the notification
def get_list_youtube_channels_check(self): return self.bot_data_file["youtube"]["channels"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_channels(youtube, channelId):\n channels_response = youtube.channels().list(\n id=AUTH_USER_CHANNEL_ID,\n part=\"contentDetails\"\n ).execute()\n \n return channels_response[\"items\"]", "def get_channels():\n r = slack.channels.list().body\n return [ c for c in r['channels'] if c['is_member'] ]", "def get_channels():\n\tchannels = slack.get_channels()\n\treturn jsonify(channels=channels.body['channels'])", "def _channels_list(self):\n result = self.slack.api_call(\"channels.list\")\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['channels']", "def get_user_channels(self):\n\n request = self.youtube.subscriptions().list(\n part='snippet',\n mine=True,\n order='alphabetical'\n )\n subscriptions = []\n while request:\n response = request.execute()\n subscriptions.append(response)\n request = self.youtube.subscriptions().list_next(request, response)\n\n channels = {}\n for subscription in subscriptions:\n for channel in subscription['items']:\n channel_title = channel['snippet']['title']\n channel_id = channel['snippet']['resourceId']['channelId']\n channels[channel_title] = channel_id\n\n return channels", "def get_channel_details(self, chan_ids_list, part='statistics'):\n\n chnl_details = {}\n key = self.keylist[self.keyindex]\n url_c = \"https://www.googleapis.com/youtube/v3/channels\"\n\n for ind, chan in enumerate(chan_ids_list):\n try:\n querystring = {\"id\": chan, \"part\": part,\n \"key\": key}\n response = request_handler(self, url_c, params=querystring, wait=100)\n #print(response)\n # Error-handling\n if response.get('error'):\n print(response.get('error'))\n while response['error']['errors'][0]:\n key = keychange(self)\n \n querystring = {\"id\": chan, \"part\": part,\n \"key\": key}\n response = request_handler(self, url_c, params=querystring, wait=100)\n\n if response.get('error'):\n #chnl_details.update({chan:[str(response), response.text]})\n #\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{chan:[str(response), response.text]}]\n break\n\n if response.get('Interneterror'):\n chnl_details.update({chan: str(response)})\n continue\n\n chnl_details[chan] = response['items']\n\n except Exception as e:\n print(e, traceback.format_exc())\n\n if ind % 100 == 0:\n print(ind)\n \n return chnl_details", "def get_channels_json(self):\n logging.debug(f\"Getting all Slack channels...\")\n return self.get_list_json(\"conversations\")[\"channels\"]", "def analyt(analytics):\n API_KEY = secrets.YT_KEY\n youtube = build('youtube', 'v3', developerKey=API_KEY)\n request = youtube.channels().list(\n part='statistics',\n forUsername=analytics\n )\n response = request.execute()\n print(response)", "def get_channels(cj): \n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n channels = opener.open(\"http://www.douban.com/j/app/radio/channels\")\n channel_list = json.loads(channels.read())\n return channel_list[\"channels\"]\n # print channel_list", "async def watchlist(self, ctx):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n msg = \"Bad gifs will be removed in:\\n\"\r\n for channel in channel_list:\r\n channel_obj = self.bot.get_channel(channel)\r\n if channel_obj is None: # Catch deleted/unexisting channels\r\n continue\r\n msg += f\"{channel_obj.mention}\\n\"\r\n await ctx.send(msg)", "def get_channels(self):\n response = self.client.api_call(\n f'conversations.list?types={cfg.CHANNEL[\"types\"]}&exclude_archived={cfg.CHANNEL[\"exclude_archived\"]}'\n )\n assert 
response['ok']\n return response['channels']", "async def votechannel_list(self, ctx):\n channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id, voting_type FROM voting_channel WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n )\n if not channels:\n raise exceptions.Info(\"There are no voting channels on this server yet!\")\n\n rows = []\n for channel_id, voting_type in channels:\n rows.append(f\"<#{channel_id}> - `{voting_type}`\")\n\n content = discord.Embed(\n title=f\":1234: Voting channels in {ctx.guild.name}\", color=int(\"3b88c3\", 16)\n )\n await util.send_as_pages(ctx, content, rows)", "def list_channel_captions_data(youtube=None, channel=\"\", max_results=100):\n\n # fetch data for all channel videos up to max results\n videos_data = youtube.search().list(\n part = \"snippet\",\n #forMine = True,\n channelId = \"UCMk_WSPy3EE16aK5HLzCJzw\",\n maxResults = max_results,\n q = \"*\",\n type = \"video\"\n ).execute()\n\n # create a list of id, title pairs for videos\n video_ids_titles = list(map(\n lambda video: (\n video['items']['id']['videoId'],\n video['items']['snippet']['title']\n ),\n videos_data\n ))\n\n # search video ids for draft captions\n captions_data_per_video = {}\n for video_id, video_title in video_ids_titles:\n captions_data = list_video_captions_data(youtube, video_id)\n # store languages in video captions dict\n captions_data_per_video[video_id] = {\n 'id': video_id,\n 'title': video_title,\n 'captions': captions_data\n }\n \n # send back found caption languages\n return captions_data_per_video", "async def list_channel(self, ctx: MyContext):\n channels = self.db_get_channels(ctx.guild.id)\n if not channels: # we can't send an empty list\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.no-channels\", p=ctx.prefix\n )\n )\n return\n txt = \"\\n\".join([c.to_str() for c in channels])\n await ctx.send(txt)", "async def _list(self, ctx):\n config = await self.config.guild(ctx.guild).channels()\n data = [self.bot.get_channel(x).mention for x in config]\n if ctx.channel.id in config:\n destination = ctx.author\n else:\n destination = ctx\n if not data:\n return await destination.send(\"There are no channels.\")\n await destination.send(\", \".join(data))", "def channels_listall(token):\n channels_results = channels.list()\n channels_list = []\n for channel in channels_results:\n channels_list.append(\n {\"channel_id\": channel[\"channel_id\"], \"name\": channel[\"name\"]}\n )\n return {\"channels\": channels_list}", "def list_all_channels(_response=Response, _db=Depends(get_db)):\n\n res_status, _data = ChatController(_db).list_channels()\n\n _response.status_code = res_status\n\n return {\"data\": _data}", "def get_comments(youtube, video_id, channel_id):\n global nextPageToken\n \n results = youtube.commentThreads().list(\n part=\"snippet\", \n videoId=video_id, \n allThreadsRelatedToChannelId=AUTH_USER_CHANNEL_ID\n ).execute()\n\n nextPageToken = results.get(\"nextPageToken\")\n\n for item in results[\"items\"]:\n comment = item[\"snippet\"][\"topLevelComment\"]\n \tauthor = comment[\"snippet\"][\"authorDisplayName\"]\n \ttry:\n \t authorChannelId = comment[\"snippet\"][\"authorChannelId\"]\n \texcept KeyError:\n \t pass\n \tchannel = authorChannelId.get(\"value\")\n \t\n \tchannel_list.append(channel)\n \t\n return results[\"items\"]", "def get_videos(channel_name, CLIENT_SECRETS_FILE):\r\n\r\n video_list = []\r\n\r\n MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\"\r\n\r\n YOUTUBE_READONLY_SCOPE = 
\"https://www.googleapis.com/auth/youtube.readonly\"\r\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\r\n YOUTUBE_API_VERSION = \"v3\"\r\n\r\n flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,\r\n message=MISSING_CLIENT_SECRETS_MESSAGE,\r\n scope=YOUTUBE_READONLY_SCOPE)\r\n\r\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\r\n credentials = storage.get()\r\n\r\n if credentials is None or credentials.invalid:\r\n flags = argparser.parse_args()\r\n credentials = run_flow(flow, storage, flags)\r\n\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n http=credentials.authorize(httplib2.Http()))\r\n\r\n # Retrieve the contentDetails part of the channel resource for the\r\n # authenticated user's channel.\r\n channels_response = youtube.channels().list(\r\n forUsername=channel_name,\r\n part=\"contentDetails\"\r\n ).execute()\r\n\r\n for channel in channels_response[\"items\"]:\r\n # From the API response, extract the playlist ID that identifies the list\r\n # of videos uploaded to the authenticated user's channel.\r\n uploads_list_id = channel[\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\r\n\r\n # Retrieve the list of videos uploaded to the authenticated user's channel.\r\n playlistitems_list_request = youtube.playlistItems().list(\r\n playlistId=uploads_list_id,\r\n part=\"snippet\",\r\n maxResults=50\r\n )\r\n\r\n while playlistitems_list_request:\r\n playlistitems_list_response = playlistitems_list_request.execute()\r\n\r\n # Print information about each video.\r\n for playlist_item in playlistitems_list_response[\"items\"]:\r\n title = playlist_item[\"snippet\"][\"title\"]\r\n video_id = playlist_item[\"snippet\"][\"resourceId\"][\"videoId\"]\r\n video_list.append((title, video_id, 'https://img.youtube.com/vi/' + video_id + '/0.jpg'))\r\n\r\n playlistitems_list_request = youtube.playlistItems().list_next(\r\n playlistitems_list_request, playlistitems_list_response)\r\n\r\n return(video_list)", "def test_channel_list1():\n reset_data()\n user1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"[email protected]\")\n owner1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"[email protected]\")\n channel1_1 = channels_create(owner1['token'], \"channel1\", True)['channel_id']\n channel_join(user1['token'], channel1_1)\n channel_list1 = channels_list(user1['token'])\n channels = [channel['channel_id'] for channel in channel_list1]\n assert channels == [channel1_1]\n print(\"=========pass test1 : only one channel in channel_list========\")", "def scrape_channel(channel_id):\n youtube = googleapiclient.discovery.build(\"youtube\", \"v3\", developerKey=DEV_KEY)\n\n channel_request = youtube.channels().list(id=channel_id, part=\"contentDetails\")\n channel_response = channel_request.execute()\n\n channels = channel_response['items']\n\n # This should really be unique but let's loop anyway.\n for channel in channels:\n uploads_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page = True\n page_token = None\n while next_page:\n uploads_request = youtube.playlistItems().list(playlistId=uploads_id,\n pageToken=page_token,\n part=\"contentDetails\",\n maxResults=5)\n uploads_response = uploads_request.execute()\n\n videos = uploads_response['items']\n for video in videos:\n video_id = video['contentDetails']['videoId']\n\n video_request = youtube.videos().list(id=video_id,\n part=\"snippet\")\n video_response = video_request.execute()\n\n print(video_id, video_response['items'][0]['snippet']['title'],\n sep=\"\\t\", flush=True)\n\n 
next_page = ('nextPageToken' in uploads_response.keys())\n if next_page:\n page_token = uploads_response['nextPageToken']\n\n sleep(1)", "def _extract_channels_from_sb_country(html_rsp):\n soup = BeautifulSoup(html_rsp, 'html.parser')\n channel_list = list()\n for link in soup.find_all('a'):\n url = link.get('href')\n if '/youtube/user/' in url:\n channel_list.append('https://socialblade.com' + url + '/monthly') # Get the detailed statistics page.\n return channel_list", "def test_blacklist_get_channel(self):\n test_channel = [{\"name\": \"Listed Count\",\n \"ShowChart\": 0,\n \"ShowTable\": 0,\n \"mode\": \"integer\",\n \"kind\": \"Custom\",\n \"customunit\": \"\",\n \"limitmaxerror\": 0,\n \"limitmode\": 1,\n \"value\": 0},\n {\"name\": \"Not Listed Count\",\n \"ShowChart\": 0,\n \"ShowTable\": 0,\n \"mode\": \"integer\",\n \"kind\": \"Custom\",\n \"customunit\": \"\",\n \"value\": 0},\n {\"name\": \"No Answer Count\",\n \"ShowChart\": 0,\n \"ShowTable\": 0,\n \"mode\": \"integer\",\n \"kind\": \"Custom\",\n \"customunit\": \"\",\n \"limitmaxwarning\": 0,\n \"limitmode\": 1,\n \"value\": 0}]\n assert_equal(self.test_blacklist.get_blacklist(['', 0, 0, 0]), test_channel)", "def active_channels(cls, user, notification_type):\n if notification_type not in NOTIFICATION_TYPES:\n raise ValueError(\"You asked for an invalid notification_type\")\n\n try:\n setting = cls.objects.get(user=user, notification_type=notification_type)\n except cls.DoesNotExist:\n # No setting equals all channels\n return CHANNELS\n\n if not setting.enabled:\n # Everything is disabled when the enabled flag is False\n return []\n\n # When enabled is True return all valid channels in the channels field.\n return list(set(setting.channels or []) & set(CHANNELS))", "def get_channels(self):\n return self.channels", "def channels_list(token):\n auth_u_id = get_id_from_token(token)\n all_channels = channels.query(\"all_members\", \"contains\", auth_u_id)\n channels_list = []\n for channel in all_channels:\n channels_list.append(\n {\"channel_id\": channel[\"channel_id\"], \"name\": channel[\"name\"]}\n )\n return {\"channels\": channels_list}", "async def fetch_dm_channels(self):\n data = await self.http.get_dm_channels()\n channels = []\n for dm_channel_data in data.get('channels', data):\n dm_channel = self.http.create_channel(data=dm_channel_data)\n channels.append(dm_channel)\n\n return channels", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def filter_channels(request):\n youtube_list = channels_filter(request.GET) #Youtube.objects.all()\n try:\n new = []\n for lst in youtube_list:\n if lst.id % 2:\n lst.view_rate = int(0 if lst.new is None else lst.new)\n new.append(lst)\n serializer = YotubeSerializer(new, many=True)\n return Response(serializer.data)\n except (TypeError, AttributeError) as err:\n print ('error ', err.message)\n return Response([])", "def load_channels(archived=False):\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = 
client.conversations_list(exclude_archived=not archived)\n assert response['ok'] is True\n\n channels = []\n for channel in response['channels']:\n channels.append((channel['id'], channel['name']))\n\n return {'ok': True, 'channels': channels}\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response" ]
[ "0.7082673", "0.69445324", "0.69291717", "0.68211055", "0.6791136", "0.67022717", "0.6548039", "0.6489021", "0.6430852", "0.64190793", "0.6402421", "0.6395185", "0.6382993", "0.6259776", "0.62419856", "0.62130135", "0.6212603", "0.62044746", "0.6181014", "0.6158402", "0.61281466", "0.61175686", "0.6082002", "0.6062779", "0.60560685", "0.6000734", "0.59811836", "0.5920615", "0.5899193", "0.5871793" ]
0.82831806
0
Function that return the current version of the bot.
def get_version(self): return self.bot_data_file["version"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version():\n click.echo(get_current_version_number())", "async def version(self):\n # [p]version\n\n await self.bot.say(\"Current version: \" + CoreAPI.get_version())", "def get_version():\n return about.get_version()", "def version(self):\n return self.get_current_version()", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]", "def get_version(self) -> str:\n return versioning.get_version()", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "async def version(self):\n self.do(\"version\")\n return (await self.read(7)).strip()", "def get_version(self):\n return self.version", "def get_current_version(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n return current_version", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "async def get_version(self):\n\n # Display info message\n log.info(\"get_version\")\n\n # By default empty string\n version = \"\"\n\n # Run get version on the device\n output = await self.send_command(self.cmd_get_version)\n\n # Seek \"Version \" and \",\" to get the version in the returned output\n version = output.split(\"Version \")[1].split(\",\")[0]\n\n # Display info message\n log.info(f\"get_version: version: {version}\")\n\n # Return the version of the software of the device\n return version", "def version(self):\n return self._client.getVersion()", "def get_version(self):\n self._send_command(self._adapter.get_version())", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")", "def get_version(self):\n pass", "def get_version(self):\n return version.__version__", "def get_version(self):\n return version.__version__", "def version():\n return Tns.exec_command(command='--version')", "def get_version(self):\n return self.api_version", "def get_version():\n return magpy.get_version()", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "def get(self):\n return self._version" ]
[ "0.77160335", "0.75977933", "0.7508295", "0.7467047", "0.74613523", "0.74310684", "0.7374588", "0.7321967", "0.72990876", "0.7288763", "0.72590643", "0.72493654", "0.7226841", "0.72038954", "0.7202787", "0.71924376", "0.7184682", "0.71830183", "0.71617293", "0.7141195", "0.7116767", "0.7111156", "0.71108246", "0.71108246", "0.7102882", "0.7092937", "0.7088483", "0.707709", "0.70709705", "0.7064278" ]
0.8068609
0
Function that return the current build number of the bot.
def get_build(self): return self.bot_data_file["build"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_number(self):\n return self.get_data(\"build_number\")", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def getBuild(number):", "def getBuild(number):", "def build_number(self):\n return self._build_number", "def build():\n return get_cached(\"build.json\", False).get(\"build_id\")", "def get_new_build(old_version, new_version, build):\n\n # Version did not change, increment the current build number\n if old_version == new_version:\n return str(int(build) + 1)\n\n # Version changed, start over at 1\n else:\n return str(1)", "def test_get_build_number(self):\n pass", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def get_last_completed_build_number(jenkins_url, job_name):\n return execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/lastCompletedBuild/buildNumber\"\n )", "def build_id(self):\n if self.method == 'tagBuild':\n return self.params[1]", "def get_last_successful_build_nr(jenkins_url, job_name):\n return execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/lastSuccessfulBuild/buildNumber\"\n )", "def _GetLastOfficialBuildRevision():\n # First make sure the builder doesn't have any pending builds and is idle.\n builders = _QueryWaterfall('/builders')\n if builders[_SYZYGY_OFFICIAL]['pendingBuilds'] > 0:\n raise RuntimeError('There are pending official builds.')\n if builders[_SYZYGY_OFFICIAL]['state'] != 'idle':\n raise RuntimeError('An official build is in progress.')\n\n # Get the information from the last build and make sure it passed before\n # extracting the revision number.\n build = _QueryWaterfall('/builders/%s/builds/-1' %\n urllib.quote(_SYZYGY_OFFICIAL))\n if 'successful' not in build['text']:\n raise RuntimeError('Last official build failed.')\n return int(build['sourceStamp']['revision'])", "def GetBuildNumFromBuilder(build_reason, bot_name, builder_host, builder_port):\n # Gets the buildbot url for the given host and port.\n server_url = _GetBuildBotUrl(builder_host, builder_port)\n buildbot_url = BUILDER_JSON_URL % {'server_url': server_url,\n 'bot_name': bot_name,\n 'build_num': '_all'\n }\n builds_json = _FetchBuilderData(buildbot_url)\n if builds_json:\n builds_data = json.loads(builds_json)\n for current_build in builds_data:\n if builds_data[current_build].get('reason') == build_reason:\n return builds_data[current_build].get('number')\n return None", "def get_arm_build(self):\n return self.parent._build[1]", "def _get_next_build_sequence_id(self):\n self._build_sequence += 1\n return \"{:0>4}\".format(self._build_sequence)", "def current_buildfile(self):\r\n return self._active_buildfile", "def build_api_version(self):\n return self._build_api_version", "def build(self):\n return int(self.build_string)", "def getBuild():", "def 
jenkins_last_build_sha():\n job_url = os.getenv('JOB_URL')\n job_json_url = \"{0}/api/json\".format(job_url)\n response = urllib.urlopen(job_json_url)\n job_data = json.loads(response.read())\n\n last_completed_build_url = job_data['lastCompletedBuild']['url']\n last_complete_build_json_url = \"{0}/api/json\".format(last_completed_build_url)\n\n response = urllib.urlopen(last_complete_build_json_url)\n last_completed_build = json.loads(response.read())\n\n return last_completed_build[1]['lastBuiltRevision']['SHA1'] # needs testing", "def get_build_line(latest_build):\n proc = Popen([\"osg-koji\", \"buildinfo\", latest_build],\n stdout=PIPE)\n build_line = proc.stdout.readline().decode(\"latin-1\").strip()\n ret = proc.wait()\n if ret != 0 or not build_line:\n return\n return build_line", "def getPreviousBuild():", "def builder_version(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"builder_version\")", "def get_version():\n click.echo(get_current_version_number())", "def getCurrentBuilds():\n # again, we could probably provide an object for 'waiting' and\n # 'interlocked' too, but things like the Change list might still be\n # subject to change", "def GetBuildID(build_bot, date):\n day = '{day:02d}'.format(day=date%100)\n mon = MONTHS[date/100%100]\n date_string = mon + ' ' + day\n if build_bot in WATERFALL_BUILDERS:\n url = 'https://uberchromegw.corp.google.com/i/chromeos/' + \\\n 'builders/%s?numbuilds=200' % build_bot\n if build_bot in ROTATING_BUILDERS:\n url = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver/' + \\\n 'builders/%s?numbuilds=200' % build_bot\n command = 'sso_client %s' %url\n retval = 1\n retry_time = 3\n while retval and retry_time:\n retval, output, _ = \\\n command_executer.GetCommandExecuter().RunCommandWOutput(command, \\\n print_to_console=False)\n retry_time -= 1\n\n if retval:\n return []\n\n out = output.split('\\n')\n line_num = 0\n build_id = []\n # Parse the output like this\n # <td>Dec 14 10:55</td>\n # <td class=\"revision\">??</td>\n # <td failure</td><td><a href=\"../builders/gcc_toolchain/builds/109\">#109</a>\n while line_num < len(out):\n if date_string in out[line_num]:\n if line_num + 2 < len(out):\n build_num_line = out[line_num + 2]\n raw_num = re.findall(r'builds/\\d+', build_num_line)\n # raw_num is ['builds/109'] in the example.\n if raw_num:\n build_id.append(int(raw_num[0].split('/')[1]))\n line_num += 1\n return build_id", "def build_time(self):\n return self.nodes[0].get('infos').get('system_info').get('build_time')", "def GetLastBuildRevision(self):\n last_build_revision = None\n if os.path.exists(self.last_change_file):\n last_build_revision = int(open(self.last_change_file).read())\n\n if os.path.exists(self.revisions_path):\n fp = open(self.revisions_path)\n try:\n line = fp.readline()\n\n # TODO(markhuang): remove this block after all builders are updated\n line = line.replace('\\'', '\"')\n\n revisions_dict = simplejson.loads(line)\n if revisions_dict:\n self.last_chromium_revision = revisions_dict['chromium_revision']\n self.last_webkit_revision = revisions_dict['webkit_revision']\n self.last_v8_revision = revisions_dict['v8_revision']\n except (IOError, KeyError, ValueError), e:\n self.last_chromium_revision = None\n self.last_webkit_revision = None\n self.last_v8_revision = None\n print e\n fp.close()\n return last_build_revision", "def get_build(self, build_id):\n pass" ]
[ "0.7946986", "0.7492893", "0.7415122", "0.7415122", "0.74024177", "0.71346706", "0.7069816", "0.7063874", "0.6871903", "0.68270797", "0.67294055", "0.6716901", "0.6683429", "0.6650736", "0.66344464", "0.657459", "0.65717137", "0.6549233", "0.65007925", "0.64656496", "0.6458796", "0.6428225", "0.63482827", "0.62860763", "0.62566924", "0.62466997", "0.6221623", "0.61547714", "0.61375505", "0.6123258" ]
0.7554891
1
Function that return the description of the bot.
def get_description(self): return self.bot_data_file["description"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def description(ctx, bot: typing.Union[discord.Member, discord.User]):\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n if not bot.bot:\n return await r(ctx, \"Not a bot.\")\n\n if len(data[\"Big_desc\"]) > 2000:\n desc = data[\"Big_desc\"][:2000] + \\\n f\"...\\n[View original page for full description](https://www.motiondevelopment.top/bots/{bot.id})\"\n else:\n desc = data[\"Big_desc\"]\n await em(ctx, embed=discord.Embed(color=0xfecdea, description=desc))", "def get_description(self) -> str:\n pass", "def description():", "def get_description(cls) -> str:\n return cls.__doc__ or \"\"", "def Description(self) -> str:", "def Description(self) -> str:", "def get_description(self):\n return self['command_name']", "def get_description(self):\n pass", "def description(self):\n return (self.__doc__ or \"\").strip()", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")" ]
[ "0.8035858", "0.7597421", "0.7409643", "0.7403121", "0.7396752", "0.7396752", "0.73865396", "0.7321154", "0.72624", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715", "0.7261715" ]
0.82401824
0
Function that return the current bot icon. This icon is used for embed messages
def get_bot_icon(self): return self.bot_data_file["bot_icon"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def icon(self):\n return self._sensor[CONF_ICON]", "def icon(self) -> str:\n return ICON_SERVER", "def get_icon(self):\n return self.ICON", "def icon(self):\n return self._config.get(CONF_ICON)", "def get_icon(self) -> Dict[str, Any]:\n player = self._last_sessionplayer\n assert player is not None\n return player.get_icon()", "def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''", "def icon(self):\n return self.ICON", "def icon(self):\n return self.ICON", "def icon(self):\n if \"icon\" in self._typeconf:\n return self._typeconf[\"icon\"]", "def get_icon(self):\r\n return get_icon(self.ICON)", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon", "async def icon(self, ctx: lifesaver.Context):\n if not ctx.guild.icon:\n await ctx.send(\"This server doesn't have a custom icon.\")\n return\n\n await ctx.send(ctx.guild.icon.replace(format=\"png\"))", "def get_icon(self):\n if self.verb == \"C\" or self.verb == \"A\" or self.verb == \"K\":\n return \"fa-comment\"\n\n elif self.verb == \"I\" or self.verb == \"U\" or self.verb == \"O\":\n return \"fa-users\"\n\n elif self.verb == \"L\":\n return \"fa-heart\"\n\n elif self.verb == \"F\":\n return \"fa-star\"\n\n elif self.verb == \"W\":\n return \"fa-check-circle\"\n\n elif self.verb == \"E\":\n return \"fa-pencil\"\n\n elif self.verb == \"V\":\n return \"fa-plus\"\n\n elif self.verb == \"S\":\n return \"fa-share-alt\"\n\n elif self.verb == \"R\":\n return \"fa-reply\"", "def get_icon(self):\n return self._icon", "def icon(self):\n return STATUSES.get(self._mower_status, {}).get('icon', DEFAULT_ICON)", "def icon(self):\n return self._metadata[2]", "def icon(self):\n return self.__icon", "def icon(self):\n ret_icon = self._icon\n if self.player_name == \"lower\":\n ret_icon = self._icon.lower()\n if self.is_promoted:\n ret_icon = \"+\" + ret_icon\n return ret_icon", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON" ]
[ "0.7684818", "0.7662258", "0.7628439", "0.75548655", "0.75522", "0.7551229", "0.7547751", "0.7547751", "0.75120354", "0.7508497", "0.75049603", "0.75049603", "0.75049603", "0.74850464", "0.7462734", "0.74546903", "0.74382097", "0.7438098", "0.7424576", "0.73911935", "0.73910886", "0.73910886", "0.73910886", "0.73910886", "0.73910886", "0.73910886", "0.73910886", "0.73910886", "0.73910886", "0.73910886" ]
0.87335
0
Function that return the current default status of the bot.
def get_default_status(self): return self.bot_data_file["bot_status"]["defaultStatus"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Status(self, default=None):\n return self.data.get('status', default)", "def defaultStatus(self):\n raise NotImplementedError", "def getDefaultStatus(session_key):\n\n # Get the list of statuses\n logger.debug(\"Getting the default status\")\n statuses_list = entity.getEntities(NotableEventUpdate.REVIEW_STATUSES_REST_URL,\n namespace=NotableEventUpdate.DEFAULT_NAMESPACE,\n owner=NotableEventUpdate.DEFAULT_OWNER,\n sessionKey=session_key,\n count=-1)\n\n # Get the first status defined a default (there should be only one)\n for status_id in statuses_list:\n\n # Get the status as a dictionary\n notable_status = statuses_list[status_id]\n\n # Get the disabled\n if 'disabled' in notable_status:\n disabled = splunk.util.normalizeBoolean(notable_status['disabled'])\n else:\n disabled = False\n\n # Get the default status\n if 'default' in notable_status:\n default = splunk.util.normalizeBoolean(notable_status['default'])\n else:\n default = False\n\n # If the status is both enabled and default then return it as the default\n if disabled is False and default:\n return status_id", "def defaultStatus(self, value=None):\n raise NotImplementedError", "def get_status(self):\n status = self._status.get_message()\n \n if status == \"N\":\n return \"offline\"\n \n elif status == \"Y\":\n return \"online\"\n \n elif status == \"A\":\n return \"away\"\n \n elif status == \"B\":\n return \"busy\"", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def status(self):\n return self.get(self._names[\"status\"])", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def __get_status(self):\n return random.choice(self.STATUS)", "def status(self) -> Optional[int]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"status\")" ]
[ "0.7572122", "0.7343083", "0.70287657", "0.6921298", "0.684734", "0.67186576", "0.666823", "0.6652601", "0.6652601", "0.6652601", "0.65798765", "0.65798765", "0.65798765", "0.65798765", "0.65798765", "0.65798765", "0.65798765", "0.65798765", "0.65798765", "0.654335", "0.6537283", "0.651919", "0.651919", "0.651919", "0.651919", "0.651919", "0.651919", "0.651919", "0.651919", "0.651919" ]
0.90379596
0
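For context, a minimal sketch of how a default status read from such a config file might be applied as a discord.py presence. The `BotConfig` wrapper and the config file name are assumptions; only the JSON keys come from the accessor above.

import json

import discord
from discord.ext import commands


class BotConfig:
    # Minimal stand-in for the config wrapper above; the class name is assumed.
    def __init__(self, path: str):
        with open(path) as f:
            self.bot_data_file = json.load(f)

    def get_default_status(self) -> str:
        return self.bot_data_file["bot_status"]["defaultStatus"]


config = BotConfig("bot_data.json")  # hypothetical config file name
bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())


@bot.event
async def on_ready():
    # Show the configured default status as the bot's "Playing ..." activity.
    await bot.change_presence(activity=discord.Game(name=config.get_default_status()))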
Function that returns the current command prefix of the bot.
def get_command_prefix(self):
    return self.bot_data_file["commands_prefix"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmdprefix(self) -> str:\n return self.config[\"Core\"].get(\"CmdPrefix\", \"!\")", "async def get_command_prefix(self, guild: Guild) -> str:\n if (prefix := self._cache[guild.id].prefix) is None:\n db_guild = await self.find_by_id(guild.id)\n\n if db_guild is None:\n await self.create_from_gateway_response(guild)\n db_guild = await self.find_by_id(guild.id)\n\n self._cache[guild.id].prefix = prefix = db_guild[\"command_prefix\"]\n\n return prefix", "def command_with_prefix(self):\n return self.endpoint_prefix.rstrip('/') + self.command", "async def get_prefix(bot: Bot, message: Message):\n return appearance.get_prefix(guild_id=message.guild.id)", "async def get_prefix(_bot, message):\n if isinstance(message.channel, discord.DMChannel):\n return _bot.config.dm_prefix\n\n if message.author == _bot.owner:\n return _bot.config.owner_prefix\n\n prefix_for_this_guild = await _bot.db.fetchrow(\n \"\"\"\n SELECT prefix\n FROM guilds\n WHERE id=$1\n \"\"\",\n message.guild.id,\n )\n\n if prefix_for_this_guild is None:\n await _bot.db.execute(\n \"\"\"\n INSERT INTO guilds (id, prefix)\n VALUES ($1, $2)\n \"\"\",\n message.guild.id,\n \",\",\n )\n prefix_for_this_guild = {\"prefix\": _bot.config.default_prefix}\n\n prefix_return = str(prefix_for_this_guild[\"prefix\"])\n return commands.when_mentioned_or(prefix_return)(_bot, message)", "async def prefix_process(bot, msg: discord.Message):\n if msg.guild:\n g_prefix = await bot.sql.get_guild_prefix(bot.db, guildID=msg.guild.id)\n if g_prefix:\n return g_prefix, bot.default_prefix\n return bot.default_prefix", "def get_prefix(self):\n return self.prefix", "def get_prefix(self):\n return self._prefix", "def get_prefix(self):\n return self._prefix", "def get_prefix(bot, message):\n\n # Add prefixes that you want the bot to respond to\n prefixes = ['?', '!']\n\n # Check to see if we are outside of a guild. e.g DM's etc.\n if not message.guild:\n # Only allow ? 
to be used in DMs\n return '?'\n\n # If we are in a guild, we allow for the user to mention us or use any of the prefixes in our list.\n return commands.when_mentioned_or(*prefixes)(bot, message)", "async def get_prefix(client, message):\n conf1 = json.load(open(\"json/serverconfig.json\", 'r'))\n guild = message.guild\n if guild:\n return conf1[str(guild.id)][\"prefix\"]\n else:\n return get_default_prefix()", "def prefix(self):\n return self[\"prefix\"]", "def prefix(self):\n return self[\"prefix\"]", "def rel_command(self):\n return self.command.lstrip('/')", "def getPrefix(self):\n return( self.id.split('.')[0] )", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def getPrefix(self):\n return _libsbml.SBasePlugin_getPrefix(self)", "def connection_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"connection_prefix\")", "def connection_string_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_string_prefix\")", "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "async def prefix(self, _bot, message: discord.Message):\n mention = [self.user.mention + ' ', f'<@!{self.user.id}> ']\n additional_prefixes = await self.get_prefixes(message.guild)\n return self.cfg['bot']['prefixes'] + mention + additional_prefixes", "def getPrefix(self):\n return _libsbml.ASTBasePlugin_getPrefix(self)", "async def fetch_prefix(self, bot: 'ContestBot', message: discord.Message):\n user_id = bot.user.id\n base = [f'<@!{user_id}> ', f'<@{user_id}> ']\n\n if message.guild:\n with self.get_session() as session:\n guild: Guild = session.query(Guild).get(message.guild.id)\n base.append(guild.prefix)\n return base", "def getPrefix(self):\n return _libsbml.SBase_getPrefix(self)", "def getPrefix(self):\n return _libsbml.MultiASTPlugin_getPrefix(self)", "def prefix(self):\n return self._path_prefix", "def prefix(self):\n return self._get_storage().prefix" ]
[ "0.793544", "0.7813612", "0.74872243", "0.72662723", "0.7263468", "0.7129918", "0.6971227", "0.6871771", "0.6871771", "0.6865197", "0.6864055", "0.67791307", "0.67791307", "0.67293286", "0.6695514", "0.66427916", "0.66427916", "0.66427916", "0.6555726", "0.65498084", "0.65389127", "0.6537446", "0.6537446", "0.653329", "0.6491352", "0.6489772", "0.6414889", "0.63773185", "0.63627744", "0.6359829" ]
0.8556621
0
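A short sketch of how a prefix loaded this way is typically wired into discord.py. The `load_prefix` helper and the file name are illustrative; only the `commands_prefix` key comes from the accessor above.

import json

import discord
from discord.ext import commands


def load_prefix(path: str = "bot_data.json") -> str:
    # Mirrors get_command_prefix(): the prefix is stored under "commands_prefix".
    with open(path) as f:
        return json.load(f)["commands_prefix"]


# when_mentioned_or() lets users invoke commands with either the configured
# prefix or a direct mention of the bot.
bot = commands.Bot(
    command_prefix=commands.when_mentioned_or(load_prefix()),
    intents=discord.Intents.default(),
)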
Function that returns the bot source code link.
def get_open_source_link(self):
    return self.bot_data_file["open_source_link"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _botsource(self, ctx):\r\n source_link = \"https://github.com/Simalary/SimsVIP.Servo\"\r\n await self.bot.say('{0.message.author.mention}, my source code is available at <{1}>.'.format(ctx, source_link))", "async def source(self, context):\n await context.channel.send(\"https://github.com/balfroim/TengriBOT\")", "async def source(self, ctx, *, command: str = None):\n\n source_url = 'https://github.com/Discord-Bots-Italia/public-bot-py'\n branch = 'master'\n\n if command is None:\n return await ctx.send(source_url)\n\n else:\n obj = self.bot.get_command(command.replace('.', ' '))\n if obj is None:\n return await ctx.send('Could not find command.')\n\n # since we found the command we're looking for, presumably anyway, let's\n # try to access the code itself\n src = obj.callback.__code__\n module = obj.callback.__module__\n filename = src.co_filename\n\n lines, firstlineno = inspect.getsourcelines(src)\n location = os.path.relpath(filename).replace('\\\\', '/')\n\n final_url = f'<{source_url}/blob/{branch}/{location}#L{firstlineno}-L{firstlineno + len(lines) - 1}>'\n await ctx.send(final_url)", "async def source(self, ctx):\n \"\"\" Check out my source code <3 \"\"\"\n # Do not remove this command, this has to stay due to the GitHub LICENSE.\n # TL:DR, you have to disclose source according to MIT.\n # Reference: https://github.com/AlexFlipnote/discord_bot.py/blob/master/LICENSE\n await ctx.send(f\"**{ctx.bot.user}** is powered by this source code:\\nhttps://github.com/AlexFlipnote/discord_bot.py With modifications by user: snow-blade\")", "async def source(ctx):\n await ctx.send(\"The source can be found here: \" +\n \"https://github.com/FrederikBolding/CryptoBot\")", "def getBuildbotURL():", "async def source(self, ctx, command: str = None):\n\n source_url = \"https://github.com/Zeniath/Non-Don-Tools\"\n if command is None:\n return await ctx.send(source_url)\n \n\n obj = self.bot.get_command(command.replace('.', ' '))\n if obj is None:\n return await ctx.send('Could not find command.')\n\n src = obj.callback.__code__\n lines, firstlineno = inspect.getsourcelines(src)\n if not obj.callback.__module__.startswith('discord'):\n location = os.path.relpath(src.co_filename).replace('\\\\', '/')\n else:\n location = obj.callback.__module__.replace('.', '/') + '.py'\n source_url = \"https://github.com/Zeniath/Non-Don-Tools\"\n\n await ctx.send(f\"<{source_url}/tree/master/{location}/#L{firstlineno}-L{firstlineno + len(lines) - 1}>\")", "async def source(self, ctx):\n await ctx.send(f\"**{ctx.bot.user}** is powered by this source code:\\nhttps://github.com/SHI3DO/Keter\")", "async def source(self, ctx: Context, *, command: commands.Command = None):\n if command is None:\n return await ctx.send(BOT_CONFIG.REPOSITORY_URL) # type: ignore\n\n if command.name == 'help':\n code = type(self.bot.help_command)\n\n else:\n code = command.callback.__code__\n\n filename: str = inspect.getsourcefile(code) # type: ignore\n filename = str(pathlib.Path(filename).relative_to(pathlib.Path.cwd())).replace('\\\\', '/')\n\n lines, first_line = inspect.getsourcelines(code)\n\n last_line = first_line + len(lines) - 1\n await ctx.send(f'{BOT_CONFIG.REPOSITORY_URL}/blob/master/{filename}#L{first_line}-#L{last_line}') # type: ignore", "def source(self, irc, msg, args):\n irc.reply('My source is at http://supybot.com/')", "async def link(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n return await r(ctx, \"Not a bot.\")\n await r(ctx, 
f'<https://www.motiondevelopment.top/bots/{bot.id}>')", "def getLink(self):", "async def source(ctx, command: Option(str, \"The command to view the source code for\", required=False)):\n source_url = 'https://github.com/Pycord-Development/robocord'\n branch = 'main'\n view = discord.ui.View()\n if command is None:\n url = source_url\n label = \"Source code for entire bot\"\n else:\n command_split = command.split()\n index = 0\n obj = discord.utils.get(bot.application_commands.values(), name=command_split[index])\n while isinstance(obj, SlashCommandGroup):\n if index + 1 > len(command_split):\n return await ctx.respond(\"Error: Command is a group. You must choose a subcommand from it.\")\n obj = discord.utils.get(obj.subcommands, name=command_split[index])\n if not isinstance(obj, SlashCommand):\n return await ctx.respond(\"Error: Command could not be found\")\n # noinspection PyUnresolvedReferences\n src = obj.callback.__code__\n filename = src.co_filename\n lines, firstlineno = inspect.getsourcelines(src)\n location = os.path.relpath(filename).replace('\\\\', '/')\n\n url = f'{source_url}/blob/{branch}/{location}#L{firstlineno}-L{firstlineno + len(lines) - 1}'\n content = await discord.ext.commands.clean_content(escape_markdown=True).convert(ctx, command)\n label = f'Source code for command \"{content}\"'\n view.add_item(discord.ui.Button(label=\"View Code\", url=url))\n await ctx.respond(label, view=view)", "def linkcode_resolve(domain, info):\n if domain != 'py' or not info['module']:\n return None\n filename = info['module'].replace('.', '/')\n return \"https://github.com/mathcamp/flywheel/blob/%s/%s.py\" % (version_data['ref'], filename)", "def get_buildbot_url():\n return \"http://10.45.4.98:8001/\"", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename", "def getLink(self):\n return self.link", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def trig_code(self, bot, source, target, trigger, argument):\n\t\treturn \"Hello, I'm a pyirkbot based on pynik. My code https://github.com/blueCommand/pyirkbot For feature requests use https://github.com/blueCommand/pyirkbot/issues beer is good also\"", "def _get_source(link):\n if link.startswith(\"http://\") or link.startswith(\"https://\"):\n down = httpkie.Downloader()\n return down.download(link)\n\n if os.path.exists(link):\n with open(link) as f:\n return f.read()\n\n raise UserWarning(\"html: '%s' is neither URL or data!\" % link)", "def getSource():", "def linktrail(self, code: str) -> str:\n site = pywikibot.Site(code, 'wikipedia')\n return site.linktrail()", "def source_code(self):\n return str(self.source)", "def get_raw(link: str) -> str:\n\n link = link.strip(\"<>/\") # Allow for no-embed links\n\n authorized = (\n \"https://hastebin.com\",\n \"https://gist.github.com\",\n \"https://gist.githubusercontent.com\",\n )\n\n if not any([link.startswith(url) for url in authorized]):\n raise BadArgument(message=f\"I only accept links from {', '.join(authorized)}. 
(Starting with 'https').\")\n\n domain = link.split(\"/\")[2]\n\n if domain == \"hastebin.com\":\n if \"/raw/\" in link:\n return link\n token = link.split(\"/\")[-1]\n if \".\" in token:\n token = token[: token.rfind(\".\")] # removes extension\n return f\"https://hastebin.com/raw/{token}\"\n else:\n # Github uses redirection so raw -> usercontent and no raw -> normal\n # We still need to ensure we get a raw version after this potential redirection\n if \"/raw\" in link:\n return link\n return link + \"/raw\"", "def theLinky(self):\n theLink = self.absolute_url()\n return theLink", "def getSource(self):\n return urllib2.urlopen(Parser.SOURCE_URL)", "def link(self):\n return f\"https://{DOMAIN}/invite/{self.code}\"", "async def source(\n self, ctx: Context, *, source_item: SourceConverter = None\n ) -> None:\n if source_item is None:\n embed = discord.Embed(\n title=\"Magoji's Github Repository\",\n description=f\"[Here's the github link!]({GITHUB_REPO_URL})\",\n colour=0x87CEEB,\n )\n await ctx.send(embed=embed)\n return\n embed = self.build_embed(source_item)\n await ctx.send(embed=embed)", "def getSourceURL(self):\n return self.SourceURL" ]
[ "0.7624818", "0.6937835", "0.6835078", "0.6830835", "0.68156576", "0.678243", "0.65339357", "0.64714015", "0.63910526", "0.63313943", "0.6330082", "0.6192807", "0.60986626", "0.603519", "0.5984332", "0.5980589", "0.59727496", "0.5923028", "0.5890302", "0.5885634", "0.5849865", "0.58480793", "0.5817544", "0.5805717", "0.58046097", "0.5766712", "0.5758632", "0.57407403", "0.57310265", "0.57246786" ]
0.76383734
0
Function that returns the bot private chat alert message.
def get_private_chat_alert(self):
    return self.bot_data_file["private_chat_alert"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMessage():\n return message", "def do_something(incoming_msg):\n return \"i did what you said - {}\".format(incoming_msg.text)", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def get_message(self):\n return self.msg", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def getMessage() -> str:\n pass", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def hello_monkey():\n\n resp = MessagingResponse().message(\"Hey hey you logged a brag! Nice!!\")\n return str(resp)", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def get_message(self, user):\n return None", "def chat(sock, chan, msg, excuse=False):\n if chan in cfg.ACCEPTED or excuse:\n sock.send(\"PRIVMSG {} :{}\\r\\n\".format(chan, msg).encode(\"utf-8\"))\n console.info(\"TWITCH : {:<11} - {:<10}: {}\".format(chan[:11], \"RyuoBot\", msg))\n else:\n console.error(\"TWITCH : {:<11} - {:<10}: {} - RyuoBot is not allowed to type in {}!\".format(chan[:11], \"RyuoBot\", msg, chan))", "def showmessage(self):\n return self.message", "def showmessage(self):\n return self.message", "def privmsg(self, user, channel, msg):\n user = user.split('!', 1)[0]\n print user, \"PRIVMSG\", channel, msg\n if \"@\" not in user and \".\" in user:\n # Server message\n return\n if channel == 'AUTH':\n # server message during connect\n return\n target = channel # where to send reply\n reply = None\n if target == self.nickname:\n target = user\n reply = 'sorry, I\\'m just a bot'\n else:\n # see if this message should get a response\n reply = tinyurl.tiny(user, channel, msg)\n if reply is not None: \n # get 1st line only, if we have multi-line response\n while \"\\n\" in reply or \"\\r\" in reply:\n reply=reply.split(\"\\r\",1)[0]\n reply=reply.split(\"\\n\",1)[0]\n if reply is not None: \n print \"PRIVMSG reply\", target, reply\n for line in reply.split(\"\\n\"):\n # channel messages will already be set to single line (above),\n # so we'll only send multiline responses to a user\n self.msg(target, line)", "def alert_message(self):\r\n alerts = self.q(css=\"div.open-ended-alert\").text\r\n\r\n if len(alerts) < 1:\r\n return \"\"\r\n else:\r\n return alerts[0]", "def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"", "def send_alert(token, chatid, message):\n\n try:\n bot = Bot(token=token)\n bot.send_message(chat_id=chatid, parse_mode='HTML', text=message)\n logging.info('Alert sent successfully via Telegram.')\n except 
TelegramError as tg_err:\n logging.error('Unable to send alert, Telegram exception: %s', tg_err)", "def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()", "def echo(params):\n return params['message']", "async def on_chat_message(self, chat_message):\n pass", "def display_message():", "def beware_msg(msg):\n print(\"\\n\\n\\n************************************************************\")\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\\n\\n\")\n print(msg)\n print(\"\\n\\n\\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"************************************************************\\n\\n\\n\")", "def flash_msg(self, params):\n if params.has_key('receiver'): name = params['receiver']\n else: \n if self.participant: \n group = self.service.groupOfParticipant(self.participant)\n if group: \n member_avail = filter(lambda x:x.status == LISTEN and x.name != self.name,group.members)\n if member_avail:\n member = member_avail.pop()\n name = member.name\n else:\n self.notLoggedIn()\n return\n if params.has_key('text'): text = params['text']\n else: return\n\n logger.writeLog(\"%s@%s said:'%s'\" % (self.name,self.transport.hostname,text))\n \n if self.participant:\n msgMethod = self.participant.directMessage\n try:\n self.service.sendParticipants(self.name,\"botmsg\",{\"text\":text,\"sender\":self.name})\n msgMethod(name,text)\n except:\n self.receiveDirectCommand(\"msg\",{\"sender\":\"MsgServ\",\"text\":\"cant send text, probably there is no user to listen\"})\n else:\n self.notLoggedIn()", "def handle_chat(data: bytes) -> Tuple[bytes, str]:\n length = struct.unpack('H', data[:2])[0]\n message = data[2:2+length].decode(helpers.ENCODING)\n return data[2+length:], f'Sent message: \"{message}\"'", "def chat(self, p: str):\n message = p[1:]\n if message.strip() != '':\n message: str = f\"{self.username} says: {message[:80]}\"\n self.broadcast(packet.construct_log_packet(message), include_self=True)\n self.logger.log(message)", "def telebot():\n payload = json.loads(request.data)\n message = payload.get('message', payload.get('edited_message',''))\n msg_from = message.get('from')\n user_id = msg_from.get('id')\n user_first_name = msg_from.get('first_name','')\n user_last_name = msg_from.get('last_name','')\n user_is_bot = msg_from.get('is_bot')\n chat = message.get('chat')\n chat_id = chat.get('id')\n command = message.get('text')\n \n if user_is_bot or message == '':\n return jsonify({'method': 'sendMessage','chat_id' : chat_id,'text': 'Sorry I can\\'t answer you!'})\n \n bot_response = {\n 'method': 'sendMessage',\n 'chat_id' : chat_id,\n 'text': f'[{user_first_name} {user_last_name}](tg://user?id={user_id}) {command}',\n 'parse_mode':'Markdown',\n }\n\n return jsonify(bot_response)", "def echo(self, msg=None):\n return msg", "def SICAlert_bot(text):\r\n # Bot info\r\n bot = telegram.Bot(token=SICTOKEN)\r\n # print(bot.get_me())\r\n\r\n # sends messages to chat\r\n # Chat_id - https://web.telegram.org/#/im?p=c\"1371960899\"_2218953917755105830\r\n bot.send_message(chat_id=SICAlertChatID, text=text)", "def horde_message(self, message):", "def on_chat(self, event, text):\n return None" ]
[ "0.6508822", "0.62238526", "0.6108728", "0.60552496", "0.6001818", "0.6000742", "0.5984514", "0.59744394", "0.5974095", "0.59739006", "0.59720236", "0.5966712", "0.5966712", "0.5936854", "0.59354985", "0.59330904", "0.5886", "0.5866715", "0.5865388", "0.5853917", "0.58382535", "0.5830164", "0.5829517", "0.58109564", "0.58101267", "0.5807647", "0.5802447", "0.57918984", "0.57870793", "0.5780456" ]
0.77665764
0
Function that returns the max_cleverbot_requests value.
def get_max_cleverbot_requests(self):
    return int(self.bot_data_file["maxCleverbotRequests"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_count(self):\n return self.config.get('max_count', 500)", "def limit_num_clients(self):\n return self._limit_num_clients", "def get_max_readings( self ):\n return 2500", "def maxclients(self) -> Optional[int]:\n return pulumi.get(self, \"maxclients\")", "def max_findings_per_request(self) -> float:\n return pulumi.get(self, \"max_findings_per_request\")", "def max_mireds(self):\n return 333", "def max_trials(self) -> int:\n return self._max_trials", "def max_epochs(self):\n return self.trainer_cfg[\"max_num_epochs\"]", "def maximum_number_of_workers(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def max_request_length(self) -> int:\n return self.max_sequence_length", "def max_num_links(self):\n return self._max_num_links", "def get_max_iters():\n return 2000", "def maxRetransmits(self) -> Optional[int]:\n return self.__parameters.maxRetransmits", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def max_num_batches(self):\n return self._max_num_batches", "def maximum_number_of_workers(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def max_waiting(self):\n return self._max_waiting", "def find_max_guesses():\n print(\"You'll get 5 guesses per problem!\")\n return 5", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def max_epochs(self):\n return self.trainer_cfg[\"max_num_epochs\"] + self.trainer_cfg[\"swa_epochs\"]", "def adaptive_limit(self) -> int:\n return pulumi.get(self, \"adaptive_limit\")", "def remaining_requests(self):\n try:\n return self._get_limit('Remaining')\n except ValueError:\n logging.error(\n \"Unable to gather limit statistics until log() has been called. Returning -1\")\n return -1", "def get_limit(self):\n return self.limit", "def max_creds(self) -> int:\n return self._max_creds", "def max_sequence_length(self) -> int:\n return self._max_request_length", "def max(self) -> int:\n return self._status['party_size'][1]", "def get_limit(self):\n return self._limit", "def get_limit(self):\n return self._limit", "def max_retries(self) -> ConfigNodePropertyInteger:\n return self._max_retries" ]
[ "0.71495795", "0.67273325", "0.6720516", "0.6696665", "0.66402334", "0.6631142", "0.6547441", "0.6515297", "0.65100735", "0.6495988", "0.6492308", "0.64291495", "0.64208835", "0.64208025", "0.64189225", "0.63931036", "0.63668925", "0.63549143", "0.6311809", "0.63114387", "0.62998766", "0.62760216", "0.6273074", "0.6268755", "0.62378746", "0.6234806", "0.62160814", "0.6211906", "0.6211906", "0.6211559" ]
0.9331741
0
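A small sketch of how a cap like maxCleverbotRequests could be enforced per user. The limiter class and its name are assumptions; only the maximum itself would come from the accessor above.

from collections import defaultdict


class CleverbotLimiter:
    # Tracks how many Cleverbot requests each user has made against the cap.
    def __init__(self, max_requests: int):
        self.max_requests = max_requests
        self.counts = defaultdict(int)

    def allow(self, user_id: int) -> bool:
        if self.counts[user_id] >= self.max_requests:
            return False
        self.counts[user_id] += 1
        return True


limiter = CleverbotLimiter(max_requests=10)  # real value would come from get_max_cleverbot_requests()
print(limiter.allow(1234))  # True until the user hits the configured maximum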
Function that returns the URL the status is written to.
def get_server_write_status_url(self):
    write_url: str = self.bot_data_file["bot_status"]["server_state_saving"]["writeStateUrl"]
    print("Api:" + self.empty_api_key)
    print("Url:" + self.empty_url)
    if self.get_bot_save_state_to_server() and write_url.startswith(self.empty_url):
        print(
            "save_state_to_server IS TRUE BUT STATUS WRITE URL STARTS WITH 'http://URL/' SO IS NOT VALID - ABORTING")
        quit(1)
    return write_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_absolute_url(self):\n return \"/status/%i/\" % self.id", "def status_url(self, username, id):\n return urllib.parse.urljoin(self.instance, f'/p/{urllib.parse.quote(username)}/{id}')", "def Url(self) -> str:", "def url(self):\n if not os.path.exists(self.path):\n self.save()\n return self.uset.url(os.path.join(self.folder, self.get_filename()))", "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def get_success_url(self):\n return reverse('logs-jobs')", "def get_success_url(self):\n return reverse('logs-jobs')", "def get_status_callback_url(self):\n return [obj for obj in self._request_uri(\"status_callback_url\")]", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_success_url(self):\n return \"/\"", "def GetURL(self, rel_url):\n return 'http://localhost:%d/%s' % (self.port, rel_url)", "def url (self):\n return Links.createURL('/')", "def get_server_read_status_url(self):\n read_url: str = self.bot_data_file[\"bot_status\"][\"server_state_saving\"][\"readStateUrl\"]\n if self.get_bot_save_state_to_server() and read_url.startswith(self.empty_url):\n print(\n \"save_state_to_server IS TRUE BUT STATUS READ URL STARTS WITH 'http://URL/' SO IS NOT VALID - ABORTING\")\n quit(1)\n return read_url", "def tracking_url(self) -> str:\n return pulumi.get(self, \"tracking_url\")", "def url_health():\n return \"OK\"", "def pipeline_status_path(self):\n return '/_ah/pipeline/status?root=%s&auto=false' % self.root_pipeline_id", "def status():\n _request('worklog/status/')", "def url(self):\n if not self.fid:\n raise exceptions.NotCreatedError(object=self)\n\n return self._file_url(self.fid)", "def direct_url(self):\n #return '%s/getDownloadableFile' % self.absolute_url()\n return self.context.absolute_url()", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def get_monitoring_url(self):\n return \"http://{0}:{1}\".format(self.get_head_node_ip(), self.MONITOR_PORT)", "def uri(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uri\")" ]
[ "0.72626585", "0.6896938", "0.6601351", "0.63740164", "0.6253406", "0.61851", "0.61851", "0.6134751", "0.6113722", "0.6113722", "0.6113722", "0.6113722", "0.6113722", "0.6113722", "0.6113722", "0.60878825", "0.6071305", "0.6063158", "0.6057401", "0.6029894", "0.60195893", "0.60114306", "0.6001483", "0.59949774", "0.59847313", "0.59801394", "0.5974853", "0.5974853", "0.59725153", "0.5946897" ]
0.7271589
0
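A hedged sketch of what writing the state to that URL might look like with requests. The payload shape and the api_key query parameter are assumptions; only the URL itself comes from the accessor above.

import requests


def write_bot_status(write_url: str, api_key: str, state: dict) -> bool:
    # POST the serialized bot state to the configured endpoint and report success.
    response = requests.post(write_url, json=state, params={"api_key": api_key}, timeout=10)
    return response.ok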
Function that returns the URL the status is read from.
def get_server_read_status_url(self):
    read_url: str = self.bot_data_file["bot_status"]["server_state_saving"]["readStateUrl"]
    if self.get_bot_save_state_to_server() and read_url.startswith(self.empty_url):
        print(
            "save_state_to_server IS TRUE BUT STATUS READ URL STARTS WITH 'http://URL/' SO IS NOT VALID - ABORTING")
        quit(1)
    return read_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_absolute_url(self):\n return \"/status/%i/\" % self.id", "def get_status_callback_url(self):\n return [obj for obj in self._request_uri(\"status_callback_url\")]", "def url(self):\n _, body = self.request('/v1.1/url', 'GET')\n return body.get('url', None)", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def Url(self) -> str:", "def geturl(self):\n return self.__url", "def status_url(self, username, id):\n return urllib.parse.urljoin(self.instance, f'/p/{urllib.parse.quote(username)}/{id}')", "def findLink(status):\n link = False\n try:\n match2 = re.findall(r'bit.ly[\\w./:0-9]*', status)\n if match2:\n link = match2[0]\n #Find full urls\n match = re.findall(r'http[\\w./:0-9]*', status)\n if match:\n link = match[0]\n resp = urllib.urlopen(link)\n if resp.url:\n link = resp.url\n else:\n link = False\n except:\n link = False\n return link", "def get_uri(self):\n return self.url", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def get_url(self):\n return self.resource.url", "def geturl(self) -> str:\n\n req = request.Request(url=self._url, headers=self._headers)\n with request.urlopen(req) as f:\n return f.read().decode('utf-8', 'ignore')", "def get_server_write_status_url(self):\n write_url: str = self.bot_data_file[\"bot_status\"][\"server_state_saving\"][\"writeStateUrl\"]\n print(\"Api:\" + self.empty_api_key)\n print(\"Url:\" + self.empty_url)\n if self.get_bot_save_state_to_server() and write_url.startswith(self.empty_url):\n print(\n \"save_state_to_server IS TRUE BUT STATUS WRITE URL STARTS WITH 'http://URL/' SO IS NOT VALID - ABORTING\")\n quit(1)\n return write_url", "def url(self):\n return self._client.url", "def uri(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uri\")", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def url(self) -> str:\n return self._request.url.path", "def get_info_url(self):\n return self.get_info(\"URL\")" ]
[ "0.75997925", "0.7049809", "0.7032578", "0.6967554", "0.6967554", "0.6900867", "0.6900867", "0.6900867", "0.6900867", "0.6900867", "0.6900867", "0.6900867", "0.6876656", "0.6832015", "0.6823533", "0.68136656", "0.6778891", "0.6762563", "0.6762563", "0.67617697", "0.67424023", "0.67075324", "0.6687732", "0.6687246", "0.66646063", "0.66646063", "0.66623926", "0.66623926", "0.66616887", "0.6656003" ]
0.7110662
1
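The matching read side, again an assumption-laden sketch built on requests; only the URL itself is taken from the accessor above.

import requests


def read_bot_status(read_url: str, api_key: str) -> dict:
    # GET the previously saved state back from the configured endpoint.
    response = requests.get(read_url, params={"api_key": api_key}, timeout=10)
    response.raise_for_status()
    return response.json()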
Function that returns the username for the meme generator.
def get_meme_generator_username(self):
    key = self.bot_data_file["meme_generator"]["username"]
    if self.check_empty_key(key):
        return key
    else:
        print("ERROR GETTING THE MEME USERNAME (register on https://api.imgflip.com/) - BOT ABORTING")
        quit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def username(self) -> str:", "def username(self) -> str:", "def generateUsername(self):\n retval= \"{0}.{1}\".format( self.first_name.split()[0].lower(),\n self.last_name.split()[-1].lower() )\n \n return toAscii(retval)", "def generate_username():\n return ''.join(choice(ascii_letters + digits) for _ in range(15))", "def GetUsername(self):\n pass", "def get_free_username():\n\t\n\t# uuid4 generates a GUID, which with high probability (close\n\t# to 1.0) has never been seen before (anywhere in the world)\n\treturn \"user\" + str(uuid4()).replace(\"-\", \"\")", "def username(self):\n return self._username()", "def get_username(self):\r\n raise NotImplementedError", "def random_username():\n return str(uuid.uuid4().hex.upper())", "def username(self) -> undefined.UndefinedOr[str]:", "def computer_username():\r\n\t#possible username the computer can have\r\n\tusernames = ['bella_123','$lade(99)','BADOO_0!','V1rus**',\t\t\r\n\t\t'Gh0stO_O', '1ce_man','MoneyBa9$','1ucy=_=', 'F1ash~_~',\r\n\t\t'<an9el>','-NeGaT1Ve-', '__M4dCat__','|Re$pEcT0|','-D1ggerR-',\r\n\t\t'k^T3st','n1ce!™']\r\n\trandom.SystemRandom().shuffle(usernames)\t\r\n\tselect_username = ''.join(random.sample(usernames, 1))\t#select a random username\r\n\treturn select_username", "def username(self) -> str:\n raise NotImplementedError", "def get_username(problem_name, instance_number):\n\n return \"{}_{}\".format(sanitize_name(problem_name), instance_number)", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "def account_name_generator():\n return 'jdoe-' + str(uuid()).lower()[:16]", "def get_username(self):\n raise NotImplementedError('get_username')", "def get_username(self):\r\n return self.username", "def generate_username(size=10, chars=string.ascii_lowercase + string.digits):\n suffix = gen_random_string(size, chars)\n return 'k8s-console-temp-user-' + suffix", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def usernameFind(self):\r\n return self.username()", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def get_username(self):\n return self.username", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")" ]
[ "0.7847496", "0.7847496", "0.7331012", "0.72546655", "0.723931", "0.7221629", "0.71840674", "0.71298873", "0.71025926", "0.7083926", "0.70373", "0.70328695", "0.70257235", "0.7025711", "0.69972277", "0.69816643", "0.6980639", "0.69769245", "0.6962517", "0.692402", "0.6922609", "0.68831027", "0.68485785", "0.6833536", "0.68332666", "0.68332666", "0.68332666", "0.68148893", "0.68148893", "0.68148893" ]
0.8672658
0
Function that returns the password for the meme generator.
def get_meme_generator_password(self):
    key = self.bot_data_file["meme_generator"]["password"]
    if self.check_empty_key(key):
        return key
    else:
        print("ERROR GETTING THE MEME PASSWORD (register on https://api.imgflip.com/) - BOT ABORTING")
        quit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def passwordGen() :\n\treturn __randomString(12)", "def password(self) -> str:", "def generate_pw(self):\n\n chunks = []\n for chunk_no in range(self.CHUNKS):\n if chunk_no < self.chunk:\n chunks.append(self.verified_chunks[chunk_no])\n elif chunk_no == self.chunk:\n chunks.append(str(self.counter).zfill(self.PASSWORD_LENGTH /\n self.CHUNKS))\n else:\n chunks.append(\"000\")\n\n return \"\".join(chunks)", "async def password_generate_complex(self, ctx):\n await ctx.send(\n \"\".join(\n random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))\n )\n )", "def password_generator(password_lenght):\r\n password = \"\"\r\n\r\n try:\r\n if password_lenght >=1:\r\n for i in range(password_lenght):\r\n choice = random.choice(symbols)\r\n password += str(choice)\r\n print(f\"Your password is: {password} \\nTnank you!\")\r\n return password\r\n else:\r\n return 0\r\n except Exception:\r\n pass", "def passwordGen(self):\n password = ''\n while len(password) < self.length:\n ls = []\n if self.numeric: ls.append(random.choice(list(string.digits)))\n if self.lower : ls.append(random.choice(list(string.ascii_lowercase)))\n if self.upper : ls.append(random.choice(list(string.ascii_uppercase)))\n if self.symbol : ls.append(random.choice(list(string.punctuation)))\n if not ls: sys.exit(0)\n random.shuffle(ls)\n if self.length - len(password) > len(ls):\n password += ''.join(ls) \n else:\n password += ''.join(ls[:self.length - len(password)])\n\n return password", "def giveReadablePassword():\n import random\n words = [\n 'Alpha',\n 'Bravo',\n 'Charlie',\n 'Delta',\n 'Echo',\n 'Foxtrot',\n 'Golf',\n 'Hotel',\n 'India',\n 'Juliet',\n 'Kilo',\n 'Lima',\n 'Mike',\n 'November',\n 'Oscar',\n 'Papa',\n 'Quebec',\n 'Romeo',\n 'Sierra',\n 'Tango',\n 'Uniform',\n 'Victor',\n 'Whiskey',\n 'Xray',\n 'Yankee',\n 'Zulu']\n\n chars = [\n '!',\n '#',\n '$',\n '%',\n '&',\n '*',\n '-',\n '.',\n ':',\n '?',\n '@' \n ]\n\n\n random.seed()\n pw = ''\n pw += random.choice(words)\n pw += random.choice(words)\n pw += random.choice(chars)\n pw += \"{:04d}\".format(random.randint(0,10000))\n return pw", "def GetPassword(self):\n pass", "def generate_password(self):\n password = str()\n\n length = len(self.chars_password)\n for index in range(self.length_password):\n char_index = random.randint(0, length - 1)\n password += self.chars_password[char_index]\n\n return password", "def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))", "def password():\n chars = \"abcdefghijklmnopqsrtuvwxyzABCDEFGHIJKLMNOPQSRTUVWXYZ\"\\\n \"123456890!#%&-_*<>+=()\"\n return ''.join(random.sample(chars, 15))", "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def password(self):\n return self._password()", "def randomPassword(self):\n pwd = \"\"\n charsLength = len(self.chars)\n for i in range(self.pwdLength):\n pwd += self.chars[randrange(charsLength)]\n return pwd", "def _random_password(self):\n return ''.join([\n random.choice(string.ascii_letters + string.digits)\n for _ in range(12)\n ])", "def generador_password(tamm):\n\tif tamm == 0:\n\t\treturn chr(randint(33,122))\n\telif tamm == 2:\n\t\treturn chr(randint(33,47)) + generador_password(tamm-1)\n\telif tamm == 5:\n\t\treturn chr(randint(48,57)) + generador_password(tamm-1)\n\telif tamm == 10:\n\t\treturn chr(randint(65,90)) + 
generador_password(tamm-1)\n\telif tamm == 13:\n\t\treturn chr(randint(97,122)) + generador_password(tamm-1)\n\telse:\n\t\treturn chr(randint(33,122)) + generador_password(tamm-1)", "def anypassword():\n\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(characters) for x in range(size))\n\n return password", "def randompassword():\n\n chars = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(chars) for x in range(size))\n\n return password", "def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password", "def generate_password(self): \n\n password = []\n length = input(\"Enter Length for Password (At least 8): \")\n\n if length.lower().strip() == \"exit\":\n raise UserExits\n elif length.strip() == \"\":\n raise EmptyField\n elif int(length) < 8:\n raise PasswordNotLongEnough\n else:\n # generating a password\n spinner = Halo(text=colored(\"Generating Password\", \"green\"), spinner=self.dots_, color=\"green\")\n spinner.start()\n for i in range(0, int(length)):\n #choose character from one of the lists randomly\n password.append(random.choice(random.choice([string.ascii_lowercase, string.ascii_uppercase, string.digits, self.specialChar_])))\n\n finalPass = \"\".join(password)\n spinner.stop()\n\n return finalPass", "def generate_password():\n chars = string.ascii_letters + string.digits\n key = random.sample(chars, 10)\n keys = \"\".join(key)\n return keys", "def randompassword():\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n return ''.join(random.choice(characters) for x in range(size))", "def generate_pw():\n chars = string.ascii_letters + string.digits + '!@#$%^&*()'\n password = ''.join(random.choice(chars) for i in range(16))\n pyperclip.copy(password)\n print('Password copied to clipboard.')\n return password", "def generate_password(self, length):\n items = [\"a\", \"e\", \"i\", \"o\", \"u\", \"1\", \"2\", \"4\", \"5\", \"7\", \"8\", \"9\"]\n\n new_password = \"\"\n while(len(new_password) < length):\n item = items[randint(0, len(items) - 1)]\n new_password += item\n return new_password", "def generate_password(path: str, number: int) -> str:\n password = \"\"\n for i in range(number):\n rand_line = generate_random_numbers_string()\n password += Program.find_string_by_number(rand_line, path)\n\n return password", "def genpass(length):\n password = \"\"\n choice = string.ascii_letters + string.digits\n for i in range(length):\n password += random.choice(choice)\n return password", "def _password_generator(size: int = 12, chars: Optional[str] = None) -> str:\n if chars is None:\n chars = string.ascii_letters + string.digits\n return ''.join(random.choice(chars) for _ in range(size))", "def get_user_password(text):\n return getpass.getpass(text)", "def generate_password() -> str:\n list_letters = [choice(LETTERS) for _ in range(randint(8, 10))]\n list_symbols = [choice(SYMBOLS) for _ in range(randint(2, 4))]\n list_numbers = [choice(NUMBERS) for _ in range(randint(2, 4))]\n password_list = [n for n in list_letters + list_symbols + list_numbers]\n shuffle(password_list)\n return 
\"\".join(password_list)", "def get_password(self):\n raise NotImplementedError('get_password')" ]
[ "0.818299", "0.8023962", "0.7555738", "0.7500553", "0.7393278", "0.7303603", "0.72950816", "0.7247995", "0.7243249", "0.71949685", "0.7173853", "0.7160653", "0.7150656", "0.71400476", "0.7137876", "0.71269286", "0.70900357", "0.7080714", "0.7054677", "0.70473486", "0.7043101", "0.7033133", "0.7029056", "0.698128", "0.69796413", "0.69646883", "0.69375193", "0.69314075", "0.69263566", "0.69261" ]
0.82762927
0
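A sketch of how these imgflip credentials are typically used against the public caption_image endpoint; the helper name and the minimal error handling are illustrative.

import requests


def caption_meme(username: str, password: str, template_id: str, top: str, bottom: str) -> str:
    # Ask imgflip to caption a template; returns the URL of the generated image.
    response = requests.post(
        "https://api.imgflip.com/caption_image",
        data={
            "template_id": template_id,
            "username": username,
            "password": password,
            "text0": top,
            "text1": bottom,
        },
        timeout=10,
    )
    payload = response.json()
    if not payload.get("success"):
        raise RuntimeError(payload.get("error_message", "imgflip request failed"))
    return payload["data"]["url"]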
Function that returns the owner ID used for sending private messages.
def get_owner_private_messages(self):
    owner_id = self.bot_data_file["owners_data"]["ownerPrivateMessagesID"]
    if owner_id == "":
        print("ERROR GETTING THE OWNER ID - EMPTY")
        return ""
    else:
        return owner_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def owner_id(self) -> int:\n return self.proto.owner", "def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")", "def owner_id(self) -> str:\n return self.__owner_id", "def send_owner_message(): \n data = order_obj.send_owner_message(request.forms)\n return data", "def bot_owner_id(self):\n return self._bot_owner_id", "def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")", "def owner_id(self):\n return self._owner_id", "def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])", "def unique_id(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_unique_id(self)", "def __get_sender_id(self):\n return self.__sender_id", "def identity(self) -> str:\n return self.requester.uuid", "async def uid(message):\n return \"your user id is: {}\".format(message.user_id)", "def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")", "def getOwnerIdFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unique_id(self):\n return _spacegrant_swig.message_debug_sptr_unique_id(self)", "def unique_id(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_unique_id(self)", "def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None", "def get_id(self):\r\n return self.username", "def owner(self):\n \n if not self.logMessage is None:\n return self.logMessage[\"author\"]", "def unique_id(self):\n return _spacegrant_swig.udp_debug_sptr_unique_id(self)", "async def uid(message, user: ParamType.MIXER_USER):\n return \"@{} user id is: {}\".format(user.username, user.id)", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")", "def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")", "def owner(self):\n answer = self._call('owner')\n return answer.owner", "def get_slack_token_owner():\n response = slack_client.api_call(\n \"auth.test\",\n )\n if not response.get(\"ok\", False):\n raise SlackError('Failed to get slack token owner {}'.format(response['error']))\n return response['user_id']", "def party_id(self):\n pass", "def unique_id(self):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_unique_id(self)" ]
[ "0.71588725", "0.7072653", "0.6997864", "0.6992629", "0.6844016", "0.6815332", "0.67553514", "0.64907527", "0.6454328", "0.6349432", "0.63298124", "0.63248587", "0.63142663", "0.629799", "0.62715447", "0.6219807", "0.6200476", "0.61841255", "0.6167833", "0.61574113", "0.61382324", "0.6128096", "0.6128096", "0.6128096", "0.61020136", "0.61020136", "0.6094895", "0.6076285", "0.6063798", "0.60400987" ]
0.81578904
0
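A minimal sketch of sending a private alert to the configured owner with discord.py. The helper name and message text are illustrative, and the empty-string check mirrors the accessor above.

import discord
from discord.ext import commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())


async def alert_owner(owner_id: str, message: str) -> None:
    # Silently skip when get_owner_private_messages() returned "" (no ID configured).
    if not owner_id:
        return
    owner = await bot.fetch_user(int(owner_id))
    await owner.send(message)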
Function that returns all the owners of the bot that have master permissions.
def get_owners_list(self):
    final_list = []
    for entry in self.bot_data_file["owners_data"]["owners_list"]:
        final_list.append(str(entry["name"]))
    if len(final_list) == 0:
        print("ERROR GETTING THE OWNERS LIST (i need at least 1 owner) - BOT ABORTING")
        quit(1)
    else:
        return final_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def owners(self):\n return self.find_users_by_rel('owner')", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def bots(self) -> Generator[discord.User, None, None]:\n for user in self.users:\n if user.bot:\n yield user", "def get_owners_command(client: Client) -> COMMAND_OUTPUT: # pragma: no cover\n url = '/api/v3/security/owners?resultLimit=500'\n response, status = client.make_request(Method.GET, url)\n\n readable_output: str = tableToMarkdown(name=f\"{INTEGRATION_NAME} - Owners\",\n t=list(response))\n\n return readable_output, {}, list(response)", "def owners(self):\n return self._owners", "def owners(self):\n return self._owners", "def owners(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"owners\")", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "async def whoowns(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n # pyright: reportUndefinedVariable=false\n return await r(ctx, \"Not a bot.\")\n\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n\n e = discord.Embed(color=0xfecdea)\n e.description = f'**{data[\"owner_name\"]}** owns **{bot}**'\n\n await em(ctx, embed=e)", "def get_owner_entities(self, username):\n\t\t#print('Quasar Utility Server getting owner entities for username{' + username + '}')\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_GET_OWNER_ENTITIES, username)", "def owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"owners\")", "def owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"owners\")", "def owners(self):\n return self.properties.get('owners',\n DirectoryObjectCollection(self.context, ResourcePath(\"owners\", self.resource_path)))", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def get_objects_owners(obj):\n type = obj.get_type()\n owners = []\n for user in obj.user_set.all():\n if type == Entities.TASK:\n relation = UserTasks.objects.get(user=user, task=obj)\n elif type == Entities.EVENT:\n relation = UserEvents.objects.get(user=user, event=obj)\n elif type == Entities.PLAN:\n relation = UserPlans.objects.get(user=user, plan=obj)\n else:\n raise TypeError\n owners.append(Owner(user.name, relation.access_level))\n\n return owners", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def manage_owners():\n\n owner_data = request.get_json(force=True)\n return _get_owner_service().create_owner(owner_data)", "def getResponsibleUsers():", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def getPropertyOwners(self) -> List[unicode]:\n ...", "def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])", "def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []", "def owners(self):\n from hubspot3.owners import OwnersClient\n\n return OwnersClient(**self.auth, **self.options)", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def project_owners(limit=None):\n tx = cypher_transaction()\n query = \"\"\"MATCH (p:project)-[:OWNED_BY]->(u:user) RETURN u, p\"\"\"\n if limit is not None:\n query += \" LIMIT {limit}\"\n tx.append(query, parameters={'limit': limit})\n else:\n tx.append(query)\n\n results = tx.commit()\n owners = [] # Just a list of user nodes\n for record in _first(results):\n user, project = record.values\n print(\"{0} is owned by {1}\".format(project['name'], user['name']))\n owners.append(user)\n return owners", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def humans(self) -> Generator[discord.User, None, None]:\n for user in self.users:\n if not user.bot:\n yield user", "def handle_empty_owners():\n result = shared_ldap.find_from_email(shared.globals.REPORTER)\n if result is not None:\n shared_sd.post_comment(\n \"Adding %s as the owner of the group.\" % shared.globals.REPORTER, True)\n return [result]\n\n # OK - something stupid is happening but let's give ourselves\n # a safety net.\n shared_sd.post_comment(\n \"Unable to add %s as an owner as the email address cannot be \"\n \"found in Linaro Login. This means the automation has not \"\n \"been able to find any of the specified email addresses in \"\n \"Linaro Login. Consequently, IT Services will need to manage \"\n \"it in the interim.\" % shared.globals.REPORTER, True)\n return [\"cn=its,ou=mailing,ou=groups,dc=linaro,dc=org\"]", "def get_everyone_granted(self):", "def members(self):\r\n return self.exclude(contributor__username=u'anonymous')" ]
[ "0.67644495", "0.6596586", "0.65689915", "0.65200084", "0.6504249", "0.6504249", "0.6436979", "0.6391575", "0.6351989", "0.6308154", "0.6297625", "0.6297625", "0.6263222", "0.6260391", "0.6135052", "0.61238444", "0.60664177", "0.6038699", "0.5990057", "0.59782493", "0.5976417", "0.5969584", "0.5942845", "0.5928673", "0.58734536", "0.58329165", "0.58265114", "0.5802756", "0.57808", "0.5774363" ]
0.6895028
0
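A sketch of how an owners list like this can gate commands via discord.py checks. Matching owners by str(ctx.author) is an assumption, as is the example shutdown command.

import discord
from discord.ext import commands

OWNERS = ["owner#0001"]  # in the real bot this would come from get_owners_list()

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())


def is_listed_owner():
    # commands.check() runs the predicate before the command body executes.
    def predicate(ctx: commands.Context) -> bool:
        return str(ctx.author) in OWNERS
    return commands.check(predicate)


@bot.command()
@is_listed_owner()
async def shutdown(ctx: commands.Context):
    await ctx.send("Shutting down.")
    await bot.close()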
Run the network with the first layer of nodes in state x
def run_network(self, x):
    # We have the input vector x, this is the first layer in our nn
    a = x
    # So we need to run the neural network now. We are going to make
    # things extra explicit by having the z term. Clearly super
    # inefficient
    for w, b in zip(self.weights, self.biases):
        # Find the values in the next layer of the network
        z = numpy.add(numpy.dot(w, a), b)
        a = sigmoid(z)
    return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def forward(self, x):\n return self.net(x)", "def trainNet():", "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n layer1 = nn.ReLU(nn.AddBias(nn.Linear(x, self.layer1), self.bias1))\n layer2 = nn.ReLU(nn.AddBias(nn.Linear(layer1, self.layer2), self.bias2))\n layer3 = nn.ReLU(nn.AddBias(nn.Linear(layer2, self.layer3), self.bias3))\n layer4 = nn.AddBias(nn.Linear(layer3, self.layer4), self.bias4)\n return layer4", "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n layer1 = nn.ReLU(nn.AddBias(nn.Linear(x, self.layer1), self.bias1))\n layer2 = nn.ReLU(nn.AddBias(nn.Linear(layer1, self.layer2), self.bias2))\n layer3 = nn.AddBias(nn.Linear(layer2, self.layer3), self.bias3)\n return layer3", "def run(self, x):\n \"*** YOUR CODE HERE question 2 ***\"\n xm0 = nn.Linear(x, self.m0)\n bias0 = nn.AddBias(xm0, self.b0)\n relu0 = nn.ReLU(bias0)\n xm1 = nn.Linear(relu0, self.m1)\n bias1 = nn.AddBias(xm1, self.b1)\n ### extra layer below ###\n # relu1 = nn.ReLU(bias1)\n # xm2 = nn.Linear(relu1, self.m2)\n # final = nn.AddBias(xm2, self.b2)\n # training another layer makes a smoother curve but needs more tuning...\n\n return bias1", "def forward(self, state):\n x = state\n for layer in self.linear_layers[:-1]:\n x = F.relu(layer(x))\n x = self.linear_layers[-1](x)\n return x", "def run(self, x):\n \"*** YOUR CODE HERE question 3 ***\"\n xm0 = nn.Linear(x, self.m0) # 1x784 * 784x10 = 1x10\n bias0 = nn.AddBias(xm0, self.b0) #1x10 + 1x10 = 1x10\n relu0 = nn.ReLU(bias0) #1x10 relu = 1x10\n xm1 = nn.Linear(relu0, self.m1) #1x10 * 10x784 = 1x784\n bias1 = nn.AddBias(xm1, self.b1) #xm1, b1 1x784 + 1x784 = 1x784\n xm2 = nn.Linear(bias1, self.m2) # 1x784 * 784x10 = 1x10\n final = nn.AddBias(xm2, self.b2) #1x10 + 1x10 = 1x10\n\n return final", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def run(self, x):\n T = len(x)\n self.x = x\n self.i = np.zeros((T, self.hidden_size))\n self.f = np.zeros((T, self.hidden_size))\n self.o = np.zeros((T, self.hidden_size))\n self.g = np.zeros((T, self.hidden_size))\n self.h = np.zeros((T, self.hidden_size))\n self.c = np.zeros((T+1, self.hidden_size))\n self.s = np.zeros((T+1, self.hidden_size))\n for t in xrange(T):\n # input gate\n self.i[t] = self.gatefun.compute(np.dot(self.igate.u, x[t])\n + np.dot(self.igate.w, self.s[t-1])\n + np.dot(self.igate.v, self.c[t-1]) + self.igate.b)\n # forget gate\n self.f[t] = self.gatefun.compute(np.dot(self.fgate.u, x[t])\n + np.dot(self.fgate.w, self.s[t-1])\n + np.dot(self.fgate.v, self.c[t-1]) + self.fgate.b)\n # current hidden node state\n self.g[t] = self.acfun.compute(np.dot(self.nodes.u, x[t]) + \n np.dot(self.nodes.w, self.s[t-1]) + self.nodes.b)\n # internal memoery\n self.c[t] = self.f[t] * self.c[t-1] + self.i[t] * self.g[t]\n # output gate\n self.o[t] = self.gatefun.compute(np.dot(self.ogate.u, x[t])\n + np.dot(self.ogate.w, self.s[t-1])\n + np.dot(self.ogate.v, self.c[t]) + self.ogate.b)\n self.h[t] = self.acfun.compute(self.c[t])\n self.s[t] = np.clip(self.o[t] * self.h[t], -50, 50)\n return self.s[:-1]", "def 
run(self, x):\n T = len(x)\n self.x = x\n self.s = np.zeros((T, self.hidden_size))\n for t in xrange(T):\n self.s[t] = np.dot(self.nodes.u, x[t])\n if self.en_bias: self.s[t] += self.nodes.b\n self.s[t] = self.acfun.compute(np.clip(self.s[t], -50, 50))\n return self.s", "def forward(self, x):\n x=T.div(x,255.0)\n \n #print(state[20:,20:,0])\n #print(state[:,0,:,:])\n conv1 = F.relu(self.conv1(x))\n conv2 = F.relu(self.conv2(conv1))\n conv3 = F.relu(self.conv3(conv2))\n ###\n conv_state = conv3.view(conv3.size()[0], -1)\n flat1 = F.relu(self.fc1(conv_state))\n flat2 = F.relu(self.fc2(flat1))\n\n V = self.V(flat2)\n A = self.A(flat2)\n\n return V, A\n return x", "def forward(self, input_x):\n return self.net(input_x.float())", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def call(self, x, **args):\n \n out = self.net(x)\n out = self.lstm(out)\n return out", "def forward(self, x):\n if x.dim() == 1:\n x = torch.unsqueeze(x, 0)\n return self.net(x)", "def forward(self, x):\n for task_module_name in self.task_module_name_path[self.task_idx]:\n for layer in self.task_modules[task_module_name]:\n x = layer(x)\n #x = self.task_modules[task_module_name](x)\n x = x.view(x.size(0), -1)\n x = self.classification_layers[str(self.task_idx)](x)\n return x", "def forward(self, state):\n x = state.unsqueeze(1)\n x = F.relu(self.cnl1(x))\n x = F.relu(self.cnl2(x))\n x = F.relu(self.cnl3(x))\n x = x.view(x.shape[0], -1) # flatten\n x = F.relu(self.dense1(x))\n x = self.out(x)\n return x", "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n return x", "def train_network(self):\n batch = self.memory.sample(self.batch_size)\n inputs = np.array([b[\"state\"] for b in batch]) #####\n actions = np.array([b[\"action\"] for b in batch])\n rewards = np.array([b[\"reward\"] for b in batch])\n next_inputs = np.array([b[\"next_state\"] for b in batch])\n\n actions_one_hot = np.eye(self.action_space_size)[actions]\n\n next_qvalues = np.squeeze(self.target_network.model(next_inputs))\n targets = rewards + self.discount * np.amax(next_qvalues, axis=-1)\n\n self.online_network.train_step(inputs, targets, actions_one_hot)", "def run(self, xs):\n \"*** YOUR CODE HERE ***\"\n previousOutput = None\n for letter in xs:\n inputLayer = self.runInputLayer(letter)\n if previousOutput is not None:\n previousOutputLayer = self.runPreviousOutputLayer(previousOutput)\n inputLayer = nn.Add(inputLayer, previousOutputLayer)\n outputLayer = self.runOutputLayer(inputLayer)\n previousOutput = inputLayer\n return outputLayer", "def run_through_model(self, x):\n\n for idx, (W, b) in enumerate(zip(self._conv_weights, self._conv_biases)):\n curr_n = self.n_values[idx]\n curr_k = self.k_values[idx]\n\n x = self.conv_layer_as_matrix_op(W, b, x, curr_n, curr_k)\n x = x.flatten()\n\n end_conv_n = self.n_values[idx + 1]\n num_linear_layers = len(self._lin_weights)\n\n for idx, (W, b) in enumerate(zip(self._lin_weights, self._lin_biases)):\n\n if idx < num_linear_layers - 1:\n x = self.linear_layer(W, b, x, use_relu=True)\n else:\n x = self.linear_layer(W, b, x, use_relu=False)\n\n return x", "def forward(self, x):\n #print('output of fetures.children() : %s'%str([i for i in self.features.children()]))\n #print(\"shape of input is %s\" % str(x.size()))\n for layer_no, layer in enumerate(self.features.children()):\n\n if layer_no is 23:\n y = layer(x)\n if layer_no is 33:\n z = layer(x)\n x = layer(x)\n\n #print('debug')\n #print('layer info: %s'%str(layer))\n 
#print(\"shape of x is %s\" % str(x.size()))\n\n x = self.conv1D_downstream1(x)\n x = self.conv1D_downstream2(x)\n x = self.upsample_1(x)\n\n z = self.conv1D_pool4(z)\n y = self.conv1D_pool3(y)\n #print('debug')\n #print(\"shape of x is %s\"%str(x.size()))\n #print(\"shape of z is %s\" % str(z.size()))\n\n if x.size() is not z.size():\n x = nn.functional.interpolate(x,size = (z.size()[2],z.size()[3]), mode = 'nearest')\n x = x+ z\n x = self.upsample_2(x)\n x = x+y\n x = self.upsample_3(x)\n\n return x", "def __call__(self, x_input):\n reuse = True if self.built else None\n net = load_kaffe_model(self.model_name, x_input, reuse=reuse)\n self.built = True\n self.net = net\n #output = end_points['alexnet_v2/fc8']\n # Strip off the extra reshape op at the output\n output = self.net.get_output()\n probs = output.op.inputs[0]\n return probs", "def run(layers):", "def _cnn(self, state):\n\n x = self.relu(self.bn1(self.conv1(state)))\n x = self.relu(self.bn2(self.conv2(x)))\n x = self.relu(self.bn3(self.conv3(x)))\n x = x.reshape(x.size(0), -1)\n\n return x", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n return self.net(x)", "def forward(self, state, action): \n ##x = F.relu(self.fc1(state)) \n x = F.relu(self.bn1(self.fc1(state))) \n x = torch.cat([x, action], dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def call(self, x: tf.Tensor) -> tf.Tensor:\n for layer in self._sublayers:\n x = layer(x)\n\n return x", "def call(self, x, training=False):\n for idx in range(self.n_layers):\n if self.nn_desc.topology.dict_topo[idx+1].layer_type != 'dropout':\n x = self.layer[idx](x)\n else:\n x = self.layer[idx](x, training=tf.dtypes.cast(training,\n dtype=tf.bool))\n return x" ]
[ "0.63821185", "0.6350916", "0.63436234", "0.6343451", "0.6338674", "0.6298721", "0.62605876", "0.6254791", "0.62174386", "0.6208298", "0.6149231", "0.6126274", "0.6119637", "0.6065895", "0.60484594", "0.60425836", "0.60365623", "0.6015953", "0.60128206", "0.6008909", "0.60061836", "0.59976655", "0.59963554", "0.5986476", "0.59567136", "0.594945", "0.59469044", "0.59250444", "0.59147525", "0.590149" ]
0.7387201
0
Modify /etc/hosts and test if a log exists by running 'sudo journalctl -f'
def test_host_file_audit(host): with host.sudo(): host.run("touch /etc/hosts") audit_log = host.run("journalctl -u auditd --since \"10 seconds ago\" | grep \"/etc/hosts\"") assert audit_log.stdout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_hosts_file(self, resolution):\n self._execute_command('echo {0} >> /etc/hosts'.format(resolution),\n sudo=True)", "def add_host_entry(ip, hostname, domain):\n host_line = ip+\" \"+hostname+\".\"+domain+\" \"+hostname\n\n # Only add entry if it does not exist already. We don't want warnings about\n # grep not finding the entry, as that's to be expected.\n with hide(\"warnings\"), settings(warn_only=True):\n if run(\"grep \\\"\"+host_line+\"\\\" /etc/hosts\").failed:\n sudo(\"echo \"+host_line+\" >> /etc/hosts\")", "def test_sudoers_audit(host):\n with host.sudo():\n sudoers_access = host.run(\"touch /etc/sudoers\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/sudoers\\\"\")\n assert audit_log.stdout", "def add_host_entries(hosts_file=None):\n from fabric.contrib.files import append\n if hosts_file:\n try:\n hosts = open(hosts_file)\n for line in hosts:\n append(\"/etc/hosts\", line.rstrip(\"\\n\"), use_sudo=True)\n except IOError:\n print \"ERROR: defined hosts file is missing!\"", "def _exists_remote(self, host):\n # This file gets written after cloudinit is done\n # path = '/var/lib/cloud/instance/boot-finished'\n path = '/home/ubuntu/SETUP_COMPLETE'\n t = 0\n sleep_len = 10\n while True:\n status = subprocess.call(\n ['ssh', '-oStrictHostKeyChecking=no', '-i', '/home/ubuntu/.ssh/id_rsa', 'ubuntu@'+host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n else:\n return False", "def test_hosts_file(host):\n hosts_file = host.file('/etc/hosts')\n assert hosts_file.exists\n assert hosts_file.user == 'root'\n assert hosts_file.group == 'root'", "def _add_node_to_etc_hosts(self):\n image = 'alpine:latest'\n command = 'echo \"{} {} # clusterdock\" >> /etc/hosts'.format(self.ip_address,\n self.fqdn)\n volumes = {'/etc/hosts': {'bind': '/etc/hosts', 'mode': 'rw'}}\n\n logger.debug('Adding %s to /etc/hosts ...', self.fqdn)\n client.containers.run(image=image,\n command=[self.execute_shell, '-c', command],\n volumes=volumes,\n remove=True)", "def flush_dns_cache():\n\n print(\"Flushing the DNS cache to utilize new hosts file...\")\n print(\n \"Flushing the DNS cache requires administrative privileges. 
You might need to enter your password.\"\n )\n\n dns_cache_found = False\n\n if platform.system() == \"Darwin\":\n if subprocess.call(SUDO + [\"killall\", \"-HUP\", \"mDNSResponder\"]):\n print_failure(\"Flushing the DNS cache failed.\")\n elif os.name == \"nt\":\n print(\"Automatically flushing the DNS cache is not yet supported.\")\n print(\n \"Please copy and paste the command 'ipconfig /flushdns' in \"\n \"administrator command prompt after running this script.\"\n )\n else:\n nscd_prefixes = [\"/etc\", \"/etc/rc.d\"]\n nscd_msg = \"Flushing the DNS cache by restarting nscd {result}\"\n\n for nscd_prefix in nscd_prefixes:\n nscd_cache = nscd_prefix + \"/init.d/nscd\"\n\n if os.path.isfile(nscd_cache):\n dns_cache_found = True\n\n if subprocess.call(SUDO + [nscd_cache, \"restart\"]):\n print_failure(nscd_msg.format(result=\"failed\"))\n else:\n print_success(nscd_msg.format(result=\"succeeded\"))\n\n centos_file = \"/etc/init.d/network\"\n centos_msg = \"Flushing the DNS cache by restarting network {result}\"\n\n if os.path.isfile(centos_file):\n if subprocess.call(SUDO + [centos_file, \"restart\"]):\n print_failure(centos_msg.format(result=\"failed\"))\n else:\n print_success(centos_msg.format(result=\"succeeded\"))\n\n system_prefixes = [\"/usr\", \"\"]\n service_types = [\"NetworkManager\", \"wicd\", \"dnsmasq\", \"networking\"]\n restarted_services = []\n\n for system_prefix in system_prefixes:\n systemctl = system_prefix + \"/bin/systemctl\"\n system_dir = system_prefix + \"/lib/systemd/system\"\n\n for service_type in service_types:\n service = service_type + \".service\"\n if service in restarted_services:\n continue\n\n service_file = path_join_robust(system_dir, service)\n service_msg = (\n \"Flushing the DNS cache by restarting \" + service + \" {result}\"\n )\n\n if os.path.isfile(service_file):\n if 0 != subprocess.call(\n [systemctl, \"status\", service], stdout=subprocess.DEVNULL\n ):\n continue\n dns_cache_found = True\n\n if subprocess.call(SUDO + [systemctl, \"restart\", service]):\n print_failure(service_msg.format(result=\"failed\"))\n else:\n print_success(service_msg.format(result=\"succeeded\"))\n restarted_services.append(service)\n\n dns_clean_file = \"/etc/init.d/dns-clean\"\n dns_clean_msg = \"Flushing the DNS cache via dns-clean executable {result}\"\n\n if os.path.isfile(dns_clean_file):\n dns_cache_found = True\n\n if subprocess.call(SUDO + [dns_clean_file, \"start\"]):\n print_failure(dns_clean_msg.format(result=\"failed\"))\n else:\n print_success(dns_clean_msg.format(result=\"succeeded\"))\n\n if not dns_cache_found:\n print_failure(\"Unable to determine DNS management tool.\")", "def get_local_etc_hosts_entries():\n\n hosts_content = None\n with open('/etc/hosts', 'r') as f:\n hosts_content = f.read()\n\n re_exclude_entry = re.compile(r'\\s*#.*|.*localhost.*|.*broadcasthost.*|^\\s*$')\n entries = filter(lambda line: not re_exclude_entry.match(line), hosts_content.splitlines())\n\n return '### /etc/hosts from host ###\\n' + '\\n'.join(entries)", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def set_hostname(self, path, hostname):\n\n f = open(os.path.join(path, 'etc', 'hostname'), 'w')\n f.write(hostname + \"\\n\")\n f.close()\n\n hosts = os.path.join(path, 'etc', 'hosts')\n\n with open(hosts, 'rb') as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n rows = [row for row in reader]\n\n for row in rows:\n if len(row) > 1 and row[0] == '127.0.1.1':\n row[1] = hostname\n break\n\n with open(hosts, 'w') as f:\n for row in 
rows:\n f.write(\"\\t\".join(row) + \"\\n\")", "def check_host(host):\n try:\n request = requests.get(host[0], timeout=3)\n host[1] = bool(re.search(host[1], request.text))\n except Exception:\n host[1] = False\n if host[1] is False:\n os.system(CONFIG['mail_command'].format(\n 'CRITICAL: {} is critical'.format(host[0])))\n\n return host", "def batchSetHosts(serverList, hostList, delHostList=[]):\n def sortedByKeys(dic):\n keys = dic.keys()\n keys.sort()\n retDic = {}\n for key in keys:\n retDic[key] = dic[key]\n return retDic\n\n for server in serverList:\n env.host_string = server['host']\n env.port = server['port']\n env.user = server['user']\n env.password = server['password']\n ret = sudo('cat /etc/hosts')\n lines = ret.split('\\n')\n hostHash = {}\n newHostList = []\n for line in lines:\n line = line.replace('\\n', '')\n line = line.replace('\\r', '')\n line = line.strip()\n if line == '':\n continue\n if line.replace(' ', '') == '':\n continue\n if line[0] == '#':\n newHostList.append(line)\n continue\n items = line.split(' ')\n ip = items[0].strip()\n for i in xrange(len(items)):\n if i == 0:\n continue\n domain = items[i]\n domain = domain.strip()\n if domain == '':\n continue\n if domain in delHostList:\n continue\n hostHash[domain] = ip\n setHostHash = {}\n for host in hostList:\n host = host.strip()\n items = host.split(' ')\n ip = items[0].strip()\n for i in xrange(len(items)):\n if i == 0:\n continue\n domain = items[i]\n domain = domain.strip()\n if domain == '':\n continue\n if domain in delHostList:\n continue\n setHostHash[domain] = ip\n for domain, ip in setHostHash.items():\n hostHash[domain] = ip\n hostHash = sortedByKeys(hostHash)\n for domain, ip in hostHash.items():\n hostline = ip + ' ' + domain\n newHostList.append(hostline)\n newHostList.sort()\n hosts = '\\n'.join(newHostList)\n sudo(\"echo '%s' > /etc/hosts\" % (hosts))", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def configure(node):\n script = []\n script.append(Statements.exec(\"hostname %s\" % node.getName()))\n script.append(Statements.createOrOverwriteFile(\n \"/etc/hostname\", [node.getName()]))\n script.append(Statements.exec(\n \"sed -i 's/127.0.0.1/127.0.0.1\\t%s/' /etc/hosts\" % node.getName()))\n return script", "def delete_host(host):\n\n lines = []\n with open(known_hosts_path, \"r\") as f:\n lines = f.readlines()\n\n with open(known_hosts_path, \"w\") as f:\n\n for line in lines:\n if host != line.split()[0]:\n f.write(line)", "def syslog(ctx, config):\n if ctx.archive is None:\n # disable this whole feature if we're not going to archive the data anyway\n yield\n return\n\n log.info('Starting syslog monitoring...')\n\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'mkdir', '-p', '-m0755', '--',\n '{adir}/syslog'.format(adir=archive_dir),\n ],\n wait=False,\n )\n )\n\n CONF = '/etc/rsyslog.d/80-cephtest.conf'\n conf_fp = StringIO('''\nkern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat\n*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat\n'''.format(adir=archive_dir))\n try:\n for rem in ctx.cluster.remotes.iterkeys():\n misc.sudo_write_file(\n remote=rem,\n path=CONF,\n data=conf_fp,\n )\n conf_fp.seek(0)\n run.wait(\n ctx.cluster.run(\n args=[\n 
'sudo',\n 'service',\n # a mere reload (SIGHUP) doesn't seem to make\n # rsyslog open the files\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n\n yield\n finally:\n log.info('Shutting down syslog monitoring...')\n\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'rm',\n '-f',\n '--',\n CONF,\n run.Raw('&&'),\n 'sudo',\n 'service',\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n # race condition: nothing actually says rsyslog had time to\n # flush the file fully. oh well.\n\n log.info('Checking logs for errors...')\n for rem in ctx.cluster.remotes.iterkeys():\n log.debug('Checking %s', rem.name)\n r = rem.run(\n args=[\n 'egrep', '--binary-files=text',\n '\\\\bBUG\\\\b|\\\\bINFO\\\\b|\\\\bDEADLOCK\\\\b',\n run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),\n run.Raw('|'),\n 'grep', '-v', 'task .* blocked for more than .* seconds',\n run.Raw('|'),\n 'grep', '-v', 'lockdep is turned off',\n run.Raw('|'),\n 'grep', '-v', 'trying to register non-static key',\n run.Raw('|'),\n 'grep', '-v', 'DEBUG: fsize', # xfs_fsr\n run.Raw('|'),\n 'grep', '-v', 'CRON', # ignore cron noise\n run.Raw('|'),\n 'grep', '-v', 'BUG: bad unlock balance detected', # #6097\n run.Raw('|'),\n 'grep', '-v', 'inconsistent lock state', # FIXME see #2523\n run.Raw('|'),\n 'grep', '-v', '*** DEADLOCK ***', # part of lockdep output\n run.Raw('|'),\n 'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147\n run.Raw('|'),\n 'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',\n run.Raw('|'),\n 'grep', '-v', 'INFO: recovery required on readonly',\n run.Raw('|'),\n 'head', '-n', '1',\n ],\n stdout=StringIO(),\n )\n stdout = r.stdout.getvalue()\n if stdout != '':\n log.error('Error in syslog on %s: %s', rem.name, stdout)\n set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = \\\n \"'{error}' in syslog\".format(error=stdout)\n\n log.info('Compressing syslogs...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'find',\n '{adir}/syslog'.format(adir=archive_dir),\n '-name',\n '*.log',\n '-print0',\n run.Raw('|'),\n 'sudo',\n 'xargs',\n '-0',\n '--no-run-if-empty',\n '--',\n 'gzip',\n '--',\n ],\n wait=False,\n ),\n )", "def test_syslog_shortcut_simple(self):\n with cleanup_handlers():\n expected_message = random_string(50)\n coloredlogs.install(syslog=True)\n logging.info(\"%s\", expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(expected_message in line for line in handle)", "def restart_all():\n sudo(\"for logstream in `ls /etc/init/logstream*`; do BASE=`basename $logstream .conf`; service $BASE restart; done\")", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def inject_hosts_files(self):\n self.log.info(\"Injecting host files\")\n hosts = dict()\n for i in self.all_nodes:\n hosts[i.name] = i.get_public_addr()\n #add the host names to etc/hosts\n orchestrator.inject_hostnames(hosts, delete=self.cluster_name)\n for i in self.all_nodes:\n i.inject_hostnames(hosts, delete=self.cluster_name)\n self.all_nodes[0].run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)\n orchestrator.run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)", "def set_host_aliases():\n with open('/tmp/hosts', 'w') as f:\n uname = os.uname()\n 
f.write(f'{uname.nodename} localhost\\n')\n os.environ['HOSTALIASES'] = '/tmp/hosts'", "def enhost(host):\n available = find(host,DNSMASQ_AVAILABLE)\n if not available:\n print(\"%s not found in available directory -- \" % host + DNSMASQ_AVAILABLE)\n return \n\n enabled = find(host,DNSMASQ_ENABLED)\n if enabled:\n print(\"%s is already enabled\" % host)\n return\n src = available\n dest = DNSMASQ_ENABLED + '/' + host + DNSMASQ_CFG_SUFFIX\n print(\"Linking %s to %s\" % (src,dest))\n os.symlink(src,dest)\n restart()\n listhosts()", "def _try_restart_fedora(self) -> None:\n\n try:\n util.run_script(['systemctl', 'restart', 'httpd'])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n\n # Finish with actual config check to see if systemctl restart helped\n super().config_test()", "def keystonehost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['keystone']\n env.exists = exists", "def _hostOK(self, host):\n if os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True", "def set_hostname(dut, host_name):\n cmd = \"sudo hostname {}\".format(host_name)\n st.config(dut, cmd)\n return", "def write(content):\n with open(TMP_HOSTS_FILE, \"w\") as f:\n f.write(content)", "def _prepare_hosts(container_dir, app):\n etc_dir = os.path.join(container_dir, 'overlay', 'etc')\n fs.mkdir_safe(etc_dir)\n new_hosts = os.path.join(etc_dir, 'hosts')\n new_hosts_orig = os.path.join(etc_dir, 'hosts.original')\n new_host_aliases = os.path.join(etc_dir, 'host-aliases')\n\n shutil.copyfile(\n '/etc/hosts',\n new_hosts\n )\n shutil.copyfile(\n '/etc/hosts',\n new_hosts_orig\n )\n fs.mkdir_safe(new_host_aliases)\n\n pwnam = pwd.getpwnam(app.proid)\n os.chown(new_host_aliases, pwnam.pw_uid, pwnam.pw_gid)", "def updateHost(self, *hosts):\n localhost_name = None\n old_hostnames = []\n for old_host in self.hosts.values():\n old_hostnames.append(old_host.name)\n if isinstance(old_host, LocalHost):\n if localhost_name is not None:\n logger.warning('Duplicate localhost found in lab.hosts')\n localhost_name = old_host.name\n for new_host in hosts:\n # Updating localhost\n if (isinstance(new_host, LocalHost) and localhost_name is not None):\n # Check for localhost clash\n if new_host.name != localhost_name:\n logger.warning('Localhost is already present: ' +\n '%s\\n' +\n 'Not updating host %s!', localhost_name, new_host.name)\n continue\n else:\n localhost_name = new_host.name\n # Will an update happen?\n if new_host.name in old_hostnames:\n logger.info('Overwriting host: %s', new_host.name)\n # Will it end up removing the localhost?\n if (new_host.name == localhost_name and\n not isinstance(new_host, LocalHost)):\n localhost_name = None\n self.hosts[new_host.name] = new_host\n if localhost_name is None:\n logger.warning('Localhost not yet present')" ]
[ "0.64045393", "0.630242", "0.6119651", "0.6042859", "0.58638054", "0.578382", "0.57800317", "0.55004096", "0.54827553", "0.5457473", "0.54408246", "0.54313713", "0.5353777", "0.5333098", "0.53144336", "0.53130835", "0.5285852", "0.5245373", "0.5223455", "0.5212901", "0.5212819", "0.5207941", "0.5193217", "0.5183032", "0.51571757", "0.51462823", "0.51382947", "0.5125669", "0.5115741", "0.5093398" ]
0.7312464
0
Modify /etc/sudoers and test if a log exists by running 'sudo journalctl -f'
def test_sudoers_audit(host): with host.sudo(): sudoers_access = host.run("touch /etc/sudoers") audit_log = host.run("journalctl -u auditd --since \"10 seconds ago\" | grep \"/etc/sudoers\"") assert audit_log.stdout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_sudo_mode():\n if not 'SUDO_UID' in os.environ.keys():\n print(\"Try running this program with sudo.\")\n exit()", "def check_sudo(self, uid: str) -> None:\n stdout, stderr = self.syscall(os.popen(\"which sudo\").read().strip(), \"-nu\", uid, \"-S\", \"true\", \"/bin/bash\")\n if stdout or stderr:\n raise GateException(\"Access denied to UID '{}' via sudo.\".format(uid))", "def syslog(ctx, config):\n if ctx.archive is None:\n # disable this whole feature if we're not going to archive the data anyway\n yield\n return\n\n log.info('Starting syslog monitoring...')\n\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'mkdir', '-p', '-m0755', '--',\n '{adir}/syslog'.format(adir=archive_dir),\n ],\n wait=False,\n )\n )\n\n CONF = '/etc/rsyslog.d/80-cephtest.conf'\n conf_fp = StringIO('''\nkern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat\n*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat\n'''.format(adir=archive_dir))\n try:\n for rem in ctx.cluster.remotes.iterkeys():\n misc.sudo_write_file(\n remote=rem,\n path=CONF,\n data=conf_fp,\n )\n conf_fp.seek(0)\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'service',\n # a mere reload (SIGHUP) doesn't seem to make\n # rsyslog open the files\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n\n yield\n finally:\n log.info('Shutting down syslog monitoring...')\n\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'rm',\n '-f',\n '--',\n CONF,\n run.Raw('&&'),\n 'sudo',\n 'service',\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n # race condition: nothing actually says rsyslog had time to\n # flush the file fully. oh well.\n\n log.info('Checking logs for errors...')\n for rem in ctx.cluster.remotes.iterkeys():\n log.debug('Checking %s', rem.name)\n r = rem.run(\n args=[\n 'egrep', '--binary-files=text',\n '\\\\bBUG\\\\b|\\\\bINFO\\\\b|\\\\bDEADLOCK\\\\b',\n run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),\n run.Raw('|'),\n 'grep', '-v', 'task .* blocked for more than .* seconds',\n run.Raw('|'),\n 'grep', '-v', 'lockdep is turned off',\n run.Raw('|'),\n 'grep', '-v', 'trying to register non-static key',\n run.Raw('|'),\n 'grep', '-v', 'DEBUG: fsize', # xfs_fsr\n run.Raw('|'),\n 'grep', '-v', 'CRON', # ignore cron noise\n run.Raw('|'),\n 'grep', '-v', 'BUG: bad unlock balance detected', # #6097\n run.Raw('|'),\n 'grep', '-v', 'inconsistent lock state', # FIXME see #2523\n run.Raw('|'),\n 'grep', '-v', '*** DEADLOCK ***', # part of lockdep output\n run.Raw('|'),\n 'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147\n run.Raw('|'),\n 'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',\n run.Raw('|'),\n 'grep', '-v', 'INFO: recovery required on readonly',\n run.Raw('|'),\n 'head', '-n', '1',\n ],\n stdout=StringIO(),\n )\n stdout = r.stdout.getvalue()\n if stdout != '':\n log.error('Error in syslog on %s: %s', rem.name, stdout)\n set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = \\\n \"'{error}' in syslog\".format(error=stdout)\n\n log.info('Compressing syslogs...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'find',\n '{adir}/syslog'.format(adir=archive_dir),\n '-name',\n '*.log',\n '-print0',\n run.Raw('|'),\n 'sudo',\n 'xargs',\n '-0',\n '--no-run-if-empty',\n '--',\n 'gzip',\n '--',\n ],\n wait=False,\n ),\n )", "def install_sudo():\n import vars\n vars = vars.Vars()\n with settings(warn_only=True):\n if run(\"which sudo\").failed:\n 
run(vars.os.package_install_cmd % \"sudo\")", "def verify_passwordless_sudo():\n\n args = [\"sudo\", \"-n\", \"/bin/true\"]\n\n proc = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n msg = proc.communicate()[0]\n\n if proc.returncode != 0:\n iotests.notrun('requires password-less sudo access: %s' % msg)", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def _test_sudo(self) -> bool:\n self.debug('Check if sudo is necessary.', level=2)\n command = Command('whoami')\n user_output = self.guest.execute(command, silent=True)\n if user_output.stdout is None:\n raise tmt.utils.RunError(\n 'unexpected command output',\n command,\n 0,\n user_output.stdout,\n user_output.stderr)\n\n return user_output.stdout.strip() != 'root'", "def addSudoers(User_String):\n SudoerString = \"\"\"User_Alias DEVOPS = %s\nDEVOPS ALL=NOPASSWD: ALL\"\"\" % User_String\n _hazSudoers = sudo('[ -f /etc/sudoers.d/webtelemetry-devops ] && echo \"yes\" || echo \"no\"')\n # We wont overwrite anything until we can verify & compare the contents\n if _hazSudoers == \"no\":\n sudo('echo \"%s\" >> /etc/sudoers.d/webtelemetry-devops' % SudoerString)\n sudo('chmod 440 /etc/sudoers.d/webtelemetry-devops')\n else:\n print \"[Info] webtelemetry-devops Sudoers file already exists.\"", "def sudo():\n try:\n run('sudo whoami')\n return 'sudo'\n except:\n return ''", "def sudo(ctx, config):\n log.info('Configuring sudo...')\n sudoers_file = '/etc/sudoers'\n backup_ext = '.orig.teuthology'\n tty_expr = r's/^\\([^#]*\\) \\(requiretty\\)/\\1 !\\2/g'\n pw_expr = r's/^\\([^#]*\\) !\\(visiblepw\\)/\\1 \\2/g'\n\n run.wait(\n ctx.cluster.run(\n args=\"sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}\".format(\n ext=backup_ext, tty=tty_expr, pw=pw_expr,\n path=sudoers_file\n ),\n wait=False,\n )\n )\n try:\n yield\n finally:\n log.info('Restoring {0}...'.format(sudoers_file))\n ctx.cluster.run(\n args=\"sudo mv -f {path}{ext} {path}\".format(\n path=sudoers_file, ext=backup_ext\n )\n )", "def check_reboot():\n return os.path.exists(\"/run/reboot-required\")", "def ExplainIfSudoNeeded(self, tf, dirs_to_remove):\n system = platform.system()\n # If running under Windows we don't need (or have) sudo.\n if system.lower().startswith('windows'):\n return\n\n user_id = os.getuid()\n if (os.stat(self.gsutil_bin_dir).st_uid == user_id and\n os.stat(self.boto_lib_dir).st_uid == user_id):\n return\n\n # Won't fail - this command runs after main startup code that insists on\n # having a config file.\n config_file = self.config_file_list\n self.CleanUpUpdateCommand(tf, dirs_to_remove)\n raise CommandException(\n ('Since it was installed by a different user previously, you will need '\n 'to update using the following commands.\\nYou will be prompted for '\n 'your password, and the install will run as \"root\". 
If you\\'re unsure '\n 'what this means please ask your system administrator for help:'\n '\\n\\tchmod 644 %s\\n\\tsudo env BOTO_CONFIG=%s gsutil update'\n '\\n\\tchmod 600 %s') % (config_file, config_file, config_file),\n informational=True)", "def _ExplainIfSudoNeeded(self, tf, dirs_to_remove, old_cwd):\n # If running under Windows or Cygwin we don't need (or have) sudo.\n if system_util.IS_CYGWIN or system_util.IS_WINDOWS:\n return\n\n user_id = os.getuid()\n if os.stat(gslib.GSUTIL_DIR).st_uid == user_id:\n return\n\n # Won't fail - this command runs after main startup code that insists on\n # having a config file.\n config_file_list = GetConfigFilePaths()\n config_files = ' '.join(config_file_list)\n self._CleanUpUpdateCommand(tf, dirs_to_remove, old_cwd)\n\n # Pick current protection of each boto config file for command that restores\n # protection (rather than fixing at 600) to support use cases like how GCE\n # installs a service account with an /etc/boto.cfg file protected to 644.\n chmod_cmds = []\n for config_file in config_file_list:\n mode = oct(stat.S_IMODE((os.stat(config_file)[stat.ST_MODE])))\n chmod_cmds.append('\\n\\tsudo chmod %s %s' % (mode, config_file))\n\n raise CommandException('\\n'.join(\n textwrap.wrap(\n 'Since it was installed by a different user previously, you will need '\n 'to update using the following commands. You will be prompted for your '\n 'password, and the install will run as \"root\". If you\\'re unsure what '\n 'this means please ask your system administrator for help:')) + (\n '\\n\\tsudo chmod 0644 %s\\n\\tsudo env BOTO_CONFIG=\"%s\" %s update'\n '%s') % (config_files, config_files, self.gsutil_path,\n ' '.join(chmod_cmds)),\n informational=True)", "def user_should_be_able_to_use_sudo(driver):\n assert \"lectured\" in sudo_results, str(sudo_results)", "def check_reboot():\n return os.path.exist(\"run/reboot-required\")", "def test_syslog_shortcut_simple(self):\n with cleanup_handlers():\n expected_message = random_string(50)\n coloredlogs.install(syslog=True)\n logging.info(\"%s\", expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(expected_message in line for line in handle)", "def sudoers():\n return \"\"\"ALL=NOPASSWD: /sbin/multipath, /sbin/multipathd, /etc/init.d/multipathd, /usr/bin/sg_persist, /bin/mount, /bin/umount, /bin/kill, /usr/bin/lsof, /usr/bin/systemctl, /usr/sbin/lsof, /usr/sbin/xfs_repair, /usr/bin/mkdir, /sbin/vgscan, /sbin/pvscan, /sbin/lvscan, /sbin/vgchange, /sbin/lvdisplay\"\"\"", "def test_correct_sudo_config(self):\n\n pattern = re.compile('^\\s*PermitRootLogin\\s+no')\n # remove escaped characters in case the string supports color\n rem_escape = re.compile(r'\\x1B\\[[0-?]*[ -/]*[@-~]')\n \n for ip in self.IPs:\n try:\n s=pxssh.pxssh(options={\"PasswordAuthentication\" : \"no\"})\n s.login(ip, \"as\", ssh_key=\"~/.ssh/id_as_ed25519\")\n s.sendline('grep -E \"^\\s*PermitRootLogin\\s+no\" /etc/ssh/sshd_config')\n self.assertTrue(s.prompt())\n line_to_match=rem_escape.sub('', s.before.splitlines()[-1])\n self.assertTrue(pattern.match(line_to_match) != None, \"Error in machine {}\".format(ip))\n s.logout()\n except pxssh.ExceptionPxssh as e:\n self.assertTrue(False)\n print s.before\n print 'Sudo verification or login to {} failed!, error: {}'.format(ip, e)\n\n self.assertTrue(True)", "def checkRoot():\n \n if not os.geteuid() == 0:\n sys.exit(\"You must be root to run this command, please use sudo and try again.\")", "def check_root() -> None:\n if os.geteuid() != 
0:\n print(\"Please run as root\")\n exit(1)", "def restart_all():\n sudo(\"for logstream in `ls /etc/init/logstream*`; do BASE=`basename $logstream .conf`; service $BASE restart; done\")", "def user_is_root():\n return os.geteuid() == 0", "def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"", "def _detect_sudo(self, _execnet=None):\n exc = _execnet or execnet\n gw = exc.makegateway(\n self._make_connection_string(self.hostname, use_sudo=False)\n )\n\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())'\n )\n\n result = channel.receive()\n gw.exit()\n\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True", "def check_sudo_rules(self):\n result = ([], False)\n sfile = '/etc/sudoers'\n fm = FileManager(sfile)\n\n if fm.file.is_readable:\n result = fm.parse_sudoers(sfile)\n else:\n result = SudoList().parse()\n \n return 'sudo_rules', result", "def test_syslog_shortcut_enhanced(self):\n with cleanup_handlers():\n the_expected_message = random_string(50)\n not_an_expected_message = random_string(50)\n coloredlogs.install(syslog='warning')\n logging.info(\"%s\", not_an_expected_message)\n logging.warning(\"%s\", the_expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(the_expected_message in line for line in handle)\n assert not any(not_an_expected_message in line for line in handle)", "def test_sudo(self):\n self.assertEqual(self.host.user().name, \"matlab\")\n self.assertTrue(self.host.run(\"sudo echo 'Hello World'\").succeeded)", "def isroot():\n\treturn (os.geteuid() == 0)", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def is_admin():\n if os.name == 'nt':\n try:\n # Only Windows users with admin privileges can read \n # the C:\\windows\\temp directory.\n os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\\\windows'),'temp']))\n except:\n return False\n else:\n return True\n else:\n # Root has UID 0 on Unix systems.\n if 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False" ]
[ "0.64434725", "0.625049", "0.61129206", "0.61046714", "0.6042901", "0.5995111", "0.5981245", "0.595383", "0.595321", "0.5922493", "0.5753546", "0.57430065", "0.5703084", "0.5694894", "0.568475", "0.5654135", "0.564468", "0.56377536", "0.561018", "0.5570623", "0.5528348", "0.55256766", "0.5505332", "0.54758793", "0.5435078", "0.5402021", "0.53979176", "0.53952384", "0.5381734", "0.5378809" ]
0.7631633
0
s -> hash code
def get_hash_code(s): h = 0 n = len(s) for i, c in enumerate(s): h = h + ord(c) * 31 ** (n - 1 - i) return StrUtil.convert_4_bytes(h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashcode(o):", "def hash(self) -> str:\r\n ...", "def elf_hash(s):\n h = 0\n for c in s:\n h = (h << 4) + ord(c)\n t = (h & 0xF0000000)\n if t != 0:\n h = h ^ (t >> 24)\n h = h & ~t\n return h", "def hash(self) -> bytes:", "def hash(x) -> int:\n pass", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def hash(string):\n hs = 0\n for s in string:\n hs += ord(s)\n return hs", "def hash_function(s):\n\n # O(n) over the key length\n # O(1) over the HASH_DATA_SIZE\n\n bytes_list = s.encode()\n\n total = 0\n\n\n for b in bytes_list: # O(n) over the length of the key\n total += b\n\n\n total &= 0xffffffff # 32 bit (8 f's)\n\n return total", "def sdbm_hash(name):\n ret = 0\n for ii in name:\n ret = (ret * 65599 + ord(ii)) & 0xFFFFFFFF\n return hex(ret)", "def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h", "def HashAlgorithm(self) -> _n_7_t_0:", "def gen_hash(s: str) -> str:\n\n m = hashlib.md5()\n m.update(bytes(s, encoding = 'utf8'))\n hash_code = str(m.hexdigest())\n\n return hash_code", "def hash_key(self):", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def hash(self, text):\n hashval = 0\n for i in xrange(0, len(text)):\n hashval += ord(text[i])**i\n return hashval", "def _hashcode(token):\n res = 0\n l = len(token)\n cnt = 1\n for c in token:\n res += ord(c) * 31 ** (l - cnt)\n cnt += 1\n return res", "def hash(self, string):\n return self.__scaffydb.hash(string)", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def __hash__(self):\n token = \"\"\n for gamePiece in self.game_pieces:\n token = token + str(gamePiece.x) + str(gamePiece.y)\n \n hash_ = int(token) % 100000\n return hash_", "def get_hash(self):\r\n return", "def current_hash(self):", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def __hash__(self):\n return hash((self._nele, self._m_s))", "def _s_hash(fn, data: str):\n\n return fn(_b(data)).hexdigest()", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def get_hash(self):\n return self.__hash", "def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)", "def __hash__(self) -> int:", "def hashing(file,pp):\n\n def myhash(instring):\n # sdbm hash\n res = 0\n for t in instring:\n res = (ord(t) + (res<<6) + (res<<16) - res) % 2**32\n return res\n\n return hex(myhash(file.replace('\\\\','/')+\":\"+pp))", "def __hash__(self):\n return hash(self.get_canonical_identifier())" ]
[ "0.7849163", "0.76975936", "0.74788135", "0.74515873", "0.7448918", "0.74175584", "0.73752755", "0.736382", "0.7322883", "0.72037613", "0.7167543", "0.71318203", "0.7103698", "0.7055194", "0.70461094", "0.70433646", "0.7012222", "0.7006303", "0.7006017", "0.7004447", "0.698998", "0.69740283", "0.69603777", "0.69600534", "0.69521827", "0.69506484", "0.69422334", "0.6939315", "0.6933055", "0.6927499" ]
0.82550454
0
get default channel list
def get_api_default_channel_list(self): url = "http://api.applezhuan.com/api/c/get_default_channellist?&" params = { "android_id": self.mobile.android_id, "platform": "2", "av": "2", "type": "1", "time": self.get_current_time, "ov": self.mobile.os, "lon": self.mobile.lon, "lat": self.mobile.lat, "device_name": "dpi", "device_code": self.device_code, "brand": self.mobile.brand, "mac": self.mobile.mac, "vn": "1.0.2", "network": self.mobile.network } params_str = self.encrypt.get_secret_param(params) url = url + "s=" + params_str headers = { "Accept-Language": "zh-CN,zh;q=0.8", "User-Agent": "Mozilla/5.0 (Linux; U; Android " + self.mobile.os + "; zh-cn; GT-N7100 Build/" + self.mobile.brand + ") AppleWebKit/534.30" " (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30", "Host": "api.applezhuan.com", "Connection": "Keep-Alive", "Accept-Encoding": "gzip", "Cookie": self.cookie } res = requests.get(url, headers=headers) # print(res.text) result = json.loads(res.text) return result["d"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channels(self):\n if not self.is_loaded():\n return []\n else:\n return ipmi_channels()", "def default_channel(self) -> int:\r\n ...", "def _channels_list(self):\n result = self.slack.api_call(\"channels.list\")\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['channels']", "def get_channels(self):\n return self.channels", "def get_channels():\n r = slack.channels.list().body\n return [ c for c in r['channels'] if c['is_member'] ]", "async def list_channel(self, ctx: MyContext):\n channels = self.db_get_channels(ctx.guild.id)\n if not channels: # we can't send an empty list\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.no-channels\", p=ctx.prefix\n )\n )\n return\n txt = \"\\n\".join([c.to_str() for c in channels])\n await ctx.send(txt)", "def channel_list(self):\n return_str = self.scpi.query_channel_catalog().split(',')\n channel_dct = {}\n for i in range(int(len(return_str)/2)):\n channel_dct[int(return_str[2 * i])] = return_str[2 * i + 1]\n return channel_dct", "async def _list(self, ctx):\n config = await self.config.guild(ctx.guild).channels()\n data = [self.bot.get_channel(x).mention for x in config]\n if ctx.channel.id in config:\n destination = ctx.author\n else:\n destination = ctx\n if not data:\n return await destination.send(\"There are no channels.\")\n await destination.send(\", \".join(data))", "def get_channels():\n\tchannels = slack.get_channels()\n\treturn jsonify(channels=channels.body['channels'])", "def channels_listall(token):\n channels_results = channels.list()\n channels_list = []\n for channel in channels_results:\n channels_list.append(\n {\"channel_id\": channel[\"channel_id\"], \"name\": channel[\"name\"]}\n )\n return {\"channels\": channels_list}", "def getChannels(self) -> List:\n\t\tif self._taking_off:\n\t\t\tself.takeOff()\n\n\t\tif self._landing:\n\t\t\tself.land()\n\n\t\treturn self._altHoldController.getChannels() + [2000]", "def _fillChannels(self):\n\n # Add extra disabled channels as needed\n index = len(self.channels)\n while index < self.iface.myInfo.max_channels:\n ch = channel_pb2.Channel()\n ch.role = channel_pb2.Channel.Role.DISABLED\n ch.index = index\n self.channels.append(ch)\n index += 1", "def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]", "def getChannelsByName(self, unit, channels): \n\t\treturn self.selectChannelsByName(unit, channels, dontSelect = 1)", "def extract_channels(self, index: int) -> ListLike:\n cmd_pieces = self[index].split()\n channels = []\n for i, piece in enumerate(cmd_pieces):\n if piece in [\"--channel\", \"-c\"]:\n channels.append(cmd_pieces[i + 1])\n return channels", "def _get_all_initialized_channels(self):\n channels_dict = defaultdict(list)\n for service_base_dir in self._get_persistent_mpe_dir().glob(\"*/*\"):\n org_id = service_base_dir.parent.name\n channels = self._get_initialized_channels_for_org(org_id)\n if (channels):\n channels_dict[org_id] = channels\n return channels_dict", "def initDefaultChoices(self):\n return []", "def get_clients(self, channel):\n if channel not in self.clients.keys():\n return []\n return self.clients[channel]", "def test_blacklist_get_channel(self):\n test_channel = [{\"name\": \"Listed Count\",\n \"ShowChart\": 0,\n \"ShowTable\": 0,\n \"mode\": \"integer\",\n \"kind\": \"Custom\",\n \"customunit\": \"\",\n \"limitmaxerror\": 0,\n \"limitmode\": 1,\n \"value\": 0},\n {\"name\": \"Not Listed Count\",\n \"ShowChart\": 0,\n \"ShowTable\": 0,\n \"mode\": \"integer\",\n 
\"kind\": \"Custom\",\n \"customunit\": \"\",\n \"value\": 0},\n {\"name\": \"No Answer Count\",\n \"ShowChart\": 0,\n \"ShowTable\": 0,\n \"mode\": \"integer\",\n \"kind\": \"Custom\",\n \"customunit\": \"\",\n \"limitmaxwarning\": 0,\n \"limitmode\": 1,\n \"value\": 0}]\n assert_equal(self.test_blacklist.get_blacklist(['', 0, 0, 0]), test_channel)", "def channels(self):\n return self._channels", "def list_channels():\n user = getpass.getuser()\n base_path = \"C:\\\\Users\\\\\" + user + \"\\\\Documents\\\\Eve\\\\logs\\\\Chatlogs\\\\\"\n today = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n most_recent = {}\n for filename in os.listdir(base_path):\n filename = filename[:-4]\n full_filename = filename\n time = filename[-6:]\n filename = filename[:-7]\n date = filename[-8:]\n channel_name = filename[:-9]\n if date == today:\n channel = Channel()\n channel.file_name = full_filename\n channel.dir = base_path\n channel.channel_name = channel_name\n channel.date = date\n channel.time = time\n if most_recent.get(channel_name):\n newest_channel = most_recent.get(channel_name)\n if int(time) > int(newest_channel.time):\n most_recent[channel_name] = channel\n else:\n most_recent[channel_name] = channel\n\n return most_recent", "def channels(self):\n return [channel for channel in self.client.channels if channel.has_nick(self)]", "def get_channels(self):\n response = self.client.api_call(\n f'conversations.list?types={cfg.CHANNEL[\"types\"]}&exclude_archived={cfg.CHANNEL[\"exclude_archived\"]}'\n )\n assert response['ok']\n return response['channels']", "def channels(self):\r\n return v3.Channels(self)", "def channels(self):\n return self._channels.keys()", "def get_list_youtube_channels_check(self):\n return self.bot_data_file[\"youtube\"][\"channels\"]", "def ordered_channel_names(self):\n channel_list = []\n for k in self.__dict__.keys():\n if k.startswith('channel_'):\n channel_list.append(\n [int(k.split('channel_')[1]), self.__dict__[k]]\n )\n channel_list.sort()\n if len(channel_list) == 0:\n print('********* warning!! empty channel list - are there ay channel_N attributes? ')\n return [i[1] for i in channel_list]", "def get_channels_json(self):\n logging.debug(f\"Getting all Slack channels...\")\n return self.get_list_json(\"conversations\")[\"channels\"]", "async def listchannels(self, ctx: commands.Context):\n db_session = self.bot.create_db_session()\n channels_query = db_session.query(Channel).filter(Channel.joinable == True).order_by(Channel.name)\n db_session.close()\n\n header_message = \"Here is a list of the joinable channels\"\n channel_list = \"\\n\".join(channel.name for channel in channels_query)\n footer_messge = (\"To join or leave one of these channels, use the !joinchannel and !leavechannel commands.\\n\"\n \"To join multiple channels, separate them with a space.\")\n\n message = discord.Embed()\n message.title = \"Joinable Channels\"\n message.description = channel_list\n message.set_footer(text=footer_messge)\n\n await ctx.send(embed=message)", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")" ]
[ "0.69699097", "0.685595", "0.68018174", "0.668857", "0.65878475", "0.65467113", "0.6529256", "0.6497529", "0.64888537", "0.63858175", "0.63354135", "0.6327543", "0.63058734", "0.62887496", "0.62406474", "0.6229618", "0.62196255", "0.6214428", "0.6173595", "0.6123503", "0.6101624", "0.6098337", "0.60922235", "0.6090089", "0.6042482", "0.6028967", "0.6005569", "0.60052055", "0.59994596", "0.5978641" ]
0.720246
0
accept the img captcha and request sending of the sms captcha
def get_sms_captcha(self, img_ts, img_captcha): url = "http://api.applezhuan.com/api/get_sms_captcha?&" params = { "img_captcha": img_captcha, "time": self.get_current_time, "ts": img_ts, "device_code": self.device_code, "mobile": self.mobile.mobile } params_str = self.encrypt.get_secret_param(params) url = url + "s=" + params_str headers = { "Accept-Language": "zh-CN,zh;q=0.8", "User-Agent": "Mozilla/5.0 (Linux; U; Android " + self.mobile.os + "; zh-cn; GT-N7100 Build/" + self.mobile.brand + ") AppleWebKit/534.30 (KHTML, like Gecko) " "Version/4.0 Mobile Safari/534.30", "Host": "api.applezhuan.com", "Connection": "Keep-Alive", "Accept-Encoding": "gzip", "Cookie": self.cookie } res = requests.get(url, headers=headers) # print(res.text) result = json.loads(res.text) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()", "def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n with open('./out/captcha', 'r') as f:\n try:\n self.webdriver.find_element_by_xpath(\"//input[@aria-label='Type the text you hear or see']\").send_keys(f.read())\n except:\n log.error('Captcha input failed. Possibly incorrect captcha?')\n raise\n\n self.webdriver.find_element_by_xpath('//*[@id=\"identifierNext\"]').click()\n sleep(4)\n\n self.webdriver.find_element_by_css_selector(\"input[type=password]\").send_keys(self.bot.getPassword())", "def get_image_response(self, captcha_id):\n url = 'http://2captcha.com/res.php'\n data = {'key': self.api_key, 'action': 'get',\n 'id': captcha_id, 'json': 1}\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n\n if not finished:\n return False\n\n return recaptcha_answer", "async def enter_captcha(self, url, sid):\n raise VkCaptchaNeeded(url, sid)", "def solve_captcha_manual(gid):\n image = auth.get_captcha_image(gid)\n # FIXME: Use Python's temp file interface.\n image.save(\"./test.png\")\n webbrowser.open_new_tab(\"./test.png\")\n text = input('solve_captcha --->')\n return text", "def handle_captcha(thread_call, thread_r):\n import subprocess\n\n iden = thread_r['captcha']\n\n subprocess.call(['open', reddit_url + 'captcha/' + iden])\n thread_call['captcha'] = input(\"Captcha (enclose in quotes):\")\n thread_call['iden'] = iden\n\n request = session.post(reddit_url + 'api/submit', data=thread_call, cookies=cookie)\n thread_r = request.json()['json']['data']\n print request.json()\n if len(thread_r['errors']) > 0:\n debug_printer.pprint(thread_r)", "def sms_reply():\n # Fetch the message\n media_msg = request.form.get('NumMedia')\n msg = request.form.get('Body').lower()\n resp = MessagingResponse()\n responded = False\n if '1' in media_msg:\n pic_url = request.form.get('MediaUrl0') # URL of the person's media\n # pprint(pic_url) # so you can see the URL that the picture generated \n resp.message(\"We have recieved your request for image analysis! 
Please wait for our response\")\n resp.message(pic_url)\n url = \"https://techclan-twitter.herokuapp.com/reverse_image?URL=\"\n url=url+pic_url\n resp.message('The image has been succesfully uploaded to our server!The Url of the image is :')\n response=requests.get(url)\n parsed=json.loads(response.text)\n s1=\"\"\n count=0\n for each in parsed:\n s1=s1+each+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message('The reverse image analysis of image reports are:')\n resp.message(s1)\n time.sleep(1)\n u='http://18.205.87.224/api/text?id='\n u=u+pic_url\n response=requests.get(u)\n parsed=json.loads(response.text)\n resp.message(parsed)\n responded==True\n elif '5' in msg:\n r = requests.get('https://coronavirus-19-api.herokuapp.com/countries/india')\n if r.status_code == 200:\n data = r.json()\n text = f'_Covid-19 Cases in India_ \\n..........................\\nConfirmed Cases : *{data[\"cases\"]}* \\n................\\nToday Cases : *{data[\"todayCases\"]}* \\n..............\\nDeaths : *{data[\"deaths\"]}* \\n..................................\\nRecovered : *{data[\"recovered\"]}* \\n\\n..................\\nTotal Tested : *{data[\"totalTests\"]}* \\n\\n Type 0 to return to main menu'\n else:\n text = 'I could not retrieve the results at this time, sorry.'\n resp.message(text)\n responded = True \n \n elif '1' in msg:\n \n resp.message(\"wait we will fetch your results soon!!\")\n url = \"http://18.234.107.157:5000/api/text?id=\"\n ms=str(msg)\n #a,b=ms.split(' ',1)\n url=url+ms\n response=requests.get(url)\n parsed=json.loads(response.text)\n agree=0\n disagree=0\n discuss=0\n ctr=0\n for each in parsed:\n if ctr>100:\n break\n ctr=ctr+1\n answ=each.get('Score',\"error\")\n if answ == \"agree\":\n agree=agree+1\n elif answ == \"disagree\":\n disagree=disagree+1\n if(agree>disagree):\n resp.message(\"This is *REAL* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n else:\n resp.message(\"This is *FAKE* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n count=0\n s1=\"\"\n for each in parsed:\n s1=s1+each['link']+\"*Title :*\" +each['title']+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message(s1)\n responded==True\n #reporting\n elif '3' in msg:\n # resp.message(\"We have reported your content to our police database!!\")\n ms=str(msg)\n a,b=ms.split(' ',1)\n url='https://spreadsheetupdate1.herokuapp.com/spreed?id='\n url=url+ms\n r=requests.get(url)\n resp.message(\"We have reported your content to our police database!!\")\n responded==True\n\n\n\n \n #for news\n\n elif msg=='news' or msg=='4':\n \n url=\"\"\"https://newsapi.org/v2/top-headlines?sources=bbc-news,cnn,cnbc,abc-news,google-news-uk,independent&apiKey=3ff5909978da49b68997fd2a1e21fae8\"\"\"\n r = requests.get(url)\n #resp.message(\"stay\") \n if r.status_code == 200:\n resp.message(\"stay here with us! 
We are fetching news for you \")\n data = r.json()\n articles = data['articles'][:5]\n result = \"\"\n ctr=0 \n for article in articles:\n # if ctr>10:\n # break\n # ctr=ctr+1\n title = article['title']\n url = article['url']\n if 'Z' in article['publishedAt']:\n published_at = datetime.datetime.strptime(article['publishedAt'][:19], \"%Y-%m-%dT%H:%M:%S\")\n else:\n published_at = datetime.datetime.strptime(article['publishedAt'], \"%Y-%m-%dT%H:%M:%S%z\")\n \n result += \"\"\"*{}*\nRead more: {}\n_Published at {:02}/{:02}/{:02} {:02}:{:02}:{:02} UTC_\n\"\"\".format(\n title,\n url, \n published_at.day, \n published_at.month, \n published_at.year, \n published_at.hour, \n published_at.minute, \n published_at.second\n )+\"\\n ..................\\n\"\n\n else:\n result = 'I cannot fetch news at this time. Sorry!'\n\n resp.message(result)\n responded = True\t\n else:\n phone_no = request.form.get('From')\n reply = fetch_reply(msg, phone_no)\n\n resp = MessagingResponse()\n resp.message(reply)\n responded = True\n \n\n \t\n\n return str(resp)", "def obtain_image_captcha(self, file_path):\n id_answer = self.post_image_task(file_path)\n if not id_answer:\n message = f\"Unable to obtain response for request of captcha from 2Captcha\"\n print(message)\n return None\n\n try:\n captcha_id = int(id_answer)\n except ValueError:\n message = f\"Error in captcha request from 2Captcha: {id_answer}\"\n print(message)\n return None\n\n recaptcha_answer = self.get_image_response(captcha_id)\n if not recaptcha_answer:\n message = f\"Unable to obtain response for captcha image solution from 2Captcha\"\n print(message)\n return None\n\n print(f\"Output from 2Captcha {recaptcha_answer}\")\n return recaptcha_answer", "def post_image_task(self, file_path):\n url = 'http://2captcha.com/in.php'\n input_file = {'file': open(file_path, 'rb')}\n data = {'key': self.api_key, 'method': 'post', 'json': 1}\n response = self.session.post(url, files=input_file, data=data)\n id_answer = self.handle_id_answer(response.text)\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, files=input_file, data=data)\n id_answer = self.handle_id_answer(response.text)\n\n if not finished:\n return False\n\n return id_answer", "def solve_image_captcha(self, captcha_tmp_path):\n # Get solution and apply it\n for i in range(1, 4):\n print(f\"Attempt #{i} for recaptcha solution\")\n solution = self.obtain_image_captcha(captcha_tmp_path)\n print(f'this {solution}')\n if solution and ERROR not in solution.upper():\n break\n\n if solution is None or ERROR in solution.upper():\n if not solution:\n message = f\"2Captcha service didn't return a response for the captcha\"\n else:\n message = f\"Error in captcha solution from 2Captcha: {solution}\"\n return None\n\n print(\"Captcha solution: {}\".format(solution))\n return solution", "def get_captcha(self):\n res = self._limited_call(self._requests.get,\n constants.FA_ROOT + \"/captcha.jpg\")\n data = res.content\n return data", "def _handle_verify_code(self):\n while True:\n # r = self.session.get(self._genimage_url.format(code=self.codestring))\n try:\n self.headers[\"Cookie\"] = \"__jsluid=%s; __jsl_clearance=%s; JSESSIONID=%s\" % (self._jsluid, self._jsl_clearance, self.jsessionid)\n vfcode_url = \"http://www.miitbeian.gov.cn/getVerifyCode?%s\" % random.randint(10, 90)\n logger.info(\"Downloading verification code pic: 
%s\", vfcode_url)\n request = urllib2.Request(vfcode_url,headers=self.headers)\n r = self.opener.open(request, timeout=20)\n s = r.read()\n for cookie in self.cookiejar:\n logger.info(\"Get Cookie step2: %s, %s\", cookie.name, cookie.value)\n if cookie.name == \"JSESSIONID\":\n self.jsessionid = cookie.value\n img_path = \"miitVerf/code.png\"\n with open(img_path, mode='wb') as fp:\n fp.write(s)\n fp.close()\n logger.info(\"Saved verification code to %s\", format(os.path.dirname(img_path)))\n break\n except Exception,e:\n logger.info(e)\n self.vcode = raw_input(\"Please input the captcha:\\n\")\n return self.vcode", "def gen_captcha(**kwargs):\n from PIL import ImageFile\n from PIL import Image\n from PIL import ImageFont\n from PIL import ImageDraw\n from PIL import ImageFilter\n import random\n from PIL import ImageFile as pyImageFile\n import sys\n sys.modules['ImageFile'] = pyImageFile\n from io import StringIO, BytesIO\n # CHAR_BIT=(4,5,6,7,8)\n # CHAR_TYPE=(1,2,3)\n #随机选择字符位数和类型.\n # text=getstr( random.choice(CHAR_BIT), random.choice(CHAR_TYPE))\n text = kwargs.get('text', None)\n fnt_sz = kwargs.get('size', DEFAULT_IMAGE_SIZE)\n bkground = kwargs.get('bkground', DEFAULT_BG)\n font_color = kwargs.get('font_color', DEFAULT_FONT_COLOR)\n distortion = kwargs.get('distortion', DEFAULT_DISTORTION)\n addWidth = kwargs.get('addWidth', None)\n addHeight = kwargs.get('addHeight', None)\n\n period = distortion[0]\n amplitude = distortion[1]\n offset = distortion[2]\n\n## outFile = StringIO()\n outFile = BytesIO()\n\n DATA_PATH = os.path.abspath(os.path.dirname(__file__))\n FONT_PATH = DATA_PATH + '/fonts'\n\n # select font for captcha\n ALL_FONTS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12')\n rand_font = random.choice(ALL_FONTS)\n \"\"\"font = ImageFont.truetype(FONT_PATH+'/font%s.ttf'%rand_font, fnt_sz)\"\"\"\n font = ImageFont.truetype(FONT_PATH + '/font' + rand_font + '.ttf', fnt_sz)\n\n #依据需求认定图片大小\n # textSize =[165,50]\n textSize = [kwargs.get('width', 165), kwargs.get('height', 50)]\n factTextSize = font.getsize(text)\n\n #如果定义尺寸小于实际尺寸则用实际的尺寸\n if factTextSize[0] > textSize[0]:\n textSize[0] = factTextSize[0]\n if factTextSize[1] > textSize[1]:\n textSize[1] = factTextSize[1]\n#------------------------------render background1 -----------------------\n image = Image.new(\n 'RGB', (textSize[0] + addWidth, textSize[1] + addHeight), bkground)\n image.paste(bkground)\n#------------------------------render Text2 ------------------------\n draw = ImageDraw.Draw(image)\n alignment = (random.uniform(0, 1), random.uniform(0, 1))\n x = int((image.size[0] - textSize[0]) * alignment[0] + 0.5)\n y = int((image.size[1] - textSize[1]) * alignment[1] + 0.5)\n\n draw.text((x, y), text, font=font, fill=font_color)\n#--------------new add line i值越大线越粗------------------------\n width, height = image.size\n for i in range(0, 3):\n draw.line(((0, height / 1 + i), (width, height / 8 + i)), fill=128)\n\n#------------------------------render Distortion -----------------------\n r = 1\n xPoints = image.size[0] //r + 2\n yPoints = image.size[1] //r + 2\n\n # Create a list of arrays with transformed points\n xRows = []\n yRows = []\n for j in range(yPoints):\n xRow = []\n yRow = []\n for i in range(xPoints):\n x, y = getTransform(i * r, j * r, amplitude, period, offset)\n\n # Clamp the edges so we don't get black undefined areas\n x = max(0, min(image.size[0] - 1, x))\n y = max(0, min(image.size[1] - 1, y))\n\n xRow.append(x)\n yRow.append(y)\n xRows.append(xRow)\n yRows.append(yRow)\n\n # 
Create the mesh list, with a transformation for\n # each square between points on the grid\n mesh = []\n for j in range(yPoints - 1):\n for i in range(xPoints - 1):\n mesh.append((\n # Destination rectangle\n (i * r, j * r,\n (i + 1) * r, (j + 1) * r),\n # Source quadrilateral\n (xRows[j][i], yRows[j][i],\n xRows[j + 1][i], yRows[j + 1][i],\n xRows[j + 1][i + 1], yRows[j + 1][i + 1],\n xRows[j][i + 1], yRows[j][i + 1]),\n ))\n\n img = image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)\n\n # save the image to a file\n img.save(outFile, format='jpeg')\n outFile.seek(0)\n # img.save(\"captchas.jpg\") #测试用,正式系统请删除.\n src = outFile.read()\n size = len(src)\n sys.modules['ImageFile'] = ImageFile\n return {'text': text, 'src': src, 'size': size}", "def solve_captcha(self):\n # Switch to the Captcha's iframe\n captcha = CapatchaSolver(self.driver)\n while True:\n self.driver.switch_to.frame(self.driver.find_element_by_tag_name(\"iframe\"))\n captcha.solve_captcha()\n # Check if we passed the captcha part by checking the page title\n wait = WebDriverWait(self.driver, 10)\n try:\n wait.until_not(EC.title_is(consts.BLOCKED))\n break\n except TimeoutException:\n self.driver.refresh()", "def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! 
PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False", "def get_captcha_reply(captcha):\n def get_char_at(pos, captcha):\n char_chars = [line[pos-1:pos] for line in captcha.split(b'\\n')]\n key = ''.join([ str(s, 'ascii') for s in char_chars])\n if key == ' | ':\n return get_char_at(pos+2, captcha)\n if key == ' | .\\\\ ':\n return get_char_at(pos+2, captcha)\n return chars[key]\n\n pos = 1\n\n a, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info(\"a=%d\" % a)\n\n op, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info('op=%s' % op)\n\n b, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info('b=%d' % b)\n \n if op == '-':\n return a - b\n if op == '*':\n return a * b\n if op == '/':\n return a / b\n if op == '+':\n return a + b\n pwn.log.error(\"Ops not found (%s)\" % op)", "def captcha_validation(token: str):\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n secret = json.loads(get_secret(\"CAPTCHA_SECRET\"))['CAPTCHA_SECRET']\n payload = {\n \"secret\": secret,\n \"response\": token\n }\n response_raw = requests.post(url, data=payload)\n response_text = response_raw.text\n logger.debug(response_text)\n response = json.loads(response_text)\n return response['success']", "def askForCaptcha(self, url):\n try:\n import webbrowser\n wikipedia.output(u'Opening CAPTCHA in your web browser...')\n if webbrowser.open(url):\n return wikipedia.input(\n u'What is the solution of the CAPTCHA that is shown in '\n u'your web browser?')\n else:\n raise\n except:\n wikipedia.output(u'Error in opening web browser: %s'\n % sys.exc_info()[0])\n wikipedia.output(\n u'Please copy this url to your web browser and open it:\\n %s'\n % url)\n return wikipedia.input(\n u'What is the solution of the CAPTCHA at this url ?')", "def submit(request):\n if request.POST:\n form = CaptchaForm(request.POST, request.FILES)\n if form.is_valid():\n image = request.FILES['singleImage']\n extension = image.name.split('.')[1]\n hashname = random.getrandbits(128)\n with open(os.path.join(settings.STATIC_ROOT, \"tmp/%s.%s\" % (hashname, extension)), \"w+\") as imagePath:\n imagePath.write(image.read())\n\n ctx = RequestContext(request, {\"hash\":hashname, \"extension\":extension})\n template = loader.get_template(\"wainz/submission_details.html\")\n\n return HttpResponse(template.render(ctx))\n else:\n form = CaptchaForm()\n\n return render_to_response(\"wainz/submit.html\", dict(form=form), context_instance = RequestContext(request))", "def handle(self,text, mic, profile):\n stuff = os.listdir(jasperpath.data('img/'))\n count = len(stuff)\n count +=1\n cam.start()\n pic = cam.get_image()\n pygame.image.save(pic,(jasperpath.data('img/pic%s.jpg' %count)))\n self.pygm.blitimg((\"pic%s.jpg\" %count), size, black, x, y)\n\n mic.say(\"%s\" %message)\n## pic(self)\n cam.stop()\n time.sleep(5)\n\n fromaddr = \"[email protected]\"\n #toaddr = \"[email protected]\"\n #toaddr = \"[email protected]\"\n toaddr = \"[email protected]\"\n\n sub = \"test\"\n msg = MIMEMultipart()\n pasw = \"Garfield76\"\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"pic\"\n \n body = \"blah blah\"\n \n msg.attach(MIMEText(body, 'plain'))\n \n filename = (\"pic%s.jpg\" %count)\n attachment = open((jasperpath.data('img/pic%s.jpg' %count)), \"rb\")\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\n \n 
msg.attach(part)\n \n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(fromaddr, pasw)\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n server.quit()", "def twocaptcha_solver():\n SITE_URL = get_site_settings()[1]\n SITE_KEY = get_site_settings()[0] # osrs site key\n API_KEY = get_user_settings()[2] # api key read from settings.ini\n if not API_KEY:\n raise ValueError(\"No API key was found in settings.ini.\")\n\n s = requests.Session()\n\n # here we post and parse site key to 2captcha to get captcha ID\n try:\n captcha_id = s.post(f\"http://2captcha.com/in.php?key={API_KEY}\"\n f\"&method=userrecaptcha&googlekey={SITE_KEY}\"\n f\"&pageurl={SITE_URL}\").text.split('|')[1]\n except IndexError:\n print(\"You likely don't have a valid 2captcha.com API key with funds\"\n \" in your settings.ini file. Fix and re-run the program.\")\n\n # then we parse gresponse from 2captcha response\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n print(\"Solving captcha...\")\n while 'CAPCHA_NOT_READY' in recaptcha_answer:\n sleep(6)\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n try:\n recaptcha_answer = recaptcha_answer.split('|')[1]\n except IndexError:\n print(\"2captcha failed to solve this one.. Returning a blank response \"\n \"If the program fails to continue, please msg Gavin with error.\")\n recaptcha_answer = ''\n else:\n return recaptcha_answer", "def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = raw_input(\"Please input the captcha:\\n\")\n return vcode", "def send_post(self, board, thread, comment='', filepath=None, filebin=None, email=''):\n if filepath:\n file = {}\n file['image'] = open(filepath,'rb').read()\n elif filebin:\n file = {}\n file['image'] = filebin\n else:\n file = None\n\n\n \n g_response = self.solver.solve_and_return_solution()\n if g_response != 0:\n req = requests.post(\"https://2ch.hk/makaba/posting.fcgi?json=1\", data={\n 'task':'post',\n 'board': board,\n 'thread': thread,\n 'captcha_type':'hcaptcha',\n '2chaptcha_id': '248cebfd-9b3f-4d8c-88b5-f812daf51261',\n 'email': email,\n 'comment': comment,\n 'h-captcha-response': g_response\n \n },files = file if file is not None else {'img': None} ).text\n \n req = json.loads(req)\n \n if req['Error'] == None:\n print( \"{} {} Post {} sent\".format(req[\"Status\"], time.strftime(\"%X\"), req[\"Num\"]) )\n return req[\"Num\"]\n else:\n print(\"Err {} {}\".format(time.strftime(\"%X\"), req))\n return 0\n else:\n print(\"task finished with error \" + self.solver.error_code)\n return 0", "async def set_captcha_and_connect(self, captcha_id: str, captcha_input: str):\n await self._set_captcha(captcha_id, captcha_input)\n await asyncio.sleep(10)\n await self._set_products()", "def test_5_signin(self):\n print \"获取验证码token\"\n r = requests.post(gl.url + ':7130/account/v1/get_captcha_token')\n print r, r.status_code, r.json()[\"captcha_token\"], r.json()[\"message\"], r.json()[\"code\"]\n self.assertEqual(r.status_code, 200)\n ##self.assertEqual(r.json()[\"message\"], \"操作成功\")\n self.assertEqual(r.json()[\"code\"], 0)\n 
gl.captcha_token = r.json()[\"captcha_token\"]\n self.assertIsNotNone(gl.captcha_token)\n\n print \"获取验证码\"\n r = requests.get(gl.url + ':7130/account/v1/get_captcha_image' + '?captcha_token=' + gl.captcha_token)\n print r, r.status_code, r.json()[\"captcha_value\"]\n self.assertEqual(r.status_code, 200)\n self.assertIsNotNone(r.json()[\"captcha_value\"])\n gl.captcha_value = r.json()[\"captcha_value\"]\n\n print \"发送验证码\"\n d = \"{\\\"purpose\\\": \\\"signin\\\", \\\"phone\\\": \\\"\"+gl.invitation_phoneNo+\"\\\", \\\"Source\\\": \\\"web\\\", \\\"captcha_token\\\":\\\"\" + gl.captcha_token + \"\\\",\\\"captcha_value\\\":\\\"\" + gl.captcha_value + \"\\\"}\"\n print \"传入参数:\" + d\n r = requests.post(gl.url + ':7130/account/v1/send_verify_code', data=d)\n print r, \"返回值:\" + r.text\n self.assertEqual(r.status_code, 200)\n gl.verify_code = r.json()[\"verify_code\"]\n self.assertIsNotNone(r.json()[\"verify_code\"])\n\n print \"验证码校验\"\n d = \"{\\\"purpose\\\": \\\"signin\\\", \\\"phone\\\": \\\"\"+gl.invitation_phoneNo+\"\\\",\\\"Source\\\": \\\"web\\\", \\\"verify_code\\\":\\\"\" + gl.verify_code + \"\\\"}\"\n print \"传入参数:\" + d\n r = requests.post(gl.url + ':7130/account/v1/check_verify_code', data=d)\n print r, \"返回值:\" + r.text\n self.assertEqual(r.status_code, 200)\n\n print \"登录\"\n d = \"{\\\"password\\\": \\\"\"+gl.invitation_pwd+\"\\\", \\\"phone\\\": \\\"\"+gl.invitation_phoneNo+\"\\\",\\\"Source\\\": \\\"web\\\", \\\"captcha_token\\\":\\\"\" + gl.captcha_token + \"\\\",\\\"captcha_value\\\":\\\"\" + gl.captcha_value + \"\\\"}\"\n print \"传入参数:\" + d\n r = requests.post(gl.url + ':7130/account/v1/sign_in', data=d)\n print r, \"返回值:\" + r.text\n self.assertEqual(r.status_code, 200)\n self.assertIsNotNone(r.json()[\"token\"])\n gl.account_token = r.json()[\"token\"]", "def receive_capturing_validation(self):\n reply = self.socket.recv(1)\n if reply[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n if bytes_to_int(reply) == codes['already_have_all']:\n print(\"Ya tenías todos los pokémones. Has completado el juego.\")\n self.receive_session_termination()\n\n elif bytes_to_int(reply) == codes['already_have_pokemon']:\n print(\"Ya tienes el pokémon sugerido. Intentaré encontrarte otro.\")\n self.receive_pokemon_suggestion()\n\n elif bytes_to_int(reply) == codes['do_not_have_pokemon']:\n print(\"Tu pokédex no reconoce a este pokémon. Intenta capturarlo!\")\n captured = False\n while not captured:\n captured = self.verify_capture()\n if captured:\n break\n again = \"\"\n while again != \"y\" and again != \"n\":\n again = input(\"Quieres tratar de nuevo? 
(y/n): \")\n if again == \"n\":\n self.socket.sendall(pack('B', codes['no']))\n self.receive_session_termination()\n elif again == \"y\":\n self.socket.sendall(pack('B', codes['yes']))\n if captured:\n print(\"Lo capturaste\")\n self.receive_image()\n self.receive_session_termination()", "def _validate_captcha(data):\n settings = api.config.get_settings()[\"captcha\"]\n\n post_data = urllib.parse.urlencode(\n {\n \"secret\": settings[\"reCAPTCHA_private_key\"],\n \"response\": data[\"g-recaptcha-response\"],\n \"remoteip\": flask.request.remote_addr,\n }\n ).encode(\"utf-8\")\n\n request = urllib.request.Request(settings[\"captcha_url\"], post_data, method=\"POST\")\n response = urllib.request.urlopen(request).read().decode(\"utf-8\")\n parsed_response = json.loads(response)\n return parsed_response[\"success\"] is True", "def get_captcha_challenge(http_body, \n captcha_base_url='http://www.google.com/accounts/'):\n contains_captcha_challenge = False\n captcha_parameters = {}\n for response_line in http_body.splitlines():\n if response_line.startswith('Error=CaptchaRequired'):\n contains_captcha_challenge = True\n elif response_line.startswith('CaptchaToken='):\n # Strip off the leading CaptchaToken=\n captcha_parameters['token'] = response_line[13:]\n elif response_line.startswith('CaptchaUrl='):\n captcha_parameters['url'] = '%s%s' % (captcha_base_url,\n response_line[11:])\n if contains_captcha_challenge:\n return captcha_parameters\n else:\n return None", "def require_auth_captcha(self, response, query_params,\n login_form_data, http_session):\n logger.info('Captcha is needed. Query params: %s', query_params)\n form_text = response.text\n\n action_url = parse_form_action_url(form_text)\n logger.debug('form action url: %s', action_url)\n if not action_url:\n raise VkAuthError('Cannot find form action url')\n\n captcha_sid, captcha_url = parse_captcha_html(\n html=response.text, response_url=response.url)\n logger.info('Captcha url %s', captcha_url)\n\n login_form_data['captcha_sid'] = captcha_sid\n login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)\n\n response = http_session.post(action_url, login_form_data)\n return response", "def get_captcha_image(self, page_html) -> str:\n try:\n items = page_html.select('div[class=\"ddText\"]')\n result_items = re.findall(r'\\\"data:image.*\\\"', str(items[0]))\n result_items = str(result_items).replace(\"\\\"\", \"\")\n except Exception as e:\n raise e\n else:\n return result_items" ]
[ "0.7563033", "0.7049562", "0.6603564", "0.6600111", "0.64076126", "0.6371104", "0.6334581", "0.6282515", "0.6243216", "0.6157799", "0.6151368", "0.6085415", "0.60629886", "0.597998", "0.5968537", "0.58884144", "0.5865535", "0.5841705", "0.5816367", "0.57923406", "0.576051", "0.57494044", "0.57404584", "0.57376057", "0.57316214", "0.569171", "0.5662541", "0.5655075", "0.5567107", "0.5476676" ]
0.71202487
1
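
The captcha-solver snippets in the row above all follow the same 2Captcha workflow: submit a task to in.php, then poll res.php until the answer is ready. A minimal consolidated sketch of that submit-then-poll pattern, assuming placeholder values for the API key, site key and page URL; the endpoints, the CAPCHA_NOT_READY marker and the "OK|..." split are taken from the snippets themselves.

# Consolidated sketch of the 2Captcha submit-then-poll flow shared by the snippets above.
# api_key, site_key and page_url are placeholder assumptions.
import time
import requests

def solve_recaptcha(api_key, site_key, page_url):
    s = requests.Session()
    submitted = s.post(f"http://2captcha.com/in.php?key={api_key}"
                       f"&method=userrecaptcha&googlekey={site_key}"
                       f"&pageurl={page_url}").text
    captcha_id = submitted.split('|')[1]                 # response looks like "OK|<id>"
    answer = s.get(f"http://2captcha.com/res.php?key={api_key}"
                   f"&action=get&id={captcha_id}").text
    while 'CAPCHA_NOT_READY' in answer:                  # literal not-ready marker from the API
        time.sleep(6)                                    # poll interval used in the snippets
        answer = s.get(f"http://2captcha.com/res.php?key={api_key}"
                       f"&action=get&id={captcha_id}").text
    return answer.split('|')[1]                          # "OK|<g-recaptcha-response>"
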
get content info by id
def get_content_by_id(self, content_id): url = "http://api.applezhuan.com/api/m/get_content?content_id=%s" % content_id headers = { "Host": "api.applezhuan.com", "Connection": "keep-alive", "Accept": "application/json", "Origin": "http://m.applezhuan.com", "Content-Type": "application/x-www-form-urlencoded", "Referer": "http://m.applezhuan.com/article_detail.html?content_id=%s" % content_id, "Accept-Encoding": "gzip,deflate", "Accept-Language": "zh-CN,en-US;q=0.8", "User-Agent": "Mozilla/5.0 (Linux; U; Android " + self.mobile.os + "; zh-cn; GT-N7100 Build/" + self.mobile.brand + ") AppleWebKit/534.30 (KHTML, like Gecko) " "Version/4.0 Mobile Safari/534.30", "Cookie": self.cookie, "X-Requested-With": "com.shuishi.kuai" } res = requests.get(url, headers=headers) result = json.loads(res.text) # print("get content id") # print(result) return result["d"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content_by_id_get(self, head, id, locale):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Content/GetContentById/{id}/{locale}/\"))", "def get_content(self, content_id):\n options = {\n 'content_id': int(content_id),\n }\n return self._get('get_content', options)", "def find_content(self, content_id):\n return self._find_content(content_id)", "def get(self, _id):", "def info(self, id):", "def get_object(id):", "def get_content(self, content_id):\n url = \"http://api.applezhuan.com/api/c/get_content?&\"\n params = {\n \"android_id\": self.mobile.android_id,\n \"platform\": \"2\",\n \"av\": \"2\",\n \"content_id\": content_id,\n \"time\": self.get_current_time,\n \"ov\": self.mobile.os,\n \"device_name\": \"dpi\",\n \"device_code\": self.device_code,\n \"brand\": self.mobile.brand,\n \"mac\": self.mobile.mac,\n \"vn\": \"1.0.2\",\n \"network\": self.mobile.network\n }\n params_str = self.encrypt.get_secret_param(params)\n url = url + \"s=\" + params_str\n headers = {\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; U; Android \" + self.mobile.os + \"; zh-cn; GT-N7100 Build/\" +\n self.mobile.brand + \") AppleWebKit/534.30 (KHTML, like Gecko) \"\n \"Version/4.0 Mobile Safari/534.30\",\n \"Host\": \"api.applezhuan.com\",\n \"Connection\": \"Keep-Alive\",\n \"Accept-Encoding\": \"gzip\",\n \"Cookie\": self.cookie\n }\n\n res = requests.get(url, headers=headers)\n result = json.loads(res.text)\n # print(\"get content id= %s\" % content_id)\n # print(result)\n return result[\"d\"]", "def get(self, id):\n return {'id': id}", "def get(self):\n return self.content_id", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def get_title_by_id(id):\n\n # your code", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def get(self, id):\n if id == 'body':\n return document.body\n else:\n return self.instances[id]", "def detail(): \n\n # get contentid\n content_id = request.args.get('contentid')\n\n # get shortest places\n title, places = get_shortest(content_id)\n print(content_id)\n\n return render_template('detail.html', \n title=title,\n content_id=content_id,\n places=places, \n count=len(places))", "def contents_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"contents\", access_token)", "def get(self, id):\n return get_comments(id)", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def meta(id):\n db = core.connect()\n return db[id][\"meta\"]", "def get_content(self, resource, num, id_):\r\n if resource == \"categories\":\r\n return self._db_manager.get_category_list()\r\n if resource == \"latest_items\" and num > 0:\r\n return self._db_manager.get_latest_items(num)\r\n if resource == \"category\" and id_ is not None:\r\n category = self._db_manager.get_category(id_)\r\n if category is not None:\r\n items = self._db_manager.get_category_items(category[\"id\"])\r\n return odict([\r\n (\"id\", category[\"id\"]),\r\n (\"name\", category[\"name\"]),\r\n (\"items\", items)\r\n ])\r\n if resource == \"item\" and id_ is not None:\r\n item = self._db_manager.get_item(id_)\r\n if item is not None:\r\n category = self._db_manager.get_category(item[\"category_id\"])\r\n if category is not None:\r\n return odict([\r\n (\"id\", item[\"id\"]),\r\n (\"name\", item[\"name\"]),\r\n (\"description\", 
item[\"description\"]),\r\n (\"category_id\", item[\"category_id\"]),\r\n (\"category_name\", category[\"name\"])\r\n ])\r\n # if no conditions for a valid return value apply, exit with None\r", "def get(id=None):\n return requests.get(\"/{}\".format(id))", "async def get(self, ctx, id : int):\n response = await self.api.get(id)\n await ctx.send(embed=self._build_embed(response))\n return True", "def get(self, cls, id):\n pass", "def get_show_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/show.json', **kwargs)", "def get(self, story_id):", "def get_by_id(dataobj_id):\n results = list(get_data_dir().rglob(f\"{dataobj_id}-*.md\"))\n return results[0] if results else None", "def get(self, id):\n return read_msg(id)", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get_item_content_async(self, trans, id):\n\n # Get visualization, making sure it's accessible.\n visualization = self.get_visualization(trans, id, check_ownership=False, check_accessible=True)\n if visualization is None:\n raise web.httpexceptions.HTTPNotFound()\n\n # Return content.\n visualization_config = self.get_visualization_config(trans, visualization)\n return trans.fill_template_mako(\"visualization/item_content.mako\", encoded_id=trans.security.encode_id(visualization.id),\n item=visualization, item_data=visualization_config, content_only=True)", "def get_comment_information_by_id(comment_id):\n comment = REDDIT.comment(comment_id)\n print(comment.body)\n print(vars(comment))", "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)" ]
[ "0.7491383", "0.7386878", "0.7337057", "0.70705456", "0.70664924", "0.6717087", "0.6689234", "0.66503906", "0.65968055", "0.65881276", "0.6506912", "0.6382121", "0.63475037", "0.6342468", "0.6333151", "0.6325207", "0.6293055", "0.629015", "0.6267186", "0.6173501", "0.6167295", "0.6166267", "0.6150956", "0.6150744", "0.604498", "0.60282177", "0.602632", "0.60226715", "0.60029525", "0.6000302" ]
0.746825
1
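
The matched document builds a per-content URL and Referer, sends the stored cookie, and returns the "d" field of the JSON body. A standalone sketch of that request pattern; the function name fetch_content and passing cookie/user_agent as parameters (instead of reading them from self) are assumptions for illustration.

# Standalone sketch of the request pattern in the matched document:
# per-content URL and Referer, stored cookie, JSON body unwrapped via its "d" field.
import json
import requests

def fetch_content(content_id, cookie, user_agent):   # assumed wrapper; the original reads these from self
    url = "http://api.applezhuan.com/api/m/get_content?content_id=%s" % content_id
    headers = {
        "Referer": "http://m.applezhuan.com/article_detail.html?content_id=%s" % content_id,
        "Cookie": cookie,
        "User-Agent": user_agent,
    }
    res = requests.get(url, headers=headers)
    return json.loads(res.text)["d"]
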
Padding s to 16bits.
def pad(s): return s + (16 - len(s) % 16) * chr(16 - len(s) % 16)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(s):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\treturn s + b\"\\0\" * (AES.block_size - len(s) % AES.block_size)", "def zeropad(s):\n\n npad = 16 - len(s) % 16\n return s + '\\x00' * npad", "def pad16(data: bytes) -> bytes:\n\n if len(data) % 16 == 0:\n return bytes(0)\n else:\n return bytes(16 - (len(data) % 16))", "def _pad(\r\n s: str,\r\n bs: int,\r\n) -> str:\r\n number_of_bytes_to_pad = bs - len(s) % bs\r\n ascii_string = chr(number_of_bytes_to_pad)\r\n padding_str = number_of_bytes_to_pad * ascii_string\r\n return s + padding_str", "def pkcs5_pad(self,s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)", "def nullPad(s):\n padding = chr(0) * (Blowfish.block_size - (len(s) % Blowfish.block_size))\n if padding:\n return s + padding\n else:\n return s", "def mbrpad(s):\n length = len(s)\n if length == 32:\n return s\n elif length > 31:\n raise Exception(\"Constant too long\")\n s = bytearray(s + '\\x00' * (32 - len(s)))\n s[length] ^= 0x80\n s[-1] ^= 0x01\n return bytes(s)", "def _pad8(s):\n return '%08d' % int(s)", "def __pad(self, data):\n return data + (AES.block_size - len(data) % AES.block_size) * \\\n chr(AES.block_size - len(data) % AES.block_size)", "def b16encode(s: str) -> str:\n return base64.b16encode(s.encode()).decode()", "def padding(string):\r\n\tbinary = ascii_to_binary(string)\r\n\tl = len(binary)\r\n\tif l >= 448:\r\n\t\treturn \"STRING IS TOO LONG\"\r\n\telse:\r\n\t\tbinary += \"1\"\r\n\t\t\t\r\n\t\tfor i in range(448-len(binary)):\r\n\t\t\tbinary += \"0\"\r\n\r\n\t\tbinary = binary + conversions.decimal_to_binary(l, 64)\r\n\r\n\t\treturn binary", "def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)", "def pkcs5_unpad(self,s):\n return s[0:-ord(s[-1])]", "def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))", "def general_same_padding(i, k, d=1, s=1, dims=2):\n #Convert i, k and d to tuples if they are int\n i = tuple([i for j in range(dims)]) if type(i) == int else i\n k = tuple([k for j in range(dims)]) if type(k) == int else k\n d = tuple([d for j in range(dims)]) if type(d) == int else d\n s = tuple([s for j in range(dims)]) if type(s) == int else s\n \n return tuple([int(0.5*(d[j]*(k[j]-1)-(1-i[j])*(s[j]-1))) for j in range(dims)])", "def pad_hex(i):\n pure_hex = hex(i)\n short_hex = pure_hex[len('0x'):]\n return ('00'+short_hex)[-2:]", "def left_zero_pad(s, blocksize):\n if blocksize > 0 and len(s) % blocksize:\n s = (blocksize - len(s) % blocksize) * b('\\000') + s\n return s", "def un_pad(s):\n return s[0:-ord(s[-1])]", "def padding(message):\n\n # Convert the string to bits by calling the tobits function\n mbits = tobits(message)\n # Get the length of bits\n length = len(mbits)\n # Calculate the strengthening vector length\n strengthmessage = (bin(int(length))[2:]).zfill(64 * ((len(bin(int(length))[2:]) + 63) // 64))\n\n # Create a padding which starts with 1\n padding = '1'\n # Get the number of zeroes to pad\n get_length = 128 - (length + 64) % 128\n # Run the for loop to append all 0's\n for i in range(0, get_length - 1):\n padding = padding + '0'\n\n # Make the entire pad \n to_add_pad = padding + strengthmessage\n # Return the entire pad\n return to_add_pad", "def glueSize(s):\n\tsize = len(s)\n\treturn struct.pack(\">I\",size) + s", "def 
_padboth(width, s):\n fmt = \"{0:^%ds}\" % width\n return fmt.format(s)", "def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset", "def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)", "def _wadifyString(s):\n\n if len(s) < 8:\n s += \"\\x00\" * (8 - len(s))\n return s", "def conv_pad(x, ks, mode):\n\tpad = (int(np.floor((ks-1)/2)), int(np.ceil((ks-1)/2)))\n\treturn F.pad(x, (*pad, *pad), mode=mode)", "def stripPadding(s):\n if s and ord(s[-1]) <= Blowfish.block_size:\n return s.rstrip(s[-1])\n else:\n return s", "def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def scapy_layers_dot11_RadioTap_extract_padding(self, s):\n\tpadding = len(s) - (self.pre_dissect_len - self.RadioTap_len)\n\tif padding:\n\t\treturn s[padding:], s[:padding]\n\telse:\n\t\treturn s, None", "def naive_block_padding(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n l = len(b)\n if l > 0 and l % size == 0:\n return b\n\n return b + b'\\x00' * (size - (l % size))", "def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING" ]
[ "0.69346625", "0.6807775", "0.6797431", "0.6661236", "0.6636684", "0.6550715", "0.6318116", "0.62260115", "0.59978324", "0.59684944", "0.58873004", "0.5858596", "0.58571833", "0.58209276", "0.57891655", "0.577834", "0.5753556", "0.5745127", "0.57439864", "0.57319933", "0.5698589", "0.5660012", "0.5651905", "0.5630854", "0.5552021", "0.55487007", "0.54595846", "0.5457051", "0.54478973", "0.54451174" ]
0.75771606
0
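
Despite the query's wording, the matched pad function pads to a 16-byte block (the AES block size), not 16 bits, in PKCS#7 style: it appends N copies of chr(N), where N is the number of bytes needed to fill the block. A short worked check; the unpad helper is an assumption that mirrors the pkcs5_unpad negative above.

# Worked check of the 16-byte padding in the matched document (PKCS#7 style).
def pad(s):
    return s + (16 - len(s) % 16) * chr(16 - len(s) % 16)

def unpad(s):
    return s[:-ord(s[-1])]   # assumed inverse, mirroring the pkcs5_unpad negative

padded = pad("hello")        # 5 chars -> 11 copies of chr(11) appended
assert len(padded) == 16
assert unpad(padded) == "hello"
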
generate lat and lon
def gen_lat_lon(self): delta = round(random.random() * random.randint(1, 4), 4) sign = random.randint(1, 100) if sign % 2 == 0: self.lat += delta else: self.lat -= delta delta = round(random.random() * random.randint(1, 4), 4) sign = random.randint(1, 100) if sign % 2 == 0: self.lon += delta else: self.lon -= delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_gps_gen_from_range(s_lat,n_lat, e_lon, w_lon):\n #print(s_lat, n_lat, e_lon, w_lon)\n latitude = random.uniform(s_lat, n_lat)\n longitude = random.uniform(e_lon, w_lon)\n return latitude, longitude", "def genLatLong(lat1, long1, lat2, long2):\n\tlat = lat1 + (lat2 - lat1) * random.random()\n\tlongg = long1 + (long2 - long1) * random.random()\n\treturn (lat, longg)", "def gen_gps_to_coords(lat,lon,rows,cols,min_lat,max_lat,min_lon,max_lon):\n\n if (lat <= min_lat or lat >= max_lat or lon <= min_lon or lon >= max_lon):\n return (-1,-1)\n\n lat_step = abs(max_lat-min_lat)/rows\n lon_step = abs(max_lon-min_lon)/cols\n\n lat_spot = int((max_lat-lat)/lat_step)\n lon_spot = int((lon-min_lon)/lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def makeUpCoords(numb):\n # bounds of UK in EPSG:4326\n minLat=49.96\n maxLat=60.84\n minLon=-7.5\n maxLon=1.78\n # generate array of random numbers\n lon=np.random.rand(numb)*(maxLon-minLon)+minLon\n lat=np.random.rand(numb)*(maxLat-minLat)+minLat\n return(lon,lat)", "def _getXYZ ( lon, lat ):\n d2r = pi / 180.\n rlon, rlat = ( d2r * lon, d2r * lat )\n x = cos(rlat) * cos(rlon)\n y = cos(rlat) * sin(rlon)\n z = sin(rlat)\n return (x,y,z)", "def map(self, lat, long):\r\n rxy = self._r*lat/(np.pi/2)\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)", "def map(self, lat, long):\r\n rxy = self._r*np.sqrt(1-np.cos(lat))\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)", "def lat_lons(self):", "def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng", "def CreateTargetGeoMap(latS, latN, lonW, lonE, latlen, lonlen):\n\n lat_grid = np.linspace(latS, latN, latlen)\n lon_grid = np.linspace(lonW, lonE, lonlen)\n\n return lat_grid,lon_grid", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def lonlat2xy(s_lon, s_lat): # x: easting, y: northing\r\n # convert decimals to seconds...\r\n s_lon = dec2sec(s_lon)\r\n s_lat = dec2sec(s_lat)\r\n\r\n ## Auxiliary values \r\n # i.e. 
differences of latitude and longitude relative to Bern in the unit [10000'']\r\n s_lng_aux = (s_lon - 26782.5)/10000.\r\n s_lat_aux = (s_lat - 169028.66)/10000.\r\n \r\n # easting\r\n s_x = (600072.37 \r\n + 211455.93*s_lng_aux \r\n - 10938.51*s_lng_aux*s_lat_aux \r\n - 0.36*s_lng_aux*(s_lat_aux**2) \r\n - 44.54*(s_lng_aux**3))\r\n \r\n # northing\r\n s_y = (200147.07 \r\n + 308807.95*s_lat_aux \r\n + 3745.25*(s_lng_aux**2) \r\n + 76.63*(s_lat_aux**2) \r\n - 194.56*(s_lng_aux**2)*s_lat_aux \r\n + 119.79*(s_lat_aux**3))\r\n\r\n return s_x, s_y", "def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * \\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p ** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y", "def gps_to_coords(self,lat,lon):\n\n if (lat <= self.min_lat or lat >= self.max_lat or lon <= self.min_lon or lon >= self.max_lon):\n return (-1,-1)\n\n lat_spot = int((self.max_lat-lat)/self.lat_step)\n lon_spot = int((lon-self.min_lon)/self.lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def coords_to_gps(self,coords):\n return ((self.max_lat - (self.lat_step * (0.5+coords[0]))),(self.min_lon + (self.lon_step * (0.5+coords[1]))))", "def get_latlon():\n\t\n iss.compute() # Get the lat/long values from ephem\n long_value = [float(i) for i in str(iss.sublong).split(\":\")]\n if long_value[0] < 0:\n long_value[0] = abs(long_value[0])\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"W\"\n else:\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"E\"\n cam.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)\n lat_value = [float(i) for i in str(iss.sublat).split(\":\")]\n if lat_value[0] < 0:\n lat_value[0] = abs(lat_value[0])\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"S\"\n else:\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"N\"\n cam.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)\n return (iss.sublat / degree, iss.sublong / degree)", "def merc(lat, lon):\n\tr_major = 6378137.000\n\tx = r_major * math.radians(lon)\n\tscale = x/lon\n\ty = 180.0/math.pi * math.log(math.tan(math.pi/4.0 + lat * (math.pi/180.0)/2.0)) * scale\n\treturn (x, y)", "def getlatlon(self):\n lat = np.pi/2.0 - self._th\n time = self.gettime()\n lon = self._phi - 2*np.pi*time/86164.09164\n return lat, lon", "def LongLat(x, y, z):\n r = (x**2 + y**2 + z**2)**0.5\n long = numpy.arctan2(y, x)\n lat = numpy.arcsin(z / r)\n return long, lat, r", "def 
parsenwspt(text):\n lat = int(text[0:4]) / 100\n lon = int(text[4:])\n if lon < 1000:\n lon += 10000\n return (lon / -100, lat)", "def hello():\n\tlongitud = randint(0,100)\n\tlatitud = randint(0,100)\n\treturn str(longitud)+\",\"+str(latitud);", "def create_pseudo_epsg4326_coordinates(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['lat'] = 180*(pi/2 - np.arccos(self.df_attributes['coord_z']))/pi\n self.df_attributes['lon'] = 180*np.arctan2(self.df_attributes['coord_y'], self.df_attributes['coord_x'])/pi", "def _coord(xend, yend):\n x = np.random.randint(0, xend)\n y = np.random.randint(0, yend)\n return x, y", "def xy2ll(x, y):\n lon = math.degrees(x / EARTH_RADIUS)\n lat = math.degrees(2 * math.atan(math.exp(y / EARTH_RADIUS)) - math.pi / 2)\n return lon, lat + 51", "def _generate_coords(self):\n coords = np.dstack([self.X.ravel(), self.Y.ravel()])[0]\n return coords, spatial.cKDTree(coords)", "def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return new_x, new_y, z", "def _getNewCoords(self, coords, offset):\n\n oldlat = math.radians(float(coords[0]))\n oldlon = math.radians(float(coords[1]))\n magnitude = float(offset[0]) / 6370.\n direction = math.radians(360.-float(offset[1]))\n\n # Calculate lat/lon given radial and distnace (http://www.edwilliams.org/avform.htm#LL)\n lat = math.asin(math.sin(oldlat) * math.cos(magnitude) + math.cos(oldlat) \\\n * math.sin(magnitude) * math.cos(direction))\n lon = (oldlon - math.asin(math.sin(direction) * math.sin(magnitude) / math.cos(lat)) \\\n + math.pi) % (2 * math.pi) - math.pi\n\n # print coords, offset, oldlat, oldlon, magnitude, direction, math.degrees(lat), math.degrees(lon)\n return (math.degrees(lat), math.degrees(lon))", "def calcPosition (lat, lon):\n nauticalMilePerLat = 60.00721\n nauticalMilePerLongitude = 60.10793\n rad = math.pi / 180.0\n milesPerNauticalMile = 1.15078\n \n y = lat * nauticalMilePerLat\n x = math.cos(lat * rad) * lon * nauticalMilePerLongitude\n\n return x * milesPerNauticalMile * 1609.344, y * milesPerNauticalMile * 1609.344", "def ll2xyz(lon_pt,lat_pt):\n\n xPt = np.cos(lat_pt) * np.cos(lon_pt)\n yPt = np.cos(lat_pt) * np.sin(lon_pt)\n zPt = np.sin(lat_pt)\n return [xPt,yPt,zPt]", "def lonlat(n_lon: int, n_lat: int) -> List[Tuple[float, float]]:\n grid = []\n for lon in np.linspace(0, 360.0 - 360.0 / n_lon, n_lon):\n for lat in np.linspace(-90, 90, n_lat):\n grid.append((lon, lat))\n return grid" ]
[ "0.7581835", "0.7333437", "0.72476673", "0.709161", "0.70625556", "0.69933414", "0.69697577", "0.69488233", "0.6847795", "0.6811374", "0.6656267", "0.66469145", "0.6581302", "0.6519071", "0.6518511", "0.6500831", "0.64401203", "0.64244574", "0.64127237", "0.6407213", "0.6367028", "0.633665", "0.63069975", "0.6288415", "0.6276787", "0.6266312", "0.62436396", "0.62237406", "0.62190104", "0.61933905" ]
0.7594734
0
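
The matched gen_lat_lon nudges each stored coordinate by a random delta of up to about 4 degrees, with the sign decided by the parity of a random integer. A standalone restatement of that jitter; the function name and the starting coordinates are placeholders.

# Standalone restatement of the lat/lon jitter in the matched document.
import random

def jitter_coordinate(value):
    delta = round(random.random() * random.randint(1, 4), 4)  # up to ~4 degrees, 4 decimals
    if random.randint(1, 100) % 2 == 0:                        # parity picks the sign
        return value + delta
    return value - delta

lat = jitter_coordinate(39.9042)    # placeholder starting point
lon = jitter_coordinate(116.4074)
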
get a mobile info
def get_mobile_info(self): # 1. select brand self.select_brand() # 2. select os self.select_os() # 3. device_id self.gen_device_id() # 4. lat lon self.gen_lat_lon() # 5. mac self.gen_mac()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mobile(self) -> Optional[str]:\n return pulumi.get(self, \"mobile\")", "def phone_mobile(self, instance):\r\n return instance.user.profile.phone_mobile", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def getDeviceInfo():\n url = \"https://api.roblox.com/reference/deviceinfo\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def get_guest_info(self):\n url = \"http://api.applezhuan.com/api/c/get_guestinfo?&\"\n params = {\n \"android_id\": self.mobile.android_id,\n \"platform\": \"2\",\n \"av\": \"2\",\n \"token\": \"\",\n \"time\": self.get_current_time,\n \"ov\": self.mobile.os,\n \"lon\": self.mobile.lon,\n \"lat\": self.mobile.lat,\n \"device_name\": \"dpi\",\n \"device_code\": self.device_code,\n \"brand\": self.mobile.brand,\n \"mac\": self.mobile.mac,\n \"vn\": \"1.0.2\",\n \"network\": self.mobile.network\n }\n params_str = self.encrypt.get_secret_param(params)\n url = url + \"s=\" + params_str\n headers = {\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; U; Android \" + self.mobile.os + \"; zh-cn; GT-N7100 Build/\" +\n self.mobile.brand + \") AppleWebKit/534.30 (KHTML, like Gecko) \"\n \"Version/4.0 Mobile Safari/534.30\",\n \"Host\": \"api.applezhuan.com\",\n \"Connection\": \"Keep-Alive\",\n \"Accept-Encoding\": \"gzip\",\n \"Cookie\": self.cookie\n }\n\n res = requests.get(url, headers=headers)\n # print(res.text)\n result = json.loads(res.text)\n print(result)\n self.guest_info = result[\"d\"]\n self.guest_info.pop(\"h5_url\")\n self.guest_info.pop(\"banner\")\n self.guest_info.pop(\"menu\")\n self.guest_info.pop(\"headimg\")", "async def get_phone(self):\n\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='get')\n return e", "def __str__(self):\n return self.mobile", "async def device_info(request):\n textx = await request.get_reply_message()\n codename = request.pattern_match.group(1)\n if codename:\n pass\n elif textx:\n codename = textx.text\n else:\n await edit_or_reply(request, \"`Usage: .device <codename> / <model>`\")\n return\n data = json.loads(\n get(\n \"https://raw.githubusercontent.com/androidtrackers/\"\n \"certified-android-devices/master/by_device.json\"\n ).text\n )\n results = data.get(codename)\n if results:\n reply = f\"**Search results for {codename}**:\\n\\n\"\n for item in results:\n reply += (\n f\"**Brand**: `{item['brand']}`\\n\"\n f\"**Name**: `{item['name']}`\\n\"\n f\"**Model**: `{item['model']}`\\n\\n\"\n )\n else:\n reply = f\"`Couldn't find info about {codename}!`\\n\"\n await edit_or_reply(request, reply)", "def get_info(self):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"info?\" + \"\\r\\n\")\n info = m.read(100)\n info = info[7:]\n result = string.strip(info)\n return result\n else:\n pass", "def get_info_cli(self, no):\n #\n os.system(\"toilet -f smblock --filter border:metal -w 50 'Indo Phone Number Checker - ( IPNC )'\")\n print()\n os.system(\"toilet -f smblock ' by : @danrfq' --filter gay\")\n print()\n\n for data in self.get_info(no):\n if \"message\" in data:\n print(c.fg.red+\"[ ERROR ]\\nFormat Nomor Yang Anda Masukkan Salah!\"+c.end+\"\\n\"+c.fg.lightgreen+\"Contoh Nomor : +6281291718019\"+c.end)\n else:\n print(c.fg.yellow+\"\"\"╔ [ {} Information ]\n╠\n╠ International : {}\n╠ National : {}\n╠ 
Provider : {}\n╠ Type : {}\n╠ Location : {}\n╠ Timezones : {}\n╠\n╚ [ Finish ]\"\"\".\n format(no,\n data[\"international\"],\n data[\"national\"],\n data['provider'],\n data[\"type\"].replace(\"_\",\" \").title(),\n data[\"location\"],\n \", \".join(data[\"timezone\"]))+c.end\n )", "def get_user_info(self):\n url = \"http://api.applezhuan.com/api/c/get_userinfo?&\"\n params = {\n \"android_id\": self.mobile.android_id,\n \"platform\": \"2\",\n \"av\": \"2\",\n \"token\": self.token,\n \"time\": self.get_current_time,\n \"ov\": self.mobile.os,\n \"lon\": self.mobile.lon,\n \"lat\": self.mobile.lat,\n \"device_name\": \"dpi\",\n \"device_code\": self.device_code,\n \"brand\": self.mobile.brand,\n \"mac\": self.mobile.mac,\n \"vn\": \"1.0.2\",\n \"network\": self.mobile.network\n }\n params_str = self.encrypt.get_secret_param(params)\n url = url + \"s=\" + params_str\n headers = {\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; U; Android \" + self.mobile.os + \"; zh-cn; GT-N7100 Build/\" +\n self.mobile.brand + \") AppleWebKit/534.30 (KHTML, like Gecko) \"\n \"Version/4.0 Mobile Safari/534.30\",\n \"Host\": \"api.applezhuan.com\",\n \"Connection\": \"Keep-Alive\",\n \"Accept-Encoding\": \"gzip\",\n \"Cookie\": self.cookie\n }\n\n res = requests.get(url, headers=headers)\n # print(res.text)\n result = json.loads(res.text)\n\n self.user_info = result[\"d\"]\n self.user_info.pop(\"h5_url\")\n self.user_info.pop(\"banner\")\n self.user_info.pop(\"menu\")\n self.user_info.pop(\"headimg\")\n self.user_info.pop(\"token\")\n # print(self.user_info)", "def get_device_details(device):\n ret = device.wait_for_output(\"SetupQRCode\")\n if ret is None or len(ret) < 2:\n return None\n\n qr_code = re.sub(\n r\"[\\[\\]]\", \"\", ret[-1].partition(\"SetupQRCode:\")[2]).strip()\n try:\n device_details = dict(SetupPayload().ParseQrCode(\n \"VP:vendorpayload%{}\".format(qr_code)).attributes)\n except exceptions.ChipStackError as ex:\n log.error(ex.msg)\n return None\n\n return device_details", "def getInfo():", "def check_mobile(data):\n\n firebase_uid = data['session'].split('/')[-1]\n db = firebase.database()\n follow_up_event = \"continue_house\"\n mobile = db.child(\"user_data\").child(firebase_uid).child(\"Mobile Number\").get().val()\n try:\n origin = data[\"queryResult\"][\"fulfillmentMessages\"][1][\"payload\"][\"origin\"]\n if origin == \"confirmedCall\":\n follow_up_event = \"continue_call\"\n except:\n pass\n if mobile == \"0\" or mobile is None:\n print(\"Mobile number not found.\")\n response = {\n \"followupEventInput\": {\n \"name\": \"request_mobile\",\n \"languageCode\": \"en-US\"\n }}\n else:\n print(\"Mobile number found: \" + mobile)\n response = {\n \"followupEventInput\": {\n \"name\": follow_up_event,\n \"languageCode\": \"en-US\"\n }\n }\n return response", "def get_single_phone(self, url: str) -> list:\n page = requests.get(url, headers={\"User-Agent\": self.user_agent.google})\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n temp_price = np.nan\n temp_model = np.nan\n temp_ram = np.nan\n temp_storage = np.nan\n temp_processor = np.nan\n temp_camera = np.nan\n\n if soup.find(class_=\"display-price\"):\n temp_price = soup.find(class_=\"display-price\").get_text()\n\n for item in soup.find_all('div', class_='s-name'):\n if item.get_text() == \"Model\": temp_model = item.next_sibling.get_text() \n if item.get_text() == \"RAM\": temp_ram = item.next_sibling.get_text()\n if item.get_text() == \"Storage Capacity\": temp_storage = 
item.next_sibling.get_text() \n if item.get_text() == \"Processor\": temp_processor = item.next_sibling.get_text() \n if item.get_text() == \"Camera Resolution\": temp_camera = item.next_sibling.get_text() \n \n return [temp_price, temp_model, temp_ram, temp_storage, temp_processor, temp_camera]", "def get_data():\n info_html = urlopen(\"http://marci1368.getmarci.com\").read()\n div = Soup(info_html, \"html.parser\").find('div', {'id': 'letterbox1'})\n moreinfo = get_itunes_info(div[\"data-artist\"], div[\"data-title\"])\n if not moreinfo:\n return {\"artistName\": div[\"data-artist\"],\n \"trackName\": div[\"data-title\"],\n \"collectionName\": div[\"data-album\"]}\n return moreinfo", "def get_info(self, no):\n\n final_result = {} # variable for store the data\n\n try:\n if type(no) == str:\n if no[3] == \"8\" or no[3] == \"2\":\n data = requests.get(\"https://api.antideo.com/phone/id/\"+no).json()\n if \"error\" not in data and data[\"valid\"] == True:\n if data[\"type\"] == \"FIXED_LINE\": ca = \"-\"\n else:\n try:ca = data[\"carrier\"]\n except:ca = \"-\"\n f = data[\"formats\"]\n final_result['number_phone'] = no\n final_result['international'] = f['international']\n final_result['national'] = f['national']\n final_result['provider'] = ca\n final_result['type'] = data['type'].replace('_', ' ').title()\n final_result['location'] = data['location']\n final_result['timezone'] = data['timezones']\n elif data['valid'] == False:\n final_result['message'] = \"Nomor yang anda masukan tidak valid\"\n else:\n final_result['message'] = \"Nomor yang anda masukan salah, contoh +6281291718019\"\n else:\n final_result['message'] = \"Error, the parameter must be string\"\n except:\n final_result['message'] = \"Error, the parameter must be string\"\n\n\n # returning the data\n yield final_result", "def nflinfo(self, irc, msg, args, optlist, optplayer):\n \n mobile = False\n for (option, arg) in optlist:\n if option == 'mobile':\n mobile = True\n \n optplayer = optplayer.lower().strip()\n \n lookupid = self._playerLookup('eid', optplayer)\n \n if lookupid == \"0\":\n irc.reply(\"No player found for: %s\" % optplayer)\n return\n \n if not mobile: # mobile method, which is an alternative.\n \n url = self._b64decode('aHR0cDovL20uZXNwbi5nby5jb20vbmZsL3BsYXllcmluZm8=') + '?playerId=%s&wjb=' % lookupid\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n\n soup = BeautifulSoup(html)\n team = soup.find('td', attrs={'class':'teamHeader'}).find('b')\n playerName = soup.find('div', attrs={'class':'sub bold'})\n divs = soup.findAll('div', attrs={'class':re.compile('^ind tL$|^ind alt$|^ind$')})\n\n append_list = []\n\n for div in divs:\n bold = div.find('b')\n if bold:\n key = bold \n bold.extract()\n value = div\n append_list.append(str(key.getText() + \": \" + value.getText()))\n\n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} :: {1} :: {2}\".format(ircutils.mircColor(playerName.getText(), 'red'),ircutils.bold(team.getText()), descstring)\n \n irc.reply(output) \n \n else:\n \n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL25mbC9wbGF5ZXIvXy9pZA==') + '/%s/' % lookupid\n \n try: \n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n html = html.replace('&nbsp;','')\n\n soup = BeautifulSoup(html)\n playername = soup.find('a', attrs={'class':'btn-split-btn'}).renderContents().strip()\n ul = soup.find('ul', 
attrs={'class':'general-info'})\n numpos = ul.find('li', attrs={'class':'first'})\n heightw = numpos.findNext('li')\n team = ul.find('li', attrs={'class':'last'}).find('a')\n ul2 = soup.find('ul', attrs={'class':'player-metadata floatleft'}) \n \n bd = ul2.find('li') # and remove span below\n span = bd.find('span') \n if span:\n span.extract()\n \n bp = bd.findNext('li')\n \n exp = bp.findNext('li') # remove span\n span = exp.find('span') \n if span:\n span.extract()\n \n col = exp.findNext('li') # remove span.\n span = col.find('span') \n if span:\n span.extract()\n \n output = \"{0} :: {1} {2} Bio: {3} {4} College: {5}\".format(ircutils.bold(playername), numpos.text, team.text, bd.text, exp.text, col.text)\n irc.reply(output)", "def list_mobile_global_beacons():\n return GlobalBeacon.list_mobile()", "def get_info(self,honeypotids):\n req = {\"type\":\"get_info\",\n \"from\":self.network.mc_id,\n \"to\": honeypotids}\n expect_dict = {\"type\":\"send_info\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"info\"]\n return answer", "def mobile(request):\n MOBILE_AGENT_RE = re.compile(\n r\".*(iphone|mobile|androidtouch)\", re.IGNORECASE)\n if MOBILE_AGENT_RE.match(request.META['HTTP_USER_AGENT']):\n return True\n else:\n return False", "def _get_details(self, device_object, **kwargs):\r\n params = dict()\r\n if kwargs:\r\n for key, val in kwargs.items():\r\n if '_' in key:\r\n new_key = key.replace(\"_\",\"-\") \r\n params[new_key] = val\r\n else:\r\n params[key] = val\r\n\r\n try: \r\n response = requests.get(device_object.href,\r\n auth=(self.user,self.pwd), \r\n params=params, verify=False)\r\n info = json.loads(response.text)\r\n return info[\"content\"]\r\n except requests.exceptions.RequestException as e:\r\n print \"Error:\",e\r\n return 1", "def getPhoneStats(url):\n p = Phone()\n try:\n request = requests.get(url)\n if request.status_code == 200:\n soup = BeautifulSoup(request.text, \"html.parser\")\n p.ram = get_ram(soup)\n p.memory = get_memory(soup)\n p.image = get_image(soup)\n for table in soup.findAll(\"table\"):\n header = table.th.get_text()\n if header == 'Camera':\n p.camera = get_camera(table)\n if header == 'Battery':\n p.battery = get_battery(table)\n if header == 'Misc':\n p.price = get_price(table)\n else:\n print('unable to connect ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return p", "def ldap_get_number(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n number = result.get(\"mobile-phone\")[0]\n return number\n\n return None", "def get_personal_info(self):\n self.get(\"INFO\",\"GetPersonalInfo\")\n response = self.send()\n return response", "def get_device_info(self): # pylint: disable=no-self-use\r\n serial = get_serial_number()\r\n model = get_model()\r\n\r\n return {\r\n \"serial\": serial,\r\n \"model\": model,\r\n }", "def user_sends_get_call_to_the_devices():\n web_app.list_devices()", "def retrieve_data(self, device):\n CISCO_USER_MODE_LOGIN_INFO['device_type'] = 'cisco_ios'\n CISCO_USER_MODE_LOGIN_INFO['ip'] = device\n # add try catch\n device = ConnectHandler(**CISCO_USER_MODE_LOGIN_INFO)\n device.find_prompt()\n lldp_connections = device.send_command('show cdp neighbors')\n ram_usage = device.send_command('show processes memory | include Processor')\n cpu_usage = device.send_command('show processes cpu sorted | include CPU')\n errors = device.send_command('show interfaces | include 
CRC|Fast|Serial|Gig')\n unsed_port = device.send_command('show interfaces | include line protocol is down')\n return lldp_connections, ram_usage, cpu_usage, errors, unsed_port", "def get_device_information(self):\n return self.mycam.devicemgmt.GetDeviceInformation()" ]
[ "0.6920831", "0.65805537", "0.640192", "0.640192", "0.63525945", "0.62655234", "0.62214077", "0.6203879", "0.6175521", "0.6166035", "0.6148727", "0.61350715", "0.58731014", "0.5816996", "0.5743894", "0.57154775", "0.56877965", "0.56614953", "0.56607634", "0.56430876", "0.56250393", "0.5621831", "0.5620774", "0.56152856", "0.560884", "0.5581209", "0.5571417", "0.55666775", "0.554329", "0.55264753" ]
0.8020666
0
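
The matched get_mobile_info is a driver that fills in brand, OS, device id, lat/lon and MAC in five steps. A hedged sketch of how those steps could hang together in one class; only the call order comes from the row, while the class name and each generator body are illustrative assumptions.

# Hedged sketch of the five-step profile setup; bodies are illustrative assumptions.
import random
import uuid

class MobileProfile:
    def get_mobile_info(self):
        self.brand = random.choice(["samsung", "huawei", "xiaomi"])      # 1. select brand
        self.os = random.choice(["8.1.0", "9", "10"])                    # 2. select os
        self.device_id = uuid.uuid4().hex                                # 3. device_id
        self.lat, self.lon = 39.9042, 116.4074                           # 4. lat lon (placeholders)
        self.mac = ":".join("%02x" % random.randint(0, 255) for _ in range(6))  # 5. mac
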