Dataset schema:
  query           string  (length 9 to 9.05k)
  document        string  (length 10 to 222k)
  metadata        dict
  negatives       sequence (length 30)
  negative_scores sequence (length 30)
  document_score  string  (length 4 to 10)
  document_rank   string  (2 classes)
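For orientation, the sketch below shows one way records with this schema could be read and inspected. It assumes the rows are stored as JSON lines in a hypothetical file `retrieval_triplets.jsonl`; the field names come from the schema above, but the file name and access pattern are assumptions, not part of the dataset.

```python
import json

# Minimal sketch: iterate over records assuming one JSON object per line.
# The file name is hypothetical; field names follow the schema above.
def iter_records(path="retrieval_triplets.jsonl"):
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            yield json.loads(line)

for record in iter_records():
    print(record["query"])             # natural-language docstring
    print(record["document"][:80])     # positive code snippet (truncated)
    print(len(record["negatives"]))    # 30 hard-negative snippets
    print(record["document_score"],    # retrieval score of the positive
          record["document_rank"])     # rank of the positive
    break
```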
Test if ith object has at least one attribute value as in values.
def _has_at_least_one_value(self, i, values):
    for a in values:
        j = self.attributes.index(a)
        v = values[a]
        if self[i][j] == v:
            return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_attr(self, name):\n return name in self and not self[name] in EMPTY_VALUES", "def _has_values(self, i, values):\n for a in values:\n j = self.attributes.index(a)\n v = values[a]\n if self[i][j] != v:\n return False\n return True", "def has_attributes(self):\n\n pass", "def hasattrs(obj, names):\n return all(hasattr(obj, attr) for attr in names)", "def hasattrs(obj, names):\n return all(hasattr(obj, attr) for attr in names)", "def hasRequiredAttributes(self):\n return _libsbml.SpeciesFeatureValue_hasRequiredAttributes(self)", "def contains_attr(self, gi):\n if gi is None:\n return False\n for gi_obj in self.gradual_items:\n if gi.attribute_col == gi_obj.attribute_col:\n return True\n return False", "def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True", "def _checkValues(set_):\n if len(set_)<3: return False\n x = set_[2]\n # TODO: OPT: need optimization\n if (x is None) or len(x) == 0: return False # undefined\n for v in x:\n try:\n if Nlabels <= 2 and N.isscalar(v):\n continue\n if (isinstance(v, dict) or # not dict for pairs\n ((Nlabels>=2) and len(v)!=Nlabels) # 1 per each label for multiclass\n ): return False\n except Exception, e:\n # Something else which is not supported, like\n # in shogun interface we don't yet extract values per each label or\n # in pairs in the case of built-in multiclass\n if __debug__:\n debug('ROC', \"Exception %s while checking \"\n \"either %s are valid labels\" % (str(e), x))\n return False\n return True", "def has_attributes(self):\n return bool(self.attrs)", "def hasRequiredAttributes(self):\n return _libsbml.PossibleSpeciesFeatureValue_hasRequiredAttributes(self)", "def _hasValuesChecker(entity, params):\n \n for key, values in constraints.iteritems():\n if entity.__getattribute__(key) not in values:\n return False\n\n return True", "def has_value(cls, value):\n return value in [item.value for item in cls]", "def has_attr(self, key):\n return key in self.attrs", "def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)", "def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)", "def has_value(self):\n return hasattr(self, '_value')", "def attrs_all_equal(iterable, attr_name):\n return len({getattr(item, attr_name, float('nan')) for item in iterable}) <= 1", "def has_value(cls, value):\n return any(value == item.value for item in cls)", "def has_value(cls, value):\n return any(value == item.value for item in cls)", "def _is_ready(self):\n res = True\n for (key, val) in self._attrs.iteritems():\n if key not in self._optional_attrs:\n if val is None:\n res = False\n return res", "def has_attribute(self, key):\n return key in self.__dict", "def hasRequiredAttributes(self):\n return _libsbml.MultiSpeciesType_hasRequiredAttributes(self)", "def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True", "def hasRequiredAttributes(self):\n return _libsbml.Species_hasRequiredAttributes(self)", "def isempty(self):\n\n if self.values is None or self.values.empty:\n return True", "def __contains__(self, item):\n return item in self.attrs", "def subfields_any(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') == req_val:\n return True\n return False", "def _hasValuesCheckerWrapper(self, args):\n \n constraints = args['constraints']\n \n def _hasValuesChecker(entity, 
params):\n \"\"\"Checks if values of specified properties of an entity are in\n given sets. \n \"\"\"\n \n for key, values in constraints.iteritems():\n if entity.__getattribute__(key) not in values:\n return False\n\n return True\n\n return _hasValuesChecker", "def tied(self):\n for (x, y) in self.fields:\n if self.fields[x, y] == self.empty:\n return False\n return True" ]
[ "0.70248365", "0.69457525", "0.6656487", "0.64049226", "0.64049226", "0.63894224", "0.6377084", "0.6371646", "0.6359832", "0.63261044", "0.63092935", "0.6277216", "0.620286", "0.61922383", "0.6173832", "0.6173832", "0.617009", "0.61589897", "0.61460346", "0.61460346", "0.6145882", "0.61243016", "0.61180365", "0.6096089", "0.6077811", "0.60463154", "0.6024339", "0.6021567", "0.6011289", "0.6003165" ]
0.7703603
0
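The metadata block in each record declares a triplet objective over ("query", "document", "negatives"). Below is a minimal sketch of how such a record might be expanded into (anchor, positive, negative) training triplets; the unpacking logic is an illustrative assumption about intended use, not something documented by the dataset itself.

```python
# Sketch: expand one record into contrastive training triplets.
# Assumes `record` follows the schema above; the pairing logic is an
# illustrative assumption, not a documented part of the dataset.
def to_triplets(record):
    anchor = record["query"]        # docstring used as the anchor
    positive = record["document"]   # matching code snippet
    triplets = []
    for negative, score in zip(record["negatives"], record["negative_scores"]):
        triplets.append({
            "anchor": anchor,
            "positive": positive,
            "negative": negative,
            "negative_score": float(score),  # scores are stored as strings
        })
    return triplets
```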
Make and return compound (= original + complementary) context.
def compound(self):
    complementary_cxt = self.complementary()
    compound_table = [self.table[i] + complementary_cxt.table[i]
                      for i in range(len(self.objects))]
    return Context(compound_table,
                   self.objects,
                   self.attributes + complementary_cxt.attributes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self):\n rv = Context(self.decimal, self.year_mode, self.quantize_interest,\n self.quantize_currency)\n return rv", "def cget_representation_context(representation):\n assert representation is not None, \"This is a bug\"\n\n version, subset, asset, project = cparenthood(representation)\n\n assert all([representation, version, subset, asset, project]), (\n \"This is a bug\"\n )\n\n context = {\n \"project\": project,\n \"asset\": asset,\n \"subset\": subset,\n \"version\": version,\n \"representation\": representation,\n }\n\n return context", "def context(self) -> _C_out:\n return self._context", "def complementary(self):\n complementary_attributes = ['not ' + self.attributes[i]\n for i in range(len(self.attributes))]\n complementary_table = []\n for i in range(len(self.objects)):\n complementary_table.append([not self.table[i][j]\n for j in range(len(self.attributes))])\n return Context(complementary_table, self.objects, complementary_attributes)", "def patch_context(data, i, j, k, r):\n idxs = (np.array([i+r,i-r,i,i,i,i]),\n np.array([j,j,j+r,j-r,j,j]),\n np.array([k,k,k,k,k+r,k-r]))\n ctx = data[idxs]\n return ctx", "def context(self) -> CONTEXT:", "def enrich_context(self, ctx: Context) -> Context:\n new_ctx = Context(ctx.expressions[:], ctx.namespace)\n for _ in range(self.expression_levels):\n new_ctx.extend(list(self.properties(new_ctx)))\n new_ctx.extend(list(self.unary_ops(new_ctx)))\n new_ctx.extend(list(self.binary_ops(new_ctx)))\n new_ctx.extend(list(self.calls(new_ctx)))\n new_ctx.extend(list(self.comparisons(new_ctx)))\n new_ctx.extend(list(self.bool_ops(new_ctx)))\n return new_ctx", "def _squash(self):\n return PPContext(width=self._content_width,\n truncate=self._truncate,\n default_bullet=self._default_bullet)", "def create_context_in_tuple(request):\n params = request.param\n if isinstance(params, tuple):\n cc = params[0]\n remainder = tuple(params[1:])\n else:\n cc = params\n remainder = tuple()\n\n ctx = cc()\n def finalizer():\n ctx.release()\n gc.collect()\n request.addfinalizer(finalizer)\n\n if isinstance(params, tuple):\n return (ctx,) + remainder\n else:\n return ctx", "def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx", "def context_local(context=None):\n class manager(object):\n def __init__(self, ctx):\n \"\"\"\n :type ctx: Context\n \"\"\"\n self.context = ctx.copy()\n\n def __enter__(self):\n self.orig_context = context_get()\n context_set(self.context)\n return self.context\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n context_set(self.orig_context)\n\n if context is None:\n context = context_get()\n return manager(context)", "def get_context(x, w=2, normalize=True):\n\n # check if context exists\n# if os.path.isfile('contextdata.npy'):\n# print('loading context data from file')\n# return np.load('contextdata.npy')\n#\n input_dim = x.shape\n\n if normalize:\n x = np.reshape(x, [input_dim[0]*input_dim[1], input_dim[2]]) # for ease of normalization\n x = sklearn.preprocessing.normalize(x, norm='l2', axis=1)\n x = np.reshape(x, [input_dim[0], input_dim[1], input_dim[2]])\n\n # padding\n p = Context.pad(x, w)\n\n # extract context\n c = Context.slide(p, w)\n\n# np.save('contextdata.npy', c)\n\n return c", "def contextbound(self, _cls=_StackBound):\n return _cls(self, self.push_context, self.pop_context)", "def compound(self):\n return self._compound", "def _evalContext(self):\n def xor(*args):\n return sum(args) == 1\n def neg(result):\n return not 
result\n context = {\n 'xor': xor,\n 'neg': neg\n }\n return context", "def _createContext(instance, args, kwargs, settings):\n context = kwargs.copy()\n args = list(args)\n context.update({name:getattr(instance, name, None) for name in settings.get('context', [])})\n context.update({key:args.pop(0) for key in settings.get('argsTokwargs', [])})\n return context", "def add_context(self, data):\n pre_context = self._pre_context\n post_context = self._post_context\n if pre_context == 0 and post_context == 0:\n return data\n num_features = data.shape[1]\n if self._context_state is None:\n self._context_state = np.zeros((pre_context, num_features))\n data = np.concatenate((self._context_state, data))\n self._context_state = data[-(pre_context + post_context):, :]\n num_rows = data.shape[0] - pre_context - post_context\n num_columns = num_features * (pre_context + post_context + 1)\n context_filled_data = np.empty((num_rows, num_columns), dtype=float)\n context_filled_data[:, (pre_context) * num_features:(pre_context + 1) *\n num_features] = data[pre_context:data.shape[0] -\n post_context, :]\n for shift_amt in range(1, pre_context + 1):\n result = self.shift(data, shift_amt, pre_context, post_context)\n context_filled_data[:, (pre_context - shift_amt) *\n num_features:(pre_context - shift_amt + 1) *\n num_features] = result\n for shift_amt in range(1, post_context + 1):\n result = self.shift(data, -(shift_amt), pre_context, post_context)\n context_filled_data[:, (pre_context + shift_amt) *\n num_features:(pre_context + shift_amt + 1) *\n num_features] = result\n return context_filled_data", "def get_context(self):\r\n ctx = {}\r\n for clause in self.where_clauses or []:\r\n clause.update_context(ctx)\r\n return ctx", "def compose(self, other, qargs=None, front=False):\n pass", "def make_context(self, char_digits, sig_digits, rounding):\n if rounding not in _round_code:\n raise ValueError(\"Invalid rounding type specified\")\n\n class CharacteristicClass(BinaryCharacteristic):\n digits = char_digits\n largest = 2**char_digits-1\n bias = 2**(char_digits-1)-1\n exp_largest = 2**(char_digits-1)\n exp_lowest = -2**(char_digits-1) + 1\n\n class SignificandClass(BinarySignificand):\n digits = sig_digits\n largest = 2**sig_digits-1\n\n class context(ContextClass):\n characteristicClass = CharacteristicClass\n significandClass = SignificandClass\n largest_denorm = (Decimal(2) ** Decimal(-2**(char_digits-1)+2) ) * \\\n (1 - Decimal(2)**(-sig_digits))\n largest_norm = (1 - Decimal(\"0.5\")**(sig_digits+1)) * \\\n 2 ** (2**(char_digits - 1))\n digits = 1 + char_digits + sig_digits\n round_mode = rounding\n Etop = 2**(char_digits-1)\n Etiny = -2**(char_digits-1) + 1\n\n context.__name__ = \"Float_%d_%d_%s\" % (char_digits, sig_digits,\n _round_code[rounding])\n\n self.contexts[(char_digits, sig_digits, rounding)] = context\n return context", "def __getstate__(self):\n state = composites.Composite.__getstate__(self)\n state[\"o\"] = None\n return state", "def __call__(self, *args, **kw):\n return self.clone(*args, **kw).as_ctrait()", "def __call__(self, *args, **kw):\n return self.clone(*args, **kw).as_ctrait()", "def composedfun( *args, **kwords ):\n return outerfun( innerfun( *args, **kwords ) )", "def _exprep(self, context):\n return `self`", "def context(tensor):\n raise NotImplementedError", "def _ReplaceCompound(self, from_id, to_id):\n if from_id == to_id:\n return\n \n # set the coefficient of the original compound to 0\n i = self._FindCompoundIndex(from_id)\n if i is None:\n return\n how_many = 
self.reactants[i].coeff\n self.reactants[i].coeff = 0\n\n # create a new compound with the new kegg_id and the same coefficient\n # or add the number to the coefficient if it already is a reactant\n j = self._FindCompoundIndex(to_id)\n if j is None:\n self.reactants[i] = CompoundWithCoeff.FromId(how_many, to_id)\n else:\n self.reactants[j].coeff += how_many\n self._Dedup()\n\n # clear the cache since the reaction has changed\n self._catalyzing_enzymes = None", "def internalContext(indices, token):\n\n context = list(token)\n numer = indices[1] - indices[0]\n for ix in range(indices[0], indices[1]):\n context[ix] = \"\"\n context = tuple(context)\n\n return context, numer", "async def copy_context_with(ctx: commands.Context, *, author=None, **kwargs):\n\n # copy context and update attributes\n alt_message = copy.copy(ctx.message)\n alt_message._update(alt_message.channel, kwargs)\n\n if author is not None:\n alt_message.author = author\n\n # obtain and return a new context of the same type\n return await ctx.bot.get_context(alt_message, cls=type(ctx))", "def _from_composite(self, name: str, context: Any) -> Any:\n attr_name = self.attribute_name\n comp_data = {}\n attribs = set(self.stash_by_attribute.keys())\n if self.load_keys is not None:\n attribs = attribs & self.load_keys\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'load attribs: {attribs}')\n for stash in self.stash_by_group.values():\n if len(stash.group & attribs) > 0:\n data = stash.load(name)\n logger.debug(f'loaded: {data}')\n if data is None:\n raise PersistableError(\n f'Missing composite data for id: {name}, ' +\n f'stash: {stash.group}, path: {stash.path}, ' +\n f'attribute: \\'{attr_name}\\'')\n if self.load_keys is None:\n comp_data.update(data)\n else:\n for k in set(data.keys()) & attribs:\n comp_data[k] = data[k]\n if context is not None:\n ordered_data = collections.OrderedDict()\n for k in context:\n if k in comp_data:\n ordered_data[k] = comp_data[k]\n comp_data = ordered_data\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'comp_data: {comp_data}')\n return comp_data" ]
[ "0.58621174", "0.56428766", "0.5634684", "0.55592126", "0.5528197", "0.5446185", "0.54385996", "0.53360796", "0.5193938", "0.51734984", "0.5138041", "0.51301104", "0.5090778", "0.50727236", "0.5049161", "0.503264", "0.500528", "0.50052077", "0.50011003", "0.4940245", "0.49299234", "0.49234837", "0.49234837", "0.49161214", "0.49048376", "0.49047202", "0.48800808", "0.48428595", "0.4827019", "0.47994906" ]
0.71345913
0
Make random context, useful for testing.
def make_random_context(num_obj, num_att, d):
    obj_ls = ['g' + str(x) for x in range(num_obj)]
    att_ls = ['m' + str(x) for x in range(num_att)]
    table = [[int(d > random.random()) for _ in range(num_att)]
             for _ in range(num_obj)]
    return Context(table, obj_ls, att_ls)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seed():", "def random_state(state):\n old_state = RandomState()\n state.set_global()\n yield\n old_state.set_global()", "def generate_context(self) -> Context:\n self._transient_context = Context()\n return self._transient_context", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def seed():\n pass", "def seed():\n pass", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def fake_init():\n return Faker()", "def totem_random():\n random_head()\n random_head()\n random_head()", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def clone_rand(self):", "def _make_context():\n\n return {\n 'app': app,\n 'db': db,\n 'User': User\n }", "def create_context(cls):\n pass", "def Randomize(seed=None):\n random.seed()", "def random():\n return constant(1)", "def _make_context():\n return {'app': app, 'db': db}", "def reset(ctx, with_testdb):\n ctx.invoke(init, with_testdb=with_testdb)\n ctx.invoke(seed)\n\n return None", "def reset(ctx, with_testdb):\n ctx.invoke(init, with_testdb=with_testdb)\n ctx.invoke(seed)\n\n return None", "def sample(self, context: Context) -> T:\n ...", "def _make_context():\n return {'app': app,\n 'db': db,\n 'User': User\n }", "def _context():\n global _trident_context\n if _trident_context is None:\n _trident_context = _Context()\n return _trident_context", "def seed_random():\n random.seed(0)", "def sample_contexts(contexts: list, n: int) -> list:\n return random.sample(contexts, n) if len(contexts) > n else contexts", "def create_challenge():\n\treturn os.urandom(12)", "async def lathow(self, context):\n\n await random_image(context, 'lathow')", "def _get_sample_plain_context(self):\r\n context = {\r\n 'course_title': \"Bogus Course Title\",\r\n 'course_url': \"/location/of/course/url\",\r\n 'account_settings_url': \"/location/of/account/settings/url\",\r\n 'platform_name': 'edX',\r\n 'email': '[email protected]',\r\n }\r\n return context", "def test_context_creation_and_retrieval(self):\n tracer_id = 'd551573a-01dc-41b2-b197-ea8afb7fbac1'.replace('-', '')\n\n with new_context(tracer_id=tracer_id):\n context = get_context()\n nose.tools.eq_(tracer_id, str(context.tracer_id))", "def testrandom(self):\n for i in range(100):\n AmuletAbility()", "def seed(*args, **kwargs): # real signature unknown\n pass", "def _generate_raw_environments(self, num, seed):" ]
[ "0.6258607", "0.6205847", "0.6091151", "0.5997577", "0.59703875", "0.59703875", "0.58561", "0.58427304", "0.57903767", "0.5789003", "0.578871", "0.578098", "0.5760028", "0.5742718", "0.5738766", "0.5716231", "0.5712584", "0.5712584", "0.57073", "0.57063144", "0.5705966", "0.56959456", "0.56951433", "0.5676908", "0.56668293", "0.56586105", "0.5652466", "0.5644117", "0.56401217", "0.5635604" ]
0.7311829
0
Testing list of reported events with limit
def test_limit(self) -> None:
    channel = self.make_request(
        "GET",
        self.url + "?limit=5",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 20)
    self.assertEqual(len(channel.json_body["event_reports"]), 5)
    self.assertEqual(channel.json_body["next_token"], 5)
    self._check_fields(channel.json_body["event_reports"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def _get_limited_event_history(device_event_file_path,\n event_labels,\n limit,\n timeout=10.0):\n result = []\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n if event_labels is None:\n tac_cmd = [\"tac\", device_event_file_path]\n head_cmd = [\"timeout\", timeout_str, \"head\", \"-n\", str(limit)]\n tac_proc = subprocess.Popen(tac_cmd, stdout=subprocess.PIPE)\n head_proc = subprocess.Popen(\n head_cmd, stdin=tac_proc.stdout, stdout=subprocess.PIPE)\n tac_proc.stdout.close()\n out, _ = head_proc.communicate()\n if head_proc.returncode == 124:\n timedout = True\n tac_proc.terminate()\n json_events = out.splitlines()\n else:\n tac_cmd = [\"tac\", device_event_file_path]\n grep_cmd = [\"timeout\", timeout_str, \"grep\", \"-m\", str(limit), \"-w\"]\n for event_label in event_labels:\n if event_label:\n grep_cmd.append(\"-e\")\n grep_cmd.append(event_label)\n tac_proc = subprocess.Popen(tac_cmd, stdout=subprocess.PIPE)\n grep_proc = subprocess.Popen(\n grep_cmd, stdin=tac_proc.stdout, stdout=subprocess.PIPE)\n tac_proc.stdout.close()\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n tac_proc.terminate()\n json_events = out.splitlines()\n\n return _get_events_from_json_output(json_events, event_labels), timedout", "def test_stream(self):\n with skipping(NotImplementedError):\n self.es = EventStreamsTestClass(streams='recentchange')\n limit = 50\n self.es.set_maximum_items(limit)\n self.assertLength(list(self.es), limit)", "def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_max_events_range(self):\n\n self.log.info(\"Testing max_event counts\")\n enable_failover = True\n timeout_val = 10\n max_plus_1 = CbServer.Failover.MAX_EVENTS + 1\n\n # Set max_events between (min, max)\n for num_events in range(CbServer.Failover.MIN_EVENTS, max_plus_1):\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=num_events)\n self.assertTrue(status, \"Failed to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, num_events)\n\n for num_events in [0, max_plus_1]:\n self.log.info(\"Testing max_event_count=%s\" % num_events)\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=max_plus_1)\n self.assertFalse(status, \"Able to set max events=%s\" % num_events)\n 
self.validate_failover_settings(enable_failover, timeout_val,\n 0, CbServer.Failover.MAX_EVENTS)", "def test_limit_items(self):\n AnnouncementFactory(\n title=\"Not going to be there\",\n expires_at=timezone.now() - datetime.timedelta(days=1),\n )\n for i in range(5):\n AnnouncementFactory()\n\n response = self.get(\"announcements:feed\")\n\n assert \"Not going to be there\" not in response.content.decode()", "def aggregated_results(self, limit=2000) -> List[dict]:\n stored_events = []\n for events in self._iter_events():\n stored_events.extend(events)\n if len(stored_events) >= limit:\n return stored_events[:limit]\n return stored_events", "def test_query_events(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events,\n \"Events do not exist for the date range\"\n )", "def test_finished_events_doesnt_appear_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2010-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2011, 1, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2012, 1, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n num_events,\r\n 0,\r\n msg=\"Убедитесь, что события в прошедшие события не показываются\",\r\n )", "def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)", "def test_channel_messages_unlimited_pagination():\n clear()\n userOne = auth_register('[email protected]', '123abc!@#', 'First', 'User') \n randChannel = channels_create(userOne['token'], 'randChannel', True)\n for _ in range(149):\n message_send(userOne['token'], randChannel['channel_id'], 'Hello')\n messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)\n assert(messages['start'] == 0)\n assert(messages['end'] == 50) \n messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)\n assert(messages2['start'] == 50)\n assert(messages2['end'] == 100) \n messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)\n assert(messages3['start'] == 100)\n assert(messages3['end'] == -1) \n assert(len(messages3['messages']) == 49)\n # an error should be raised when start is beyond 149 messages\n with pytest.raises(InputError): \n channel_messages(userOne['token'], randChannel['channel_id'], 150)", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "def 
test_calendar_query_limited_with_data(self):\n\n self.patch(config, \"MaxQueryWithDataResults\", 1)\n\n def _restoreValueOK(f):\n self.fail(\"REPORT must fail with 403\")\n\n def _restoreValueError(f):\n return None\n\n uids = [r[0] for r in (os.path.splitext(f) for f in os.listdir(self.holidays_dir)) if r[1] == \".ics\"]\n\n d = self.simple_event_query(None, uids)\n d.addCallbacks(_restoreValueOK, _restoreValueError)\n return d", "def test_query_events_invalid_per_page(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events_invalid_per_page,\n \"perPage must be at least 1\"\n )", "def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)", "def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)", "def test_get_filtered_list_limit(self):\n flexmock(errata).should_receive(\"Advisory\").and_return(None)\n\n response = flexmock(status_code=200)\n response.should_receive(\"json\").and_return(test_structures.example_erratum_filtered_list)\n\n flexmock(errata.requests).should_receive(\"get\").and_return(response)\n\n res = errata.get_filtered_list(limit=1)\n self.assertEqual(1, len(res))", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break", "def test_query_events_by_last_date(self):\n events = 
list(query_events_by_last_date(Event.objects.all(), timezone.now()))\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_show1 in events)", "def test_default_limit(self):\n telem = self.create_logs(self.user1, num=200)\n\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n self.assertEqual(100, len(data))", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def test50EventSplit(self):\n\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n eventsPerJob = 50\n jobGroups = jobFactory(events_per_job=eventsPerJob,\n performance=self.performanceParams)\n\n assert len(jobGroups) == 1, \\\n \"ERROR: JobFactory didn't return one JobGroup.\"\n\n assert len(jobGroups[0].jobs) == 2, \\\n \"ERROR: JobFactory created %s jobs not two\" % len(jobGroups[0].jobs)\n\n firstEvents = []\n for job in jobGroups[0].jobs:\n assert job.getFiles(type=\"lfn\") == [\"/some/file/name\"], \\\n \"ERROR: Job contains unknown files.\"\n\n assert job[\"mask\"].getMaxEvents() in [eventsPerJob, 1], \\\n \"ERROR: Job's max events is incorrect.\"\n\n assert job[\"mask\"][\"FirstEvent\"] in [0, eventsPerJob], \\\n \"ERROR: Job's first event is incorrect.\"\n\n assert job[\"mask\"][\"FirstEvent\"] not in firstEvents, \\\n \"ERROR: Job's first event is repeated.\"\n firstEvents.append(job[\"mask\"][\"FirstEvent\"])\n\n return", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def test_query_events_missing_per_page(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events_page_without_per_page,\n \"perPage argument missing\"\n )", "def test_calendar_query_limited_without_data(self):\n\n self.patch(config, \"MaxQueryWithDataResults\", 1)\n\n def _restoreValueError(f):\n self.fail(\"REPORT must not fail with 403\")\n\n uids = [r[0] for r in (os.path.splitext(f) for f in os.listdir(self.holidays_dir)) if r[1] == \".ics\"]\n\n d = self.simple_event_query(None, uids, withData=False)\n d.addErrback(_restoreValueError)\n return d", "def test_mentor_can_list_available_events_in_his_city(self):\r\n\r\n city = CityFactory(name=\"Вермонт\")\r\n other_city = ViewAfishaTests.city\r\n user = UserFactory(profile__city=city)\r\n client = self.return_authorized_user_client(user)\r\n EventFactory.create_batch(10, city=city)\r\n EventFactory.create_batch(100, city=other_city)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n results = response_data.get(\"results\")\r\n\r\n self.assertEqual(\r\n len(results),\r\n 10,\r\n msg=(\r\n \"Проверьте что пользователь видит все доступные события \"\r\n \"в городе\"\r\n ),\r\n )", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)" ]
[ "0.67866373", "0.6778284", "0.6770849", "0.67432696", "0.6477754", "0.6274708", "0.6200425", "0.6163394", "0.61155015", "0.6108119", "0.609398", "0.60556704", "0.60327744", "0.6014211", "0.59874356", "0.5962993", "0.59364414", "0.5924628", "0.5863921", "0.58372223", "0.58341086", "0.58284825", "0.58211243", "0.58014417", "0.57854", "0.5780069", "0.57705224", "0.5768248", "0.5756053", "0.573358" ]
0.71334475
0
Testing list of reported events with a defined starting point and limit
def test_limit_and_from(self) -> None:
    channel = self.make_request(
        "GET",
        self.url + "?from=5&limit=10",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 20)
    self.assertEqual(channel.json_body["next_token"], 15)
    self.assertEqual(len(channel.json_body["event_reports"]), 10)
    self._check_fields(channel.json_body["event_reports"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def events_between(self, starting_measure, starting_offset, ending_measure, ending_offset):\n output_events = []\n for i in range(starting_measure - 1, ending_measure - 1 + 1):\n for event in self.event_groups[i].events:\n if i == starting_measure - 1:\n if i == 0 and event.offset >= starting_offset:\n output_events.append(event)\n elif i != 0 and event.offset > starting_offset:\n output_events.append(event)\n elif i == ending_measure - 1:\n if event.offset < ending_offset and ending_offset != 0:\n output_events.append(event)\n else:\n output_events.append(event)\n return output_events", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def test_query_events(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events,\n \"Events do not exist for the date range\"\n )", "def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)", "def _get_limited_event_history(device_event_file_path,\n event_labels,\n limit,\n timeout=10.0):\n result = []\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n if event_labels is None:\n tac_cmd = [\"tac\", device_event_file_path]\n head_cmd = [\"timeout\", timeout_str, \"head\", \"-n\", str(limit)]\n tac_proc = subprocess.Popen(tac_cmd, stdout=subprocess.PIPE)\n head_proc = subprocess.Popen(\n head_cmd, stdin=tac_proc.stdout, stdout=subprocess.PIPE)\n tac_proc.stdout.close()\n out, _ = head_proc.communicate()\n if head_proc.returncode == 124:\n timedout = True\n tac_proc.terminate()\n json_events = out.splitlines()\n else:\n tac_cmd = [\"tac\", device_event_file_path]\n grep_cmd = [\"timeout\", timeout_str, \"grep\", \"-m\", str(limit), \"-w\"]\n for event_label in event_labels:\n if event_label:\n grep_cmd.append(\"-e\")\n grep_cmd.append(event_label)\n tac_proc = subprocess.Popen(tac_cmd, stdout=subprocess.PIPE)\n grep_proc = subprocess.Popen(\n grep_cmd, stdin=tac_proc.stdout, stdout=subprocess.PIPE)\n tac_proc.stdout.close()\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n tac_proc.terminate()\n json_events = out.splitlines()\n\n return _get_events_from_json_output(json_events, event_labels), timedout", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, 
tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_past_event(self):\n pass", "def test_stream(self):\n with skipping(NotImplementedError):\n self.es = EventStreamsTestClass(streams='recentchange')\n limit = 50\n self.es.set_maximum_items(limit)\n self.assertLength(list(self.es), limit)", "def test_max_events_range(self):\n\n self.log.info(\"Testing max_event counts\")\n enable_failover = True\n timeout_val = 10\n max_plus_1 = CbServer.Failover.MAX_EVENTS + 1\n\n # Set max_events between (min, max)\n for num_events in range(CbServer.Failover.MIN_EVENTS, max_plus_1):\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=num_events)\n self.assertTrue(status, \"Failed to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, num_events)\n\n for num_events in [0, max_plus_1]:\n self.log.info(\"Testing max_event_count=%s\" % num_events)\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=max_plus_1)\n self.assertFalse(status, \"Able to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, CbServer.Failover.MAX_EVENTS)", "def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)", "def get_timeline_events(self, req, start, stop, filters):", "def test_next_window_time_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n # Value 15 will be filtered as it ranges between lower and upper bound limits\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)\n # Let next window time elapse\n time.sleep(4)\n filtered_value = test_window_scheme.filter(self.more_than_upper_bound)\n # None is expected as filtered value because at least one sample has been already passed and\n # value ranges outside lower and upper bound limits\n self.assertEquals(filtered_value, None)", "def get_events(event_list):\n float_events = [float(event) for event in event_list]\n active_events = [event for event in float_events if (event > 0.0)]\n\n events = defaultdict(list)\n\n for event in active_events:\n events[int(np.floor(event / 10000))].append(event % 10000)\n\n return events", "def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = 
client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)", "def test99EventSplit(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n eventsPerJob = 99\n jobGroups = jobFactory(events_per_job=eventsPerJob,\n performance=self.performanceParams)\n\n assert len(jobGroups) == 1, \\\n \"ERROR: JobFactory didn't return one JobGroup.\"\n\n assert len(jobGroups[0].jobs) == 2, \\\n \"ERROR: JobFactory created %s jobs not two\" % len(jobGroups[0].jobs)\n\n firstEvents = []\n for job in jobGroups[0].jobs:\n assert job.getFiles(type=\"lfn\") == [\"/some/file/name\"], \\\n \"ERROR: Job contains unknown files.\"\n self.assertTrue(job[\"mask\"].getMaxEvents() in [eventsPerJob, 1],\n \"ERROR: Job's max events is incorrect.\")\n\n assert job[\"mask\"][\"FirstEvent\"] in [0, eventsPerJob], \\\n \"ERROR: Job's first event is incorrect.\"\n\n assert job[\"mask\"][\"FirstEvent\"] not in firstEvents, \\\n \"ERROR: Job's first event is repeated.\"\n firstEvents.append(job[\"mask\"][\"FirstEvent\"])\n\n return", "def test50EventSplit(self):\n\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n eventsPerJob = 50\n jobGroups = jobFactory(events_per_job=eventsPerJob,\n performance=self.performanceParams)\n\n assert len(jobGroups) == 1, \\\n \"ERROR: JobFactory didn't return one JobGroup.\"\n\n assert len(jobGroups[0].jobs) == 2, \\\n \"ERROR: JobFactory created %s jobs not two\" % len(jobGroups[0].jobs)\n\n firstEvents = []\n for job in jobGroups[0].jobs:\n assert job.getFiles(type=\"lfn\") == [\"/some/file/name\"], \\\n \"ERROR: Job contains unknown files.\"\n\n assert job[\"mask\"].getMaxEvents() in [eventsPerJob, 1], \\\n \"ERROR: Job's max events is incorrect.\"\n\n assert job[\"mask\"][\"FirstEvent\"] in [0, eventsPerJob], \\\n \"ERROR: Job's first event is incorrect.\"\n\n assert job[\"mask\"][\"FirstEvent\"] not in firstEvents, \\\n \"ERROR: Job's first event is repeated.\"\n firstEvents.append(job[\"mask\"][\"FirstEvent\"])\n\n return", "def test_query_events_without_start_and_end_date(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events_without_start_and_end_date,\n \"Page does not exist\"\n )", "def test_query_events_invalid_per_page(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events_invalid_per_page,\n \"perPage must be at least 1\"\n )", "def test_can_specify_number_of_smoothing_intervals(client: Client):\n organization = create_organization(name=\"test org\")\n team = create_team(organization=organization)\n user = create_user(\"user\", \"pass\", organization)\n\n client.force_login(user)\n\n with freeze_time(\"2021-09-20T16:00:00\"):\n journeys_for(\n events_by_person={\n \"abc\": [\n {\"event\": \"$pageview\", \"timestamp\": \"2021-09-01\"},\n {\"event\": \"$pageview\", \"timestamp\": \"2021-09-01\"},\n {\"event\": \"$pageview\", \"timestamp\": \"2021-09-02\"},\n {\"event\": \"$pageview\", \"timestamp\": \"2021-09-03\"},\n {\"event\": \"$pageview\", \"timestamp\": \"2021-09-03\"},\n {\"event\": \"$pageview\", \"timestamp\": 
\"2021-09-03\"},\n ]\n },\n team=team,\n )\n\n interval_3_trend = get_trends_ok(\n client,\n team=team,\n request=TrendsRequest(\n date_from=\"2021-09-01\",\n date_to=\"2021-09-03\",\n interval=\"day\",\n insight=\"TRENDS\",\n display=\"ActionsLineGraph\",\n smoothing_intervals=3,\n events=[\n {\n \"id\": \"$pageview\",\n \"name\": \"$pageview\",\n \"custom_name\": None,\n \"type\": \"events\",\n \"order\": 0,\n \"properties\": [],\n }\n ],\n ),\n )\n\n assert interval_3_trend == {\n \"is_cached\": False,\n \"last_refresh\": \"2021-09-20T16:00:00Z\",\n \"next\": None,\n \"timezone\": \"UTC\",\n \"result\": [\n {\n \"action\": ANY,\n \"label\": \"$pageview\",\n \"count\": 5,\n \"data\": [2.0, 1, 2.0],\n \"labels\": [\"1-Sep-2021\", \"2-Sep-2021\", \"3-Sep-2021\"],\n \"days\": [\"2021-09-01\", \"2021-09-02\", \"2021-09-03\"],\n \"persons_urls\": ANY,\n \"filter\": ANY,\n }\n ],\n }\n\n interval_2_trend = get_trends_ok(\n client,\n team=team,\n request=TrendsRequest(\n date_from=\"2021-09-01\",\n date_to=\"2021-09-03\",\n interval=\"day\",\n insight=\"TRENDS\",\n display=\"ActionsLineGraph\",\n smoothing_intervals=2,\n events=[\n {\n \"id\": \"$pageview\",\n \"name\": \"$pageview\",\n \"custom_name\": None,\n \"type\": \"events\",\n \"order\": 0,\n \"properties\": [],\n }\n ],\n ),\n )\n\n assert interval_2_trend == {\n \"is_cached\": False,\n \"last_refresh\": \"2021-09-20T16:00:00Z\",\n \"next\": None,\n \"timezone\": \"UTC\",\n \"result\": [\n {\n \"action\": ANY,\n \"label\": \"$pageview\",\n \"count\": 5,\n \"data\": [2.0, 1, 2.0],\n \"labels\": [\"1-Sep-2021\", \"2-Sep-2021\", \"3-Sep-2021\"],\n \"days\": [\"2021-09-01\", \"2021-09-02\", \"2021-09-03\"],\n \"persons_urls\": ANY,\n \"filter\": ANY,\n }\n ],\n }\n\n interval_1_trend = get_trends_ok(\n client,\n team=team,\n request=TrendsRequest(\n date_from=\"2021-09-01\",\n date_to=\"2021-09-03\",\n interval=\"day\",\n insight=\"TRENDS\",\n display=\"ActionsLineGraph\",\n smoothing_intervals=1,\n events=[\n {\n \"id\": \"$pageview\",\n \"name\": \"$pageview\",\n \"custom_name\": None,\n \"type\": \"events\",\n \"order\": 0,\n \"properties\": [],\n }\n ],\n ),\n )\n\n assert interval_1_trend == {\n \"is_cached\": False,\n \"last_refresh\": \"2021-09-20T16:00:00Z\",\n \"next\": None,\n \"timezone\": \"UTC\",\n \"result\": [\n {\n \"action\": {\n \"id\": \"$pageview\",\n \"type\": \"events\",\n \"order\": 0,\n \"name\": \"$pageview\",\n \"custom_name\": None,\n \"math\": None,\n \"math_property\": None,\n \"math_group_type_index\": ANY,\n \"properties\": {},\n },\n \"label\": \"$pageview\",\n \"count\": 6.0,\n \"data\": [2.0, 1.0, 3.0],\n \"labels\": [\"1-Sep-2021\", \"2-Sep-2021\", \"3-Sep-2021\"],\n \"days\": [\"2021-09-01\", \"2021-09-02\", \"2021-09-03\"],\n \"persons_urls\": ANY,\n \"filter\": ANY,\n }\n ],\n }", "def test_finished_events_doesnt_appear_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2010-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2011, 1, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2012, 1, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n num_events,\r\n 0,\r\n msg=\"Убедитесь, что события в прошедшие события не 
показываются\",\r\n )", "def findontarget(starttime, event_list):\n for r in event_list:\n if r[0]==18 and r[1]>starttime: return r[1]\n return None", "def test_channel_messages_unlimited_pagination():\n clear()\n userOne = auth_register('[email protected]', '123abc!@#', 'First', 'User') \n randChannel = channels_create(userOne['token'], 'randChannel', True)\n for _ in range(149):\n message_send(userOne['token'], randChannel['channel_id'], 'Hello')\n messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)\n assert(messages['start'] == 0)\n assert(messages['end'] == 50) \n messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)\n assert(messages2['start'] == 50)\n assert(messages2['end'] == 100) \n messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)\n assert(messages3['start'] == 100)\n assert(messages3['end'] == -1) \n assert(len(messages3['messages']) == 49)\n # an error should be raised when start is beyond 149 messages\n with pytest.raises(InputError): \n channel_messages(userOne['token'], randChannel['channel_id'], 150)", "def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def find_consecutive_exceedences_above_threshold(\n events, n_events, mhw, joinAcrossGaps, maxGap\n):\n for ev in range(1, n_events + 1): # for each event\n event_duration = (events == ev).sum()\n if event_duration < minDuration: # is it longer than threshold?\n continue\n # extract the t where event starts and ends\n mhw[\"time_start\"].append(t[np.where(events == ev)[0][0]])\n mhw[\"time_end\"].append(t[np.where(events == ev)[0][-1]])\n\n # Link heat waves that occur before and after a short gap\n if joinAcrossGaps:\n mhw = join_events_across_gaps(maxGap, mhw)\n\n return mhw", "def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)", "def test_list_dates(date_generated: list, end: datetime, start: datetime):\n assert min(date_generated) == start, 'test fail, the first day should be ' \\\n + str(start) +'but now is '+ str(min(date_generated))\n assert max(date_generated) == end - timedelta(days=1), 'test fail, the last day should be ' \\\n + str(end) +'but now is '+ str(max(date_generated))", "def testExactEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n jobGroups = jobFactory(events_per_job=self.eventsPerJob,\n performance=self.performanceParams)\n\n assert len(jobGroups) == 1, \\\n \"ERROR: JobFactory didn't return one JobGroup.\"\n\n assert len(jobGroups[0].jobs) == 1, \\\n \"ERROR: JobFactory didn't create a single job.\"\n\n job = jobGroups[0].jobs.pop()\n\n assert job.getFiles(type=\"lfn\") == [\"/some/file/name\"], \\\n \"ERROR: Job contains unknown files.\"\n\n 
self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob, \"ERROR: Job's max events is incorrect.\")\n\n assert job[\"mask\"][\"FirstEvent\"] == 0, \\\n \"ERROR: Job's first event is incorrect.\"\n\n return" ]
[ "0.62582815", "0.62312603", "0.6084364", "0.60386384", "0.60299057", "0.5983324", "0.59013546", "0.5806697", "0.5779787", "0.5741872", "0.57410085", "0.5736532", "0.57252514", "0.5719931", "0.5711772", "0.5692633", "0.5652234", "0.5635039", "0.5623844", "0.5592603", "0.55861425", "0.55627155", "0.5552", "0.5535317", "0.5532469", "0.55091", "0.5504244", "0.550228", "0.54944557", "0.5481902" ]
0.6490504
0
Testing list of reported events with a filter of room
def test_filter_room(self) -> None:
    channel = self.make_request(
        "GET",
        self.url + "?room_id=%s" % self.room_id1,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 10)
    self.assertEqual(len(channel.json_body["event_reports"]), 10)
    self.assertNotIn("next_token", channel.json_body)
    self._check_fields(channel.json_body["event_reports"])

    for report in channel.json_body["event_reports"]:
        self.assertEqual(report["room_id"], self.room_id1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_user_and_room(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?user_id=%s&room_id=%s\" % (self.other_user, self.room_id1),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 5)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"event_reports\"])\n\n for report in channel.json_body[\"event_reports\"]:\n self.assertEqual(report[\"user_id\"], self.other_user)\n self.assertEqual(report[\"room_id\"], self.room_id1)", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)", "def test_mentor_can_list_available_events_in_his_city(self):\r\n\r\n city = CityFactory(name=\"Вермонт\")\r\n other_city = ViewAfishaTests.city\r\n user = UserFactory(profile__city=city)\r\n client = self.return_authorized_user_client(user)\r\n EventFactory.create_batch(10, city=city)\r\n EventFactory.create_batch(100, city=other_city)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n results = response_data.get(\"results\")\r\n\r\n self.assertEqual(\r\n len(results),\r\n 10,\r\n msg=(\r\n \"Проверьте что пользователь видит все доступные события \"\r\n \"в городе\"\r\n ),\r\n )", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "def get_events(room, tipo, date):\n if(type(tipo) is str):\n tipo = tipo.upper()\n target_events = []\n events = room['events']\n \n for event in events:\n if (tipo == None and date == None):\n target_events.append(format_event(event))\n elif event['type'] == tipo and date == None:\n target_events.append(format_event(event))\n elif tipo == None and datetime.strptime(event['day'], \"%d/%m/%Y\") == date:\n target_events.append(format_event(event))\n elif event['type'] == tipo and datetime.strptime(event['day'], \"%d/%m/%Y\") == date:\n target_events.append(format_event(event))\n \n room['events'] = target_events", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def test_finished_events_doesnt_appear_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2010-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2011, 1, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2012, 1, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n 
self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n num_events,\r\n 0,\r\n msg=\"Убедитесь, что события в прошедшие события не показываются\",\r\n )", "def test_query_events_by_type(self):\n events = list(query_events_by_type(Event.objects.all(), 'show'))\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_film in events)\n events = list(query_events_by_type(Event.objects.all(), 'film'))\n self.assertFalse(self.event_show1 in events)\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_film in events)", "def test_query_events(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events,\n \"Events do not exist for the date range\"\n )", "def test_query_events_by_text_search(self):\n events = list(query_events_by_text_search(Event.objects.all(), 'Film'))\n self.assertTrue(self.event_film in events)\n self.assertFalse(self.event_show1 in events)", "def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)", "def test_events_list(self):\n response = self.client.get(url_for(\n 'issues.eventsresourse',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def test_query_events_by_last_date(self):\n events = list(query_events_by_last_date(Event.objects.all(), timezone.now()))\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_show1 in events)", "def get_some_events(cls, field, filter):\n try:\n events = list(events_coll.find({field: filter}))\n events_list = []\n if events is not None:\n for event in events:\n one_event = cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)", "def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break", "def test_query_events_by_first_date(self):\n events = list(query_events_by_first_date(Event.objects.all(), timezone.now()))\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_show1 in events)", "def filter_events_for_clients(self, user_tuples, events, event_id_to_state):\n forgotten = yield defer.gatherResults([\n self.store.who_forgot_in_room(\n room_id,\n )\n for room_id in frozenset(e.room_id for e in events)\n ], consumeErrors=True)\n\n # Set of membership event_ids that have been forgotten\n event_id_forgotten = frozenset(\n row[\"event_id\"] for rows in forgotten for row in rows\n )\n\n def allowed(event, user_id, is_peeking):\n state = event_id_to_state[event.event_id]\n\n # get the room_visibility at the time of the event.\n visibility_event = state.get((EventTypes.RoomHistoryVisibility, \"\"), None)\n if visibility_event:\n visibility = visibility_event.content.get(\"history_visibility\", \"shared\")\n else:\n visibility = \"shared\"\n\n if visibility not in VISIBILITY_PRIORITY:\n visibility = \"shared\"\n\n # if it was world_readable, it's easy: everyone can read it\n if visibility == \"world_readable\":\n return True\n\n # Always allow history visibility events on boundaries. 
This is done\n # by setting the effective visibility to the least restrictive\n # of the old vs new.\n if event.type == EventTypes.RoomHistoryVisibility:\n prev_content = event.unsigned.get(\"prev_content\", {})\n prev_visibility = prev_content.get(\"history_visibility\", None)\n\n if prev_visibility not in VISIBILITY_PRIORITY:\n prev_visibility = \"shared\"\n\n new_priority = VISIBILITY_PRIORITY.index(visibility)\n old_priority = VISIBILITY_PRIORITY.index(prev_visibility)\n if old_priority < new_priority:\n visibility = prev_visibility\n\n # get the user's membership at the time of the event. (or rather,\n # just *after* the event. Which means that people can see their\n # own join events, but not (currently) their own leave events.)\n membership_event = state.get((EventTypes.Member, user_id), None)\n if membership_event:\n if membership_event.event_id in event_id_forgotten:\n membership = None\n else:\n membership = membership_event.membership\n else:\n membership = None\n\n # if the user was a member of the room at the time of the event,\n # they can see it.\n if membership == Membership.JOIN:\n return True\n\n if visibility == \"joined\":\n # we weren't a member at the time of the event, so we can't\n # see this event.\n return False\n\n elif visibility == \"invited\":\n # user can also see the event if they were *invited* at the time\n # of the event.\n return membership == Membership.INVITE\n\n else:\n # visibility is shared: user can also see the event if they have\n # become a member since the event\n #\n # XXX: if the user has subsequently joined and then left again,\n # ideally we would share history up to the point they left. But\n # we don't know when they left.\n return not is_peeking\n\n defer.returnValue({\n user_id: [\n event\n for event in events\n if allowed(event, user_id, is_peeking)\n ]\n for user_id, is_peeking in user_tuples\n })", "def test_filter_data_by_race():\n data = race.filter_data_by_race(random.randint(1, 3))\n assert len(data) == 11\n assert type(data) == list\n for datum in data:\n assert type(datum) == dict", "def test_getEventsForItinerary(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n\n rv = self.json_get('/getEventsForItinerary/bbbb', date)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert 'Itinerary for the day not found' in str(rv.data)\n\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert '{\"events\": []}' in str(rv.data)\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, 
page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)", "def test_filter(klass, days_agos, AnalyzedAgileTicket):\n issue_list_kwargs = []\n for i in range(1, 3): # 2 issues with 2 day lead\n kwargs = dict(\n key=\"TEST-{}\".format(i),\n committed=dict(state=\"Committed\", entered_at=days_agos[2]),\n started=dict(state=\"Started\", entered_at=days_agos[2]),\n ended=dict(state=\"Ended\", entered_at=days_agos[0]),\n )\n issue_list_kwargs.append(kwargs)\n\n issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]\n issue_out_of_range = AnalyzedAgileTicket(\n key=\"TEST-OOR\",\n committed=dict(state=\"Committed\", entered_at=days_agos[42]),\n started=dict(state=\"Started\", entered_at=days_agos[44]),\n ended=dict(state=\"Ended\", entered_at=days_agos[45]),\n )\n issue_list.append(issue_out_of_range)\n\n r = klass(\n title=\"Cycle Time Distribution Past 30 days\",\n start_date=days_agos[30],\n end_date=days_agos[0]\n )\n filtered_issues = r.filter_issues(issue_list)\n\n assert r.start_date > issue_out_of_range.ended['entered_at']\n assert len(filtered_issues) == 2", "def eventList(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tevents = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]\n\treturn events", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_events_responder(self):\n pass", "def test_filters_anonymous_with_empty_events():\n event = {}\n with pytest.raises(EventKeyError):\n filters.anonymous(event)", "def test_getEventsFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + events[0]['start'] + events[0]['end'])\n invuid = '00000000000000000000000'\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n rv = self.json_get('/getEventFromId/alex', {'uid': uid})\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def test_any(self):\n\n eventFilter = EventFilter(\"*\")\n\n # Start a session\n traceids = ['foobar']\n eventCallback = Mock()\n session = eventFilter.start(traceids, eventCallback)\n\n # The first FooEvent should be handled\n fooEvent1 = FooEvent(traceid=traceids)\n session.handle(fooEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n ])\n\n # The second FooEvent should also be handled\n fooEvent2 = FooEvent(traceid=traceids)\n session.handle(fooEvent2)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n ])\n\n # The BarEvent should also be handled\n barEvent1 = BarEvent(traceid=traceids)\n session.handle(barEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n call(barEvent1),\n ])\n\n # No more events should be added when the session is finalized\n 
session.finalize()\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n call(barEvent1),\n ])", "def get_timeline_events(self, req, start, stop, filters):", "def events(bot, event, *args):\n yield from _printEventList(bot, event)", "def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))", "def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])" ]
[ "0.70420647", "0.65640336", "0.6479507", "0.6427254", "0.63308644", "0.61916304", "0.61171573", "0.60622156", "0.5943582", "0.5896896", "0.57922286", "0.5764191", "0.57424766", "0.5734207", "0.5713547", "0.56532145", "0.5650365", "0.5642602", "0.56422746", "0.562086", "0.5620261", "0.5606925", "0.5590376", "0.5543336", "0.55409014", "0.5535076", "0.553402", "0.55246335", "0.552307", "0.5522973" ]
0.7759454
0
Testing list of reported events with a filter of user
def test_filter_user(self) -> None: channel = self.make_request( "GET", self.url + "?user_id=%s" % self.other_user, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 10) self.assertEqual(len(channel.json_body["event_reports"]), 10) self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["event_reports"]) for report in channel.json_body["event_reports"]: self.assertEqual(report["user_id"], self.other_user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_user_and_room(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?user_id=%s&room_id=%s\" % (self.other_user, self.room_id1),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 5)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"event_reports\"])\n\n for report in channel.json_body[\"event_reports\"]:\n self.assertEqual(report[\"user_id\"], self.other_user)\n self.assertEqual(report[\"room_id\"], self.room_id1)", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)", "def _filter_events_for_client(self, user_id, events, is_peeking=False):\n types = (\n (EventTypes.RoomHistoryVisibility, \"\"),\n (EventTypes.Member, user_id),\n )\n event_id_to_state = yield self.store.get_state_for_events(\n frozenset(e.event_id for e in events),\n types=types\n )\n res = yield self.filter_events_for_clients(\n [(user_id, is_peeking)], events, event_id_to_state\n )\n defer.returnValue(res.get(user_id, []))", "def test_filters_anonymous_filtering():\n event = {\"username\": \"john\"}\n anonymous_event = {\"username\": \"\"}\n assert filters.anonymous(event) == event\n assert filters.anonymous(anonymous_event) is None", "def filter_events_for_clients(self, user_tuples, events, event_id_to_state):\n forgotten = yield defer.gatherResults([\n self.store.who_forgot_in_room(\n room_id,\n )\n for room_id in frozenset(e.room_id for e in events)\n ], consumeErrors=True)\n\n # Set of membership event_ids that have been forgotten\n event_id_forgotten = frozenset(\n row[\"event_id\"] for rows in forgotten for row in rows\n )\n\n def allowed(event, user_id, is_peeking):\n state = event_id_to_state[event.event_id]\n\n # get the room_visibility at the time of the event.\n visibility_event = state.get((EventTypes.RoomHistoryVisibility, \"\"), None)\n if visibility_event:\n visibility = visibility_event.content.get(\"history_visibility\", \"shared\")\n else:\n visibility = \"shared\"\n\n if visibility not in VISIBILITY_PRIORITY:\n visibility = \"shared\"\n\n # if it was world_readable, it's easy: everyone can read it\n if visibility == \"world_readable\":\n return True\n\n # Always allow history visibility events on boundaries. This is done\n # by setting the effective visibility to the least restrictive\n # of the old vs new.\n if event.type == EventTypes.RoomHistoryVisibility:\n prev_content = event.unsigned.get(\"prev_content\", {})\n prev_visibility = prev_content.get(\"history_visibility\", None)\n\n if prev_visibility not in VISIBILITY_PRIORITY:\n prev_visibility = \"shared\"\n\n new_priority = VISIBILITY_PRIORITY.index(visibility)\n old_priority = VISIBILITY_PRIORITY.index(prev_visibility)\n if old_priority < new_priority:\n visibility = prev_visibility\n\n # get the user's membership at the time of the event. (or rather,\n # just *after* the event. 
Which means that people can see their\n # own join events, but not (currently) their own leave events.)\n membership_event = state.get((EventTypes.Member, user_id), None)\n if membership_event:\n if membership_event.event_id in event_id_forgotten:\n membership = None\n else:\n membership = membership_event.membership\n else:\n membership = None\n\n # if the user was a member of the room at the time of the event,\n # they can see it.\n if membership == Membership.JOIN:\n return True\n\n if visibility == \"joined\":\n # we weren't a member at the time of the event, so we can't\n # see this event.\n return False\n\n elif visibility == \"invited\":\n # user can also see the event if they were *invited* at the time\n # of the event.\n return membership == Membership.INVITE\n\n else:\n # visibility is shared: user can also see the event if they have\n # become a member since the event\n #\n # XXX: if the user has subsequently joined and then left again,\n # ideally we would share history up to the point they left. But\n # we don't know when they left.\n return not is_peeking\n\n defer.returnValue({\n user_id: [\n event\n for event in events\n if allowed(event, user_id, is_peeking)\n ]\n for user_id, is_peeking in user_tuples\n })", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)", "def test_query_events_by_last_date(self):\n events = list(query_events_by_last_date(Event.objects.all(), timezone.now()))\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_show1 in events)", "def test_query_events(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events,\n \"Events do not exist for the date range\"\n )", "def test_query_events_by_first_date(self):\n events = list(query_events_by_first_date(Event.objects.all(), timezone.now()))\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_show1 in events)", "def test_get_users_eligible_for_fist_notification(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_24_months_ago = datetime.utcnow() - timedelta(days=750)\n criteria = {\"last_login_date\": _datetime_24_months_ago}\n criteria_one = {\"account_creation_date\": _datetime_24_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_1, criteria_one)\n self.update_test_data(self.user_3, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(4, len(users))\n 
self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertIn(self.user_1, users)\n self.assertIn(self.user_3, users)", "def test_mentor_can_list_available_events_in_his_city(self):\r\n\r\n city = CityFactory(name=\"Вермонт\")\r\n other_city = ViewAfishaTests.city\r\n user = UserFactory(profile__city=city)\r\n client = self.return_authorized_user_client(user)\r\n EventFactory.create_batch(10, city=city)\r\n EventFactory.create_batch(100, city=other_city)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n results = response_data.get(\"results\")\r\n\r\n self.assertEqual(\r\n len(results),\r\n 10,\r\n msg=(\r\n \"Проверьте что пользователь видит все доступные события \"\r\n \"в городе\"\r\n ),\r\n )", "def test_filters_anonymous_with_empty_events():\n event = {}\n with pytest.raises(EventKeyError):\n filters.anonymous(event)", "def test_finished_events_doesnt_appear_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2010-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2011, 1, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2012, 1, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n num_events,\r\n 0,\r\n msg=\"Убедитесь, что события в прошедшие события не показываются\",\r\n )", "def tempfeeder_exp_nonzerotest_users():\n\n return [ user for user in tempfeeder_exp().user_ids if all(tempfeeder_exp()[user]['Load']['2005-10-01 00:00':]) ]", "def test_apply_filter_multiple(app):\n with app.app_context():\n filters = [{'column': 'id', 'type': 'geq',\n 'value': '1'}, {'column': 'last_seen', 'type': 'leq',\n 'value': 121212121}]\n users = User.query\n for filter_ in filters:\n users = apply_filter(users, User, filter_)\n\n assert str(users.whereclause) == \\\n 'users.id >= :id_1 AND users.last_seen <= :last_seen_1'", "def test_filed_of_study_filter(self):\n # A Job in the database\n job_filter = {\"field_of_study\" : \"computer ENGINEERING\"}\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users with given filter\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"filter\":json.dumps(job_filter)})\n search_result = json.loads(resp.content)\n for user in search_result:\n self.assertIn(job_filter.title(),user['field_of_study'],\"Field of study filter doesn't run correctly\")", "def test_query_events_by_type(self):\n events = list(query_events_by_type(Event.objects.all(), 'show'))\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_film in events)\n events = list(query_events_by_type(Event.objects.all(), 'film'))\n self.assertFalse(self.event_show1 in events)\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_film in events)", "async def test_filtered_denylist(hass, mock_client):\n handler_method = await _setup(\n hass,\n {\n \"include_entities\": [\"fake.included\", \"test.excluded_test\"],\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"*.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n 
FilterTest(\"fake.included\", True),\n FilterTest(\"alt_fake.excluded_test\", False),\n FilterTest(\"test.excluded_test\", True),\n FilterTest(\"not_real.excluded\", False),\n FilterTest(\"not_real.included\", True),\n ]\n\n for test in tests:\n event = make_event(test.id)\n handler_method(event)\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def get_group_restricted_events(user, all_events=False):\n types_allowed = get_types_allowed(user)\n\n if all_events:\n return Event.objects.filter(event_type__in=types_allowed)\n else:\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)", "def test_list_filtering(self):\n # Test the \"all\" response.\n url = '/api/users/?all=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertContains(response, self.shared.email)\n # Test filtering by ad_deleted.\n url = '/api/users/?ad_deleted=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n url = '/api/users/?ad_deleted=false'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, self.del_user.email)\n self.assertContains(response, self.user1.email)\n # Test filtering by email (should return only one object).\n url = '/api/users/?email={}'.format(self.user1.email)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by GUID (should return only one object).\n url = '/api/users/?ad_guid={}'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by cost centre (should return all, inc. 
inactive and contractors).\n url = '/api/users/?cost_centre={}'.format(self.cc2.code)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user2.email)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n self.assertNotContains(response, self.shared.email) # Belongs to CC1.\n # Test filtering by O365 licence status.\n self.user1.o365_licence = True\n self.user1.save()\n url = '/api/users/?o365_licence=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)", "def test_module(client, first_fetch_time, event_type_filter):\n since_time, _ = parse_date_range(first_fetch_time, date_format=DATE_FORMAT, utc=True)\n client.get_events(since_time=since_time, event_type_filter=event_type_filter)\n\n # test was successful\n return 'ok'", "def test_filter_4(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'location-includes': 'i',\n 'following': 'K', 'follower': 'Ken'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = []\n self.assertEqual(actual, expected)", "def get_group_restricted_events(user):\n types_allowed = []\n\n groups = user.groups.all()\n\n if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False):\n return Event.objects.filter(attendance_event__isnull=False)\n\n for group in groups:\n if group.name == 'arrKom':\n types_allowed.append(1) # sosialt\n types_allowed.append(4) # utflukt\n\n if group.name == 'bedKom':\n types_allowed.append(2) # bedriftspresentasjon\n\n if group.name == 'fagKom':\n types_allowed.append(3) # kurs\n\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)", "def test_multi_attrib_and(self):\n\n eventFilter = EventFilter(\"FooEvent[a=He,b=Lo]\")\n\n # Start a session\n traceids = ['foobar']\n eventCallback = Mock()\n session = eventFilter.start(traceids, eventCallback)\n\n # The first FooEvent should not be handled\n fooEvent1 = FooEvent(a=\"He\", b=\"Zo\", traceid=traceids)\n session.handle(fooEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n ])\n\n # The second FooEvent should be handled\n fooEvent2 = FooEvent(a=\"He\", b=\"Lo\", traceid=traceids)\n session.handle(fooEvent2)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent2),\n ])\n\n # The BarEvent should not be handled\n barEvent = BarEvent(traceid=traceids)\n session.handle(barEvent)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent2),\n ])\n\n # No more events should be added when the session is finalized\n session.finalize()\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent2),\n ])", "def test_get_users_eligible_for_second_notification(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_30_months_ago = datetime.utcnow() - timedelta(days=1064)\n criteria = {\"last_login_date\": _datetime_30_months_ago}\n criteria_one = {\"account_creation_date\": _datetime_30_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_1, criteria_one)\n self.update_test_data(self.user_3, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-second-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n 
users = response.get_json()\n self.assertEqual(4, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertIn(self.user_1, users)\n self.assertIn(self.user_3, users)", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_get_users_ten_filter_ts_created_at(app, add_ten_users):\n with app.app_context():\n add_ten_users()\n filters = [dict(column='id', type='geq', value=5)]\n users = get_entities(User, 2, 3, filters,\n dict(column='ts_created_at', dir='desc'))\n assert str(users.query._order_by[0]) == 'users.created_at DESC'\n assert len(users.items) == 3\n assert users.total == 6", "def test_query_events_by_text_search(self):\n events = list(query_events_by_text_search(Event.objects.all(), 'Film'))\n self.assertTrue(self.event_film in events)\n self.assertFalse(self.event_show1 in events)" ]
[ "0.6936873", "0.6494265", "0.6479182", "0.64130163", "0.63875455", "0.6339563", "0.6235167", "0.610124", "0.60740095", "0.60681087", "0.603701", "0.6025905", "0.60069674", "0.5936405", "0.59047425", "0.58318835", "0.58245337", "0.58144695", "0.5805334", "0.5790437", "0.5780632", "0.57350206", "0.5733748", "0.5732555", "0.5722192", "0.57205015", "0.57191944", "0.5687497", "0.56782895", "0.5676656" ]
0.73923767
0
Testing list of reported events with a filter of user and room
def test_filter_user_and_room(self) -> None: channel = self.make_request( "GET", self.url + "?user_id=%s&room_id=%s" % (self.other_user, self.room_id1), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 5) self.assertEqual(len(channel.json_body["event_reports"]), 5) self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["event_reports"]) for report in channel.json_body["event_reports"]: self.assertEqual(report["user_id"], self.other_user) self.assertEqual(report["room_id"], self.room_id1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_room(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?room_id=%s\" % self.room_id1,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 10)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"event_reports\"])\n\n for report in channel.json_body[\"event_reports\"]:\n self.assertEqual(report[\"room_id\"], self.room_id1)", "def filter_events_for_clients(self, user_tuples, events, event_id_to_state):\n forgotten = yield defer.gatherResults([\n self.store.who_forgot_in_room(\n room_id,\n )\n for room_id in frozenset(e.room_id for e in events)\n ], consumeErrors=True)\n\n # Set of membership event_ids that have been forgotten\n event_id_forgotten = frozenset(\n row[\"event_id\"] for rows in forgotten for row in rows\n )\n\n def allowed(event, user_id, is_peeking):\n state = event_id_to_state[event.event_id]\n\n # get the room_visibility at the time of the event.\n visibility_event = state.get((EventTypes.RoomHistoryVisibility, \"\"), None)\n if visibility_event:\n visibility = visibility_event.content.get(\"history_visibility\", \"shared\")\n else:\n visibility = \"shared\"\n\n if visibility not in VISIBILITY_PRIORITY:\n visibility = \"shared\"\n\n # if it was world_readable, it's easy: everyone can read it\n if visibility == \"world_readable\":\n return True\n\n # Always allow history visibility events on boundaries. This is done\n # by setting the effective visibility to the least restrictive\n # of the old vs new.\n if event.type == EventTypes.RoomHistoryVisibility:\n prev_content = event.unsigned.get(\"prev_content\", {})\n prev_visibility = prev_content.get(\"history_visibility\", None)\n\n if prev_visibility not in VISIBILITY_PRIORITY:\n prev_visibility = \"shared\"\n\n new_priority = VISIBILITY_PRIORITY.index(visibility)\n old_priority = VISIBILITY_PRIORITY.index(prev_visibility)\n if old_priority < new_priority:\n visibility = prev_visibility\n\n # get the user's membership at the time of the event. (or rather,\n # just *after* the event. Which means that people can see their\n # own join events, but not (currently) their own leave events.)\n membership_event = state.get((EventTypes.Member, user_id), None)\n if membership_event:\n if membership_event.event_id in event_id_forgotten:\n membership = None\n else:\n membership = membership_event.membership\n else:\n membership = None\n\n # if the user was a member of the room at the time of the event,\n # they can see it.\n if membership == Membership.JOIN:\n return True\n\n if visibility == \"joined\":\n # we weren't a member at the time of the event, so we can't\n # see this event.\n return False\n\n elif visibility == \"invited\":\n # user can also see the event if they were *invited* at the time\n # of the event.\n return membership == Membership.INVITE\n\n else:\n # visibility is shared: user can also see the event if they have\n # become a member since the event\n #\n # XXX: if the user has subsequently joined and then left again,\n # ideally we would share history up to the point they left. 
But\n # we don't know when they left.\n return not is_peeking\n\n defer.returnValue({\n user_id: [\n event\n for event in events\n if allowed(event, user_id, is_peeking)\n ]\n for user_id, is_peeking in user_tuples\n })", "def test_mentor_can_list_available_events_in_his_city(self):\r\n\r\n city = CityFactory(name=\"Вермонт\")\r\n other_city = ViewAfishaTests.city\r\n user = UserFactory(profile__city=city)\r\n client = self.return_authorized_user_client(user)\r\n EventFactory.create_batch(10, city=city)\r\n EventFactory.create_batch(100, city=other_city)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n results = response_data.get(\"results\")\r\n\r\n self.assertEqual(\r\n len(results),\r\n 10,\r\n msg=(\r\n \"Проверьте что пользователь видит все доступные события \"\r\n \"в городе\"\r\n ),\r\n )", "def test_filter_user(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?user_id=%s\" % self.other_user,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 10)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"event_reports\"])\n\n for report in channel.json_body[\"event_reports\"]:\n self.assertEqual(report[\"user_id\"], self.other_user)", "def get_events(room, tipo, date):\n if(type(tipo) is str):\n tipo = tipo.upper()\n target_events = []\n events = room['events']\n \n for event in events:\n if (tipo == None and date == None):\n target_events.append(format_event(event))\n elif event['type'] == tipo and date == None:\n target_events.append(format_event(event))\n elif tipo == None and datetime.strptime(event['day'], \"%d/%m/%Y\") == date:\n target_events.append(format_event(event))\n elif event['type'] == tipo and datetime.strptime(event['day'], \"%d/%m/%Y\") == date:\n target_events.append(format_event(event))\n \n room['events'] = target_events", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "def test_query_events(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events,\n \"Events do not exist for the date range\"\n )", "def _filter_events_for_client(self, user_id, events, is_peeking=False):\n types = (\n (EventTypes.RoomHistoryVisibility, \"\"),\n (EventTypes.Member, user_id),\n )\n event_id_to_state = yield 
self.store.get_state_for_events(\n frozenset(e.event_id for e in events),\n types=types\n )\n res = yield self.filter_events_for_clients(\n [(user_id, is_peeking)], events, event_id_to_state\n )\n defer.returnValue(res.get(user_id, []))", "def _create_event_and_report_without_parameters(\n self, room_id: str, user_tok: str\n ) -> None:\n resp = self.helper.send(room_id, tok=user_tok)\n event_id = resp[\"event_id\"]\n\n channel = self.make_request(\n \"POST\",\n \"rooms/%s/report/%s\" % (room_id, event_id),\n {},\n access_token=user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def test_finished_events_doesnt_appear_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2010-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2011, 1, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2012, 1, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n num_events,\r\n 0,\r\n msg=\"Убедитесь, что события в прошедшие события не показываются\",\r\n )", "def _create_event_and_report(self, room_id: str, user_tok: str) -> None:\n resp = self.helper.send(room_id, tok=user_tok)\n event_id = resp[\"event_id\"]\n\n channel = self.make_request(\n \"POST\",\n \"rooms/%s/report/%s\" % (room_id, event_id),\n {\"score\": -100, \"reason\": \"this makes me sad\"},\n access_token=user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)", "def _create_event_and_report(self, room_id: str, user_tok: str) -> None:\n resp = self.helper.send(room_id, tok=user_tok)\n event_id = resp[\"event_id\"]\n\n channel = self.make_request(\n \"POST\",\n \"rooms/%s/report/%s\" % (room_id, event_id),\n {\"score\": -100, \"reason\": \"this makes me sad\"},\n access_token=user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)", "def test_query_events_by_type(self):\n events = list(query_events_by_type(Event.objects.all(), 'show'))\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_film in events)\n events = list(query_events_by_type(Event.objects.all(), 'film'))\n self.assertFalse(self.event_show1 in events)\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_film in events)", "def test_mentor_sees_events_in_the_his_own_city_only(self):\r\n\r\n other_city = CityFactory()\r\n user = ViewAfishaTests.mentor\r\n user_other_city = UserFactory(profile__city=other_city)\r\n EventFactory.create_batch(10, city=other_city)\r\n\r\n client_user = self.return_authorized_user_client(user)\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n count = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n count,\r\n 0,\r\n msg=(\r\n \"Убедитесь, что пользователю не возвращаются мероприятия \"\r\n \"в других городах.\"\r\n ),\r\n )\r\n\r\n client_other_user = self.return_authorized_user_client(user_other_city)\r\n response_data = client_other_user.get(EVENTS_URL, format=\"json\").data\r\n count = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n count,\r\n 10,\r\n msg=(\r\n \"Убедитесь, что 
пользователю показывается мероприятие в его \"\r\n \"городе.\"\r\n ),\r\n )", "def test_query_events_by_last_date(self):\n events = list(query_events_by_last_date(Event.objects.all(), timezone.now()))\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_show1 in events)", "def test_getEventsForItinerary(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n\n rv = self.json_get('/getEventsForItinerary/bbbb', date)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert 'Itinerary for the day not found' in str(rv.data)\n\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert '{\"events\": []}' in str(rv.data)\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def test_getEventsFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + events[0]['start'] + events[0]['end'])\n invuid = '00000000000000000000000'\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n rv = self.json_get('/getEventFromId/alex', {'uid': uid})\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def test_query_events_by_text_search(self):\n events = list(query_events_by_text_search(Event.objects.all(), 'Film'))\n self.assertTrue(self.event_film in events)\n self.assertFalse(self.event_show1 in events)", "def test_query_events_by_first_date(self):\n events = list(query_events_by_first_date(Event.objects.all(), timezone.now()))\n self.assertTrue(self.event_show2 in events)\n self.assertFalse(self.event_show1 in events)", "def test_filters_anonymous_filtering():\n event = {\"username\": \"john\"}\n anonymous_event = {\"username\": \"\"}\n assert filters.anonymous(event) == event\n assert filters.anonymous(anonymous_event) is None", "def test_events_list(self):\n response = self.client.get(url_for(\n 'issues.eventsresourse',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def get_some_events(cls, field, filter):\n try:\n events = list(events_coll.find({field: filter}))\n events_list = []\n if events is not None:\n for event in events:\n one_event 
= cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_event_view_list(self):\n response = self.client.get('/module/event/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'frontend/appointment/event/list.html')\n\n request = self.factory.get('/module/calendar/')\n request.user = self.user\n request.session = {}\n response = event_list(request)\n self.assertEqual(response.status_code, 200)", "def get_group_restricted_events(user):\n types_allowed = []\n\n groups = user.groups.all()\n\n if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False):\n return Event.objects.filter(attendance_event__isnull=False)\n\n for group in groups:\n if group.name == 'arrKom':\n types_allowed.append(1) # sosialt\n types_allowed.append(4) # utflukt\n\n if group.name == 'bedKom':\n types_allowed.append(2) # bedriftspresentasjon\n\n if group.name == 'fagKom':\n types_allowed.append(3) # kurs\n\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)", "def get_group_restricted_events(user, all_events=False):\n types_allowed = get_types_allowed(user)\n\n if all_events:\n return Event.objects.filter(event_type__in=types_allowed)\n else:\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)", "def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)", "def events(bot, event, *args):\n yield from _printEventList(bot, event)" ]
[ "0.76308376", "0.6389203", "0.63811326", "0.6315443", "0.6304196", "0.62385494", "0.62282383", "0.60201687", "0.601349", "0.5966037", "0.59357476", "0.58643967", "0.5805025", "0.5805025", "0.5782005", "0.56589717", "0.56469536", "0.56199026", "0.56002545", "0.5592264", "0.55373585", "0.5520126", "0.5518393", "0.5475232", "0.54672265", "0.5454451", "0.54527074", "0.5435509", "0.54307497", "0.5418815" ]
0.7921824
0
Testing search order. Order by timestamps.
def test_valid_search_order(self) -> None: # fetch the most recent first, largest timestamp channel = self.make_request( "GET", self.url + "?dir=b", access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) report = 1 while report < len(channel.json_body["event_reports"]): self.assertGreaterEqual( channel.json_body["event_reports"][report - 1]["received_ts"], channel.json_body["event_reports"][report]["received_ts"], ) report += 1 # fetch the oldest first, smallest timestamp channel = self.make_request( "GET", self.url + "?dir=f", access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], 20) self.assertEqual(len(channel.json_body["event_reports"]), 20) report = 1 while report < len(channel.json_body["event_reports"]): self.assertLessEqual( channel.json_body["event_reports"][report - 1]["received_ts"], channel.json_body["event_reports"][report]["received_ts"], ) report += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_searchSince(self):\n self.assertTrue(\n self.server.search_SINCE(self.earlierQuery, self.seq, self.msg))\n self.assertTrue(\n self.server.search_SINCE(self.sameDateQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_SINCE(self.laterQuery, self.seq, self.msg))", "def test_searchSentOn(self):\n self.assertFalse(\n self.server.search_SENTON(self.earlierQuery, self.seq, self.msg))\n self.assertTrue(\n self.server.search_SENTON(self.sameDateQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_SENTON(self.laterQuery, self.seq, self.msg))", "def test_searchSentSince(self):\n self.assertTrue(\n self.server.search_SENTSINCE(self.earlierQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_SENTSINCE(self.laterQuery, self.seq, self.msg))", "def test_searchBefore(self):\n self.assertFalse(\n self.server.search_BEFORE(self.earlierQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_BEFORE(self.sameDateQuery, self.seq, self.msg))\n self.assertTrue(\n self.server.search_BEFORE(self.laterQuery, self.seq, self.msg))", "def test_searchOn(self):\n self.assertFalse(\n self.server.search_ON(self.earlierQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_ON(self.sameDateQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_ON(self.laterQuery, self.seq, self.msg))", "def test_searchSentBefore(self):\n self.assertFalse(\n self.server.search_SENTBEFORE(self.earlierQuery, self.seq, self.msg))\n self.assertTrue(\n self.server.search_SENTBEFORE(self.laterQuery, self.seq, self.msg))", "def order_log_results(self, log_search_order):\n raise errors.Unimplemented()", "def test_simulate_search(self):\n obj = CalculateSearchTimes(True, TIME_PERIODS,\n search_path=SEARCH_PATH, df_path=DF_PATH)\n dt1 = dt.datetime(2017, 8, 9, 14, 56, 40, 796658)\n dt2 = dt.datetime(2017, 8, 9, 17, 56, 40, 796658)\n dt3 = dt.datetime(2017, 8, 9, 10, 56, 40, 796658)\n zip_1 = 10028\n zip_2 = 10027\n zip_3 = 10008\n result1 = obj.simulate_search(zip_3, dt1)\n self.assertTrue(result1 == 1666.6666666666667)\n\n result2 = obj.simulate_search(zip_1, dt2)\n self.assertTrue(result2 == 24.0)\n\n result3 = obj.simulate_search(zip_2, dt2)\n self.assertTrue(result3 == 1666.6666666666667)\n\n result4 = obj.simulate_search(zip_2, dt3)\n self.assertTrue(result4 == 11.0)", "def order_log_entry_results(self, log_entry_search_order):\n raise errors.Unimplemented()", "def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n 
\"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_order_by(self):\n try:\n self.init_pglist_data(self.node)\n\n print(\"Creating index 'rumidx_orderby_sent'\")\n\n self.node.safe_psql(\n \"pglist\",\n \"CREATE INDEX rumidx_orderby_sent ON pglist USING rum (\"\n \" fts rum_tsvector_timestamp_ops, sent) \"\n \" WITH (attach=sent, to=fts, order_by_attach=t)\")\n\n print(\"Running tests\")\n\n self.assertEqual(\n self.node.safe_psql(\n \"pglist\",\n \"SELECT sent, subject \"\n \" FROM pglist \"\n \" WHERE fts @@ \"\n \" to_tsquery('english', 'backend <-> crushed') \"\n \" ORDER BY sent <=| '2016-01-01 00:01' LIMIT 5\"\n ),\n b'1999-06-02 11:52:46|Re: [HACKERS] PID of backend\\n'\n )\n\n self.assertEqual(\n self.node.safe_psql(\n \"pglist\",\n \"SELECT count(*) FROM pglist \"\n \"WHERE fts @@ to_tsquery('english', 'tom & lane')\"\n ),\n b'222813\\n'\n )\n\n self.node.safe_psql(\"pglist\", \"DROP INDEX rumidx_orderby_sent\");\n\n print(\"Creating index 'pglist_rum_idx'\")\n\n self.node.safe_psql(\n \"pglist\",\n \"CREATE INDEX pglist_rum_idx ON pglist USING rum (\"\n \" fts rum_tsvector_ops)\")\n\n print(\"Running tests\")\n\n self.assertEqual(\n self.node.execute(\n \"pglist\",\n \"SELECT id FROM pglist \"\n \"WHERE fts @@ to_tsquery('english', 'postgres:*') \"\n \"ORDER BY fts <=> to_tsquery('english', 'postgres:*') \"\n \"LIMIT 9\"\n )[0][0],\n 816114\n )\n\n # Autovacuum after large update, with active RUM index crashes postgres\n print(\"Test Issue #19\")\n\n self.node.safe_psql(\n \"pglist\",\n \"DELETE FROM pglist WHERE id < 100000\")\n self.node.safe_psql(\n \"pglist\",\n \"vacuum\")\n\n self.node.safe_psql(\"pglist\", \"DROP INDEX pglist_rum_idx\");\n\n except Exception as e:\n self.printlog(os.path.join(self.node.logs_dir, \"postgresql.log\"))\n raise e", "def _topological_sort_timestamp_index(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def batch_uses_proper_timestamp_test(self):\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(rows)\n assert [list(res[0]), list(res[1])] == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111111, 1111111111111111]], res", "def test_ordering(post_factory):\n now = timezone.now()\n\n p1 = post_factory(published=now - datetime.timedelta(hours=1))\n p2 = post_factory(published=now + datetime.timedelta(hours=1))\n p3 = post_factory(published=now)\n\n assert list(models.Post.objects.all()) == [p2, p3, p1]", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def test_search(self):\n d = self._search()\n self._response([2, 5, 10])\n self.assertEqual(self.successResultOf(d), [2, 5, 10])", "def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = 
[prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_query_params_date(session, params, expected_number_of_hits):\n result = get_search(session, params)\n compare(result['total']['value'], expected_number_of_hits)", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search_two_dates(self):\n # search via 2 dates.\n self.data.search(user_date='01/01/1800', second_date='02/04/1827',\n all_names=True)\n\n test = self.data.search(user_date='5/21/2012',\n second_date='04/10/2012', first_name='Trevor',\n last_name='Harvey')\n item_date = datetime.datetime(month=4, day=19, year=2012)\n self.assertEqual(test[0].entry_date, item_date)\n\n self.data.search(user_date='03/12/0001', second_date='03/13/0001',\n all_names=True)\n return self.data.search(user_date='1/10/2013', second_date='5/21/2011',\n first_name='Trevor', last_name='Harvey')", "def test_videos_default_ordering(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n VideoSetPagination.page_size = 5\n client, user = logged_in_apiclient\n collection = CollectionFactory(owner=user)\n VideoFactory.create_batch(10, collection=collection)\n url = reverse(\"models-api:video-list\")\n p1_response = client.get(\"{}?page=1\".format(url))\n assert len(p1_response.data[\"results\"]) == 5\n for i in range(4):\n current_video_date = p1_response.data[\"results\"][i][\"created_at\"]\n next_video_date = p1_response.data[\"results\"][i + 1][\"created_at\"]\n assert current_video_date >= next_video_date\n\n p2_response = client.get(\"{}?page=2\".format(url))\n last_entry_data = p1_response.data[\"results\"][-1][\"created_at\"]\n first_entry_data = p2_response.data[\"results\"][0][\"created_at\"]\n assert last_entry_data >= first_entry_data\n for i in range(4):\n current_video_date = p2_response.data[\"results\"][i][\"created_at\"]\n next_video_date = p2_response.data[\"results\"][i + 1][\"created_at\"]\n assert current_video_date >= next_video_date", "def testQueryWithTimestamp(self):\n for i in range(5):\n row_name = \"aff4:/row:query_with_ts\"\n data_store.DB.Set(row_name, \"metadata:5\", \"test\", timestamp=i + 10,\n replace=False, token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", \"test\", timestamp=i + 10,\n replace=False, token=self.token)\n\n # Read all timestamps.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=data_store.DB.ALL_TIMESTAMPS, token=self.token)]\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n self.assertEqual(len(attributes[\"aff4:type\"]), 5)\n\n # Read latest timestamp.\n rows = [row for row in data_store.DB.Query(\n [], 
data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=data_store.DB.NEWEST_TIMESTAMP, token=self.token)]\n\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n self.assertEqual(len(attributes[\"aff4:type\"]), 1)\n self.assertEqual(attributes[\"aff4:type\"][0][0], \"test\")\n\n # Newest timestamp is 4.\n self.assertEqual(attributes[\"aff4:type\"][0][1], 14)\n\n # Now query for a timestamp range.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=(11, 13), token=self.token)]\n\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n # Now we should have three timestamps.\n self.assertEqual(len(attributes[\"aff4:type\"]), 3)\n\n timestamps = [attribute[1] for attribute in attributes[\"aff4:type\"]]\n self.assertListEqual(sorted(timestamps), [11, 12, 13])", "def testTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:8\"\n\n # Extend the range of valid timestamps returned from the table to account\n # for potential clock skew.\n start = long(time.time() - 60) * 1e6\n data_store.DB.Set(subject, predicate, \"1\", token=self.token)\n\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n end = long(time.time() + 60) * 1e6\n\n self.assert_(ts >= start and ts <= end)\n self.assertEqual(stored, \"1\")", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def test_search_time_spent_retrieves_corect_db_entries(self):\n # add some data to the database\n test_employee = [\n {'id': 1, 'name': \"Test Employee 1\"},\n ]\n test_log_entry_durations = [\n 1,\n 2,\n 2,\n 3,\n 5,\n ]\n e = db_manager.Employee.get_or_create(name=test_employee[0]['name'])\n # create some log entries\n for duration in test_log_entry_durations:\n db_manager.LogEntry.create(\n employee=e[0],\n date=datetime.date(2018, 1, duration),\n task_name='Test task of {}m'.format(duration),\n duration=duration,\n notes='Note'\n )\n match_duration = 2\n\n expected_results = []\n for duration in test_log_entry_durations:\n if duration == match_duration:\n new_record = OrderedDict([\n ('name', test_employee[0]['name']),\n ('date', datetime.date(2018, 1, duration)),\n ('task_name', 'Test task of {}m'.format(duration)),\n ('duration', duration),\n ('notes', \"Note\")\n ])\n expected_results.append(new_record)\n\n user_input = str(match_duration)\n with patch('builtins.input', side_effect=user_input):\n self.menu.search_time_spent()\n\n self.assertEqual(expected_results, self.menu.records)", "def test_search(client, example_records, h, prefix):\n res = client.get(prefix, headers=h)\n assert res.status_code == 200\n assert res.json[\"hits\"][\"total\"] == 2\n assert res.json['sortBy'] == 'title'", "def test_date_rage(self):\n\n query_params = {\n 'until_date': self.today,\n 'from_date': self.today,\n }\n search = OrderSearchEngine()\n query = search.filter_query(query_params)\n content = Q(created_at__range=[self.from_date, self.until_date])\n self.assertEqual(str(query), str(content))" ]
[ "0.72361386", "0.6569934", "0.65050995", "0.6474437", "0.6456082", "0.633678", "0.6248746", "0.61275387", "0.61030513", "0.6011395", "0.59952134", "0.589628", "0.5876904", "0.58597946", "0.5856868", "0.58429617", "0.58400095", "0.5835469", "0.5823607", "0.5755642", "0.5755642", "0.5755642", "0.5725712", "0.56777745", "0.56563807", "0.56546074", "0.5646058", "0.56387", "0.5638657", "0.562253" ]
0.73637056
0
Testing that an invalid search order returns a 400
def test_invalid_search_order(self) -> None: channel = self.make_request( "GET", self.url + "?dir=bar", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "Query parameter 'dir' must be one of ['b', 'f']", channel.json_body["error"], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_400_invalid_search(self):\n res = self.client().post('/api/questions/search', json={'search': 'This should fail'})\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertFalse(res_body['success'])\n self.assertEqual(res_body['message'], 'Bad request')", "def test_search_number_invalid():\n response = search.number({\"body\": \"{}\"}, {})\n\n assert response[\"statusCode\"] == 422", "def test_bad_requests_give_400(self):\n self.assertEqual(self._request({}), 400)", "def test_404_search_results_unprocesssable(self):\n data = {'searchTerm':'wxyz'}\n res = self.client().post('/search', \n data=json.dumps(data),\n content_type='application/json')\n self.data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n json_res = json.loads(res.get_data(as_text=False))", "def test_ice_and_fire_external_invalid_search(self):\n response = self.client.get('/api/external-books?name=abc23123', format='json')\n self.assertEqual(200, response.data['status_code'])\n self.assertEqual(0, len(response.data['data']))", "def test_search_assigned_bad_request():\n response = search.assigned({\"body\": \"{}\"}, {})\n\n assert response[\"statusCode\"] == 422", "def test_search_history_and_assigned_invalid():\n response = search.history({\"body\": \"{}\"}, {})\n\n assert response[\"statusCode\"] == 422", "def test_make_order_with_some_data_missing(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={'item_name': 'Watermelon'}, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Missing required param')", "def test_400_ans(self):\r\n self.assertEqual(unpack_answ(\r\n {RESPONSE: 400, ERROR: 'Bad Request'}), '400 : Bad Request')", "def test_list_tasks_invalid_order_argument(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={\"order\": \"name unk\"})\n result = rv.json()\n\n expected = {\n \"message\": \"Invalid order argument\",\n \"code\": \"InvalidOrderBy\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def test_invalid_search_query(aquarius_instance):\n search_query = dict()\n search_query[\"sort\"] = \"foo_sort\"\n with pytest.raises(ValueError):\n aquarius_instance.query_search(search_query=search_query, sort=\"foo_sort\")", "def test_AlgorithmsIdHandler_GET_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.get('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))", "def test_invalid_sell_order(self):\n\n _ = self.set_auth_token_header()\n\n data = {\n 'stock': 'GOOG',\n 'quantity': 15,\n 'price': 1.25,\n 'order_type': 'SELL'\n }\n\n # Order create API\n url = reverse('orders-list')\n response = self.client.post(url, data=data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['details'][0]),\n 'Not enough shares.')", "def test_make_order_with_quantity_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': 50, 'quantity': -3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n 
response)['message'], 'Bad request. Price and quantity must be ints >= 1')", "def test_get_invalid_filter(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?s=foo\")\n assert r.status_code == 400", "def test_error_data_order(client):\n data = dict(product_name=\"Latte\")\n response = client.post(\"/api/order\", headers=HEADERS, json=data)\n assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY", "def test_error_route(client):\n data = orderBase(product_name=\"Latte\", total_amount=1000)\n response = client.post(\"/order\", headers=HEADERS, json=data.dict())\n assert response.status_code == status.HTTP_404_NOT_FOUND", "def test_wrong_search_criteria(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decrease\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. Please check the API documentation for the appropriate input format!!\",\"Sorting Critera Input Control Doesn't Work\")", "def test_exception_invalid_sort_order(self):\n self.assertRaises(ValueError, self.conn.query, \"id:\" + \"abc\",\n **{\"sort\":\"id\", \"sort_order\":\"invalid_sort_order\"})", "def test_make_order_with_price_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': -50, 'quantity': 3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Price and quantity must be ints >= 1')", "def test_bad_awards_autocomplete_request(client):\n\n resp = client.post(\"/api/v1/federal_accounts/autocomplete/\", content_type=\"application/json\", data=json.dumps({}))\n assert resp.status_code == status.HTTP_400_BAD_REQUEST", "def test_search_history_invalid():\n response = search.history({\"body\": \"{}\"}, {})\n\n assert response[\"statusCode\"] == 422", "def test_sortby_invalid(self):\n qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)", "def test_GET3(self):\n r = requests.get(self.address)\n self.assertEqual(r.status_code, 400)", "def bad_request():\n return HttpError(400)", "def test_AlgorithmsHandler_GETMalformedQuery(self):\n response = self.testapp.get('/algorithms/?qqry=algorithm', expect_errors=True)\n self.assertEqual(400, response.status_int)\n self.assertIsNotNone(response.charset)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding=response.charset))\n self.assertEqual('application/json', response.content_type)", "def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)", "def test_no_search_string(self):\n resp = SearchTest.client.get('/api/search/',{'token':SearchTest.valid_token})\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. 
Please check the API documentation for the appropriate input format!!\",\"No Search String Test Error\")", "def test_bad_request(self):\n self._error_test(fitbit_exceptions.HTTPBadRequest)", "def test_query_search_wrongfield(self):\r\n # Test first a non-existant field for all end-points\r\n for endpoint in self.endpoints:\r\n res = self.app.get(\"/api/%s?wrongfield=value\" % endpoint)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'GET', err\r\n assert err['exception_cls'] == 'AttributeError', err" ]
[ "0.75018567", "0.7158932", "0.7119247", "0.70114154", "0.6923285", "0.6917125", "0.6822356", "0.67775583", "0.67404056", "0.67396665", "0.672678", "0.6684986", "0.66496515", "0.6613712", "0.66112727", "0.660449", "0.65978855", "0.65961367", "0.6592199", "0.6565199", "0.65610343", "0.6548511", "0.654187", "0.65280557", "0.64891064", "0.64816827", "0.64788115", "0.64657676", "0.64653856", "0.64566964" ]
0.7162226
1
Testing that a negative limit parameter returns a 400
def test_limit_is_negative(self) -> None: channel = self.make_request( "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit", "def check_limit(limit):\n if limit:\n limit = int(limit)\n if limit > settings.MAX_LISTING_LIMIT or \\\n limit < settings.MIN_LISTING_LIMIT:\n # SuspiciousOperation raises 400 bad request in Django 1.11.\n # https://docs.djangoproject.com/en/1.11/ref/views/#the-400-bad-request-view\n raise SuspiciousOperation()\n return limit\n return settings.DEFAULT_LISTING_LIMIT", "def on_over_limit(limit):\n return (jsonify({'data': 'You hit the rate limit', 'error': '429'}), 429)", "def test_request_limit_overflow(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(200, 20)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 4100, 'limit': 20})", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_get422(self):\n\n self.api.create(self.card, self.card_order_editable)\n self.api.create(self.card, self.card_order_editable2)\n with self.assertRaises(Exception) as context:\n self.api.get(self.card, limit=1000001)\n self.assertTrue(\"Invalid value for `limit`\" in context.exception.__str__())", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None", "def assertHttpTooManyRequests(self, resp):\r\n return self.assertEqual(resp.status_code, 429)", "def test_field_without_reaching_the_limit(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 200}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def 
test_field_without_reaching_the_limit(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 200)]],\n disable_hshc=True,\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_fail_on_rate_limit_exceeded(self):\n\n # setup 'short' limit for testing\n self.client.protocol.rate_limiter.rules = []\n self.client.protocol.rate_limiter.rules.append(\n XRateLimitRule(\n {\n \"short\": {\n \"usage\": 0,\n \"limit\": 600,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n \"long\": {\n \"usage\": 0,\n \"limit\": 30000,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n }\n )\n )\n\n # interact with api to get the limits\n self.client.get_athlete()\n\n # access the default rate limit rule\n rate_limit_rule = self.client.protocol.rate_limiter.rules[0]\n\n # get any of the rate limits, ex the 'short'\n limit = rate_limit_rule.rate_limits[\"short\"]\n\n # get current usage\n usage = limit[\"usage\"]\n print(\"last rate limit usage is {0}\".format(usage))\n\n # for testing purpses set the limit to usage\n limit[\"limit\"] = usage\n print(\"changing limit to {0}\".format(limit[\"limit\"]))\n\n # expect exception because of RateLimit has been\n # exceeded (or reached max)\n with self.assertRaises(exc.RateLimitExceeded):\n self.client.get_athlete()\n\n # request fired to early (less than 5 sec) causes timeout exception\n with self.assertRaises(exc.RateLimitTimeout):\n self.client.get_athlete()\n\n # once rate limit has exceeded wait until another request is possible\n # check if timeout has been set\n self.assertTrue(rate_limit_rule.limit_timeout > 0)\n print(\"limit timeout {0}\".format(rate_limit_rule.limit_timeout))\n\n # resetting limit\n # simulates Strava api - it would set the usage again to 0\n limit[\"limit\"] = 600\n print(\"resetting limit to {0}\".format(limit[\"limit\"]))\n\n try:\n # waiting until timeout expires\n time.sleep(5)\n\n # this time it should work again\n self.client.get_athlete()\n self.assertTrue(\"No exception raised\")\n except exc.RateLimitExceeded as e:\n self.fail(\"limiter raised RateLimitTimeout unexpectedly!\")\n\n # continue other tests with DefaultRateLimiter\n print(\"setting default rate limiter\")\n self.client.protocol.rate_limiter = DefaultRateLimiter()", "def test_get_cve_id_page_limit(reg_user_headers):\n res = requests.get(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=reg_user_headers,\n params={\n 'page': '-1',\n }\n )\n assert res.status_code == 400\n response_contains_json(res, 'error', 'BAD_INPUT')", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])", "def 
test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def test_field_without_reaching_the_limit_2(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 294)]],\n disable_hshc=True,\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_invalid_query_params(self):\n url = reverse(\"metrics\")\n client = APIClient()\n\n params = {\"limit\": \"foo\"}\n url = url + \"?\" + urlencode(params, quote_via=quote_plus)\n response = client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_request_limit_inner_larger(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2100, 'limit': 100})", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def test_rate_limit_handling(self):\n self.create_org(status=CONNECTED)\n response = self.app.post('/adapter/qbo/test/update')\n\n # 500 should not be returned by the adapter\n self.assertEqual(response.status_code, 429)", "def test_field_without_reaching_the_limit_2(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 292}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = 
bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def test_query_train_jobs_with_exceeded_limit(self, client):\n params = dict(offset=0, limit=1000)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'" ]
[ "0.7617022", "0.7143843", "0.709307", "0.7042369", "0.6976879", "0.6941879", "0.6916557", "0.68980557", "0.6858989", "0.680703", "0.6793832", "0.6782149", "0.67627925", "0.6762043", "0.67223275", "0.6576358", "0.649637", "0.6467667", "0.645585", "0.6455209", "0.6427124", "0.6424087", "0.6421036", "0.6413763", "0.64002514", "0.63896424", "0.6386859", "0.63725656", "0.6356369", "0.6352885" ]
0.80326337
0
Testing that an invalid `report_id` returns a 400.
def test_invalid_report_id(self) -> None: # `report_id` is negative channel = self.make_request( "GET", "/_synapse/admin/v1/event_reports/-123", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", channel.json_body["error"], ) # `report_id` is a non-numerical string channel = self.make_request( "GET", "/_synapse/admin/v1/event_reports/abcdef", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", channel.json_body["error"], ) # `report_id` is undefined channel = self.make_request( "GET", "/_synapse/admin/v1/event_reports/", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", channel.json_body["error"], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_bad_requests_give_400(self):\n self.assertEqual(self._request({}), 400)", "def test_400_response(self):\n mock = Mock()\n mock.status_code = 400\n\n with self.assertRaises(RequestError):\n check_response(mock)", "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def test_award_badge_gives_error_when_given_bad_badge_id(self):\n\n badgr = self.get_badgr_setup()\n\n with vcr.use_cassette('tests/vcr_cassettes/award_bad_badge_id.yaml'):\n with self.assertRaises(exceptions.BadBadgeIdError):\n badgr.award_badge('bad_badge_id',\n self.get_sample_award_badge_data())", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_incident_invalid_incident_id(self):\n resp = self.client.post(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'}),\n {'incident_ids': '96e3d488', 'annotation': 'Rebooted server'}\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': 'Invalid incident id: 96e3d488'})", "def test_get_report_wrong_object_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_NOT_FOUND_ERROR)", "def test_invalid_request(client, auth_token, sample_project):\n # Given\n project_id = 
sample_project[\"uid\"];\n\n # When\n response = client.post(\"/projects/%s\" % project_id,\n data={\"invalid_field\": \"value\"},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 400 == response.status_code", "def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)", "def assertHttpBadRequest(self, resp):\r\n return self.assertEqual(resp.status_code, 400)", "def test_yearly_report_error(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n year = 'sdfg'\n res = self.client().get(f'/yearly_report?year={year}', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], f'The date {year} does not match the format YYYY')", "def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_swimmer_id(val))", "def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_event_swimmer_id(val))", "def test_job_fail(client):\n response = client.get('/status/random')\n assert response.status_code == 400", "def test_get_non_existent_campaign_by_id_fails(self):\n response = self.client.get(f\"{self.endpoint_url}99/\")\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)\n self.assertEqual(error_details[\"details\"], {\"campaign_id\": 99})", "def test_400_bad_request(self):\n # create route to abort the request with the 400\n @self.app.route('/400')\n def bad_request_error():\n abort(400)\n response = self.client.get('/400')\n self.assertEqual(response.status_code, 400)", "def bad_request():\n return HttpError(400)", "def test_wrong_edx_id(self):\r\n data = {\r\n \"EdX-ID\": \"Invalid-Id\",\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('edX ID Invalid-Id not found', response.content)\r\n self.assertEqual(response.status_code, 400)", "def test_raise_using_invalid_code(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException('foo')\n self.assertEqual(cm.exception.status_code, 100)", "def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_400_ans(self):\r\n self.assertEqual(unpack_answ(\r\n {RESPONSE: 400, ERROR: 'Bad Request'}), '400 : Bad Request')", "def 
test_incident_missing_incident_id(self):\n resp = self.client.post(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'}),\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': 'incident_ids is a required argument'})", "def bad_request_400(error):\n return jsonify({\n 'success': False,\n 'message': 'Bad request',\n 'error': 400\n }), 400", "def test_report_resource(client, app):\n with app.app_context():\n r = client.get(\"/reports/1\")\n assert r.status_code == 200\n assert \"Organization: Dunder Mifflin\" in r.get_data(as_text=True)\n\n # Test Invalid reports\n r = client.get(\"/reports/111\")\n assert r.status_code == 404", "def raise_for_status(self):\n if self.status >= 400:\n request_info = mock.Mock(real_url=\"http://example.com\")\n raise ClientResponseError(\n request_info=request_info,\n history=None,\n status=self.status,\n headers=self.headers,\n )", "def test_get_manifest_unexpected_report_name(self):\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader._get_manifest(self.mock_data.bad_test_date)", "def test_400_bad_request(app, client):\n\n @app.route(\"/400\")\n def bad_request():\n abort(400)\n\n response = client.get(\"/400\")\n assert response.status_code == 400\n assert \"400 Bad Request\" in str(response.data)" ]
[ "0.7850871", "0.6897314", "0.6565259", "0.65553325", "0.6430627", "0.63368964", "0.6311277", "0.6301224", "0.62615025", "0.6256362", "0.62535536", "0.6204354", "0.6088427", "0.60642743", "0.6035252", "0.60337836", "0.6016078", "0.60159826", "0.60128593", "0.59775776", "0.59756577", "0.5937326", "0.5934424", "0.5899449", "0.5876917", "0.5875258", "0.5865194", "0.5853158", "0.58512425", "0.58470416" ]
0.81147087
0
Testing that a non-existent `report_id` returns a 404.
def test_report_id_not_found(self) -> None: channel = self.make_request( "GET", "/_synapse/admin/v1/event_reports/123", access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) self.assertEqual("Event report not found", channel.json_body["error"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_get_report_wrong_object_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_NOT_FOUND_ERROR)", "def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)", "def test_report_of_article_does_not_exist(self):\n from rest_framework.test import 
APIClient\n client = APIClient()\n response = client.post('/api/report/spoon/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"error_message\"], \"The article you are reporting does not exist\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_non_existent_campaign_by_id_fails(self):\n response = self.client.get(f\"{self.endpoint_url}99/\")\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)\n self.assertEqual(error_details[\"details\"], {\"campaign_id\": 99})", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_get_report_file_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_OK)", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_retrieve_with_bad_id(self):\n resp = self.api_client.get('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'Not found')", "def test_nonexistent_report(self):\n command_line = [\"report\", \"notreport\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])", "def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404", "def test_fetch_nonexist_pdbid(self):\n pdbid = '1000'\n with self.assertRaisesRegex(ValueError, 'PDB ID not exist'):\n fetch(pdbid)", "def test_get_book_with_id_does_not_exist(self):\n\t\tlogin_data = self.register_and_login_in_user()\n\t\ttoken = login_data['auth_token']\n\n\t\t# get book id\n\t\tbook = self.client.get(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json'\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book not found')\n\t\tself.assertEqual(book.status_code, 404)", "def check_item_does_not_exist(context, item):\n uuids = []\n reports = context.get(f\"report/{context.uuid[item]}\") if item == \"report\" else context.get(\"reports\")\n for report in reports[\"reports\"]:\n uuids.append(report[\"report_uuid\"])\n uuids.extend(report[\"subjects\"].keys())\n for subject in report[\"subjects\"].values():\n uuids.extend(subject[\"metrics\"].keys())\n for metric in subject[\"metrics\"].values():\n uuids.extend(metric[\"sources\"].keys())\n assert_false(context.uuid[item] in uuids)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.dataset.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_get_nonexistent_test(self):\n response = 
self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')", "def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)", "def check_response_update_nonexistent(response: HTTPResponse) -> bool: # pylint: disable=invalid-name\n return response.status_code == 404", "def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')", "def test_get_analysis_with_id_when_missing(analysis_store: MockStore):\n # GIVEN an id that doesn't exist\n missing_analysis_id = 12312423534\n\n # WHEN accessing the analysis\n analysis: Analysis = analysis_store.get_analysis_with_id(analysis_id=missing_analysis_id)\n\n # THEN it should return None\n assert not analysis", "def test_non_existent_course_id(self):\n self._login_as_staff()\n path = self.path(course_id='a/b/c')\n response = self.client.get(path)\n\n assert response.status_code == 404\n\n response = self.client.post(path)\n assert response.status_code == 404", "def assert404(self, response):\n self.assertEqual(response.status_code, 404)", "def test_grainbin_get_by_id_not_found(flaskclient, auth_headers):\n\n grainbin = GrainbinFactory().save()\n\n url = url_for(\"grainbin.GrainbinById\", grainbin_id=grainbin.id + 1)\n response = flaskclient.get(url, headers=auth_headers)\n\n assert response.status_code == 404", "def test_download_missing_file(self):\n key = \"badkey\"\n\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader.download_file(key)", "def test_getting_one_question_with_invalid_questionId(self):\n response = self.get_one_question_with_invalid_questionId()\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_get_manifest_unexpected_report_name(self):\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader._get_manifest(self.mock_data.bad_test_date)", "def test_request_returns_404(client):\n assert client.get(\"/url_que_nao_existe\").status_code == 404" ]
[ "0.80737585", "0.76326716", "0.72465545", "0.7060298", "0.6815434", "0.6761219", "0.656321", "0.6505561", "0.6406881", "0.6399025", "0.63805026", "0.63716185", "0.6366132", "0.6341178", "0.63260156", "0.6313996", "0.63133943", "0.6300672", "0.6275042", "0.6266949", "0.62664604", "0.62290144", "0.6223413", "0.61945885", "0.6193047", "0.6186939", "0.61762464", "0.6152956", "0.6138663", "0.61108065" ]
0.84145755
0
Testing deleting a report.
def test_delete_success(self) -> None: channel = self.make_request( "DELETE", self.url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual({}, channel.json_body) channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) # check that report was deleted self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_superuser_delete_assessment(self):\n response = self.superuser.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n response = self.superuser.get(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def storage_delete_report(self, report_id):\n self._get_queryset(report_id=report_id).delete()", "def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()", "def test_delete_record(self):\n pass", "def test_delete_records(self):\n pass", "def test_delete(self):\n pass", "def test_delete_alert_by_id(self):\n pass", "def test_delete_run(self):\n pass", "def test_delete_patient(self):\n response = self.client.delete(\n reverse('patient:patient-detail', kwargs={'pk': Patient.objects.get().id}))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Patient.objects.count(), 0)", "def test_report(topic, user):\n report = Report(reason=\"Test Report\")\n report.save(user=user, post=topic.first_post)\n assert report.reason == \"Test Report\"\n\n report.reason = \"Test Report Edited\"\n report.save()\n assert report.reason == \"Test Report Edited\"\n\n report.delete()\n report = Report.query.filter_by(id=report.id).first()\n assert report is None", "def test_delete_assessment(self):\n response = self.user_01.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.user_02.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_client_verification_document_delete(self):\n pass", "def test_duplicate_crashreports_are_deleted(self):\n self._assert_duplicates_are_deleted(Crashreport)", "def test_delete1(self):\n pass", "def test_delete(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res_id = self.metadata.get_by_id(\n entity=Dashboard, entity_id=str(res_name.id.__root__)\n )\n\n # Delete\n self.metadata.delete(\n entity=Dashboard, entity_id=str(res_id.id.__root__), recursive=True\n )\n\n # Then we should not find it\n res = self.metadata.list_entities(entity=Dashboard)\n assert not next(\n iter(\n ent\n for ent in res.entities\n if ent.fullyQualifiedName == self.entity.fullyQualifiedName\n ),\n None,\n )", "def test_client_risk_assessment_delete(self):\n pass", "def test_delete_case(self):\n pass", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_add_remove(self):\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n\n self.assertTrue(saver.does_db_entry_exist())\n returned_obj = saver._get_db_obj_query()\n self.assertEqual(returned_obj.first().report_name, \"myreport\")\n\n saver.delete()\n returned_obj = saver._get_db_obj_query()\n self.assertIsNone(returned_obj.first())", "def test_delete(self):\n SampleTemplate.create(self.metadata, 
self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_client_document_delete(self):\n pass", "def test_delete(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n resp = self.api_client.delete('/api/metadata/tracks/2/')\n data = json.loads(resp.content)\n new_records_count = Track.objects.all().count()\n\n # Ensure request was successful, and the record is removed from the database.\n # Should return with a success message.\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(existing_records_count-1, new_records_count)\n self.assertEqual(data['detail'], 'Track successfully removed')", "def test_07_datastore_delete(self, Mock):\r\n html_request = FakeRequest(json.dumps({}), 200,\r\n {'content-type': 'application/json'})\r\n\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_delete failed\" == type, type", "def test_delete7(self):\n pass", "def test_delete_findings(upload, test_id):\n check_delete()\n upload.test_delete_findings(test_id)", "def delete_report(report_id):\n report = Report.query.get(report_id)\n if report is None:\n flash(\n \"Report not found!\",\n \"alert-warning\",\n )\n elif not current_user.is_admin and not report.user.id == current_user.id:\n flash(\n \"You don't have permission to delete that.\",\n \"alert-warning\",\n )\n else:\n # Before deleting the report, check to see if any other users have\n # favorited this report. 
If so, simply transfer ownership to them\n current_user.unfavorite(report)\n if report.favorite_users:\n user = report.favorite_users[0]\n report.user = user\n db.session.commit()\n flash(\n \"Report ownership was transferred to {{ user.name }} since \"\n \"the report was in that user's favorites list.\",\n \"alert-success\",\n )\n else:\n db.session.delete(report)\n db.session.commit()\n flash(\n \"Report deleted\",\n \"alert-success\",\n )\n return redirect(request.args.get('next') or url_for('reports.my_reports'))", "def delete(self, report_id=None):\n if report_id is not None and isinstance(report_id, str):\n return self.collection.remove({'_id': ObjectId(report_id)})\n else:\n return self.collection.remove({'_id': report_id})", "def test_dashboards_v2_delete(self):\n pass", "def test_delete_project(self):\n pass" ]
[ "0.7181926", "0.7076668", "0.70488626", "0.70294976", "0.6937398", "0.6934279", "0.6881542", "0.6798384", "0.67498845", "0.6746264", "0.6628536", "0.6619082", "0.65968037", "0.6595696", "0.6588093", "0.6546987", "0.65190804", "0.64991343", "0.6493712", "0.64831465", "0.6468308", "0.64321446", "0.6432125", "0.64313704", "0.6431034", "0.6422867", "0.6408266", "0.63768816", "0.6364386", "0.633714" ]
0.7636673
0
Testing that an invalid `report_id` returns a 400.
def test_invalid_report_id(self) -> None: # `report_id` is negative channel = self.make_request( "DELETE", "/_synapse/admin/v1/event_reports/-123", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", channel.json_body["error"], ) # `report_id` is a non-numerical string channel = self.make_request( "DELETE", "/_synapse/admin/v1/event_reports/abcdef", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", channel.json_body["error"], ) # `report_id` is undefined channel = self.make_request( "DELETE", "/_synapse/admin/v1/event_reports/", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) self.assertEqual( "The report_id parameter must be a string representing a positive integer.", channel.json_body["error"], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_bad_requests_give_400(self):\n self.assertEqual(self._request({}), 400)", "def test_400_response(self):\n mock = Mock()\n mock.status_code = 400\n\n with self.assertRaises(RequestError):\n check_response(mock)", "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def test_award_badge_gives_error_when_given_bad_badge_id(self):\n\n badgr = self.get_badgr_setup()\n\n with vcr.use_cassette('tests/vcr_cassettes/award_bad_badge_id.yaml'):\n with self.assertRaises(exceptions.BadBadgeIdError):\n badgr.award_badge('bad_badge_id',\n self.get_sample_award_badge_data())", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_incident_invalid_incident_id(self):\n resp = self.client.post(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'}),\n {'incident_ids': '96e3d488', 'annotation': 'Rebooted server'}\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': 'Invalid incident id: 96e3d488'})", "def test_get_report_wrong_object_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_NOT_FOUND_ERROR)", "def test_invalid_request(client, auth_token, sample_project):\n # Given\n project_id = 
sample_project[\"uid\"];\n\n # When\n response = client.post(\"/projects/%s\" % project_id,\n data={\"invalid_field\": \"value\"},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 400 == response.status_code", "def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)", "def assertHttpBadRequest(self, resp):\r\n return self.assertEqual(resp.status_code, 400)", "def test_yearly_report_error(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n year = 'sdfg'\n res = self.client().get(f'/yearly_report?year={year}', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], f'The date {year} does not match the format YYYY')", "def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_swimmer_id(val))", "def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_event_swimmer_id(val))", "def test_job_fail(client):\n response = client.get('/status/random')\n assert response.status_code == 400", "def test_get_non_existent_campaign_by_id_fails(self):\n response = self.client.get(f\"{self.endpoint_url}99/\")\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)\n self.assertEqual(error_details[\"details\"], {\"campaign_id\": 99})", "def test_400_bad_request(self):\n # create route to abort the request with the 400\n @self.app.route('/400')\n def bad_request_error():\n abort(400)\n response = self.client.get('/400')\n self.assertEqual(response.status_code, 400)", "def bad_request():\n return HttpError(400)", "def test_wrong_edx_id(self):\r\n data = {\r\n \"EdX-ID\": \"Invalid-Id\",\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('edX ID Invalid-Id not found', response.content)\r\n self.assertEqual(response.status_code, 400)", "def test_raise_using_invalid_code(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException('foo')\n self.assertEqual(cm.exception.status_code, 100)", "def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_400_ans(self):\r\n self.assertEqual(unpack_answ(\r\n {RESPONSE: 400, ERROR: 'Bad Request'}), '400 : Bad Request')", "def 
test_incident_missing_incident_id(self):\n resp = self.client.post(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'}),\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': 'incident_ids is a required argument'})", "def bad_request_400(error):\n return jsonify({\n 'success': False,\n 'message': 'Bad request',\n 'error': 400\n }), 400", "def test_report_resource(client, app):\n with app.app_context():\n r = client.get(\"/reports/1\")\n assert r.status_code == 200\n assert \"Organization: Dunder Mifflin\" in r.get_data(as_text=True)\n\n # Test Invalid reports\n r = client.get(\"/reports/111\")\n assert r.status_code == 404", "def raise_for_status(self):\n if self.status >= 400:\n request_info = mock.Mock(real_url=\"http://example.com\")\n raise ClientResponseError(\n request_info=request_info,\n history=None,\n status=self.status,\n headers=self.headers,\n )", "def test_get_manifest_unexpected_report_name(self):\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader._get_manifest(self.mock_data.bad_test_date)", "def test_400_bad_request(app, client):\n\n @app.route(\"/400\")\n def bad_request():\n abort(400)\n\n response = client.get(\"/400\")\n assert response.status_code == 400\n assert \"400 Bad Request\" in str(response.data)" ]
[ "0.8114839", "0.6898392", "0.65659076", "0.6555996", "0.6431462", "0.63372266", "0.6312958", "0.63020974", "0.6263201", "0.62577367", "0.62554604", "0.6206936", "0.60895234", "0.6066999", "0.6037492", "0.6035575", "0.60181403", "0.60166055", "0.60134876", "0.5979885", "0.59767765", "0.5938902", "0.59370863", "0.5901309", "0.5877592", "0.5874819", "0.58655244", "0.58543", "0.58524364", "0.58479136" ]
0.7851341
1
Testing that a non-existent `report_id` returns a 404.
def test_report_id_not_found(self) -> None:

    channel = self.make_request(
        "DELETE",
        "/_synapse/admin/v1/event_reports/123",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(404, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
    self.assertEqual("Event report not found", channel.json_body["error"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_get_report_wrong_object_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_NOT_FOUND_ERROR)", "def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)", "def test_report_of_article_does_not_exist(self):\n from rest_framework.test import 
APIClient\n client = APIClient()\n response = client.post('/api/report/spoon/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"error_message\"], \"The article you are reporting does not exist\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_non_existent_campaign_by_id_fails(self):\n response = self.client.get(f\"{self.endpoint_url}99/\")\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)\n self.assertEqual(error_details[\"details\"], {\"campaign_id\": 99})", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_get_report_file_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_OK)", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_retrieve_with_bad_id(self):\n resp = self.api_client.get('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'Not found')", "def test_nonexistent_report(self):\n command_line = [\"report\", \"notreport\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])", "def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404", "def test_fetch_nonexist_pdbid(self):\n pdbid = '1000'\n with self.assertRaisesRegex(ValueError, 'PDB ID not exist'):\n fetch(pdbid)", "def test_get_book_with_id_does_not_exist(self):\n\t\tlogin_data = self.register_and_login_in_user()\n\t\ttoken = login_data['auth_token']\n\n\t\t# get book id\n\t\tbook = self.client.get(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json'\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book not found')\n\t\tself.assertEqual(book.status_code, 404)", "def check_item_does_not_exist(context, item):\n uuids = []\n reports = context.get(f\"report/{context.uuid[item]}\") if item == \"report\" else context.get(\"reports\")\n for report in reports[\"reports\"]:\n uuids.append(report[\"report_uuid\"])\n uuids.extend(report[\"subjects\"].keys())\n for subject in report[\"subjects\"].values():\n uuids.extend(subject[\"metrics\"].keys())\n for metric in subject[\"metrics\"].values():\n uuids.extend(metric[\"sources\"].keys())\n assert_false(context.uuid[item] in uuids)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.dataset.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_get_nonexistent_test(self):\n response = 
self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')", "def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)", "def check_response_update_nonexistent(response: HTTPResponse) -> bool: # pylint: disable=invalid-name\n return response.status_code == 404", "def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')", "def test_get_analysis_with_id_when_missing(analysis_store: MockStore):\n # GIVEN an id that doesn't exist\n missing_analysis_id = 12312423534\n\n # WHEN accessing the analysis\n analysis: Analysis = analysis_store.get_analysis_with_id(analysis_id=missing_analysis_id)\n\n # THEN it should return None\n assert not analysis", "def test_non_existent_course_id(self):\n self._login_as_staff()\n path = self.path(course_id='a/b/c')\n response = self.client.get(path)\n\n assert response.status_code == 404\n\n response = self.client.post(path)\n assert response.status_code == 404", "def assert404(self, response):\n self.assertEqual(response.status_code, 404)", "def test_grainbin_get_by_id_not_found(flaskclient, auth_headers):\n\n grainbin = GrainbinFactory().save()\n\n url = url_for(\"grainbin.GrainbinById\", grainbin_id=grainbin.id + 1)\n response = flaskclient.get(url, headers=auth_headers)\n\n assert response.status_code == 404", "def test_download_missing_file(self):\n key = \"badkey\"\n\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader.download_file(key)", "def test_getting_one_question_with_invalid_questionId(self):\n response = self.get_one_question_with_invalid_questionId()\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_get_manifest_unexpected_report_name(self):\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader._get_manifest(self.mock_data.bad_test_date)", "def test_request_returns_404(client):\n assert client.get(\"/url_que_nao_existe\").status_code == 404" ]
[ "0.841445", "0.7632993", "0.7246946", "0.70600617", "0.681505", "0.6762001", "0.656388", "0.65051955", "0.6408046", "0.6397647", "0.6379985", "0.6372422", "0.6366025", "0.6340298", "0.63259286", "0.63136077", "0.6312867", "0.6301502", "0.6274185", "0.6266179", "0.6265957", "0.6228933", "0.622374", "0.6194388", "0.61927325", "0.61856824", "0.61762244", "0.6152272", "0.61384004", "0.61106074" ]
0.80735993
1
run() fetches the game price using information from the command line
def run():
    parser = argparse.ArgumentParser(description='Retrieve the latest prices for a specified video game')
    parser.add_argument('title', help='Title of the game')
    parser.add_argument('-p', '--platform', help='Platform the game is on')
    args = parser.parse_args()

    if (not args.platform):
        platform = "any"
    else:
        platform = args.platform

    scraper = Scraper()
    results = scraper.searchAll(game=args.title, platform=platform)

    for result in results:
        print('${0:6}| {1:7}| {2:8}| {3}'.format(result[2], result[1], result[3], result[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n CheckFile(argv)\n bot = Trade()\n count = 0\n bot.period = 20\n while True:\n bot.Pull()\n if count == 0:\n bot.GetLittle()\n if bot.response == None:\n [bot.Sell(i, bot.share[i]) if bot.share[i] >= 1 else None for i in bot.price]\n print(\"EXIT\", flush = True)\n break\n if count >= bot.period:\n if count == bot.period:\n bot.GetCoefficient()\n if bot.money >= bot.initial_money * 1.5:\n tmp = dict((j[0], i) for i, j in bot.price.items())\n market = tmp[max(tmp)]\n tmp = Pronostic(bot.price[market], bot.period, bot.sd_coef / (1 + (pi / 10)))\n try:\n if tmp < 0:\n bot.Buy(market, GetLoad(count))\n elif tmp > 0 and bot.share[market] > 0:\n bot.Sell(market, bot.share[market] * GetLoad(count))\n except TradeError as error:\n continue\n else:\n for i in range(bot.market_number):\n market = bot.response[i].split(\":\")[0]\n tmp = Pronostic(bot.price[market], bot.period, 1.5)\n try:\n if tmp < 0:\n bot.Buy(market, GetLoad(count))\n elif tmp > 0 and bot.share[market] > 0:\n bot.Sell(market, bot.share[market])\n except TradeError as error:\n continue\n bot.DrawGraph(count)\n else:\n bot.DrawLobby(count)\n count += 1", "def main() -> None:\n parser = argparse.ArgumentParser(\n description=\"Optimise your Sportpools player selection\"\n )\n parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Path to file to import\",\n type=str,\n default=\"./data/Tennis Abstract_ 2020 Australian Open Men's Draw Forecast Forecast.htm\",\n required=True,\n )\n parser.add_argument(\n \"-b\",\n \"--black-points\",\n \"--black\",\n help=\"Total number of black points to use\",\n type=int,\n default=20,\n )\n parser.add_argument(\n \"-c\",\n \"--count\",\n \"--player-count\",\n help=\"Number of players to select\",\n type=int,\n default=14,\n )\n parser.add_argument(\n \"-l\", \"--loser\", help=\"Selected loser\", type=str,\n )\n\n args, _ = parser.parse_known_args()\n\n pool = TennisPool(ROUNDS).load_data(args.file).apply_filters().add_features()\n\n emulator = TennisPoolEmulator(pool.get_results())\n\n pool_results = emulator.play_draw(ROUNDS).add_features(ROUNDS).get_results()\n\n selection_optimum = optimise_selection(\n pool_results,\n selection_limit=args.count,\n black_points_limit=args.black_points,\n rounds=ROUNDS,\n loser=args.loser,\n )\n\n LOGGER.info(\"Optimal set of players is as follows:\")\n LOGGER.info(\"\\r\\n%s\", selection_optimum[\"schedule\"].head(25))\n\n LOGGER.info(\n \"The selection of these players results in %d points with %d black points\",\n selection_optimum[\"schedule\"][\"potency\"].sum(),\n selection_optimum[\"schedule\"][\"black\"].sum(),\n )\n LOGGER.info(\"Select your joker in this order:\")\n LOGGER.info(\n \"\\r\\n%s\",\n str(\n selection_optimum[\"schedule\"][selection_optimum[\"schedule\"][\"rounds\"] >= 4]\n .sort_values(by=[\"black\"], ascending=True)\n .head(5)\n ),\n )", "def main():\n user_input_of_coins() # can be used interactively just for fun, but use the test_suite to document your testing!\n i_steal_pennies_test_suite()", "def main(cls, args):\n theRepository = CloudGameRepository(\"games.ggp.org/base\")\n beginTime = System.currentTimeMillis()\n theGames = HashMap()\n for gameKey in theRepository.getGameKeys():\n theGames.put(gameKey, theRepository.getGame(gameKey))\n print \"Games: \" + len(theGames)\n endTime = System.currentTimeMillis()\n print \"Time: \" + (endTime - beginTime) + \"ms.\"", "def run_main():\n\n #Check for sold bikes\n checkSold(auto=True) #Change Auto to True to prevent user input\n\n #Find all available URLs split by Make & Model 
- Find Make\n print(\"Getting Makes...\")\n makes = getMakes()\n \n #Find all Models for each Make\n print(\"Getting Models...\")\n models = []\n for make in tqdm(makes, desc=\"Makes\"):\n models += getModels(make)\n\n\n #Find all URLs for each Model - Scrape bikes on each model\n errlog = \"\"\n print(\"Scraping Bikes...\")\n for model in tqdm(models, desc=\"Models\"):\n #Get urls for each model\n urlsTemp = getURLs(model)\n\n #Remove duplicates\n urlsTemp = removeDups(urlsTemp)\n\n #Remove listings already found\n urlsTemp = newURLs(\"motorcycles\", urlsTemp)\n\n #Get model description\n try:\n modelDesc = model.split(\"/\")[2].replace(\"model-\", \"\")\n except:\n modelDesc = \"Listings\"\n\n #Find motorbike details on all urls for this model\n #Split by model to prevent large datasets changing during code runtime\n for url in tqdm(urlsTemp, desc=modelDesc, leave=False):\n temp = Motorcycle(url)\n if not temp.na:\n temp.dbInsert()\n else:\n errlog += url + \"|\"\n \n #Finish\n if not errlog:\n print(\"Errors Found: \", errlog)\n if not printlog:\n print(printlog)\n \n print(\"Done!\")", "def run(self):\n pair = self.args.get('<pair>')\n\n self.validate(pair)\n market = self.client.returnTicker()\n self.inform(pair, market.get(pair))", "def main():\n args = get_parser().parse_args()\n players = prepare_game(\n decks_count=args.decks,\n auto_mode=args.auto_mode,\n player_one_name=args.name_player,\n players_count=args.players,\n )\n game(players=players)", "def main():\n args = parse_args()\n logging.basicConfig(level=getattr(logging, args.log.upper(), None),\n format=\"%(levelname)s, %(asctime)s, %(message)s\", filename=args.log_file)\n with open(args.file) as f:\n game_str = f.read()\n start = time.time()\n g = game.Game(game_str, args.trim)\n logging.debug(\"Reading the game took: {0} s\".format(time.time() - start))\n if args.elimination:\n g.IESDS()\n result = g.findEquilibria(args.method)\n if result is not None:\n if args.checkNE:\n success = g.checkNEs(result)\n else:\n success = True\n if success:\n print(g.printNE(result, payoff=args.payoff))\n else:\n sys.exit(\"Nash equilibria did not pass the test.\")\n else:\n sys.exit(\"Nash equilibrium was not found.\")", "def main():\n usage = \"usage: %prog [options] results\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) != 1:\n parser.print_help()\n return 2\n\n # do stuff\n print('md5\\tgameworked')\n with open(args[0]) as results:\n results.readline() # Ignore header\n for line in results:\n fields = line.split('\\t')\n fields[0] = fields[0].replace('.exe', '')\n countfields = filter(iscount, fields)\n if gameworked(countfields):\n print('%s\\tYES' % fields[0])\n else:\n print('%s\\tNO' % fields[0])", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def run():\n\n title_list = [\"* id of item\",\n \"* title\",\n \"* price\",\n \"* month of the sale\",\n \"* day of the sale\",\n \"* year of the sale\",\n \"* customer's id\"]\n\n # ! sign with a position is unfinished function but added in options\n # !8. Show the sale numbers of games for each customer-292\n # !11. Show the customer who spent the most and the amount spent-365\"\n # !12. Show the customer's id who spent the most and the amount spent-376\"\n # !13. Show the most frequent buyers-387\n # !14. 
Show the if of the most freuent buyers-\n\n options = [\"Print table\",\n \"Get game title by id\",\n \"Show the most recently sold game\",\n \"Get the sum of games' prices by their id\",\n \"Get the customer's id by the id of a game\",\n \"Show ids of all customers who purchased games\",\n \"Show sale ids of all customers\",\n \"Show the owner of a recently sold game\",\n \"Show the owner's id of a recently sold game\",\n \"Show the most frequent buyers\",\n \"Show the ids of the most frequent buyers\",\n \"Get the customer by id\"]\n\n os.system('clear')\n file = \"model/sales/sales.csv\"\n choice = None\n while choice != \"0\":\n os.system('clear')\n terminal_view.print_predator()\n terminal_view.print_menu(\"What do you want to do:\", options, \"Back to main menu\")\n choice = terminal_view.get_choice(options)\n\n if choice == \"1\":\n os.system(\"clear\")\n common.all_print_table(title_list, file)\n\n elif choice == \"2\":\n os.system(\"clear\")\n print(\"Get game title by id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n identification = common.get_input(\"Enter the id: \")\n print(sales.get_title_by_id_from_table(table, identification))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"3\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n most_recently_sold_game = sales.get_item_id_title_sold_last(table)\n print(\"The most recently sold game is: \")\n terminal_view.print_table([most_recently_sold_game], [\"* id\", \"* title\"])\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"4\":\n os.system(\"clear\")\n print(\"Get the sum of games' prices by their id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n item_ids = []\n x = True\n while x:\n add_id = common.get_input(\"Enter the id or 'x' to exit: \")\n if add_id == \"x\":\n x = False\n item_ids.append(add_id)\n print(sales.get_the_sum_of_prices_from_table(table, item_ids))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"5\":\n os.system(\"clear\")\n print(\"Get the customer's id by the id of a game\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n sale_id = common.get_input(\"Enter the id of a game: \")\n print(sales.get_customer_id_by_sale_id_from_table(table, sale_id))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"6\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n ids_of_all_customers = sales.get_all_customer_ids_from_table(table)\n print(\"ids of all customers who purchased games:\\n\", ids_of_all_customers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"7\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n sale_ids_of_all_customers = sales.get_all_sales_ids_for_customer_ids_form_table(table)\n print(\"Sale ids of all customers:\\n\\n\", sale_ids_of_all_customers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"8\":\n file_name_sales = common.get_double_file(\"Choose a file with sales: \")\n if file_name_sales == \"\":\n file_name_sales 
= file\n file_name_customer = common.get_double_file(\"Choose a file with customers: \")\n if file_name_customer == \"\":\n file_name_customer = \"model/crm/customers.csv\"\n table_from_customers = common.get_table_from_file(file_name_customer)\n table_from_sales = common.get_table_from_file(file_name_sales)\n last_buyer = sales.get_the_last_buyer_name(table_from_customers, table_from_sales)\n print(\"Owner of a recently sold game: \", last_buyer)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"9\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n last_buyer_id = sales.get_the_last_buyer_id(table)\n print(\"Owner's id of a recently sold game: \", last_buyer_id)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"10\":\n file_name_sales = common.get_double_file(\"Choose a file with sales: \")\n if file_name_sales == \"\":\n file_name_sales = file\n file_name_customer = common.get_double_file(\"Choose a file with customers: \")\n if file_name_customer == \"\":\n file_name_customer = \"model/crm/customers.csv\"\n table_from_customers = common.get_table_from_file(file_name_customer)\n table_from_sales = common.get_table_from_file(file_name_sales)\n the_most_frequent_buyers = sales.get_the_most_frequent_buyers_names(table_from_customers,\n table_from_sales,\n num=1)\n print(\"The most frequent buyers:\\n\", the_most_frequent_buyers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"11\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n the_most_frequent_buyers_ids = sales.get_the_most_frequent_buyers_ids(table, num=1)\n print(\"ids of the most frequent buyers:\\n\", the_most_frequent_buyers_ids)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"12\":\n os.system(\"clear\")\n print(\"Get the customer by id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = \"model/crm/customers.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, [\"* id\", \"* name\", \"* email\", \"* subscribed\"])\n identification = common.get_input(\"Enter the id: \")\n print(crm.get_name_by_id_from_table(table, identification))\n common.waiting()\n os.system(\"clear\")\n\n else:\n if choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\\n\")\n common.waiting()", "def main():\n\n p = optparse.OptionParser()\n p.add_option('--username', '-u', default=\"\")\n p.add_option('--password', '-p', default=\"\")\n p.add_option('--max-price', '-m', default=\"\")\n p.add_option('--url-auction', '-a', default=\"\")\n\n options, arguments = p.parse_args()\n\n if options.url_auction is '':\n p.error('Auction URL not given')\n\n if (options.username is '') or (options.password is ''):\n p.error('Username and/or password not given')\n\n if options.max_price is '':\n p.error('Maximum price not given')\n\n # parse options\n url_auction = str(options.url_auction)\n username = str(options.username)\n password = str(options.password)\n max_price = int(options.max_price)\n\n # amount of euro to be bid over the highest amount\n over_bid = 2\n # seconds before deadline when the highest current bid is evaluated\n offset_seconds = 0.75\n\n # make sure the selenium firefox docker is running:\n # docker run -it -p 4444:4444 selenium/standalone-firefox\n driver = webdriver.Remote(command_executor='http://0.0.0.0:4444/wd/hub',\n 
desired_capabilities=DesiredCapabilities.FIREFOX)\n\n # login to vakantieveilingen\n login(driver, username, password)\n\n # go to auction page\n driver.get(url_auction)\n time.sleep(2)\n\n # get all static html elements before countdown\n bid_input_elem = driver.find_element_by_xpath(\"//*[@class='bid-input']\")\n bid_button_elem = driver.find_element_by_xpath(\"//*[@class='btn btn-orange']\")\n\n # countdown until 0.75 seconds before the deadline\n # setting depends on bandwidth\n countdown(url_auction, offset_seconds)\n\n # get highest bid\n bid_elem = driver.find_element_by_xpath(\"//*[@class='jsBidAmountUpdate highest-bid']\")\n bid = int(bid_elem.text)\n print(\"highest bid: {}\".format(bid))\n\n # place higest bid + 2 EUR\n # to avoid losing from a +1 bidder in the last 0.75 seconds\n if int(bid) < max_price:\n bid_input_elem.send_keys(str(bid + over_bid))\n bid_button_elem.click()\n time.sleep(1)\n driver.save_screenshot(\"your_bid.png\")", "def main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-c\", \"--pairs_toml\", default=\"pairs.toml\", help=\"path to toml file containing asset pairs\")\n parser.add_argument(\"-u\", \"--horizon_host\", default=\"https://horizon.stellar.org\",\n help=\"horizon host, including scheme\")\n parser.add_argument(\"-t\", \"--time_duration\", type=int, default=86400000,\n help=\"time duration in millis, defaults to 24 hours\")\n parser.add_argument(\"-b\", \"--bucket_resolution\", type=int, default=60000,\n help=\"bucket resolution for aggregation in millis, defaults to 5 minutes\")\n parser.add_argument(\"-o\", \"--output_file\", default=\"ticker.json\", help=\"output file path\")\n args = parser.parse_args()\n\n config = toml.load(args.pairs_toml)\n now = millis()\n end_time = now - (now % args.bucket_resolution)\n aggregated_pairs = aggregate_pairs(args.horizon_host, config[\"pair\"], end_time - args.time_duration, end_time,\n args.bucket_resolution)\n pair_prices_dict = get_prices(args.horizon_host, config[\"pair\"])\n dump_aggregated_pairs(now, aggregated_pairs, pair_prices_dict, args.output_file)", "def main(args):\n app = Application()\n if args and args.markets:\n app.set_markets(args.markets)\n if args and args.symbols:\n app.set_symbols(args.symbols)\n app.print_message()\n\n if args and app.markets:\n file_path = './../'\n\n scrapper = scrapping.Scrapper(app.markets)\n scrapper.get_symbols(f\"{file_path}data/stocks.json\")\n\n if len(app.symbols) > 0:\n companies = {}\n for symbol in app.symbols:\n file_name = f\"{file_path}data/{symbol}_financials.json\"\n companies[symbol] =\\\n scrapper.get_fundamental_analysis(symbol,\n file_name)\n print(companies)\n analysis_companies = analysis.Analyze(companies, app.symbols)\n result = analysis_companies.calculate()\n print(result)\n\n logger.info(args)", "def main():\n splitted_file = convert_input_to_list()\n encyclopedia_of_pizza = parse_pizza_info(splitted_file)\n pizza_winner = choose_pizza(encyclopedia_of_pizza)\n print_winner(pizza_winner)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def main():\n args = get_args()\n SEED = args.seed\n playerhits = args.player_hits\n dealerhits = args.dealer_hits\n\n deck = 
[('♥2',2),('♥3',3),('♥4',4),('♥5',5),('♥6',6),('♥7',7),('♥8',8),('♥9',9),('♥10',10),('♥J',10),('♥Q',10),('♥K',10),('♥A',1),\n('♠2',2),('♠3',3),('♠4',4),('♠5',5),('♠6',6),('♠7',7),('♠8',8),('♠9',9),('♠10',10),('♠J',10),('♠Q',10),('♠K',10),('♠A',1),\n('♣2',2),('♣3',3),('♣4',4),('♣5',5),('♣6',6),('♣7',7),('♣8',8),('♣9',9),('♣10',10),('♣J',10),('♣Q',10),('♣K',10),('♣A',1),\n('♦2',2),('♦3',3),('♦4',4),('♦5',5),('♦6',6),('♦7',7),('♦8',8),('♦9',9),('♦10',10),('♦J',10),('♦Q',10),('♦K',10),('♦A',1)] \n\n deck.sort(key=lambda x: x[0])\n #print(deck)\n #print(\" \")\n random.seed(SEED)\n random.shuffle(deck)\n #print(deck)\n \n player_c1 = deck.pop()\n dealer_c1 = deck.pop()\n player_c2 = deck.pop()\n dealer_c2 = deck.pop()\n player_sum = player_c1[1] + player_c2[1]\n dealer_sum = dealer_c1[1] + dealer_c2[1]\n a = dealer_c1[0]\n b = player_c1[0]\n c = dealer_c2[0]\n d = player_c2[0] \n\n if playerhits == 1 and dealerhits == 0:\n player_c3 = deck.pop()\n e = player_c3[0]\n player_sum = player_c1[1] + player_c2[1] + player_c3[1]\n print(\"D [{:>2}]: {:<2} {:<2}\".format(dealer_sum, a, c))\n print(\"P [{:>2}]: {:<2} {:<2} {:<2}\".format(player_sum, b, d, e))\n elif dealerhits == 1 and playerhits == 0:\n dealer_c3 = deck.pop()\n f = dealer_c3[0]\n dealer_sum = dealer_c1[1] + dealer_c2[1] + dealer_c3[1]\n print(\"D [{:>2}]: {:<2} {:<2} {:<2}\".format(dealer_sum, a, c, f))\n print(\"P [{:>2}]: {:<2} {:<2}\".format(player_sum, b, d))\n elif playerhits == 0 and dealerhits == 0:\n print(\"D [{:>2}]: {:<2} {:<2}\".format(dealer_sum, a, c))\n print(\"P [{:>2}]: {:<2} {:<2}\".format(player_sum, b, d))\n elif playerhits == 1 and dealerhits == 1: \n player_c3 = deck.pop()\n dealer_c3 = deck.pop()\n f = dealer_c3[0]\n e = player_c3[0]\n player_sum = player_c1[1] + player_c2[1] + player_c3[1]\n dealer_sum = dealer_c1[1] + dealer_c2[1] + dealer_c3[1]\n print(\"D [{:>2}]: {:<2} {:<2} {:<2}\".format(dealer_sum, a, c, f)) \n print(\"P [{:>2}]: {:<2} {:<2} {:<2}\".format(player_sum, b, d, e))\n \n if dealer_sum > 21:\n print(\"Dealer busts.\")\n exit(0)\n if player_sum > 21:\n print(\"Player busts! You lose, loser!\")\n exit(0)\n if dealer_sum > 21 and player_sum > 21:\n print(\"Dealer busts.\\nPlayer busts! You lose, loser!\")\n exit(0)\n if dealer_sum == 21:\n print(\"Dealer wins!\")\n exit(0)\n if player_sum == 21:\n print(\"Player wins. 
You probably cheated.\")\n exit(0)\n if dealer_sum < 18:\n print(\"Dealer should hit.\")\n if player_sum < 18:\n print(\"Player should hit.\")", "def main():\n args = get_args().parse_args()\n\n (wins, losses) = args.record.split('-')\n print wins, losses\n \n is_winning_streak = args.streak[0] == 'W'\n streak_length = args.streak[1:]\n print is_winning_streak, streak_length\n\n (recent_wins, recent_losses) = args.last.split('-')\n print recent_wins, recent_losses\n\n power_ranking = calculate_power_ranking(int(wins), int(losses), is_winning_streak,\n int(streak_length), int(recent_wins),\n int(recent_losses))\n print power_ranking", "def process_cmd():\n web_scraper = SainsburyWebscraper()\n logger.info(\"Sainsbury web scraper initialized and loaded data from SainsburyWebscraper\")\n\n json_data = web_scraper.get_product_data()\n logger.info(\"Found %s products with the following data:\" % len(json_data[\"results\"]))\n print json.dumps(json_data, indent=4, sort_keys=True)", "def main():\n get_options()\n\n # string hash given to crack\n if args.hash_string:\n print(f\"\\nAttempting to crack hash '{args.hash_string}'...\\n\")\n result = crackhash.single(args.hash_type, args.hash_string, args.wordlist.name)\n\n print(\"Results\")\n print(f\"Comparisons: {result['checks']}\")\n print(f\"Time: {result['time']}\")\n if result[\"password\"]:\n print(f\"Password: '{result['password']}'\")\n else:\n print(f\"Password not found.\")\n\n # hash file given to crack\n elif args.hash_list:\n result = crackhash.multiple(\n args.hash_type, args.hash_list.name, args.wordlist.name\n )\n print(\"\\nResults\")\n print(f'Attempted: {result[\"attempted\"]}')\n print(f'Cracked: {result[\"cracked\"]}')\n print(f'Time: {result[\"time\"]}')\n print(f'Passwords: {result[\"results\"]}')", "def main():\n args = get_args()\n seed = args.seed\n player_hits = args.player_hits\n dealer_hits = args.dealer_hits\n\n random.seed(seed)\n #random.Random(seed_arg)\n\n suites = list('♥♠♣♦')\n\n # numbs = list(range(2,11))\n # face_cards = list('JQKA')\n #\n # numbs_cards = [str(s) for s in numbs]\n # numbs_cards += face_cards\n\n #create cards and values\n cards = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n\n #create dict of card values\n values = dict(zip(cards,values))\n\n #produce the deck of chards\n deck = list(product(suites, cards))\n\n #sort the deck of cards\n deck.sort()\n\n #shuffle deck of cards\n random.shuffle(deck)\n #print(deck)\n\n dealer = []\n player = []\n\n cards_dealt = 4\n\n while cards_dealt:\n player.append(deck.pop())\n cards_dealt -= 1\n\n dealer.append(deck.pop())\n cards_dealt -= 1\n\n if player_hits is True:\n player.append(deck.pop())\n\n if dealer_hits is True:\n dealer.append(deck.pop())\n\n player_sum = sum([values[t[1]] for t in player])\n\n dealer_sum = sum([values[t[1]] for t in dealer])\n\n dealer_string = ' '.join([''.join(x) for x in dealer])\n\n player_string = ' '.join([''.join(x) for x in player])\n\n #print out results:\n print('D [{:>2}]: {}'.format(dealer_sum, dealer_string ))\n print('P [{:>2}]: {}'.format(player_sum, player_string ))\n\n #Check if the player has more than 21; if so, print 'Player busts! You lose, loser!' and exit(0)\n if player_sum > 21:\n print('Player busts! You lose, loser!')\n sys.exit(0)\n\n #Check if the dealer has more than 21; if so, print 'Dealer busts.' 
and exit(0)\n if dealer_sum > 21:\n print('Dealer busts.')\n sys.exit(0)\n\n #Check if the player has exactly 21; if so, print 'Player wins. You probably cheated.' and exit(0)\n if player_sum == 21:\n print('Player wins. You probably cheated.')\n sys.exit(0)\n\n #Check if the dealer has exactly 21; if so, print 'Dealer wins!' and exit(0)\n if dealer_sum == 21:\n print('Dealer wins!')\n sys.exit(0)\n\n #check if dealer and player should hiting\n if dealer_sum < 18:\n print('Dealer should hit.')\n if player_sum < 18:\n print('Player should hit.')", "def cmd_calculation():", "def main():\n game = Game(TIMES, HARDNESS)\n game.start()\n game.print_score()", "def main():\n licensify(_parse_args())", "def execute(self):\n logging.info(\"Gathering scorecard data for [%s]\", str(self.package_url))\n\n source_repo = self.get_source_repository()\n if not source_repo:\n return\n\n token = self.get_api_token(\"github\")\n if not token:\n logging.warning(\"Unable to retrieve Github token.\")\n return\n\n try:\n result = subprocess.run(\n f'docker run --rm -it --env \"GITHUB_AUTH_TOKEN={token}\" docker.io/library/scorecard --repo={source_repo} --format json',\n shell=True,\n stdout=subprocess.PIPE,\n )\n scorecard_output = result.stdout.decode(\"utf-8\")\n scorecard_output = scorecard_output[scorecard_output.find(\"{\") :]\n js = json.loads(scorecard_output)\n\n payloads = []\n\n for check in js.get(\"Checks\", []):\n check_name = check.get(\"CheckName\", \"\").lower().strip()\n if not check_name:\n continue\n pass_value = str(check.get(\"Pass\", False)).lower()\n\n payload = {\n \"package_url\": str(self.package_url),\n \"operation\": \"replace\",\n \"key\": f\"openssf.scorecard.raw.{check_name}\",\n \"values\": [{\"value\": pass_value, \"properties\": check}],\n }\n payloads.append(payload)\n\n res = requests.post(self.METRIC_API_ENDPOINT, json=payloads, timeout=120)\n if res.status_code == 200:\n logging.info(\"Success: %s\", res.text)\n else:\n logging.warning(\"Failure: status code: %s\", res.status_code)\n\n except Exception as msg:\n logging.warn(\"Error processing Scorecard data: %s\", msg)\n raise", "def main():\n game_of_life(10, 20)", "def main():\n args = get_args()\n seed = args.seed\n player = args.player_hits\n dealer = args.dealer_hits\n\n if seed is not None:\n random.seed(seed)\n\n suites = ['♥','♠','♣','♦']\n ranks = ['2','3','4','5','6','7','8','9','10','J','Q','K','A']\n cards=list(product(suites,ranks))\n cards.sort()\n random.shuffle(cards)\n #print(cards)\n\n values = {'2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10, 'Q':10, 'K':10, 'A':1}\n cards.reverse()\n\n p1=cards.pop(0)\n d1=cards.pop(0)\n p2=cards.pop(0)\n d2=cards.pop(0)\n\n if player:\n p3=cards.pop(0)\n p_hits = values[p1[1]] + values[p2[1]] +values[p3[1]]\n p_cards = ''.join(p1) + ' ' + ''.join(p2) + ' ' + ''.join(p3)\n else:\n p_hits = values[p1[1]] + values[p2[1]]\n p_cards = ''.join(p1) + ' ' + ''.join(p2)\n\n if dealer:\n d3=cards.pop(0)\n d_hits = values[d1[1]] + values[d2[1]] +values[d3[1]]\n d_cards = ''.join(d1) + ' ' + ''.join(d2) + ' ' + ''.join(d3)\n else:\n d_hits = values[d1[1]] + values[d2[1]]\n d_cards = ''.join(d1) + ' ' + ''.join(d2)\n\n print('D [{:2}]: {}'.format(d_hits, d_cards))\n print('P [{:2}]: {}'.format(p_hits, p_cards))\n\n if p_hits >21:\n print('Player busts! You lose, loser!')\n exit(0)\n if d_hits >21:\n print('Dealer busts.')\n exit(0)\n if p_hits ==21:\n print('Player wins. 
You probably cheated.')\n exit(0)\n if d_hits ==21:\n print('Dealer wins!')\n exit(0)\n if d_hits < 18:\n print('Dealer should hit.')\n if p_hits < 18:\n print('Player should hit.')", "def get_price(item_code):\n output = \"Get price for item {}.\".format(item_code)\n print(output)\n return output", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. If run without the days back parameter it will default to 7 days.\")\n sys.exit()" ]
[ "0.60610706", "0.6005787", "0.59277415", "0.59141546", "0.5859248", "0.5801375", "0.57632864", "0.5726609", "0.57263625", "0.5714792", "0.5714792", "0.5712169", "0.5698735", "0.5679258", "0.5668227", "0.5641781", "0.56235695", "0.5609285", "0.5577383", "0.5566178", "0.5531977", "0.5517424", "0.55063456", "0.54947424", "0.5481021", "0.5463892", "0.5453161", "0.54508436", "0.54436815", "0.5442705" ]
0.78085846
0
Generate plotly-based stabilization plot from the output of find_stable_poles. Arguments
def stabplot(lambda_stab, orders_stab, freq_range=None, frequency_unit='rad/s', damped_freq=False,
             psd_freq=None, psd_y=None, psd_plot_scale='log', renderer='browser_legacy'):
    if damped_freq:
        dampedornot = 'd'
        omega = np.abs(np.imag(lambda_stab))
    else:
        dampedornot = 'n'
        omega = np.abs(lambda_stab)

    ix = np.arange(0, len(lambda_stab))  # index corresponding to

    if frequency_unit == 'rad/s':
        x = omega
        xlabel = f'$\omega_{dampedornot} \; [{frequency_unit}]$'
        tooltip_name = f'\omega_{dampedornot}'
        frequency_unit = 'rad/s'
    elif frequency_unit.lower() == 'hz':
        x = omega/(2*np.pi)
        xlabel = f'$f_{dampedornot} \; [{frequency_unit}]$'
        tooltip_name = f'f_{dampedornot}'
        frequency_unit = 'Hz'
    elif (frequency_unit.lower() == 's') or (frequency_unit.lower() == 'period'):
        x = (2*np.pi)/omega
        xlabel = f'Period, $T_{dampedornot} \; [{frequency_unit}]$'
        tooltip_name = f'T_{dampedornot}'
        frequency_unit = 's'

    # lambda_stab[np.abs(lambda_stab) == 0] = np.nan
    xi_stab = -np.real(lambda_stab)/np.abs(lambda_stab)

    # Rewrite xi as %, and make string
    text = [f'xi = {xi_i*100:.2f}% <br> ix = {ix}' for ix, xi_i in enumerate(xi_stab)]
    htemplate = f'{tooltip_name}' + ' = %{x:.2f} ' + f'{frequency_unit}<br>n =' + ' %{y}' + '<br> %{text}'

    stable_poles = pd.DataFrame({'freq': x, 'order': orders_stab})
    scatter_trace = go.Scatter(
        x=stable_poles['freq'], y=stable_poles['order'], mode='markers', name='',
        hovertemplate=htemplate, text=text,
        marker={'color': '#4682b4'}
    )

    overlay_trace = go.Scatter(x=psd_freq, y=psd_y, mode='lines', name='PSD',
                               hoverinfo='skip', line={'color': '#cd5c5c'})

    fig = make_subplots(specs=[[{"secondary_y": True}]])
    scatter_trace['name'] = 'Poles'
    fig.add_trace(scatter_trace, secondary_y=False)

    if psd_freq is not None:
        fig.add_trace(overlay_trace, secondary_y=True)
        fig.update_yaxes(title_text="PSD", secondary_y=True, type=psd_plot_scale)
        fig['layout']['yaxis2']['showgrid'] = False

    fig.layout['xaxis']['title'] = xlabel
    fig.layout['yaxis']['title'] = '$n$'

    if freq_range is not None:
        fig.layout['xaxis']['range'] = freq_range

    fig['layout']['yaxis']['range'] = [0.05, np.max(orders_stab)*1.1]

    if renderer is None:
        import plotly.io as pio
        renderer = pio.renderers.default

    if renderer == 'browser_legacy':
        from plotly.offline import plot
        plot(fig, include_mathjax='cdn')
    else:
        fig.show(renderer=renderer, include_mathjax='cdn')

    return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stabplot(lambd, orders, phi=None, model=None, freq_range=None, frequency_unit='rad/s', damped_freq=False, psd_freq=None, psd_y=None, psd_plot_scale='log', \n renderer=None, pole_settings=None, selected_pole_settings=None, to_clipboard='none', return_ix=False):\n\n \n\n\n # Treat input settings\n if pole_settings is None: pole_settings = {}\n if selected_pole_settings is None: selected_pole_settings = {}\n\n unsel_settings = {'color':'#a9a9a9', 'size':6, 'opacity':0.6}\n unsel_settings.update(**pole_settings)\n\n current_settings = listify_each_dict_entry(unsel_settings, len(lambd))\n\n sel_settings = {'color':'#cd5c5c', 'size':10, 'opacity':1.0, 'line': {'color': '#000000', 'width': 0}}\n sel_settings.update(**selected_pole_settings)\n\n select_status = np.zeros(len(lambd), dtype=bool)\n \n # Create suffix and frequency value depending on whether damped freq. is requested or not\n if damped_freq:\n dampedornot = 'd'\n omega = np.abs(np.imag(lambd))\n else:\n dampedornot = 'n'\n omega = np.abs(lambd)\n\n # Create frequency/period axis and corresponding labels\n if frequency_unit == 'rad/s':\n x = omega\n xlabel = f'$\\omega_{dampedornot} \\; [{frequency_unit}]$'\n tooltip_name = f'\\omega_{dampedornot}'\n frequency_unit = 'rad/s'\n elif frequency_unit.lower() == 'hz':\n x = omega/(2*np.pi)\n xlabel = f'$f_{dampedornot} \\; [{frequency_unit}]$'\n tooltip_name = f'f_{dampedornot}'\n frequency_unit = 'Hz'\n elif (frequency_unit.lower() == 's') or (frequency_unit.lower() == 'period'):\n x = (2*np.pi)/omega\n xlabel = f'Period, $T_{dampedornot} \\; [{frequency_unit}]$'\n tooltip_name = f'T_{dampedornot}'\n frequency_unit = 's'\n \n # Damping ratio and index to hover\n xi = -np.real(lambd)/np.abs(lambd)\n text = [f'xi = {xi_i*100:.2f}% <br> ix = {ix}' for ix, xi_i in enumerate(xi)] # rewrite xi as %, and make string\n htemplate = f'{tooltip_name}' + ' = %{x:.2f} ' + f'{frequency_unit}<br>n =' + ' %{y}' +'<br> %{text}'\n\n # Construct dataframe and create scatter trace \n poles = pd.DataFrame({'freq': x, 'order':orders})\n scatter_trace = go.Scattergl(\n x=poles['freq'], y=poles['order'], mode='markers', name='',\n hovertemplate = htemplate, text=text,\n marker=current_settings)\n \n scatter_trace['name'] = 'Poles'\n\n # PSD overlay trace\n overlay_trace = go.Scatter(x=psd_freq, y=psd_y, mode='lines', name='PSD', \n hoverinfo='skip', line={'color':'#ffdab9'})\n \n # Create figure object, add traces and adjust labels and axes\n # fig = go.FigureWidget(scatter_trace)\n fig = make_subplots(rows=2, cols=1, specs=[[{\"type\": \"xy\", \"secondary_y\": True}],\n [{\"type\": \"table\"}]])\n fig.layout.hovermode = 'closest'\n fig.add_trace(scatter_trace, secondary_y=False, row=1, col=1)\n \n if psd_freq is not None:\n fig.add_trace(overlay_trace, secondary_y=True, row=1, col=1)\n fig.update_yaxes(title_text=\"PSD\", secondary_y=True, type=psd_plot_scale)\n fig['layout']['yaxis2']['showgrid'] = False\n \n fig.layout['xaxis']['title'] = xlabel\n fig.layout['yaxis']['title'] = '$n$'\n\n if freq_range is not None:\n fig.layout['xaxis']['range'] = freq_range\n \n fig['layout']['yaxis']['range'] = [0.05, np.max(orders)*1.1]\n\n # Renderer (refer to plotly documentation for details)\n if renderer is 'default':\n import plotly.io as pio\n renderer = pio.renderers.default\n\n df = pd.DataFrame(columns=['ix','x','xi'])\n pd.options.display.float_format = '{:,.2f}'.format\n \n fig.add_trace(\n go.Table(header=\n dict(values=['Pole index', xlabel, r'$\\xi [\\%]$'],\n fill_color='paleturquoise',\n 
align='left'),\n cells=\n dict(values=[],fill_color='lavender',\n align='left')), row=2, col=1)\n \n fig.update_layout(\n height=1000,\n showlegend=False\n )\n \n\n fig = go.FigureWidget(fig) #convert to widget\n \n # Callback function for selection poles\n ix = np.arange(0, len(lambd))\n ix_sel = []\n \n def update_table():\n df = pd.DataFrame(data={'ix': ix[select_status], 'freq': x[select_status], 'xi':100*xi[select_status]})\n \n if len(fig.data)==2:\n sel_ix = 1\n else:\n sel_ix = 2\n \n fig.data[sel_ix].cells.values=[df.ix, df.freq, df.xi]\n \n \n def toggle_pole_selection(trace, clicked_point, selector):\n def export_df():\n df.to_clipboard(index=False)\n \n def export_ix_list():\n import pyperclip #requires pyperclip\n ix_str = '[' + ', '.join(str(i) for i in ix_sel) + ']'\n pyperclip.copy(ix_str)\n\n for i in clicked_point.point_inds:\n if select_status[i]:\n for key in current_settings:\n current_settings[key][i] = unsel_settings[key]\n else:\n for key in current_settings:\n current_settings[key][i] = sel_settings[key] \n \n select_status[i] = not select_status[i] #swap status\n\n with fig.batch_update():\n trace.marker = current_settings\n \n update_table()\n ix_sel = ix[select_status]\n\n if to_clipboard == 'ix':\n export_ix_list()\n elif to_clipboard == 'df':\n export_df()\n\n fig.data[0].on_click(toggle_pole_selection) \n\n if renderer == 'browser_legacy':\n from plotly.offline import plot\n plot(fig, include_mathjax='cdn')\n elif renderer is not None:\n fig.show(renderer=renderer, include_mathjax='cdn')\n \n \n if return_ix:\n return fig, ix_sel\n else:\n return fig", "def visualize_hidden_units(ax, rollouts, num_stable_trials=10):\n num_hidden_units = rollouts.states_hidden.shape[2] // 2\n\n # use the first half of episodes to fit pca\n X_fit = rollouts.states_hidden[:(rollouts.NTestEpisodes // 2), :, num_hidden_units:]\n X_reshaped = np.reshape(X_fit, (-1, X_fit.shape[2]))\n pca = PCA(n_components=3)\n pca.fit(X_reshaped)\n X_test = rollouts.states_hidden[(rollouts.NTestEpisodes // 2):, :, num_hidden_units:]\n X_test_reshaped = np.reshape(X_test, (-1, X_test.shape[2]))\n X_pj = pca.transform(X_test_reshaped)\n\n print('explained variance ratio is: ', pca.explained_variance_ratio_)\n print('{} variance explained by 3 components'.format(np.sum(pca.explained_variance_ratio_)))\n\n stable_trials = []\n # visualize the 10 trials before switches\n for epi in range(0, 1):\n switch_trials = rollouts.Switched[epi, :].nonzero()[0]\n for i in switch_trials:\n idx = i + epi * rollouts.NTrials\n if i >= num_stable_trials:\n stable_trials.extend(np.arange(idx - num_stable_trials, idx))\n\n stable_trials = np.unique(np.array(stable_trials))\n best_arm = rollouts.best_arm.flatten()\n color = [0 if x == 0 else 1 for x in best_arm.tolist()]\n color = np.array(color)\n\n # option 1: visualize the stable trials right before switching\n # ideally should see two blobs corresponding to the rewarding arm\n # vis = ax.scatter(X_pj[stable_trials, 0], X_pj[stable_trials, 1], c = color[stable_trials], cmap = 'seismic')\n\n # option 2: plot best_arm against X_pj[:,0]\n ax.plot(1 + np.arange(rollouts.NTrials), X_pj[:rollouts.NTrials, 0], 'red', label='1st PC')\n ax.plot(1 + np.arange(rollouts.NTrials), 2+X_pj[:rollouts.NTrials, 1], 'orange', label='2nd PC')\n ax.plot(1 + np.arange(rollouts.NTrials), 4+X_pj[:rollouts.NTrials, 2], 'yellow', label='3rd PC')\n ax.legend()\n vis = ax.plot(1 + np.arange(rollouts.NTrials), best_arm[:rollouts.NTrials], 'blue', label='rewarding arm')\n\n # visualize the states 
along the first 2 pca components\n # vis = ax.scatter(X_pj[:,0], X_pj[:,1], c=np.arange(0, 1, 1/rollouts.NTrials), cmap = 'copper')\n\n # calculate the correlation between projected axis and best_arm\n # best_arm is the only independent factor I can think of now...\n for i in range(X_pj.shape[1]):\n corrcoefs = []\n for epi in range(X_test.shape[0]):\n corrcoef = np.corrcoef(X_pj[epi*rollouts.NTrials:(epi+1)*rollouts.NTrials, i],\n rollouts.best_arm[epi + rollouts.NTestEpisodes//2])\n corrcoefs.append(corrcoef[0, 1])\n print('correlation between {}th pca component and best_arm is {.2f}'.format(i, np.mean(corrcoefs)))\n\n # calculate the correlation between raw axis and best_arm\n for i in range(X_test.shape[2]):\n corrcoefs = []\n for epi in range(X_test.shape[0]):\n corrcoef = np.corrcoef(X_test[epi, :, i], rollouts.best_arm[epi + rollouts.NTestEpisodes//2])\n corrcoefs.append(corrcoef[0, 1])\n print('correlation between {}th hidden unit and best_arm iss {.2f}'.format(i, np.mean(corrcoefs)))\n\n print('\\n')\n return vis", "def plot_tsnes():\n # Two environments (for main paper figure. All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n 
ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)", "def scatter(original, updated, main=\"\", save=None): \n #Remove hits with no improvement and calcate the number of hits with no\n #improvement(udated == original), positive imporvent (updated > original), \n #and negative improvment (updated < original)\n print len(original)\n positiveImprovement = []\n negativeImprovement = []\n noImprovement = 0\n for o, u in izip(original, updated):\n if int(o) == int(u):\n noImprovement +=1\n elif u > o:\n positiveImprovement.append((o,u))\n elif u < o:\n negativeImprovement.append((o,u))\n else:\n noImprovement +=1\n\n if not positiveImprovement:\n positiveImprovement = [()]\n if not negativeImprovement:\n negativeImprovement = [()]\n\n print positiveImprovement\n print negativeImprovement\n print noImprovement\n\n #Set deimensions\n x, y = zip(*positiveImprovement+negativeImprovement)\n xMax = int(round(sorted(x)[-1]/500.0)*500.0)\n yMax = int(round(sorted(y)[-1]/500.0)*500.0)\n sep = 500\n xticks = range(0, xMax, sep)\n yticks = range(0,yMax,sep)\n color_cycle = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors\n\n fig, ax = plt.subplots()\n ax.set_title(main)\n ax.set_xlabel(\"Original Bitscores\")\n ax.set_ylabel(\"Updated Bitscores\")\n\n \n #Plot postive improvement (green, automatically by prettyplotlib)\n if positiveImprovement:\n ppl.scatter(ax, *zip(*positiveImprovement), \n label=\"Positive Improvement ({} seqs)\".format(len(positiveImprovement)),\n color=color_cycle[0])\n\n #Draw no improvement line\n ppl.plot(ax, (0,xMax), (0,xMax), color='k', linestyle='-', linewidth=2,\n label=\"No Improvement ({} seqs)\".format(noImprovement))\n\n #Plot negative improvement (red, automatically by prettyplotlib)\n if negativeImprovement:\n ppl.scatter(ax, *zip(*negativeImprovement),\n label=\"Negative Improvement ({} seqs)\".format(len(negativeImprovement)),\n color=color_cycle[1])\n\n #Draw labels\n ppl.legend(ax)\n\n #Set axis\n ax.set_ylim([0,yMax])\n ax.set_xlim([0,xMax])\n\n if save is None:\n plt.show()\n else:\n pp = PdfPages(save)\n pp.savefig(fig)\n pp.close()", "def get_stability_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n _, _, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n episodes = np.arange(len(values))\n unstable_episodes = np.where(\n np.logical_and(values < y_lower[-1], episodes > first_episode))[0]\n\n ax.plot(episodes, values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n for i, episode in enumerate(unstable_episodes):\n ax.axvline(episode, color='salmon', lw=2,\n label='Unstable' if i == 0 else None)\n ax.axvline(first_episode, color='seagreen', lw=2, 
label='Converged')\n\n ax.set_title('Normalized instability = {:.3f}%'.format(\n self.get_normalized_instability() * 100.))\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig", "def internal_stability_plot(self,bounds=None,N=200,use_butcher=False,formula='lts',levels=[1,100,500,1000,1500,10000]):\n import nodepy.stability_function as stability_function\n import matplotlib.pyplot as plt\n from nodepy.utils import find_plot_bounds\n from matplotlib.colors import LogNorm\n\n p,q = self.stability_function(use_butcher=use_butcher,formula=formula)\n # Convert coefficients to floats for speed\n if p.coeffs.dtype=='object':\n p = np.poly1d([float(c) for c in p.coeffs])\n q = np.poly1d([float(c) for c in q.coeffs])\n\n stable = lambda z : np.abs(p(z)/q(z))<=1.0\n bounds = find_plot_bounds(stable,guess=(-10,1,-5,5))\n\n theta = self.internal_stability_polynomials(use_butcher=use_butcher,formula=formula)\n\n x=np.linspace(bounds[0],bounds[1],N)\n y=np.linspace(bounds[2],bounds[3],N)\n X=np.tile(x,(N,1))\n Y=np.tile(y[:,np.newaxis],(1,N))\n Z=X + Y * 1j\n\n th_vals = np.zeros((len(theta), N, N), dtype=np.complex64)\n\n for j in range(len(theta)):\n thetaj = np.poly1d([float(c) for c in theta[j].coeffs])\n th_vals[j,...] = thetaj(Z)\n th_max = np.max(np.abs(th_vals),axis=0)\n\n fig = plt.figure()\n CS = plt.contour(X,Y,th_max,colors='k',levels=levels)\n plt.clabel(CS, fmt='%d', colors='k')#,manual=True)\n\n p,q=self.__num__().stability_function(mode='float')\n stability_function.plot_stability_region(p,q,N,color='k',filled=False,bounds=bounds,\n fignum=fig.number)", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * 
[label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)", "def plot_ablation_losses():\n\n path1 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/original/losses00-54.pickle')\n file = open(path1, 'rb')\n losses_array_1 = pickle.load(file)\n losses_array_1 = losses_array_1[1:, :]\n\n path2 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/D_t/losses00-44.pickle')\n file = open(path2, 'rb')\n losses_array_2 = pickle.load(file)\n losses_array_2 = losses_array_2\n\n path3 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/D_x_y/losses00-44.pickle')\n file = open(path3, 'rb')\n losses_array_3 = pickle.load(file)\n losses_array_3 = losses_array_3\n\n path4 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/homog/losses00-76.pickle')\n file = open(path4, 'rb')\n losses_array_4 = pickle.load(file)\n losses_array_4 = losses_array_4[1:, :]\n\n paths = [path1, path2, path3, path4]\n\n\n\n\n # 'pdf', 'BC', 'pde', 'total', 'norm'\n fig = plt.figure(figsize = (2.1, 1.5))\n\n\n for path, losses_array, label in zip(paths, [losses_array_1, losses_array_2, losses_array_3, losses_array_4], ['D(x, y, t)', 'D(t)', 'D(x, y)', 'D']):\n\n print('shape', losses_array.shape)\n\n losses_base = os.path.basename(path)\n total_losses = losses_array[3, :] # total losses are idx 3 for these runs\n idxs = []\n means = []\n for idx in range(len(total_losses)-200):\n num_half_hrs = int(losses_base[6:8]) + idx*(int(losses_base[9:11])-int(losses_base[6:8]))/len(total_losses)\n if num_half_hrs < 55:\n idxs.append(num_half_hrs)\n means.append(np.mean(total_losses[idx:idx+200]))\n\n\n plt.scatter([i/2 for i in idxs], np.log10(means), s = 0.1, label = label)\n\n plt.ylabel(r'$log_{10}L_{total}$', fontsize = 6, labelpad = 1)\n plt.xlabel('Hours trained', fontsize = 6, labelpad = 1)\n plt.tick_params(axis = 'both', labelsize = 6)\n plt.tight_layout()\n plt.legend(fontsize = 6)\n\n\n\n plt.savefig(path_to_here+'/../outputs/ablation.png', dpi = 1200)", "def generateTrajectoryPlots(dir_path, traj_list, plot_name='scecliptic', plot_vg=True, plot_sol=True, \\\n plot_density=True, plot_showers=False):\n\n\n\n ### Plot Sun-centered geocentric ecliptic plots ###\n\n lambda_list = []\n beta_list = []\n vg_list = []\n sol_list = []\n\n shower_no_list = []\n shower_obj_dict = {}\n\n hypo_count = 0\n jd_min = np.inf\n jd_max = 0\n for traj in traj_list:\n\n # Reject all hyperbolic orbits\n if traj.orbit.e > 1:\n hypo_count += 1\n continue\n\n # Compute Sun-centered longitude\n lambda_list.append(traj.orbit.L_g - traj.orbit.la_sun)\n\n beta_list.append(traj.orbit.B_g)\n vg_list.append(traj.orbit.v_g/1000)\n sol_list.append(np.degrees(traj.orbit.la_sun))\n\n # Track first and last observation\n jd_min = min(jd_min, traj.jdt_ref)\n jd_max = max(jd_max, traj.jdt_ref)\n\n\n\n if plot_showers:\n\n # Perform shower association and track the list of all showers\n shower_obj = associateShowerTraj(traj)\n\n # If the trajectory was associated, sort it to the appropriate shower\n if shower_obj is not None:\n if shower_obj.IAU_no not in 
shower_no_list:\n shower_no_list.append(shower_obj.IAU_no)\n shower_obj_dict[shower_obj.IAU_no] = [shower_obj]\n else:\n shower_obj_dict[shower_obj.IAU_no].append(shower_obj)\n\n\n\n # Compute mean shower radiant for all associated showers\n shower_obj_list = []\n if plot_showers and shower_obj_dict:\n for shower_no in shower_obj_dict:\n\n # Check if there are enough shower members for plotting\n if len(shower_obj_dict[shower_no]) < MIN_SHOWER_MEMBERS:\n continue\n\n la_sun_mean = meanAngle([sh.la_sun for sh in shower_obj_dict[shower_no]])\n L_g_mean = meanAngle([sh.L_g for sh in shower_obj_dict[shower_no]])\n B_g_mean = np.mean([sh.B_g for sh in shower_obj_dict[shower_no]])\n v_g_mean = np.mean([sh.v_g for sh in shower_obj_dict[shower_no]])\n\n # Init a new shower object\n shower_obj_mean = MeteorShower(la_sun_mean, L_g_mean, B_g_mean, v_g_mean, shower_no)\n\n shower_obj_list.append(shower_obj_mean)\n\n\n\n print(\"Hyperbolic percentage: {:.2f}%\".format(100*hypo_count/len(traj_list)))\n\n # Compute the range of solar longitudes\n sol_min = np.degrees(jd2SolLonSteyaert(jd_min))\n sol_max = np.degrees(jd2SolLonSteyaert(jd_max))\n\n\n\n # Plot SCE vs Vg\n if plot_vg:\n plotSCE(lambda_list, beta_list, vg_list, (sol_min, sol_max), \n \"Sun-centered geocentric ecliptic coordinates\", \"$V_g$ (km/s)\", dir_path, plot_name + \"_vg.png\", \\\n shower_obj_list=shower_obj_list, plot_showers=plot_showers)\n\n\n # Plot SCE vs Sol\n if plot_sol:\n plotSCE(lambda_list, beta_list, sol_list, (sol_min, sol_max), \\\n \"Sun-centered geocentric ecliptic coordinates\", \"Solar longitude (deg)\", dir_path, \\\n plot_name + \"_sol.png\", shower_obj_list=shower_obj_list, plot_showers=plot_showers)\n \n\n \n # Plot SCE orbit density\n if plot_density:\n plotSCE(lambda_list, beta_list, None, (sol_min, sol_max), \n \"Sun-centered geocentric ecliptic coordinates\", \"Count\", dir_path, plot_name + \"_density.png\", \\\n density_plot=True, shower_obj_list=shower_obj_list, plot_showers=plot_showers)", "def nusselt_distrbution(layers_df, mesh_df, verbose=True, save=False, show=True):\n console.event(\"Constructing Nusselt distribution plot...\", verbose=verbose)\n\n t = time.time()\n font = {'size': 10}\n mpl.rc('font', **font)\n\n\n objects = mesh_df['object'].tolist()\n coordinates_full = mesh_df['coords'].tolist()\n conductivities = mesh_df['conductivity'].tolist()\n dT_dts = mesh_df['dT_dt'].tolist()\n coordinates = []\n nusselt_nos = []\n coordinates_min_z = layers_df['min_z'].tolist()\n nusselt_nos_list = layers_df['nusselt'].tolist()\n for index, coord in enumerate(coordinates_min_z):\n coordinates.append(coord)\n nusselt_nos.append(nusselt_nos_list[index])\n\n fig1 = plt.figure(figsize=(8.0, 5.0)) # depth vs nusselt number, depth vs heat flux\n\n ax1 = fig1.add_subplot(111)\n ax1.plot(coordinates, nusselt_nos, color='b', linewidth=2, linestyle='-')\n ax1.scatter(coordinates, nusselt_nos, color='b')\n ax1.set_xlabel(\"Depth (m)\")\n ax1.set_ylabel(\"Nusselt Number\")\n ax1.tick_params('y', colors='b')\n\n ax2 = ax1.twinx()\n ax2.plot(coordinates_full, dT_dts, color='r', linewidth=1.4, linestyle='--')\n ax2.set_ylabel(\"Heat Flux (degK/s)\")\n ax2.tick_params('y', colors='r')\n\n fig2 = plt.figure(figsize=(8.0, 5.0)) # depth vs nusselt number, depth vs thermal conductivity\n\n ax3 = fig2.add_subplot(111)\n ax3.plot(coordinates, nusselt_nos, color='b', linewidth=2, linestyle='-')\n ax3.scatter(coordinates, nusselt_nos, color='b')\n ax3.set_xlabel(\"Depth (m)\")\n ax3.set_ylabel(\"Nusselt Number\")\n 
ax3.tick_params('y', colors='b')\n\n ax4 = ax3.twinx()\n ax4.plot(coordinates_full, conductivities, color='m', linewidth=1.4, linestyle='--')\n ax4.set_ylabel(\"Thermal Conductivity\")\n ax4.tick_params('y', colors='m')\n\n object_dict = {}\n for index, object in enumerate(objects):\n if object.lower() != 'boundary':\n if object not in object_dict.keys():\n object_dict.update({object: [coordinates_full[index]]})\n else:\n object_dict[object].append(coordinates_full[index])\n for object in object_dict.keys():\n min_coord = min(object_dict[object])\n max_coord = max(object_dict[object])\n color = np.random.rand(3, )\n ax1.axvspan(xmin=min_coord, xmax=max_coord, color=color, alpha=0.2, label=str(object))\n ax3.axvspan(xmin=min_coord, xmax=max_coord, color=color, alpha=0.2, label=str(object))\n\n ax1.set_title(\"Nusselt No. Distribution Over Depth\")\n ax1.grid()\n ax1.legend(loc='lower left')\n ax3.set_title(\"Nusselt No. Distribution Over Depth\")\n ax3.grid()\n ax3.legend(loc='lower left')\n\n console.event(\"Finished constructing Nusselt distribution plot! (task took {}s)\".format(\n time.time() - t), verbose=verbose)\n\n if show is True:\n plt.show()\n if save is True:\n fig1.tight_layout()\n fig2.tight_layout()\n fig1_name = \"nusselt_distrib_fig1.png\"\n fig2_name = \"nusselt_distrib_fig2.png\"\n if fig1_name in os.listdir(os.getcwd()):\n os.remove(fig1_name)\n if fig2_name in os.listdir(os.getcwd()):\n os.remove(fig2_name)\n fig1.savefig(fig1_name, format='png')\n fig2.savefig(fig2_name, format='png')", "def visualize(houses:pd.DataFrame) -> None:\n #price_distribution(houses)\n #prop_types(houses)\n #zip_code(houses)\n #year_built(houses)\n #bed_bath(houses)\n return", "def plot_diffuser_balls_nv():\n import holoviews as hv\n cryostat_r = straxen.cryostat_outer_radius\n depth_stiffening_ring = 8\n r = cryostat_r + depth_stiffening_ring\n angles = np.array([-8, 90, -90, 180])\n x = r * np.cos(angles / 180 * np.pi)\n y = r * np.sin(angles / 180 * np.pi)\n db_ids = (18, 17, 11, 15)\n data = pd.DataFrame(np.array([x, y, db_ids]).T, columns=('x', 'y', 'id'))\n return hv.Points(data=data).opts(size=8, tools=['hover'])", "def plateSeparatedPerformance():\n model_perfs = pickle.load(open(\"pickles/separatePlateTestModelPerformances.pkl\", \"rb\"))\n model_stds = pickle.load(open(\"pickles/separatePlateTestModelStds.pkl\", \"rb\"))\n null_YFP_performances = pickle.load(open(\"pickles/separatePlateTestYFPPerformances.pkl\", \"rb\"))\n null_YFP_stds = pickle.load(open(\"pickles/separatePlateTestYFPStds.pkl\", \"rb\"))\n null_DAPI_performances = pickle.load(open(\"pickles/separatePlateTestDAPIPerformances.pkl\", \"rb\"))\n null_DAPI_stds = pickle.load(open(\"pickles/separatePlateTestDAPIStds.pkl\", \"rb\"))\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"DRW1\", \"DRW2\", \"DRW3\", \"DRW4\", \"DRW5\", \"DRW6\"]\n x = np.array([1, 2, 3, 4, 5, 6])\n width = .26\n rects = ax.bar(x, model_perfs, width, yerr=model_stds, capsize=3, error_kw=dict(lw=.2, capsize=1, capthick=1), color=\"red\", label=\"ML Model\", zorder=3)\n rects2 = ax.bar(x + width, null_YFP_performances, width, yerr=null_YFP_stds, capsize=3, error_kw=dict(lw=.2, capsize=1, capthick=1), color=\"gold\",label=\"Null YFP Model\", zorder=3)\n rects3 = ax.bar(x+ 2*width, null_DAPI_performances, width, yerr=null_DAPI_stds, capsize=3, error_kw=dict(lw=.2, capsize=1, capthick=1), color=\"blue\", label=\"Null DAPI Model\", zorder=3)\n autolabel(rects, ax, fontsize=8)\n autolabel(rects2, ax, fontsize=8)\n autolabel(rects3, ax, fontsize=8)\n 
plt.title(\"Pearson Performance by Drug Perturbation\",fontname=\"Times New Roman\", fontsize=14, y=1.0)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_xlabel(\"Drug\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(7))\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width, box.height * 0.85])\n ax.legend(loc='upper right', prop={\"family\":\"Times New Roman\", \"size\":10}, bbox_to_anchor=(1, 1.32))\n plt.gcf().subplots_adjust(top=.76)\n plt.savefig(\"matplotlib_figures/separatedPlatesPerformance.png\", dpi=300)", "def plot_evaluation(parameters_dict, log_df, settings, evaluation_set_kde, plotname):\n\n\n plots = []\n\n\n ### setup the colors for each component\n if int(settings['nr_components']) < 3:\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)']\n elif int(settings['nr_components']) < 13:\n colors = np.array(cl.scales[str(settings['nr_components'])]['qual']['Paired'])\n else:\n colors = cl.interp(cl.scales['10']['qual']['Paired'], 20)\n\n\n ### set up ab list\n ab_list = evaluation_set_kde['contact'].keys()\n\n\n\n\n ####################### plotting of settings\n print_to_table = {}\n for key in sorted(settings.keys()):\n if key not in ['fold_id_dir','plot_name', 'fixed_parameters', 'threads_proteins', 'qijab_dir',\n 'debug_mode', 'parameter_file', 'settings_file', 'optimization_log_file', 'braw_dir', 'pdb_dir', 'paramdir',\n 'mask_sse', 'lambda_w_fix', 'lfactor', 'plotdir', 'psicov_dir', 'contact', 'hessian_pseudocount']:\n print_to_table[key] = settings[key]\n\n print(\"Generate settings table...\")\n table_settings_1 = plot_settings_table(print_to_table, 1)\n table_settings_2 = plot_settings_table(print_to_table, 2)\n table_settings_3 = plot_settings_table(print_to_table, 3)\n plots.append(table_settings_1)\n plots.append(table_settings_2)\n plots.append(table_settings_3)\n\n\n ####################### negLL and realted plots\n if 'step' in log_df.columns and 'pass' in log_df.columns:\n\n if 'negLL' in log_df.columns:\n plot_negll = plot_convergence_trace_plotly(log_df,\n name=['negLL', 'negLL_crossval'],\n plot_title='neg LL trace for training and cross-val set')\n plots.append(plot_negll)\n\n plot_expfit_negll = plot_exponentialFit_negLL(log_df, plot_title='exponential Fit neg LL')\n plots.append(plot_expfit_negll)\n\n if 'timestamp' in log_df.columns:\n plot_timestamps = plot_convergence_trace_plotly(log_df,\n name=['timestamp'],\n plot_title='time (s) per iteration')\n plots.append(plot_timestamps)\n\n\n if 'gradient_norm_weights' in log_df.columns:\n plot_grad_norm_weights = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_weights'],\n plot_title='norm of weight gradients')\n plots.append(plot_grad_norm_weights)\n\n if 'gradient_norm_means' in log_df.columns:\n plot_grad_norm_means = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_means'],\n plot_title='norm of mean gradients')\n plots.append(plot_grad_norm_means)\n\n if 'gradient_norm_prec' in log_df.columns:\n plot_grad_norm_prec = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_prec'],\n plot_title='norm of precMat gradients')\n plots.append(plot_grad_norm_prec)\n\n\n ####################### plotting of parameters\n print(\"Generate 
distribution of parameters...\")\n\n #weights\n weights_dict = {}\n for component in range(settings['nr_components']):\n weights_dict['component ' + str(component)] = {\n 'weights (contact)': parameters_dict[\"weight_contact_\" + str(component)][0],\n 'weights (bg)': parameters_dict[\"weight_bg_\" + str(component)][0]\n }\n plot_weights = plot_barplot(\n weights_dict,\n 'Distribution of weights',\n 'component weights',\n type='group',\n colors=colors\n #,plot_out=\"/home/vorberg/weights.html\"\n )\n\n #mu\n mu_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'mu' in k))\n plot_means = plot_boxplot(\n mu_df,\n 'Distribution of Means',\n \"values of mean parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/mus.html\"\n )\n\n #std deviation\n prec_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'prec' in k))\n try:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/p))\n if settings['prec_wrt_L']:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/(p*142))) #in case precision is specified depending on L=142\n except ZeroDivisionError as e:\n print(e)\n std_dev=prec_df\n\n std_dev.columns = [column_name.replace(\"prec\", \"std\") for column_name in std_dev.columns]\n plot_stddev = plot_boxplot(\n std_dev,\n 'Distribution of std deviations',\n \"values of std deviation parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/std.html\"\n )\n\n\n plots.append(plot_weights)\n plots.append(plot_means)\n plots.append(plot_stddev)\n\n ####################### Scatterplot mu vs std dev\n print(\"Generate scatter plot mu vs std...\")\n scatter_dict = {}\n for component in range(settings['nr_components']):\n scatter_dict['mu_'+str(component)] = [\n mu_df['mu_'+str(component)].tolist(),\n std_dev['std_'+str(component)].tolist(),\n AB.values()\n ]\n plot_mu_vs_stddev = plot_scatter(scatter_dict,\n 'Mean vs std deviation',\n 'mean',\n \"std deviation\",\n False,\n colors\n #,plot_out=\"/home/vorberg/mu_vs_std.html\"\n )\n\n plots.append(plot_mu_vs_stddev)\n\n\n ############################################## plotting of gradient norms\n print(\"Generate gradient norms plot...\")\n\n #gradients for mu\n mu_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'mu_'+str(component)\n mu_grad_dict[key] = log_df[key].tolist()[-1]\n annotations_dict[key] = AB\n\n\n plot_gradient_mu_stats = jitter_plot(mu_grad_dict,\n 'Distribution of gradients for mean in last iteration',\n annotations_dict,\n colors,\n None)\n plots.append(plot_gradient_mu_stats)\n\n\n #gradients for precMat\n precMat_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'prec_'+str(component)\n precMat_grad_dict['diagPrecMat_'+str(component)] = log_df[key].tolist()[-1]\n annotations_dict['diagPrecMat_'+str(component)] = AB\n\n\n plot_gradient_precMat_stats = jitter_plot(\n precMat_grad_dict,\n 'Distribution of gradients for precMat in last iteration',\n annotations_dict,\n colors,\n None\n )\n plots.append(plot_gradient_precMat_stats)\n\n ##################################### plotting of gradient trace of a specific ab pair for all components\n print(\"Generate gradient trace plot...\")\n\n gradient_df = log_df.filter(regex=(\"mu_[0-9]*\"))\n plot_gradient_mu_ab_trace = plot_gradient_ab_trace(gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_mu_ab_trace)\n\n gradient_df = log_df.filter(regex=(\"prec_[0-9]*\"))\n 
plot_gradient_prec_ab_trace = plot_gradient_ab_trace(\n gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_prec_ab_trace)\n\n\n ##################################### plotting of univariate mixtures\n if len(evaluation_set_kde['contact']) == 0 or len(evaluation_set_kde['bg']) == 0:\n print \"Evaluation set is empty. Cannot plot Mixture Visualization.\"\n else:\n print(\"Generate parameter visualization 1d plots...\")\n plots.append(plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L']))\n # plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L'], plot_out=\"/home/vorberg/1d_vis.html\")\n\n # ------------------------------------------------------------------------------\n ### define merged plot\n # ------------------------------------------------------------------------------\n cols = 3.0\n rows = int(np.ceil((len(plots)-1) / cols)) + 2\n subplot_titles = []\n\n # set up titles\n for plot in range(len(plots)-1):\n subplot_titles.append(plots[plot]['layout']['title'])\n if len(subplot_titles) < (cols * (rows-2)):\n for i in range(int((cols * (rows-2))) - len(subplot_titles) ):\n subplot_titles.append(\" \")\n subplot_titles.append(plots[-1]['layout']['title'])\n\n\n # plot all plots as subplots\n fig = tools.make_subplots(rows=rows,\n cols=3,\n specs = [ [{} for col in range(int(cols))] for row in range(rows-2)] + \\\n [[{'rowspan':2, 'colspan': 3}, None, None], [None, None, None]],\n subplot_titles=tuple(subplot_titles),\n print_grid=False)\n\n\n\n\n for i, plot in enumerate(plots[:-1]):\n col = i % int(cols)\n row = (i - col) / int(cols)\n\n #add traces to subplot\n for trace in plot['data']:\n trace['showlegend']=False\n fig.append_trace(trace, row + 1, col + 1)\n\n # adjust x and y axis for table plotting\n if 'annotations' in plot['layout'].keys():\n for cell in plot['layout']['annotations']:\n cell['yref'] = 'y' + str(i + 1)\n cell['xref'] = 'x' + str(i + 1)\n fig['layout']['annotations'] += plot['layout']['annotations']\n\n # adjust axis for all plots\n fig['layout']['xaxis' + str(i + 1)].update(plot['layout']['xaxis1'])\n fig['layout']['yaxis' + str(i + 1)].update(plot['layout']['yaxis1'])\n\n ## add mixture visualisation plot - spans 3 columns\n for trace in plots[-1]['data']:\n fig.append_trace(trace, int(rows)-1, 1)\n fig['layout']['xaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['xaxis1'])\n fig['layout']['yaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['yaxis1'])\n\n #check which plots are visible/invisible according to menu selection\n trace_visibility_ab = {}\n for ab in range(len(ab_list)):\n trace_visibility_ab[ab] = []\n for i, plot in enumerate(plots):\n if 'updatemenus' not in plot['layout'].keys():\n trace_visibility_ab[ab].extend([True] * len(plot['data']))\n else:\n trace_visibility_ab[ab].extend(plot['layout']['updatemenus'][0]['buttons'][ab]['args'][1])\n\n\n #use menu of last plot (=vis of mixture) as template for multiplot menu\n fig['layout']['updatemenus'] = plots[-1]['layout']['updatemenus']\n for ab in range(len(ab_list)):\n fig['layout']['updatemenus'][0]['buttons'][ab]['args'][1] = trace_visibility_ab[ab]\n\n\n fig['layout']['legend']['yanchor'] = 'bottom'\n fig['layout']['legend']['y'] = 0\n fig['layout']['height'] = rows * 250\n fig['layout']['font'] = {'size': 18} # set global font size\n\n plotly_plot(fig, filename=plotname, auto_open=False)", "def plot_for_scaling_check(bolo_name):\n\n\n 
pop_path = \"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\"\n\n #Load the estimator\n d_est = BDT_fh.open_estimator_file(bolo_name)\n\n #Best estimator for heat: coefficients\n coeff_EC1, coeff_EC2 = float(d_est[\"HEAT\"][:5]), 1 - float(d_est[\"HEAT\"][:5])\n coeff_EIB, coeff_EID = float(d_est[\"FID\"][:5]), 1-float(d_est[\"FID\"][:5])\n\n #Open event files\n data_types = {\"names\": (\"EC1\", \"EC2\", \"EIA\", \"EIB\", \"EIC\", \"EID\"), \"formats\": (\"f\", \"f\", \"f\", \"f\", \"f\", \"f\")}\n\n arr_heatonly = np.loadtxt(pop_path + bolo_name + \"_heatonly_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_all = np.loadtxt(pop_path + bolo_name + \"_all_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_FidGamma = np.loadtxt(pop_path + bolo_name + \"_FidGamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Gamma = np.loadtxt(pop_path + bolo_name + \"_S1Gamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Gamma = np.loadtxt(pop_path + bolo_name + \"_S2Gamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Beta = np.loadtxt(pop_path + bolo_name + \"_S1Beta_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Beta = np.loadtxt(pop_path + bolo_name + \"_S2Beta_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Pb = np.loadtxt(pop_path + bolo_name + \"_S1Pb_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Pb = np.loadtxt(pop_path + bolo_name + \"_S2Pb_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n\n arr_EI_heatonly, arr_EI_all = coeff_EIB*arr_heatonly[\"EIB\"] + coeff_EID*arr_heatonly[\"EID\"], coeff_EIB*arr_all[\"EIB\"] + coeff_EID*arr_all[\"EID\"]\n arr_EC_heatonly, arr_EC_all = coeff_EC1*arr_heatonly[\"EC1\"] + coeff_EC2*arr_heatonly[\"EC2\"], coeff_EC1*arr_all[\"EC1\"] + coeff_EC2*arr_all[\"EC2\"]\n arr_EI_FidGamma, arr_EC_FidGamma = coeff_EIB*arr_FidGamma[\"EIB\"] + coeff_EID*arr_FidGamma[\"EID\"], coeff_EC1*arr_FidGamma[\"EC1\"] + coeff_EC2*arr_FidGamma[\"EC2\"]\n arr_EI_S1Gamma, arr_EI_S2Gamma = coeff_EIB*arr_S1Gamma[\"EIB\"] + coeff_EID*arr_S1Gamma[\"EID\"], coeff_EIB*arr_S2Gamma[\"EIB\"] + coeff_EID*arr_S2Gamma[\"EID\"]\n arr_EC_S1Gamma, arr_EC_S2Gamma = coeff_EC1*arr_S1Gamma[\"EC1\"] + coeff_EC2*arr_S1Gamma[\"EC2\"], coeff_EC1*arr_S2Gamma[\"EC1\"] + coeff_EC2*arr_S2Gamma[\"EC2\"]\n arr_EI_S1Beta, arr_EI_S2Beta = coeff_EIB*arr_S1Beta[\"EIB\"] + coeff_EID*arr_S1Beta[\"EID\"], coeff_EIB*arr_S2Beta[\"EIB\"] + coeff_EID*arr_S2Beta[\"EID\"]\n arr_EC_S1Beta, arr_EC_S2Beta = coeff_EC1*arr_S1Beta[\"EC1\"] + coeff_EC2*arr_S1Beta[\"EC2\"], coeff_EC1*arr_S2Beta[\"EC1\"] + coeff_EC2*arr_S2Beta[\"EC2\"]\n arr_EI_S1Pb, arr_EI_S2Pb = coeff_EIB*arr_S1Pb[\"EIB\"] + coeff_EID*arr_S1Pb[\"EID\"], coeff_EIB*arr_S2Pb[\"EIB\"] + coeff_EID*arr_S2Pb[\"EID\"]\n arr_EC_S1Pb, arr_EC_S2Pb = coeff_EC1*arr_S1Pb[\"EC1\"] + coeff_EC2*arr_S1Pb[\"EC2\"], coeff_EC1*arr_S2Pb[\"EC1\"] + coeff_EC2*arr_S2Pb[\"EC2\"]\n\n lS1Beta, lS2Beta, lS1Pb, lS2Pb = np.where(arr_EC_S1Beta<15), np.where(arr_EC_S2Beta<15), np.where(arr_EC_S1Pb<15), np.where(arr_EC_S2Pb<15)\n lS1Gamma, lS2Gamma, lFidGamma = np.where(arr_EC_S1Gamma<15), np.where(arr_EC_S2Gamma<15), np.where(arr_EC_FidGamma<15)\n lheatonly, lall = np.where(arr_EC_heatonly<15), np.where(arr_EC_all<15)\n\n arr_EI_heatonly, arr_EC_heatonly = arr_EI_heatonly[lheatonly], arr_EC_heatonly[lheatonly]\n arr_EI_all, arr_EC_all = arr_EI_all[lall], arr_EC_all[lall]\n arr_EI_FidGamma, arr_EC_FidGamma = 
arr_EI_FidGamma[lFidGamma], arr_EC_FidGamma[lFidGamma]\n arr_EI_S1Gamma, arr_EC_S1Gamma = arr_EI_S1Gamma[lS1Gamma], arr_EC_S1Gamma[lS1Gamma]\n arr_EI_S2Gamma, arr_EC_S2Gamma = arr_EI_S2Gamma[lS2Gamma], arr_EC_S2Gamma[lS2Gamma]\n arr_EI_S1Beta, arr_EC_S1Beta = arr_EI_S1Beta[lS1Beta], arr_EC_S1Beta[lS1Beta]\n arr_EI_S2Beta, arr_EC_S2Beta = arr_EI_S2Beta[lS2Beta], arr_EC_S2Beta[lS2Beta]\n arr_EI_S1Pb, arr_EC_S1Pb = arr_EI_S1Pb[lS1Pb], arr_EC_S1Pb[lS1Pb]\n arr_EI_S2Pb, arr_EC_S2Pb = arr_EI_S2Pb[lS2Pb], arr_EC_S2Pb[lS2Pb]\n\n arr_EI_all, arr_EC_all = np.array(arr_EI_all).astype(float), np.array(arr_EC_all).astype(float)\n arr_EI_heatonly, arr_EC_heatonly = np.array(arr_EI_heatonly).astype(float), np.array(arr_EC_heatonly).astype(float)\n arr_EI_FidGamma, arr_EC_FidGamma = np.array(arr_EI_FidGamma).astype(float), np.array(arr_EC_FidGamma).astype(float)\n arr_EI_S1Gamma, arr_EC_S1Gamma = np.array(arr_EI_S1Gamma).astype(float), np.array(arr_EC_S1Gamma).astype(float)\n arr_EI_S2Gamma, arr_EC_S2Gamma = np.array(arr_EI_S2Gamma).astype(float), np.array(arr_EC_S2Gamma).astype(float) \n arr_EI_S1Beta, arr_EC_S1Beta = np.array(arr_EI_S1Beta).astype(float), np.array(arr_EC_S1Beta).astype(float)\n arr_EI_S2Beta, arr_EC_S2Beta = np.array(arr_EI_S2Beta).astype(float), np.array(arr_EC_S2Beta).astype(float)\n arr_EI_S1Pb, arr_EC_S1Pb = np.array(arr_EI_S1Pb).astype(float), np.array(arr_EC_S1Pb).astype(float)\n arr_EI_S2Pb, arr_EC_S2Pb = np.array(arr_EI_S2Pb).astype(float), np.array(arr_EC_S2Pb).astype(float)\n\n\n gr_heatonly = TGraph(len(arr_EI_heatonly), arr_EC_heatonly, arr_EI_heatonly)\n gr_FidGamma, gr_all = TGraph(len(arr_EI_FidGamma), arr_EC_FidGamma, arr_EI_FidGamma), TGraph(len(arr_EI_all), arr_EC_all, arr_EI_all)\n gr_S1Gamma, gr_S2Gamma = TGraph(len(arr_EI_S1Gamma), arr_EC_S1Gamma, arr_EI_S1Gamma), TGraph(len(arr_EI_S2Gamma), arr_EC_S2Gamma, arr_EI_S2Gamma)\n gr_S1Beta, gr_S2Beta = TGraph(len(arr_EI_S1Beta), arr_EC_S1Beta, arr_EI_S1Beta), TGraph(len(arr_EI_S2Beta), arr_EC_S2Beta, arr_EI_S2Beta)\n gr_S1Pb, gr_S2Pb = TGraph(len(arr_EI_S1Pb), arr_EC_S1Pb, arr_EI_S1Pb), TGraph(len(arr_EI_S2Pb), arr_EC_S2Pb, arr_EI_S2Pb)\n\n PyRPl.process_TGraph(gr_all, X_title = \"Heat\", Y_title = \"Ion\", color=kRed, marker_style = 20, marker_size = 0.1)\n PyRPl.process_TGraph(gr_FidGamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_heatonly, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Gamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_S2Gamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Beta, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_S2Beta, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Pb, X_title = \"Heat\", Y_title = \"Ion\", color=kRed), PyRPl.process_TGraph(gr_S2Pb, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n\n list_gr = [gr_all, gr_FidGamma, gr_S1Gamma, gr_S2Gamma, gr_S1Beta, gr_S2Beta, gr_S1Pb, gr_S2Pb, gr_heatonly]\n list_pts = [gr.GetN() for gr in list_gr[1:]]\n print gr_all.GetN(), sum(list_pts)\n h = TH2F(\"h\", \"h\", 100, -5, 15, 100, -5, 15)\n PyRPl.process_TH2(h, X_title = \"Heat\", Y_title = \"Ion\")\n h.Draw()\n for gr in list_gr:\n gr.Draw(\"*same\")\n\n raw_input()\n\n # arr_Q_S1Beta = arr_EI_S1Beta/((1+8./3)*arr_EC_S1Beta - arr_EI_S1Beta*5.5/3)\n # arr_Q_S2Beta = arr_EI_S1Beta/((1+8./3)*arr_EC_S1Beta - arr_EI_S1Beta*5.5/3)\n # arr_Q_S1Pb = arr_EI_S1Pb/((1+8./3)*arr_EC_S1Pb - 
arr_EI_S1Pb*5.5/3)\n # arr_Q_S2Pb = arr_EI_S2Pb/((1+8./3)*arr_EC_S2Pb - arr_EI_S2Pb*5.5/3)\n \n # gr_QS1Beta, gr_QS2Beta = TGraph(len(arr_Q_S1Beta), arr_EC_S1Beta, arr_Q_S1Beta), TGraph(len(arr_Q_S2Beta), arr_EC_S2Beta, arr_Q_S2Beta)\n # gr_QS1Pb, gr_QS2Pb = TGraph(len(arr_Q_S1Pb), arr_EC_S1Pb, arr_Q_S1Pb), TGraph(len(arr_Q_S2Pb), arr_EC_S2Pb, arr_Q_S2Pb)\n\n\n # PyRPl.process_TGraph(gr_QS1Beta, X_title = \"Heat\", Y_title = \"Q\", color=kOrange-3), PyRPl.process_TGraph(gr_QS2Beta, X_title = \"Heat\", Y_title = \"Q\", color=kBlue)\n # PyRPl.process_TGraph(gr_QS1Pb, X_title = \"Heat\", Y_title = \"Q\", color=kRed), PyRPl.process_TGraph(gr_QS2Pb, X_title = \"Heat\", Y_title = \"Q\", color=kGreen+2)", "def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no 
APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n 
ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for 
Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, prec_wrt_L=False, plot_out=None):\n\n nr_components = settings['nr_components']\n plot = {'data': [], 'layout': {}}\n\n #set up drop down menu\n plot['layout']['updatemenus']=[{'xanchor':'left',\n 'yanchor':'top',\n 'x':1.02,\n 'y':0.6,\n 'buttons':[],\n 'active': 0,\n }]\n\n # component weights\n weights_bg = []\n weights_contact = []\n for component in range(nr_components):\n weights_bg.append(parameters_dict['weight_bg_'+str(component)][0])\n weights_contact.append(parameters_dict['weight_contact_'+str(component)][0])\n\n\n min_coupling_xaxis = -1\n max_coupling_xaxis = +1\n\n\n ab_list = evaluation_set_kde['contact'].keys()\n for ab in ab_list:\n\n plot['data'].append(\n go.Scatter(\n x=evaluation_set_kde['x_grid'],\n y=evaluation_set_kde['bg'][ab],\n mode='none',\n fill='tozeroy',\n fillcolor='rgb(50,50,205)',\n opacity=0.2,\n name='training data bg',\n showlegend=True,\n hoverinfo=None,\n visible=False\n )\n )\n\n plot['data'].append(\n go.Scatter(\n x=evaluation_set_kde['x_grid'],\n y=evaluation_set_kde['contact'][ab],\n fill='tonexty',\n fillcolor='rgb(50,205,50)',\n opacity=0.2,\n mode='none',\n name='training data contact',\n showlegend=True,\n hoverinfo=None,\n visible=False\n )\n )\n\n\n means = []\n sd = []\n for component in range(nr_components):\n means.append(parameters_dict['mu_'+str(component)][ab])\n try:\n if prec_wrt_L:\n sd.append(np.sqrt(1.0/(parameters_dict['prec_'+str(component)][ab] * 142) )) #in case precision is spec depending on L=142\n else:\n sd.append(np.sqrt(1.0/parameters_dict['prec_'+str(component)][ab]))\n except ZeroDivisionError as e:\n print(e)\n sd.append(0) #in case prec is zero bc optimizer tries strange values\n\n ### add components\n for component in range(nr_components):\n gaussian_component_density = get_coordinates_for_1d_gaussian(\n min_coupling_xaxis,\n max_coupling_xaxis,\n means[component],\n sd[component]\n )\n\n plot['data'].append(go.Scatter(x=gaussian_component_density[0],\n y=gaussian_component_density[1],\n mode='lines',\n name='component ' + str(component) + ' for ' + AB[ab],\n line=dict(dash='dot',\n color=colors[component]),\n showlegend=False,\n visible=False\n )\n )\n\n ### add mixture if there are more than one component\n if (nr_components > 1):\n gaussian_mixture_x_contact, gaussian_mixture_y_contact = get_coordinates_for_1d_gaussian_mixture(\n min_coupling_xaxis, max_coupling_xaxis,\n weights_contact,\n means,\n sd\n )\n\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_contact,\n y=gaussian_mixture_y_contact,\n mode='lines',\n name='mixture (contact) for ' + AB[ab],\n line=dict(color='rgb(50,205,50)',\n width = 3),\n showlegend=False,\n visible=False\n )\n )\n\n if (nr_components > 1):\n gaussian_mixture_x_bg, gaussian_mixture_y_bg = get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_bg,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_bg,\n 
y=gaussian_mixture_y_bg,\n mode='lines',\n name='mixture (bg) for ' + AB[ab],\n line=dict(color='rgb(50,50,205 )',\n width = 3),\n showlegend=False,\n visible=False\n )\n )\n\n #set up drop down option\n nr_plots_per_ab = 2 + nr_components\n if (nr_components > 1):\n nr_plots_per_ab += 2\n\n plot['layout']['updatemenus'][0]['buttons'].append(\n {\n 'args':['visible', [False] * (nr_plots_per_ab) * ab_list.index(ab) +\n [True] * (nr_plots_per_ab) +\n [False] * (nr_plots_per_ab) * (len(ab_list) - ab_list.index(ab) - 1)+\n [True]] ,\n 'label': AB[ab],\n 'method':'restyle'\n })\n\n\n\n if \"regularizer\" in evaluation_set_kde.keys():\n plot['data'].append(\n go.Scatter(\n x=evaluation_set_kde['x_grid'],\n y=evaluation_set_kde['regularizer'],\n mode='lines',\n name='regularization prior',\n line=dict(color='black',\n width=3),\n showlegend=True,\n hoverinfo=None,\n visible=False\n )\n )\n\n\n plot['layout'].update({'title': 'Coupling prior as a gaussian mixture'})\n plot['layout'].update({'xaxis1': {'title': \"coupling values\"}})\n plot['layout'].update({'yaxis1': {'title': \"density\"}})\n plot['layout']['updatemenus'][0]['active']=0\n plot['layout']['yaxis1']['range']=[0,15]\n plot['layout']['font'] = {'size': 18}\n\n if plot_out is not None:\n plotly_plot(plot, filename=plot_out, auto_open=False)\n else:\n return plot", "def investigate_data(training_data):\n return sns.pairplot(training_data.sample(100), hue=\"status\")", "def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in 
rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def drawValidationNeedles(self): \n #productive #onButton\n profprint()\n # reset report table\n # print \"Draw manually segmented needles...\"\n #self.table =None\n #self.row=0\n self.initTableView()\n self.deleteEvaluationNeedlesFromTable()\n while slicer.util.getNodes('manual-seg*') != {}:\n nodes = slicer.util.getNodes('manual-seg*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n \n if self.tableValueCtrPt==[[]]:\n self.tableValueCtrPt = [[[999,999,999] for i in range(100)] for j in range(100)]\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\n nbNode=modelNodes.GetNumberOfItems()\n for nthNode in range(nbNode):\n modelNode=slicer.mrmlScene.GetNthNodeByClass(nthNode,'vtkMRMLAnnotationFiducialNode')\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\n needleNumber = int(modelNode.GetAttribute(\"NeedleNumber\"))\n needleStep = int(modelNode.GetAttribute(\"NeedleStep\"))\n coord=[0,0,0]\n modelNode.GetFiducialCoordinates(coord)\n self.tableValueCtrPt[needleNumber][needleStep]=coord\n print needleNumber,needleStep,coord\n # print self.tableValueCtrPt[needleNumber][needleStep]\n\n for i in range(len(self.tableValueCtrPt)):\n if self.tableValueCtrPt[i][1]!=[999,999,999]:\n colorVar = random.randrange(50,100,1)/float(100)\n controlPointsUnsorted = [val for val in self.tableValueCtrPt[i] if val !=[999,999,999]]\n controlPoints=self.sortTable(controlPointsUnsorted,(2,1,0))\n self.addNeedleToScene(controlPoints,i,'Validation')\n else:\n # print i\n pass", "def plot_stats(x_axis, y_axis, df, highlight=[]):\n a, b = df[x_axis], df[y_axis]\n\n X_train, X_test, y_train, y_test = train_test_split(a, b, test_size=0.33, random_state=42)\n\n X_train = np.array(X_train).reshape(-1, 1)\n X_test = np.array(X_test).reshape(-1, 1)\n y_train = np.array(y_train).reshape(-1, 1)\n y_test = np.array(y_test).reshape(-1, 1)\n\n regr = linear_model.LinearRegression()\n\n regr.fit(X_train, y_train)\n\n df[y_axis + \" STD\"] = df[y_axis].apply(lambda a: round((a-df[y_axis].mean())/df[y_axis].std()))\n df[y_axis + \" rank\"] = df[y_axis].rank(ascending=False)\n df[x_axis + \" rank\"] = df[x_axis].rank(ascending=False)\n \n mapper = linear_cmap(field_name=y_axis + \" STD\", palette=brewer[\"RdBu\"][len(df[y_axis + \" STD\"].unique())], \n low=min(df[y_axis + \" STD\"].unique()), high=max(df[y_axis + \" STD\"].unique()))\n \n source = ColumnDataSource(df)\n source2 = ColumnDataSource(df[df[\"Player\"].isin(highlight)])\n \n p = figure(x_range=(df[x_axis].min() - df[x_axis].std(), df[x_axis].max() + df[x_axis].std()), \n y_range=(df[y_axis].min() - df[y_axis].std(), df[y_axis].max() + df[y_axis].std()))\n \n r1 = p.circle(x=x_axis, y=y_axis,\n source=source, size=10, 
color=mapper, line_color=\"black\", legend_group= y_axis + \" STD\")\n\n p.title.text = y_axis + \" vs. \" + x_axis\n p.title.align = \"center\"\n p.xaxis.axis_label = x_axis\n p.yaxis.axis_label = y_axis\n p.legend.location = 'top_left'\n p.legend.title = \"St. Dev's from Avg \" + y_axis\n p.background_fill_color = \"#dddddd\"\n p.background_fill_alpha = 0.1\n \n line_x = [df[x_axis].min().item() - df[x_axis].std().item(), df[x_axis].max().item() + df[x_axis].std().item()]\n line_y = [(line_x[0]*regr.coef_.item()) + regr.intercept_.item(), (line_x[1]*regr.coef_.item()) + regr.intercept_.item()]\n r2 = p.line(line_x, line_y, line_width=2, color=\"black\")\n\n p.add_tools(HoverTool(renderers=[r1], tooltips=[\n (\"Player\", \"@Player\"),\n (y_axis, \"@{\" + y_axis +\"}{0.000}\"),\n (y_axis + \" Rank\", \"#@{\" + y_axis + \" rank}\"),\n (x_axis, \"@{\" + x_axis +\"}{0}\"),\n (x_axis + \" Rank\", \"#@{\" + x_axis + \" rank}\")]))\n\n \n p.add_tools(HoverTool(renderers=[r2], \n tooltips=[(x_axis, \"$x{0000}\"),\n (\"Predicted \" + y_axis, \"$y\")]))\n \n labels = LabelSet(x=x_axis, \n y=y_axis, text=\"Player\", y_offset=8,\n text_font_size=\"11px\", text_color=\"#555555\",\n source=source2, text_align='center')\n \n p.add_layout(labels)\n\n st.bokeh_chart(p)", "def plot_checks(inputfile, outputfile, absetamin, absetamax, max_pt, pu_min, pu_max):\n\n print \"Doing eta bin: %g - %g, max L1 jet pt: %g\" % (absetamin, absetamax, max_pt)\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = output_f.mkdir(\"Histograms\")\n\n # Eta cut string\n eta_cutStr = \" TMath::Abs(eta)<%g && TMath::Abs(eta) > %g \" % (absetamax, absetamin)\n # Pt cut string\n pt_cutStr = \"pt < %g\" % max_pt\n # PU cut string\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n # Avoid L1 saturated jets cut (from 2017 any l1 jet with a saturated tower is auto given pt=1024GeV)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n\n # Draw response (pT^L1/pT^Gen) for all pt bins\n tree_raw.Draw(\"rsp>>hrsp_eta_%g_%g(100,0,5)\" % (absetamin, absetamax), cutStr)\n hrsp_eta = ROOT.gROOT.FindObject(\"hrsp_eta_%g_%g\" % (absetamin, absetamax))\n hrsp_eta.SetTitle(\";response (p_{T}^{L1}/p_{T}^{Ref});\")\n if absetamin < 2.9:\n fit_result = hrsp_eta.Fit(\"gaus\", \"QER\", \"\",\n hrsp_eta.GetMean() - hrsp_eta.GetRMS(),\n hrsp_eta.GetMean() + hrsp_eta.GetRMS())\n else:\n peak = hrsp_eta.GetBinCenter(hrsp_eta.GetMaximumBin())\n fit_result = hrsp_eta.Fit(\"gaus\", \"QER\", \"\",\n peak - (0.5 * hrsp_eta.GetRMS()),\n peak + (0.5 * hrsp_eta.GetRMS()))\n\n # mean = hrsp_eta.GetFunction(\"gaus\").GetParameter(1)\n # err = hrsp_eta.GetFunction(\"gaus\").GetParError(1)\n output_f_hists.WriteTObject(hrsp_eta)\n\n # nb_pt, pt_min, pt_max = 63, 0, 252 # for GCT/Stage 1\n nb_pt, pt_min, pt_max = 512, 0, 1024 # for Stage 2\n nb_rsp, rsp_min, rsp_max = 100, 0, 5\n\n # Draw rsp (pT^L1/pT^Gen) Vs GenJet pT\n tree_raw.Draw(\"rsp:ptRef>>h2d_rsp_gen(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)\n h2d_rsp_gen = ROOT.gROOT.FindObject(\"h2d_rsp_gen\")\n h2d_rsp_gen.SetTitle(\";p_{T}^{Ref} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})\")\n output_f_hists.WriteTObject(h2d_rsp_gen)\n\n h2d_rsp_gen_norm = cu.norm_vertical_bins(h2d_rsp_gen)\n output_f_hists.WriteTObject(h2d_rsp_gen_norm)\n\n # Draw rsp (pT^L1/pT^Gen) Vs L1 pT\n 
tree_raw.Draw(\"rsp:pt>>h2d_rsp_l1(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)\n h2d_rsp_l1 = ROOT.gROOT.FindObject(\"h2d_rsp_l1\")\n h2d_rsp_l1.SetTitle(\";p_{T}^{L1} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})\")\n output_f_hists.WriteTObject(h2d_rsp_l1)\n\n h2d_rsp_l1_norm = cu.norm_vertical_bins(h2d_rsp_l1)\n output_f_hists.WriteTObject(h2d_rsp_l1_norm)\n\n # Draw pT^Gen Vs pT^L1\n tree_raw.Draw(\"pt:ptRef>>h2d_gen_l1(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_pt, pt_min, pt_max), cutStr)\n h2d_gen_l1 = ROOT.gROOT.FindObject(\"h2d_gen_l1\")\n h2d_gen_l1.SetTitle(\";p_{T}^{Ref} [GeV];p_{T}^{L1} [GeV]\")\n output_f_hists.WriteTObject(h2d_gen_l1)", "def obstab_plot_observable(yyyy: int, doy: int, gnss: str, dfprnobst: pd.DataFrame, dir_gfzplt: str, obstab_name: str, dt_first: datetime, dt_last: datetime, show_plot: bool = False, logger: logging.Logger = None) -> str:\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n amutils.logHeadTailDataFrame(df=dfprnobst, dfName='dfprnobst[{gnss:s}]'.format(gnss=gnss), logger=logger, callerName=cFuncName)\n\n # set up the plot\n plt.style.use('ggplot')\n # plt.style.use('seaborn-darkgrid')\n\n # determine index of first obst\n idx_PRN = dfprnobst.columns.get_loc('PRN') + 1\n nr_obsts = len(dfprnobst.columns[idx_PRN:])\n\n # used markers\n lst_markers = ['o', 'x', '+', '.', ',', 'v', '^', '<', '>', 's', 'd']\n\n # create 2 subplots with same axis if more than 1 obst, else only 1 subplot\n if nr_obsts == 1:\n fig, ax1 = plt.subplots(1, figsize=(10, 4))\n else:\n fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(10, 7), gridspec_kw={'height_ratios': [2, 1]})\n\n # create colormap with nrcolors discrete colors which is th efirst always present plot\n obst_colors, title_font = amutils.create_colormap_font(nrcolors=nr_obsts, font_size=12)\n obst_markers = lst_markers[:nr_obsts]\n for obst, obst_color, marker in zip(dfprnobst.columns[idx_PRN:], obst_colors, obst_markers):\n ax1.plot(dfprnobst['DATE_TIME'], dfprnobst[obst], color=obst_color, label=obst, alpha=0.6, linestyle='', marker=marker, markersize=2)\n\n # beautify plot\n ax1.xaxis.grid(b=True, which='major')\n ax1.yaxis.grid(b=True, which='major')\n\n ax1.set_ylabel(gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]], fontdict=title_font)\n # ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))\n\n ax1.legend(loc='best', markerscale=4)\n\n # setticks on Y axis to represent the PRNs\n if dfprnobst.columns[idx_PRN][0] == 'S':\n ax1.set_yticks(np.arange(10, 61, 10))\n\n # this will be the bottom axis if only 1 obst available\n axis = ax1\n\n # add difference plot when there are more than 1 obst available\n if nr_obsts > 1:\n # add difference between observables\n diff_colors = []\n for i, color in enumerate(amutils.get_spaced_colors(nr_obsts)):\n diff_colors.append(tuple(rgb / 256. 
for rgb in color))\n\n obst_diff_markers = lst_markers[:nr_obsts]\n\n dfprnobstdiff = pd.DataFrame(dfprnobst['DATE_TIME'])\n for i, obst1 in enumerate(dfprnobst.columns[idx_PRN:-1]):\n for j, obst2 in enumerate(dfprnobst.columns[idx_PRN + (i + 1):]):\n obst_diff = '{obst1:s}-{obst2:s}'.format(obst1=obst1, obst2=obst2)\n\n dfprnobstdiff[obst_diff] = dfprnobst[obst1] - dfprnobst[obst2]\n\n marker = obst_diff_markers[i * len(dfprnobst.columns[idx_PRN:-1]) + j]\n ax2.plot(dfprnobstdiff['DATE_TIME'], dfprnobstdiff[obst_diff], label=obst_diff, alpha=0.6, linestyle='', marker=marker, markersize=2)\n\n # beutify this plot\n if dfprnobst.columns[idx_PRN][0] == 'S':\n ax2.set_ylim([-10, +10])\n if dfprnobst.columns[idx_PRN][0] == 'C':\n ax2.set_ylim([-20, +20])\n ax2.set_ylabel('Diff {obst:s}'.format(obst=gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]]), fontdict=title_font)\n\n # this will be the bottom axis if more than 1 obst available\n axis = ax2\n\n # plot title\n plt.suptitle('{obst:s} for PRN {prn:s} on {yy:02d}/{doy:03d}'.format(obst=gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]], prn=dfprnobst['PRN'].iloc[0], yy=(yyyy % 100), doy=doy))\n\n # beautify plot\n axis.set_xlabel('Time', fontdict=title_font)\n axis.yaxis.grid(b=True, which='major')\n axis.legend(loc='best', markerscale=3)\n\n # create the ticks for the time axis\n axis.set_xlim([dt_first, dt_last])\n dtFormat = plot_utils.determine_datetime_ticks(startDT=dt_first, endDT=dt_last)\n\n if dtFormat['minutes']:\n # ax.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(10, 60, 10), interval=1))\n pass\n else:\n axis.xaxis.set_major_locator(dates.HourLocator(interval=dtFormat['hourInterval'])) # every 4 hours\n axis.xaxis.set_major_formatter(dates.DateFormatter('%H:%M')) # hours and minutes\n\n axis.xaxis.set_minor_locator(dates.DayLocator(interval=1)) # every day\n axis.xaxis.set_minor_formatter(dates.DateFormatter('\\n%d-%m-%Y'))\n\n axis.xaxis.set_tick_params(rotation=0)\n for tick in axis.xaxis.get_major_ticks():\n # tick.tick1line.set_markersize(0)\n # tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('center')\n\n fig.tight_layout()\n\n # save the plot in subdir png of GNSSSystem\n plt_name = '{basen:s}-{gnss:s}-{PRN:s}-{obst:s}.pdf'.format(basen=obstab_name.split('.')[0], gnss=gnss, PRN=dfprnobst['PRN'].iloc[0], obst=gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]])\n fig.savefig(os.path.join(dir_gfzplt, plt_name), dpi=200)\n logger.info('{func:s}: created plot {plot:s}'.format(func=cFuncName, plot=colored(plt_name, 'green')))\n\n # if show_plot:\n if show_plot:\n plt.show(block=True)\n else:\n plt.close(fig)\n\n return plt_name", "def overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 
1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = 
(self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind = 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), 
'%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return", "def plot_hist_snfit_sncosmo(self):\n \n self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_'+str(self.width)+'.txt')\n self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_'+str(self.width)+'.txt')\n\n# self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_GF.txt')\n# self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_GF.txt')\n\n self.diff_x0_sncosmo = []\n self.diff_x0_err_sncosmo = []\n self.diff_x1_sncosmo = []\n self.diff_x1_err_sncosmo = [] \n self.diff_c_sncosmo = []\n self.diff_c_err_sncosmo = [] \n self.diff_mb_sncosmo = []\n self.diff_mb_err_sncosmo = [] \n self.diff_cov_x0_x1_sncosmo = []\n self.diff_cov_x0_c_sncosmo = []\n self.diff_cov_x1_c_sncosmo = []\n self.diff_cov_mb_x1_sncosmo = []\n self.diff_cov_mb_c_sncosmo = []\n self.diff_chi2 = []\n for i in range (len(self.sn_name)):\n for j in range (len(self.sncosmo_sn_name)):\n if self.sn_name[i] == self.sncosmo_sn_name[j]:\n if np.abs(self.x1[i] - self.sncosmo_x1[j]) < 0.02:\n self.diff_x0_sncosmo.append(self.x0[i] - self.sncosmo_x0[j])\n self.diff_x0_err_sncosmo.append(self.x0_err[i] - self.sncosmo_x0_err[j])\n self.diff_x1_sncosmo.append(self.x1[i] - self.sncosmo_x1[j])\n self.diff_x1_err_sncosmo.append(self.x1_err[i] - self.sncosmo_x1_err[j]) \n self.diff_c_sncosmo.append(self.c[i] - self.sncosmo_c[j])\n self.diff_c_err_sncosmo.append(self.c_err[i] - self.sncosmo_c_err[j]) \n self.diff_mb_sncosmo.append(self.mb[i] - 
self.sncosmo_mb[j])\n self.diff_mb_err_sncosmo.append(self.mb_err[i] - self.sncosmo_mb_err[j])\n self.diff_chi2.append(self.snfit_chi2[i] - self.sncosmo_chi2[j])\n# self.diff_cov_x0_x1_sncosmo.append()\n# self.diff_cov_x0_c_sncosmo.append()\n# self.diff_cov_x1_c_sncosmo.append()\n# self.diff_cov_mb_x1_sncosmo.append()\n# self.diff_cov_mb_c_sncosmo.append()\n else:\n print self.x1[i] - self.sncosmo_x1[j], self.sn_name[i],self.sncosmo_sn_name[j], self.x1[i], self.sncosmo_x1[j]\n\n# rcParams['font.size'] = 16.\n# font = {'family': 'normal', 'size': 16}\n# rc('axes', linewidth=1.5)\n# rc(\"text\", usetex=True)\n# rc('font', family='serif')\n# rc('font', serif='Times')\n# rc('legend', fontsize=25)\n# rc('xtick.major', size=5, width=1.5)\n# rc('ytick.major', size=5, width=1.5)\n# rc('xtick.minor', size=3, width=1)\n# rc('ytick.minor', size=3, width=1)\n# fig = plt.figure(figsize=(8.,8.)) \n# \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0_sncosmo,50,label='$\\Delta$ x0_'+str(self.width))\n ax0_2.hist(self.diff_x0_err_sncosmo,50,label='$\\Delta$ x0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n# ax0_1.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x0_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1_sncosmo,50,label='$\\Delta$ X1_'+str(self.width))\n ax0_2.hist(self.diff_x1_err_sncosmo,50,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x1_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c_sncosmo,50,label='$\\Delta$ Color_'+str(self.width))\n ax0_2.hist(self.diff_c_err_sncosmo,50,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/color_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb_sncosmo,50,label='$\\Delta$ mb_'+str(self.width))\n ax0_2.hist(self.diff_mb_err_sncosmo,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/mb_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n plt.hist(self.diff_chi2,50,label='$\\Delta$ chi2_'+str(self.width))\n pdffile = '../sugar_analysis_data/results/chi2_'+str(self.width)+'.pdf'\n plt.legend()\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()" ]
[ "0.59795356", "0.59778273", "0.57227975", "0.5714864", "0.5668321", "0.5655379", "0.5626232", "0.560986", "0.5528758", "0.5513254", "0.5480079", "0.54680276", "0.54550433", "0.5433568", "0.5413373", "0.5396023", "0.53728133", "0.5363724", "0.5355438", "0.5349676", "0.53473616", "0.5321014", "0.5301597", "0.52988684", "0.5283682", "0.52807117", "0.5275174", "0.5270754", "0.52484673", "0.52463746" ]
0.6043595
0
Connects to the given host address and port.
def connect(self, host=None, port=None): host = self.host if host is None else host port = self.port if port is None else port self.socket.connect(host, port)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, host, port):\n pass", "def connect(self, connection_host, connection_port):\n self.connection.connect((connection_host, connection_port))", "def connect_to_server(host, port) -> socket.SocketIO:\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (host, port)\n print('[CLIENT LOG] connecting to {} port {}'.format(host,port)) \n sock.connect(server_address)\n return sock", "def connect(self, host, port=6667, use_ssl=False):\n self.log('@ Connecting to %s port %d' % (host, port))\n\n self.sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.sk.connect((host, port))\n self.log('@ Connected')\n self.connected = True\n self.heartbeat.start()\n self._callback('on_connected')", "def connect(self, host, port):\n\t\tif self.is_server:\n\t\t\traise socket.error(\"\"\"A server socket was used in place of a client\n\t\t\t\t\t\t\t socket for connecting\"\"\")\n\n\t\tself.socket.connect((host, port))\n\t\tself.socket_connected = True", "def connect(self, host=HOST, port=PORT, timeout=10):\r\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._socket.connect((host, port))\r\n if timeout is not None:\r\n self._socket.settimeout(timeout)\r\n logger.info('Connected to: %s...', repr((host, port)))", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def connect_to_server(host, port):\n # Create a socket to use IPv4 and TCP stream communication\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Connect to the server\n client_socket.connect( (host, port) )\n return client_socket", "def connect(self, host, port=6667):\n\t\tprint(host)\n\t\tprint(port)\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n\t\tself.s = ssl.wrap_socket(sock)\n\t\tself.s.connect((host, port))", "def connect(self):\n try:\n self.sock.connect((self.hostname, self.port))\n print 'connected to ' + self.hostname\n except socket.gaierror as e:\n print(\"Recieved error when connecting to \" + str((self.hostname, self.port)))\n raise e", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def connect(self, host, port):\n if self._connectedTo is not None:\n raise ValueError(\"Already connected\")\n self._connectedTo = (host, port)", "def connect(self,addr=None,port=None):\n\n self.type = 'connect'\n\n if addr != None:\n self.remote_location = (addr,int(port))\n try:\n s = socket(AF_INET,SOCK_STREAM)\n s.settimeout(1.0)\n s.connect(self.remote_location)\n self.status = 'connected'\n s.settimeout(0.0)\n self.sock = s\n except error as e:\n self.errno = e.errno\n self.status = 'closed'", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. 
Try again!',e\n sys.exit(1)", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=False):\n s = SocketStream.connect(host, port, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def connect(self, host):\n if not self.app.connect(host):\n command = \"Connect({0})\".format(host).encode(\"utf-8\")\n self.exec_command(command)\n self.last_host = host", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def connect(self, host, port):\n\n self.connect_count = self.RETRY_COUNT\n timeout = None if self.debug_mode else FvpConnector.MAX_IDLE_TIME\n\n while not self.has_connect_timed_out():\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.settimeout(timeout)\n self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n self.sock.connect((host, port))\n return\n except ConnectionRefusedError:\n time.sleep(FvpConnector.RETRY_PERIOD)\n\n raise Exception(\"Failed to connect to FVP\")", "def connect(host, port = DEFAULT_SERVER_PORT):\n return factory.connect(host, port, SlaveService)", "def connect(self,ip,port):\n return self.network.connect(ip,port)", "def _connect_to_target(self, host):\n port = 80\n if ':' in host:\n host, _, port = host.partition(':')\n (socket_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]\n self.target = socket.socket(socket_family)\n self.target.connect(address)", "def connect(self, host: str, port: int, timeout: float) -> None:\n self.socket.settimeout(timeout)\n self.socket.connect((host, port))\n self.socket.settimeout(0)", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "async def connect(\n self, host: str, port: int, use_tls: bool = False, loop=None\n ) -> None:\n\n self.logger.info('Connecting to {}:{}'.format(host, port))\n\n self.secure = use_tls\n connection = asyncio.open_connection(host, port, ssl=use_tls, loop=loop)\n try:\n self.reader, self.writer = await connection\n except Exception as exception:\n self.logger.error('Disconnected', exception)\n self.irc_disconnected(exception)\n return\n\n await self.connected()", "def connect(self, host, port):\n logging.debug(\"Connecting to %s:%i\", host, port)\n self._hasError = False\n self.tcpsocket = QTcpSocket()\n self.tcpsocket.error.connect(self.processError)\n self.tcpsocket.connected.connect(self._connected)\n self.tcpsocket.connected.connect(lambda: self._stopWaiting.emit())\n self.tcpsocket.readyRead.connect(self.receive)\n\n self.tcpsocket.connectToHost(host, port)\n self.waitForConnection()", "def connect(self) -> None:\n self.terminate()\n self._new_client().connect(\n hostname=self.ip,\n port=self.port,\n username=self.username,\n password=self.password,\n look_for_keys=False,\n allow_agent=False)", "def mpd_connect(host=_MPD_HOST, port=_MPD_PORT):\n \n _mpd_client.connect(host, port)", "def connect(host: str, port: int):\n print('Connecting to the server...')\n print(cmd.RESP_OK, type(cmd.RESP_OK))\n tn = telnetlib.Telnet(host = host, port = port)\n code, params = cmd.serv_read_resp(tn)\n if code != 
cmd.RESP_OK:\n print(f'Connection problem. {code, params}')\n exit(0)\n print(f'{params[0]}\\n')\n return tn" ]
[ "0.8277495", "0.7886647", "0.76533353", "0.76506543", "0.76097435", "0.7587816", "0.7525606", "0.75252724", "0.75134", "0.7500646", "0.74626714", "0.74174386", "0.735934", "0.7358163", "0.73108876", "0.7268154", "0.7267805", "0.7256103", "0.7247818", "0.7234581", "0.7214534", "0.7199228", "0.7147296", "0.71367687", "0.70816225", "0.7058566", "0.70556915", "0.70394784", "0.7013323", "0.6994841" ]
0.827836
0
Send the message contained in the given file.
def send_file_message(self, filename): data = self._readFile(filename) self.print_debug_message(data) self.socket.send(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, fileName, message):\n project = e5App().getObject(\"Project\")\n if project.isProjectFile(fileName):\n self.__cooperationClient.sendEditorCommand(\n project.getHash(),\n project.getRelativeUniversalPath(fileName),\n message\n )", "def send_textfile(self, textfile, update, context):\n\n with open(textfile, 'r') as file:\n MSG = file.read()\n\n context.bot.send_message(chat_id=update.message.chat_id, text=MSG)", "def send_file(self):\n if os.path.isfile(\"client_files/\" + self.filename):\n with open(\"client_files/\" + self.filename, \"r\") as file:\n text_list = file.readlines()\n self.text_string = ''.join(text_list)\n send_msg(self.socket, self.text_string, HEADER_LENGTH)\n return True\n else:\n print(\"\\'{}\\' does not exist.\\nPlease provide a valid file name.\".format(self.filename))\n return False", "def reply_message(self, message):\n\n message = str(message).format(self.path).encode('utf-8')\n self.wfile.write(message)", "def sendFile(self, fullfilename):\n raise NotImplementedError(\"Implement this method in child class\")", "async def cat(self, ctx, file):\n await ctx.send(file=discord.File(file))", "async def send_file(self, file):\n with open(file, \"rb\") as file_bytes: # Opening file as readable in bytes\n print(f\"Send: {file_bytes!r}\")\n\n total_bytes = 0\n while True:\n chunk = file_bytes.read(1024)\n total_bytes += len(chunk)\n\n if not chunk: # Error with write_eof(). Need a way to finish\n print(\"Draining...\")\n\n check = \"end\"\n self.writer.write(check.encode())\n # Maybe writing at the end an empty list could work\n\n await self.writer.drain()\n break\n\n self.writer.write(chunk)\n print(f\"Sent: {total_bytes!r} bytes\")", "def send_file():\n data = ARGS.data\n filename = ARGS.file\n outstream = \"POST||\" + filename + \"||\" + data\n CLIENT_SOCKET.send(outstream.encode())", "def send_file(cobj, dest, port, fname, hash, handler):\n pass", "def send_file_contents(self):\n self.send_comm.send_nolimit(self.ply_dict)\n self.send_comm.send_eof()", "def send_file(self, filename, BUFF_LENGTH):\n out_file = open(filename,\"rb\")\n file_bytes = out_file.read(1024) \n while file_bytes != b'':\n self.client.send(file_bytes)\n file_bytes = out_file.read(1024) # read next block from file\n self.client.send(b'')", "def send_file(self, file_path) -> object:\n try:\n self.payload = {'file': open(file_path, 'rb')}\n except FileNotFoundError as fl_er:\n print(fl_er)\n exit(1)\n try:\n return requests.post(url = self.__webhooks, files = self.payload)\n except OverflowError as err:\n print('Size Overflow Error', err)\n exit(1)", "def send_file(self, file_path) -> object:\n method = 'sendDocument?' 
+ 'chat_id=' + str(self.__chat_id_response())\n try:\n files = {'document': open(file_path, 'rb')}\n return requests.post(self.api_url + method, files = files)\n except FileNotFoundError as fn_err:\n print(fn_err)\n sys.exit(1)\n except TimeoutError as tm_err:\n print(tm_err)\n sys.exit(1)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return app.send_static_file(file_dot_text)", "def send_file(self, model):\n\n fh = get_pdb_file(model)\n file_size = fh.tell()\n fh.seek(0)\n\n # Send request\n request = self.SENDFILE + model + ' ' + str(file_size)\n logging.debug(\"request {request}\")\n self.send(request)\n\n # Wait for ACK\n srv_resp = self.recieve()\n\n if srv_resp != self.RESP_OK:\n logging.debug(\"response {srv_resp}\")\n raise Exception('Server refuse to accept file')\n\n\n self.send(fh.read(), encode=False)\n fh.close()\n\n srv_resp = self.recieve()\n if srv_resp != \"OK\":\n logging.debug(\"response {srv_resp}\")\n raise Exception('Fail after sending file')" ]
[ "0.7248147", "0.7027971", "0.696669", "0.68094087", "0.67991465", "0.66986513", "0.65315217", "0.6488928", "0.64625514", "0.64422685", "0.64363587", "0.6415983", "0.6381144", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.62902164", "0.6271641" ]
0.7932562
0
Receive a response from the remote host.
def receive_response(self): return self.socket.receive()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_response(command):\n connection = get_client()\n\n connection.send(command)\n\n data = connection.recv()\n connection.close()\n\n return data", "async def recv(self):\n return await self.receptor.response_queue.get()", "def receive_response(self, private_key, responder_id, msg_tag, response):\n return self._handle_response(private_key, responder_id, msg_tag, response)", "def __receive_response(self, sock, raw=False, print_response=True, timeout=False, debug=False):\n if timeout:\n sock.settimeout(timeout)\n try:\n response = sock.recv(1024)\n if print_response:\n print(f\"<-- Received: {response}\")\n except Exception as e:\n if debug:\n print(\"No response:\", e)\n response = b\"\"\n finally:\n return response if raw else str(response, \"ascii\")", "def receive_response(self):\n print(\"Waiting response on chaussure...\")\n chunks = []\n\n while True:\n chunks.append(self.chaussette.recv(2048))\n\n if len(chunks[-1]) == 0:\n break\n\n self.chaussette.shutdown(socket.SHUT_RDWR)\n\n return ''.join(chunks)", "def get_response(self):\n if not self.debug:\n data = \"\"\n while \"\\n\" not in data:\n try:\n data = data + self.socket.recv(1024)\n except:\n logging.warning(\"Timeout on receive from socket\")\n logging.info(\"RECEIVE: \" + data)\n else:\n logging.info(\"Receive in debug mode\")\n # The zero makes ERR? command happy\n data = \"0\"\n\n return data", "def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response", "def recieve(self):\n\t\tif self._connected == True:\n\t\t\treturn self._sock.recv(1)", "def response(self):\n return self._send(bytes([0xef,0xfe,0x02,0x0,0x0,0x0,0x0,0x0]))", "def winhttp_WinHttpReceiveResponse(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpReserved\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _get_response(self, connection):\n\n response_header = self._receive(connection, 13)\n logger.debug('Response header: %s', response_header)\n\n if (not response_header.startswith(b'ZBXD\\x01')) or (len(response_header) != 13):\n logger.debug('Zabbix return not valid response.')\n result = False\n else:\n response_len = struct.unpack('<Q', response_header[5:])[0]\n response_body = connection.recv(response_len)\n result = json.loads(response_body.decode(\"utf-8\"))\n logger.debug('Data received: %s', result)\n\n try:\n connection.close()\n except socket.error:\n pass\n\n return result", "def receive(self):\n\n return self.sock.recv(1024)", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(2048)\r\n print(\"Response \", self.response)\r\n except socket.error as exc:\r\n print (\"Receive Thread caught exception socket.error : %s\" % exc)", "def recv(self, block=True, timeout=None):\n return self.conn.recv(block, timeout)", "def read_response(self, long_timeout=False):\n while True:\n try:\n return self.read_cbor_message()\n except EOFError as e:\n if not long_timeout:\n raise", "def recvRtspReply(self):\r\n\t\treply = self.rtspSocket_client.recv(256).decode(\"utf-8\")\r\n\t\tprint(\"\\nS: \\n\" + reply + \"\\n----------\")\r\n\t\treturn reply", "def _recv(self):\n result = self._con.receive()\n if result.startswith(Parser.NOT_OK_MSG) or len(result) == 0:\n return result\n while not result.endswith(Parser.OK_MSG + '\\n') and not result.startswith(Parser.OK_MSG):\n result += self._con.receive()\n return result", "def 
recv(self):\n return None", "def recv(self, length = 0):\n return self.received.get()", "def receive(self, request_id, timeout=None):\n res = None\n start_time = time.time()\n while res is None:\n with self.connlock:\n res = self.conn.do_receive(request_id)\n if res is None:\n time.sleep(0.1)\n if timeout and (time.time() - start_time > timeout):\n raise RequestTimeout(request_id)\n\n if 'Error' in res:\n raise ServerError(res['Error'], res)\n\n try:\n return res['Response']\n except:\n raise BadResponseError(\"Failed to parse response: {}\".format(res))", "def Recv(self, bytes_):\n return self._sock.recv(bytes_)", "def do_remote(self, *args):\n return self.do_scpi(':communicate:remote 1')", "def recv_nl(self, timeout=1.0):\n ret = ''\n tstart = time.time()\n while True:\n c = self.serial.read(1)\n if not c:\n if timeout is not None and time.time() - tstart >= timeout:\n raise Timeout('Timed out')\n continue\n if c == b'\\xFF':\n print(\"WARNING: bad response 0xFF\")\n continue\n c = c.decode(\"ascii\")\n self.verbose and print(\"%s %02X\" % (c, ord(c)))\n if c == '\\r':\n break\n ret += c\n else:\n raise Timeout('Timed out waiting for closing ~')\n\n if self.verbose:\n print('XRAY DEBUG: recv: returning: \"%s\"' % (ret, ))\n return ret", "def get_server_response(self):\n\n response = []\n while True:\n\n try:\n returned_data = self.socket.recv(len(self.last_sent_request), socket.MSG_WAITALL)\n except socket.timeout:\n print(\"Unable to read response from host. Timed out.\")\n break\n\n if not returned_data:\n break\n else:\n response.append(returned_data.decode(\"utf-8\"))\n\n response_obj = HTTPResponse(''.join(response))\n return response_obj", "def recv(self):\n return self._socket.recv()", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def recv(self):\n\t\tmsg = self.pb.recv()\n\n\t\tif msg.get(0) == \"timeout\":\n\t\t\tprint \"You failed to find Toby before the time ran out!\"\n\t\t\tself.cleanup()\n\t\telif msg.get(0) == \"toby\":\n\t\t\tprint \"You found Toby. 
Good job!\"\n\t\t\tself.cleanup()\n\t\telif msg.get(0) == \"dead\":\n\t\t\tprint \"You died!\"\n\t\t\tself.cleanup()\n\n\t\treturn msg", "def readresp(self, cmd):\n\t\tdata = self.read(22)\n\t\tresponse = data[0]\n\t\t#print \"laser response\", self.mylaser, response\n\t\tgstt.lstt_dacanswers[self.mylaser] = response\n\t\tcmdR = data[1]\n\t\tstatus = Status(data[2:])\n\t\tr.set('/lack/'+str(self.mylaser), response)\n\n\t\tif cmdR != cmd:\n\t\t\traise ProtocolError(\"expected resp for %r, got %r\"\n\t\t\t\t% (cmd, cmdR))\n\n\t\tif response != \"a\":\n\t\t\traise ProtocolError(\"expected ACK, got %r\"\n\t\t\t\t% (response, ))\n\n\t\tself.last_status = status\n\t\treturn status", "def _recv_msg(self, msg):\n # If this is a response, pass it along to the Remote object to be\n # processesd by the correct reply/error handler\n if is_response(msg):\n self._remote.resolve(msg)\n\n # Otherwise process the request from the remote RPC client.\n elif is_request(msg):\n method, params = msg['method'], msg['params']\n if method in self._protocol.keys():\n try:\n args, kwargs = self._reconcile_parameters(method, params)\n\n result = getattr(self, method)(*args, **kwargs)\n self._send_msg(json_rpc_result(result, None, msg['id']))\n except Exception as e:\n if isinstance(e, jsonrpc.JSONRPCError):\n raise e\n else:\n raise jsonrpc.ServerError(str(e))\n else:\n raise jsonrpc.MethodNotFound(\"Method not allowed\")\n else:\n raise jsonrpc.ParseError(\"Could not parse msg: %s\" % msg)", "def receive(self, timeout=None) -> bytes:" ]
[ "0.6692667", "0.66210544", "0.6615369", "0.6444271", "0.63223696", "0.6285314", "0.62418765", "0.61905557", "0.6184888", "0.6167005", "0.6090777", "0.60867804", "0.6071184", "0.60570836", "0.60519344", "0.6039995", "0.602022", "0.6015102", "0.60061765", "0.59997016", "0.59486395", "0.5944377", "0.592156", "0.59107155", "0.5887304", "0.58834267", "0.58388865", "0.5831142", "0.5819523", "0.579463" ]
0.7255923
0
Counts the number of occurrences of a category value in a tagged file. Stores the result in a global dictionary.
def countTagsInFile(fname): with open(fname, 'r', encoding='utf-8') as f: for line in f: words = line.split(' ') for w in words: tag = w.split('_')[1].rstrip() cat = tag[0].upper() if tag not in dictionaries[cat]: dictionaries[cat][tag] = 1 else: dictionaries[cat][tag] += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_counts (count_file):\r\n tagcounts = defaultdict(int)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split()\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0])\r\n tag = fields[2]\r\n tagcounts[tag] += count \r\n f.close() \r\n return tagcounts", "def word_tag_counts (count_file):\r\n wordtagcounts = defaultdict(list)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split(\" \")\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0].strip())\r\n tag = fields[2].strip()\r\n word = fields[3].strip()\r\n wordtagcounts[word].append((tag, count)) \r\n f.close() \r\n return wordtagcounts", "def get_file_counts(filename):\n new_file = open(filename, \"r\")\n d = dict()\n for line in new_file: \n split_line = line.split()\n for word in split_line:\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n new_file.close()\n return d", "def count_freq(self, types=1):\n count_dict = {}\n if types == 1:\n for cat in self.categories:\n num_images = sum(\n [1 for i in self.data['annotations'] if i['category_id'] == self.cats_idx[cat]])\n count_dict[cat] = num_images\n elif types == 2:\n pass\n\n return count_dict", "def new_counts_dict():\n\n\tIN_FILES = [\"../_semtag_dataset_webanno_tfidf_inimigo.txt\",\"../_semtag_dataset_webanno_tfidf_publico.txt\" ]\n\n\ttxt = []\n\tfor in_file in IN_FILES:\n\t with codecs.open(in_file,\"r\",\"utf-8\") as fid:\n\t txt += fid.readlines()\n\t#words\n\twords = [w for m in txt for w in m.split()]\n\t#unique words\n\twords = list(set(words))\n\t#word index\n\twrd2idx = {w:-1 for w in words}\n\n\tset_trace()\n\t\n\twith open(COUNTS_DIC,\"w\") as fod:\n\t\tcPickle.dump(wrd2idx, fod, cPickle.HIGHEST_PROTOCOL)", "def print_wordcount(file_to_count):\n wordcount_dict = {}\n file_string = open(file_to_count).read()\n words = file_string.rstrip().split()\n \n for word in words:\n if word in wordcount_dict:\n value = wordcount_dict.get(word)\n value += 1\n wordcount_dict[word] = value\n else:\n wordcount_dict[word] = 1\n \n for key, value in wordcount_dict.items():\n print(key, value)\n \n return wordcount_dict", "def parseGroupsFileToDictOfCounts(groups_file):\n return parseGroupsFileToDict(groups_file, \"counts\")", "def get_tag_counts(label_matches):\r\n\ttag_counts = {}\r\n\tfor word_and_tag in label_matches.keys():\r\n\t\tcurrent_count = tag_counts.get(word_and_tag[_TAG], 0)\r\n\t\ttag_counts[word_and_tag[_TAG]] = current_count+1\r\n\treturn tag_counts", "def generate_counts():\n\n counts_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n doc_path = os.path.join(subfolder_path, filename)\n with open(doc_path, 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n counts_dict.update({doc_path: collections.Counter(normalised_text.split())})\n #print(counts_dict.get('file/crude/article560.txt'))\n\n vocab = generate_vocab()\n for value in counts_dict.values():\n for k in vocab.keys():\n if k not in value.items():\n value.update({k: 0})\n\n #print(counts_dict.get('file/crude/article560.txt'))\n return counts_dict", "def count_tags(tags):\n counts = {}\n for tag_list in tags.values():\n for tag in tag_list:\n if tag in counts:\n counts[tag] += 1\n else:\n counts[tag] = 1\n return counts", "def get_frequencies(filename):\n freq_dict = {}\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = 
os.path.join(PARSED, name + \".fix.xml\")\n #soup = bs(open(f, 'r'))\n soup = bs(codecs.open(f, 'r', encoding='utf-8'))\n for sent in soup.findAll('sentence'):\n for token in sent.findAll('token'):\n try:\n w = token.word.string\n if w in freq_dict:\n freq_dict[w] += 1\n else:\n freq_dict[w] = 1\n except AttributeError:\n pass\n return freq_dict", "def calcCountDict(TFdict):\n\n countDict = {}\n\n for doc in TFdict:\n for term in doc:\n if term in countDict:\n countDict[term] +=1\n else:\n countDict[term] = 1\n\n return countDict", "def vectorize_content(self, content):\r\n file_dict = {}\r\n for word in content:\r\n if word in file_dict:\r\n file_dict[word] += 1\r\n else:\r\n file_dict[word] = 1\r\n return file_dict", "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts", "def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = counts_dict.get(x, 0) + 1\r\n return counts_dict", "def get_counts(data):\n\n word_count = {}\n syll_count = {}\n\n infile = data.corpus\n try:\n\n open_file = codecs.open(infile, 'r', encoding='utf-16')\n for line in open_file:\n line = line.lower()\n # Remove tablet indexing info and line numbers. 
Grab only text data\n line = line.split(',')\n text = clean_line(line[7])\n\n # Update the occurrences of the words in the line\n for word in text.split():\n count = word_count.setdefault(word, 0)\n word_count[word] = count + 1\n\n # Track occurrences of syllables\n update_syllable_count(word, syll_count)\n\n open_file.close()\n except IOError:\n print(\"Cannot open: \" + infile)\n\n return (word_count, syll_count)", "def get_usercounts(self):\n word_counts = defaultdict(int) # {}\n with open(self.filename) as f:\n for line in f:\n if line:\n username, words = self.get_username_words(line) # username1, cat dog\n num_words = len(words.split()) # 1\n word_counts[username] += num_words # {u1: 3, u2: 4, }\n return word_counts", "def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts", "def tag():\n iso_list = []\n tags = [\"spatial_entity\", \"place\", \"motion\", \"location\", \"signal\", \"qslink\", \"olink\"]\n for token in doc:\n if token.norm_ in tags:\n iso_list.append(token.norm_)\n setList = list(set(iso_list))\n my_dict = {i: iso_list.count(i) for i in setList}\n\n for i in tags:\n if i.lower() not in my_dict:\n my_dict[i] = 0\n print(my_dict)", "def get_contents(path : str) -> dict[str, dict[str, int]]:\n with open(path, \"r\") as f:\n lines = f.readlines();\n \n contents = {}\n\n # Could use regex here, but hardly seems necessary due to the input size.\n for line in lines:\n try:\n bag, contains = line.split(\" bags contain \");\n except:\n continue\n\n contents[bag] = Counter()\n if contains.startswith(\"no\"): continue\n\n # separate types\n for item in contains.split(\", \"):\n parts = item.split();\n color = parts[1] + \" \" + parts[2]\n count = int(parts[0])\n contents[bag][color] = count\n \n return contents", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def get_all_counts(filename):\r\n column_keys, get_data = get_csv(filename)\r\n all_counts_dict = {}\r\n for key in column_keys[1:]:\r\n all_counts_dict[key] = {}\r\n\r\n for i,(k,v) in enumerate(get_data()):\r\n for key in column_keys[1:]:\r\n column = column_keys[1:].index(key)\r\n x = v[column]\r\n all_counts_dict[key][x] = all_counts_dict[key].get(x, 0) + 1\r\n return all_counts_dict", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def get_word_count(file_name):\n\n my_file = open(file_name)\n word_count = {}\n\n for line in my_file:\n stripped_line = line.rstrip()\n line_list = stripped_line.split(' ')\n line_list = [word.lower() for word in line_list]\n\n for word in line_list:\n word_count[word] = word_count.get(word, 0) + 1\n\n for word_in_count, count in word_count.iteritems():\n print \"{} {}\".format(word_in_count, count)\n\n my_file.close()\n # return word_count", "def fileCounter(directory):", "def main():\n genredictionary = defaultdict(float)\n\n file2 = open(\"msd-topMAGD-genreAssignment.cls\", \"r\")\n\n for line in file2:\n genredictionary[line.split(\"\t\")[1][0:-1]] += 1\n\n\n\n print genredictionary", "def _get_counts(self, X: np.ndarray) -> Dict[int, np.ndarray]:\n return {f: np.bincount(X[:, f].astype(int), minlength=n_cat) for f, n_cat in\n self.categories_per_feature.items()}", "def 
read_counts_file(path):\n labels_filename = path\n with open(labels_filename, 'rb') as f:\n lines = f.read().decode()\n lines = lines.split('\\n')\n lines = filter(None, lines)\n\n labels_to_counts = {}\n for line in lines:\n index = line.index(':')\n labels_to_counts[line[:index]] = int(line[index+1:])\n return labels_to_counts", "def wordcount(input_file_path):\n\n # Execute word count command on the input file and obtain the output\n result = subprocess.check_output(['wc', input_file_path], stderr=subprocess.STDOUT)\n result = result.decode('utf-8')\n\n # Split the output string into lines, words, and characters\n (lines, words, characters, _) = result.split()\n\n # Create metadata dictionary\n metadata = {\n 'lines': lines,\n 'words': words,\n 'characters': characters\n }\n\n # Store metadata in result dictionary\n result = {\n 'metadata': metadata\n }\n\n # Return the result dictionary\n return result", "def count_tokens(tokens, filename):\n wordcounts = Counter(tokens)\n #print(wordcounts)\n wordcounts = pd.Series(wordcounts, name=filename)\n #print(wordcounts.head())\n return wordcounts" ]
[ "0.7715151", "0.6849592", "0.6818911", "0.6436489", "0.64020634", "0.63433975", "0.63389724", "0.63330853", "0.6318879", "0.63180465", "0.62615657", "0.62595445", "0.61637187", "0.6145789", "0.61236924", "0.60972047", "0.60384583", "0.6026211", "0.60130787", "0.59736466", "0.59711623", "0.59676385", "0.59601086", "0.59014845", "0.58867353", "0.58822364", "0.58682144", "0.58491", "0.58370227", "0.58339024" ]
0.765532
1
Segments a given category frequency dictionary into a smaller number of categories, matching the relative frequencies indicated in weights as closely as possible.
def segmentDict(dict, weights): # Normalize weights weights = normalize(weights) segments = {} actual_weights = [] total_instances = 0 percent_instances = 0 i = 0 cat = None for k,v in dict.items(): total_instances += v if cat == None: cat = k[0].upper() sorted_d = sorted(dict.items(), key=operator.itemgetter(1), reverse=True) for k,v in sorted_d: percent_instances += v/total_instances segments[k] = cat + str(i) if percent_instances >= weights[i]: actual_weights += [percent_instances] percent_instances = 0 i += 1 actual_weights += [percent_instances] return [segments, actual_weights]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def categoryFrequency(categoryList): #TODO delete units\n n=len(categoryList)\n freq = dict()\n for i in categoryList:\n if i in freq.keys():\n freq[i]=freq[i]+1/float(n)\n else:\n freq[i]=1/float(n)\n sortedFreq=sorted([(v,k) for (k,v) in freq.items()], reverse = True)[:10]\n freq=dict([(k,v) for (v,k) in sortedFreq])\n return freq", "def weights_by_category(self):\n cate_weights = {}\n for cate in self.unique_category:\n cate_weights[cate] = self.weights[self.category == cate].sum()\n return pd.Series(cate_weights, index=self.unique_category)", "def catsPerWord(self, thresh):\n totalCats = 0\n words = 0\n for word, wordFreqs in self.lex.items():\n dictSize = len([f for f in wordFreqs.values() if f >= thresh])\n totalCats += dictSize\n if sum(wordFreqs.values()) >= 20:\n words += 1\n return float(totalCats)/float(words)", "def distance(dic_adj, categories_reduced, C0, CI, n_nodes):\r\n\tCxCy = []\r\n\r\n\tC0 = list(categories_reduced.get(C0)) #C0\r\n\tCI = set(categories_reduced.get(CI)) #C1\r\n\r\n\tfor s_value in C0[:n_nodes]:\r\n\t CxCy.extend(shortest_path_bfs_list(dic_adj, s_value, CI))\r\n\r\n\treturn statistics.median(CxCy)", "def get_weight(val):\n if val < 2000:\n category = 1\n elif val < 2500:\n category = 2\n elif val < 3000:\n category = 3\n elif val < 3500:\n category = 4\n else:\n category = 5\n return category", "def normalize_counter(c):\n total = sum(c.values())\n return {w:float(c[w])/total for w in c}", "def determine_category(weight):\n if weight < 52:\n return Category.FLY\n elif 52 <= weight < 57:\n return Category.FEATHER\n elif 57 <= weight < 63:\n return Category.LIGHT\n elif 63 <= weight < 69:\n return Category.WELTER\n elif 69 <= weight < 75:\n return Category.MEDIUM\n elif 75 <= weight < 81:\n return Category.MEDIUM_HEAVY\n elif 81 <= weight < 91:\n return Category.HEAVY\n elif weight >= 91:\n return Category.SUPER_HEAVY\n else:\n return TypeError", "def _analyse_topics(frequencies):\n freq = frequencies[0]\n freq_ref = frequencies[1]\n the_dict = weight_term_frequencies_one(freq,freq_ref)\n sorted_toks = sorted(the_dict.iteritems(),\n key=operator.itemgetter(1))\n \n sorted_toks.reverse()\n sorted_toks = sorted_toks[:400]\n final_toks = []\n for (k,v) in sorted_toks:\n best = True\n for (k1,v1) in sorted_toks:\n if k != k1:\n if (abs(v1-v)) < 0.2:\n if k in k1:\n best = False\n #print \"dropped\", k\n if best:\n final_toks.append((k,v))\n \n very_final_toks = {}\n for (k,v) in final_toks:\n close_val = [(k2,v2) for k2,v2 in final_toks[:50] if abs(v-v2) < 0.2]\n if len(close_val) < 1:\n very_final_toks[k] = v\n else:\n similar = [(k3,v3,len(k3)) for k3,v3 in close_val if difflib.SequenceMatcher(None,k,k3).quick_ratio() > 0.89]\n if len(similar) > 1:\n a,b,c = sorted(similar,key=operator.itemgetter(2))[0]\n very_final_toks[a] = b\n else:\n very_final_toks[k] = v\n \n very_final_toks = sorted(very_final_toks.iteritems(),\n key=operator.itemgetter(1))\n very_final_toks.reverse()\n return very_final_toks", "def divide_weighted_array(items_with_weights, max_segment_weight):\n total_weight = sum([weight for (item, weight) in items_with_weights])\n num_segments = ceil(total_weight / max_segment_weight)\n while True:\n # Try to divide to num_segments segments\n ret_segments = [[] for i in range(num_segments)]\n ret_total_weights_heap = [(0, i) for i in range(num_segments)]\n heapq.heapify(ret_total_weights_heap)\n failed = False\n for item, weight in items_with_weights:\n segment_weight, segment_idx = heapq.heappop(ret_total_weights_heap)\n if weight + 
segment_weight > max_segment_weight:\n failed = True\n break\n ret_segments[segment_idx].append(item)\n heapq.heappush(ret_total_weights_heap, (segment_weight + weight, segment_idx))\n if failed:\n num_segments += 1\n continue\n return ret_segments", "def weightKmers(self, weightDict):\n for k, w in weightDict.iteritems():\n assert k in self.kmers\n self.G.edge[k + \"_L\"][k + \"_R\"]['weight'] = w", "def calc_class_weights(label_freq):\n\n most_common_label_freq = label_freq[0]\n weighted_slots = sorted([(index, most_common_label_freq[1] / freq) for (index, freq) in label_freq])\n return [weight for (_, weight) in weighted_slots]", "def get_word_stats(segments, feats_dict):\r\n word_count_list = []\r\n word_lengths = []\r\n long_count = 0\r\n for segment in segments:\r\n word_count_list.append(len(segment))\r\n for word in segment:\r\n word_lengths.append(len(word))\r\n if len(word) > 6:\r\n long_count += 1\r\n # Compute segment level statistics\r\n feats_dict['wc_mean'] = np.mean(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_median'] = np.median(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_stdev'] = np.std(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_min'] = min(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_max'] = max(word_count_list) if word_count_list else float('nan')\r\n feats_dict['total_count'] = sum(word_count_list) if word_count_list else float('nan')\r\n\r\n # Compute fraction of words across whole call that are long (i.e. 6+ words)\r\n feats_dict['lw_count'] = (long_count / feats_dict['total_count']) if feats_dict['total_count'] else float('nan')\r\n # Compute mean length of any word used\r\n feats_dict['word_len'] = np.mean(word_lengths) if word_lengths else float('nan')", "def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)", "def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]", "def cdf(weights):\r\n\treturn np.cumsum(weights) / sum(weights)", "def get_segments(weights, threshold):\n marker_list = [True if i >= threshold else False for i in weights]\n i = 0\n final_pairs = []\n while i < len(weights):\n if marker_list[i]:\n start = i\n while i < len(weights) and marker_list[i]:\n i = i + 1\n end = i - 1\n if end-start > 1:\n final_pairs.append(start)\n final_pairs.append(end)\n i = i + 1\n return np.array(final_pairs)", "def correctFrequency(bias, dictionary):\r\n os.rename(\"cleanedData/\"+str(bias)+\"cleaned.txt\", \"cleanedData/\"+str(bias)+\"needToChange.txt\")\r\n tempDict = {}\r\n with open(\"cleanedData/\"+str(bias)+\"needToChange.txt\",'r') as f:\r\n for line in f:\r\n freq = line.split(\": \")\r\n tempDict[freq[0]] = int(freq[1].strip())\r\n for tup in dictionary:\r\n if tup[0] in tempDict:\r\n tempDict[tup[0]]+=tup[1]\r\n else:\r\n tempDict[tup[0]]=tup[1]\r\n sortedDict = sorted(tempDict.items(),key=operator.itemgetter(1),reverse=True)\r\n \r\n url = \"cleanedData/\" + str(bias) + 'cleaned.txt'\r\n with open(url, 'w') as f:\r\n for word in sortedDict:\r\n f.write(word[0] + \": \" +str(word[1])+\"\\n\") \r\n 
os.remove(\"cleanedData/\"+str(bias)+\"needToChange.txt\")", "def get_weight_category(self) -> WeightCategory:\n return WeightCategory.light if self.weight < 100 else WeightCategory.heavy", "def cut_by_count(self, min_count=1, max_count=None):\n word_count = list()\n for word, count in iteritems(self.word_count):\n word_count.append((word, count))\n\n self.clear_dictionary(keep_special=True)\n\n for word, count in word_count:\n if min_count is not None and count < min_count:\n continue\n if max_count is not None and count > max_count:\n continue\n self.add(word, count=count)\n\n print(\"After cut, Dictionary Size is %d\" % len(self))", "def calc_weight(str,dict):\n for i,c in enumerate(str):\n dict[c] += 10**(len(str)-(i+1))", "def wash_categories(product: dict):\n\n i = 0\n while i <= len(product['categories_tags']) - 1:\n if ':' in product['categories_tags'][i]:\n product['categories_tags'][i] = \\\n (product['categories_tags'][i].split(':'))[1]\n i += 1\n\n product['categories'] = product['categories'].split(',')\n i = 0\n while i <= len(product['categories']) - 1:\n if ':' in product['categories'][i]:\n product['categories'][i] = \\\n (product['categories'][i].split(':'))[1]\n i += 1", "def classify_by_weight(wts_file):\r\n\r\n nodes = {}\r\n data = []\r\n flag = 0\r\n with open(wts_file) as wf:\r\n for num, line in enumerate(wf, 0):\r\n if num == 0 or '#' in line or line == '\\n':\r\n continue\r\n else:\r\n data = line.split()\r\n if data[1] != '0':\r\n if data[1] not in nodes.keys():\r\n nodes[data[1]] = []\r\n nodes[data[1]].append(data[0])\r\n else:\r\n continue\r\n return nodes", "def __create_conv_weights(self, conv_weights):\n\n conv_xform_weights = []\n curr_n = 32\n k = 5\n for idx, conv_w in enumerate(conv_weights):\n\n curr_n = self.n_values[idx]\n W = self.__create_W_matrix(curr_n, conv_w)\n conv_xform_weights.append(W)\n\n return conv_xform_weights", "def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)", "def rebuild_by_freq(self, thd=3):\n self.word2idx = {'<unk>': 0, '<pad>': 1, '<mask>': 2}\n self.idx2word = ['<unk>', '<pad>', '<mask>']\n\n for k, v in self.word2frq.items():\n if v >= thd and (k not in self.idx2word):\n self.idx2word.append(k)\n self.word2idx[k] = len(self.idx2word) - 1\n\n print('Number of words:', len(self.idx2word))\n return len(self.idx2word)", "def normalize(counter):\n total = sum(counter.values())\n return [(char, cnt/total) for char, cnt in counter.most_common()]", "def reprocess_dict (dict1):\n out = {};\n for kk,value in dict1.iteritems():\n # parse keys\n (lo0,dur,decdec,freqmhz,nch),weight = kk[0].split(\"_\"),kk[1]\n if weight != \"natural\":\n weight += \":\" + kk[3];\n dec = -int(decdec.split(\"-\")[1]);\n freq = int(freqmhz[:-3])\n # parse layout\n lo = lo0;\n if lo[-2] in \"abcd\":\n lores = \"0.\"+lo[-1];\n lofreq = dict(a=650,b=800,c=1000,d=1400)[lo[-2]];\n lo = lo[:-2];\n else:\n lores = 0;\n lofreq = 0;\n lo = lo[4:];\n l00 = lo0[4:]\n wbins.add(weight);\n # make new entry\n out[lo0,lores,lofreq,freq,dec,weight] = [value,kk];\n return out;", "def catsAtFreqThresh(self, stopPoint):\n freqDist = self.freqDist(self.cats)\n mostFreq = 
max(freqDist.keys())\n rows = []\n catsSeen = 0\n totalCats = len(self.cats)\n for threshold in xrange(0, mostFreq):\n totalCats -= freqDist.get(threshold, 0)\n if threshold >= stopPoint:\n break\n rows.append(totalCats)\n return rows", "def gen_categories(self, min_threshold=0):\r\n category_df = self.poi_df[['poi_index', 'category_index']].copy()\r\n category_counter = Counter(category_df['category_index'])\r\n category_counter = pd.Series(category_counter.values(), index=category_counter.keys())\r\n valid_category = category_counter[category_counter >= min_threshold].index\r\n category_df = category_df[category_df['category_index'].isin(valid_category)]\r\n return category_df.sort_values('poi_index').to_numpy()", "def gen_categories(self, min_threshold=0):\n category_df = self.poi_df[['poi_index', 'category_index']].copy()\n category_counter = Counter(category_df['category_index'])\n category_counter = pd.Series(category_counter.values(), index=category_counter.keys())\n valid_category = category_counter[category_counter >= min_threshold].index\n category_df = category_df[category_df['category_index'].isin(valid_category)]\n return category_df.sort_values('poi_index').to_numpy()" ]
[ "0.5892979", "0.5838423", "0.5622817", "0.5414212", "0.53944296", "0.5326376", "0.52474064", "0.52364326", "0.5097868", "0.5033735", "0.502306", "0.4973841", "0.4943396", "0.492052", "0.4912217", "0.48782256", "0.48415238", "0.4830099", "0.48221216", "0.4802242", "0.48008767", "0.47920054", "0.47804457", "0.47743642", "0.47722092", "0.47598055", "0.47500128", "0.47495165", "0.4746412", "0.47381723" ]
0.6690722
0
Get all files in a directory. Check each file's extension to determine whether it is a changeable file. Write qualified files to the changeable files list. The copyright date is checked by a different procedure.
def get_requested_files(directory_path, file_extension_list, changeable_files_list):
    # process each file in the directory
    filepaths = glob.glob(directory_path + "/*")
    for filepath in filepaths:
        unused, ext = os.path.splitext(filepath)
        # check for the requested files
        if ext in file_extension_list:
            filepath = filepath.replace('\\', '/')
            changeable_files_list.append(filepath)
    changeable_files_list.sort()
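A minimal usage sketch for the routine above, assuming it lives in a module that imports glob and os; the directory path and extension list are illustrative placeholders, not values from the source.

# hypothetical call collecting C++ sources from a sample directory
changeable_files = []
get_requested_files("./src", [".cpp", ".h"], changeable_files)
for path in changeable_files:
    print(path)   # prints each qualified file, already sorted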
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_date(dest=dest):\n for root, _, files in os.walk(dest):\n ignore = [\"README.md\",\"SUMMARY.md\"]\n _ = [edit_files(root + \"/\" + file) for file in files if (file not in ignore and file.endswith(\".md\"))]", "def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def get_all_files(cwd):\n return os.listdir(cwd)", "def all_changed_files(self):\n return [path_to_file_type(os.path.join(self.path, p)) for p in self.changed_paths() if p]", "def scan_dir(self, dir):\n import pathlib\n import magic\n\n for filename in find_all_files(dir):\n self.filelist.append({\n \"filename\": filename,\n \"mime\": magic.from_file(filename, mime=True),\n \"size_bytes\": os.path.getsize(filename),\n \"ext\": pathlib.Path(filename).suffix\n })", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def get_update_file_list(directory):\n update_files_list = set(UPDATE_FILES_STATIC)\n update_files_exclude = set(UPDATE_FILES_EXCLUDE)\n\n for root, dirs, files in os.walk(path.join(PATH_ROOT, directory)):\n for filen in files:\n if UPDATE_FILES_RE.match(filen):\n filep = path.join(root, filen)\n update_files_list.add(path.relpath(filep, PATH_ROOT))\n \n return update_files_list - update_files_exclude", "def retrieve_all_files(self):\n result = utilities.rscandir(\n self.folder(), ignore_dirs=[\".git\"])\n\n return result", "def touch_files_dependent_on_changes(kymera_path, dirs, suffixes, changes):\n for dir in dirs:\n if dir[0] != '/':\n # This is a relative path to kymera root\n dir = kymera_path + dir\n if not os.path.exists(dir):\n print \"Directory %s included in ALL_SRCDIRS, ALL_INCDIRS or CFG_LIBS doesn't exist, continuing...\" % dir\n else:\n for file_name in os.listdir(dir):\n full_file_path= os.path.join(dir, file_name)\n # Filter a list of filenames down to those with one of the given suffixes\"\n if matching_file(suffixes, full_file_path):\n # Find all the files from a set with one of a list of suffices\n # containing one of the changed definitions\n if grep_words(changes, full_file_path):\n print \"Mark file for rebuild:\", full_file_path\n touch_file(full_file_path)", "def util_build_file_list(dirname, IGNORE_CREGEX):\n outlist = []\n logging.info('Scanning directory: %s', dirname)\n try:\n with os.scandir(dirname) as filelist:\n filelist_filt = [a for a in filelist if a.is_file() and not any(list(map(lambda rg: True if rg.match(a.name) else False, IGNORE_CREGEX)))]\n outlist = [ {'dir': dirname, 'filename': a.name, 'ctime': a.stat().st_ctime, 'mtime': a.stat().st_mtime} for a in filelist_filt ]\n dirlist = [ a for a in filelist if a.is_dir() 
]\n if len(dirlist) > 0:\n outlist.append(list(map(util_build_file_list, dirlist)))\n except FileNotFoundError:\n logging.error('Directory not found: %s' % dirname)\n pass\n except Exception as e:\n logging.error('Error due to %s' % e) \n logging.debug('Filelist generated for %s as %s' % (dirname, outlist))\n return outlist", "def scan_dir(self, directory=\".\"):\n for root, dirs, files in os.walk(directory, topdown=False):\n for name in files:\n for filetype in self.allowed_file_types:\n if name.split(\".\")[-1] == filetype:\n self.song_list.append(os.path.join(root, name))", "def GetAllFiles(self):\r\n\t\tdir_list = []\r\n\t\tdir_list.append(self.path) \r\n\t\tfor dir in dir_list: \r\n\t\t\tfiles = os.listdir(dir)\r\n\t\t\tfor file in files:\r\n\t\t\t\tfull_name = dir + \"\\\\\\\\\" + file\r\n\t\t\t\tif(os.path.isdir(full_name)): \r\n\t\t\t\t\tif(file[0] == '.'):\t# 排除隐藏文件夹\r\n\t\t\t\t\t\tpass \r\n\t\t\t\t\telse:\t# 添加非隐藏文件夹 \r\n\t\t\t\t\t\tdir_list.append(full_name) \r\n\t\t\t\tif(os.path.isfile(full_name)):\r\n\t\t\t\t\tfor type in self.types.split('|'):\r\n\t\t\t\t\t\tif file.find(type) != -1:\r\n\t\t\t\t\t\t\t# 添加文件 \r\n\t\t\t\t\t\t\tself.file_list.append(full_name)\r\n\t\t\t\t\t\t\t#print \"Add file \" + full_name\r", "def __get_files_to_rename(self, directory):\n return [file for file in self.__get_files(directory, \"pdf\") if \"_compress_\" in file]", "def list_all_files(dir):\n\n result = []\n for root, _, filenames in os.walk(dir):\n for name in filenames:\n filename, ext = os.path.splitext(name)\n if ext == '.cs' or ext == '.xaml':\n result.append(os.path.join(root, name))\n return result", "def collect_files(path, audio_files):\n\n for entry in os.scandir(path):\n if entry.is_dir():\n collect_files(entry.path, audio_files)\n if entry.is_file() and (entry.path.endswith(\".flac\") or entry.path.endswith(\".wav\")):\n audio_files.append(entry.path)", "def listen_files_list(self, directory):\r\n files = [f for f in os.listdir(directory) if\r\n f[-len(self.fileExt):] == self.fileExt]\r\n return files", "def _findChangedFiles(self):\n changedFiles = []\n # calculate and update checksums always for ALL files\n for observedFile in self.observedFiles:\n if os.path.isfile(observedFile.filePath):\n currentChecksum = checksumFile(observedFile.filePath)\n else:\n currentChecksum = None\n # different values with None value checking\n if ((observedFile.lastChecksum is None\n and currentChecksum is not None)\n or observedFile.lastChecksum != currentChecksum):\n changedFiles.append(observedFile) # notify change\n observedFile.lastChecksum = currentChecksum # update checksum\n\n return changedFiles", "def getContentFiles():\n contentFiles = []\n for contentDir, subDirs, filenames in os.walk(sourceDir, followlinks=True):\n if shouldIgnore(contentDir):\n subDirs[:] = []\n continue\n for filename in filenames:\n if not shouldIgnore(filename):\n cf = ContentFile(os.path.join(contentDir, filename))\n log(`cf.path`)\n contentFiles.append(cf)\n return contentFiles", "def __get_files(self, directory, file_extension):\n path_spec = \"{}**/*.{}\" if self.__config.recursive() else \"{}*.{}\"\n return glob.glob(path_spec.format(directory, file_extension), recursive=self.__config.recursive())", "def Main(root_directory):\n filepaths = GetAllFilepaths(root_directory)\n for filepath in filepaths:\n parser = fileparser.CreateParser(filepath)\n if not parser:\n ReportWarning('cannot find a parser for file %s, skipping...' 
%\n filepath)\n continue\n old_file_contents = ReadFileIntoString(filepath)\n comment_blocks = parser.FindAllCommentBlocks(old_file_contents)\n if not comment_blocks:\n ReportWarning('cannot find any comment blocks in file %s' %\n filepath)\n old_copyright_block = parser.FindCopyrightBlock(comment_blocks)\n if not old_copyright_block:\n ReportWarning('cannot find copyright block in file %s' % filepath)\n (year, holder) = parser.GetCopyrightBlockAttributes(old_copyright_block)\n if holder and not ConfirmAllowedCopyrightHolder(holder):\n ReportWarning(\n 'unrecognized copyright holder \"%s\" in file %s, skipping...' % (\n holder, filepath))\n continue\n new_copyright_block = parser.CreateCopyrightBlock(year, holder)\n if old_copyright_block:\n new_file_contents = old_file_contents.replace(\n old_copyright_block, new_copyright_block, 1)\n else:\n new_file_contents = new_copyright_block + old_file_contents\n WriteStringToFile(new_file_contents, filepath)", "def __get_files(self):\n if len(self.files) == 0:\n self.files = os.listdir(self.__path())\n self.files.sort()\n if self.parent:\n self.files.insert(0, \"..\")\n for index, name in enumerate(self.files, start=1):\n if self.__is_dir(self.__make_path(name)):\n self.files[index] = name + \"/\"", "def getFiles(directory, showName, extension):\n os.chdir(directory)\n \n list = []\n \n for file in glob.glob(\"*\"+showName+\"*.\"+extension):\n list.append(file)\n \n return list", "def update_reports():\n return os.listdir('./reports')", "def list_files(path, scan_new_files=False):\n seen = set()\n for root, _, files in os.walk(path):\n for fname in files:\n if fname.endswith('.swp'):\n continue # Temporary files left around if CTRL-C-ing while downloading.\n if fname == \"OWNERS\":\n continue # OWNERS file should not be uploaded.\n fpath = os.path.join(root, fname)\n if not os.path.isfile(fpath) or fname.startswith('.'):\n continue\n if fpath.endswith(SUFFIX):\n fpath = fpath[:-len(SUFFIX)]\n elif not scan_new_files:\n continue\n if fpath not in seen:\n seen.add(fpath)\n yield fpath", "def readdir(self, path, fh, *args, **pargs):\n path = self.clean_path(path)\n files = os.listdir(path)\n i = 0\n while(i < len(files)):\n if(os.path.splitext(files[i])[1] == '.cue'):\n cue_file = files.pop(i)\n try:\n to_add, meta, to_remove = self.get_cue_files(\n os.path.join(path, cue_file), verbose=self._verbose)\n files.extend(to_add.keys())\n for f in to_remove:\n files.remove(f)\n except Exception:\n print(f'Error parsing {cue_file}:', file=sys.stderr, flush=True)\n import traceback\n traceback.print_exc()\n else:\n i += 1\n\n return ['.', '..'] + files", "def list_all_files(\n dir_path: str = \"./domain\", ext: str = \".py\", excludes=None, includes=None, return_base_name=False\n) -> List[str]:\n files = []\n for entry in os.scandir(dir_path):\n if entry.is_dir():\n files += list_all_files(entry.path, ext=ext, excludes=excludes, return_base_name=return_base_name)\n elif entry.is_file():\n if not ext or (ext and entry.path.endswith(ext)):\n if excludes and entry.path.endswith(excludes):\n continue\n if includes and not entry.path.endswith(includes):\n continue\n if return_base_name:\n files.append(os.path.basename(entry.path))\n else:\n files.append(entry.path)\n else:\n pass\n return files", "def get_files_from_directory(self, fordir):\n if fordir.endswith('.jack'):\n return [os.path.abspath(fordir)]\n else:\n directory = fordir\n return ['{}'.format(os.path.abspath(os.path.join(directory, each))) for each in os.listdir(fordir) if each.endswith('.jack')]", 
"def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. \n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []" ]
[ "0.6463595", "0.60821253", "0.60237235", "0.6008348", "0.60045683", "0.59918845", "0.5943025", "0.59408975", "0.5901942", "0.58879", "0.58497125", "0.5778965", "0.57649547", "0.5728319", "0.5727412", "0.57250494", "0.5697347", "0.56716734", "0.5655342", "0.5637762", "0.5625007", "0.5618135", "0.5607927", "0.56074256", "0.56059754", "0.5584274", "0.55830467", "0.558095", "0.55802727", "0.55751926" ]
0.6124572
1
Find and replace the copyright date in the input file_path. The new file content is output to the updated_file_list in the calling function. "True" is returned if the file was updated, otherwise "False".
def modify_input_file(filepath, updated_file_list):
    lines = 0               # current input line number
    file_changed = False    # the file has changed
    # find and change matching lines
    pattern = re.compile("[Cc]opyright")
    with open(filepath, mode='r', encoding='utf-8', newline='') as file_in:
        for line in file_in:
            lines += 1
            if pattern.search(line) and __old_date in line:
                line = line.replace(__old_date, __new_date)
                file_changed = True
            updated_file_list.append(line)
    return file_changed
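A hedged example call for the routine above; __old_date and __new_date are module-level constants assumed from context, and the file name is a placeholder.

# assumes the module defines, e.g., __old_date = "2019" and __new_date = "2020"
updated_lines = []
if modify_input_file("project/main.cpp", updated_lines):
    # every input line is buffered, with matching copyright lines rewritten
    print("copyright updated,", len(updated_lines), "lines buffered")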
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_files(i_file):\n a_file = open(i_file, \"r\")\n content = a_file.readlines()\n\n content[3] = f\"years: {datetime.now().year}\\n\"\n content[4] = f'lastupdated: \"{date.today()}\"\\n'\n a_file = open(i_file, \"w\") #open the same file and overrite line3 & 4\n a_file.writelines(content)\n\n a_file.close()", "def check_copyright_year(filename: str, *, copyright_line: str, is_newly_created: bool) -> None:\n year = copyright_line[12:16]\n if is_newly_created and year != _current_year:\n raise HeaderCheckFailure(f'{filename}: copyright year must be {_current_year} (was {year})')\n elif not _current_century_regex.match(year):\n raise HeaderCheckFailure(\n f\"{filename}: copyright year must match '{_current_century_regex.pattern}' (was {year}): \" +\n f\"current year is {_current_year}\"\n )", "def set_file_copyright(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_copytext_set:\n self.file_copytext_set = True\n if validations.validate_file_cpyright(text):\n if isinstance(text, string_types):\n self.file(doc).copyright = str_from_text(text)\n else:\n self.file(doc).copyright = text # None or NoAssert\n return True\n else:\n raise SPDXValueError('File::CopyRight')\n else:\n raise CardinalityError('File::CopyRight')\n else:\n raise OrderError('File::CopyRight')", "def update_license_file(data_dir):\n license_file = os.path.join(data_dir, LICENSE_FILENAME)\n temp_dir = tempfile.mkdtemp()\n gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)\n try:\n _, headers = urlretrieve(LICENSE_URL, gh_license_filename)\n except IOError as e:\n # Python 2 uses the unhelpful IOError for this. Re-raise as the more\n # appropriate URLError.\n raise URLError(e.strerror)\n\n with open(gh_license_filename, \"rb\") as f:\n github_license = f.read()\n\n try:\n with open(license_file, \"rb\") as f:\n current_license = f.read()\n except (IOError, OSError):\n current_license = b\"\"\n\n github_digest = hashlib.sha256(github_license).hexdigest()\n current_digest = hashlib.sha256(current_license).hexdigest()\n\n if github_digest == current_digest:\n return False\n\n shutil.copyfile(gh_license_filename, license_file)\n shutil.rmtree(temp_dir, ignore_errors=True)\n return True", "def licensify(command_line_args):\n with open(command_line_args.license) as fp:\n license_header = fp.read()\n files = [\n path.join(dirname, f)\n for dirname, _, filenames in walk(command_line_args.directory)\n for f in fnmatch.filter(filenames, command_line_args.files)\n if not (command_line_args.exclude and fnmatch.fnmatch(f, command_line_args.exclude))\n ]\n try:\n result = apply_license_header(\n license_header, files,\n command_line_args.check, command_line_args.dry_run or command_line_args.check\n )\n except LicensesOutOfDateError as error:\n stdout.write(repr(error))\n exit(1)\n if result:\n message = 'The following files have been changed: {}'.format(', '.join(result))\n else:\n message = 'No files changed'\n stdout.write(message + linesep)", "def test_ensure_copyright():\n issues = []\n regex = re.compile(r\"# Copyright \\d{4}(-\\d{4})? 
Canonical Ltd.$\")\n for filepath in get_python_filepaths():\n if os.stat(filepath).st_size == 0:\n continue\n\n with open(filepath, \"rt\", encoding=\"utf8\") as fh:\n for line in itertools.islice(fh, 5):\n if regex.match(line):\n break\n else:\n issues.append(filepath)\n if issues:\n msg = \"Please add copyright headers to the following files:\\n\" + \"\\n\".join(issues)\n pytest.fail(msg, pytrace=False)", "def update_frozen_license() -> int:\n srcpath = Path(\"doc/src/license.rst\")\n dstpath = Path(\"cx_Freeze/initscripts/frozen_application_license.txt\")\n try:\n content = srcpath.read_text(encoding=\"utf-8\")\n except OSError:\n print(ERROR1, file=sys.stderr)\n return 1\n content = FROZEN_HEADER + \"\\n\".join(content.splitlines()[1:]) + \"\\n\"\n try:\n dstpath.write_text(content, encoding=\"utf-8\")\n print(dstpath, \"ok\")\n except OSError as io_error:\n print(ERROR2, f\"({io_error}).\", file=sys.stderr)\n return 1\n return 0", "def fix_file_dates(source_file_name, dest_file_name):\n shutil.copystat(source_file_name, dest_file_name)\n print(\"Fixed dates for \" + dest_file_name)", "def updateFile(filename, content):\n\tfilename = adaptPath(filename)\n\tif filename != None:\n\t\ttry:\n\t\t\toldContent = open(filename, \"r\").read()\n\t\texcept IOError:\n\t\t\toldContent = \"\"\n\t\tif oldContent != content:\n\t\t\tfile = open (filename, \"w\")\n\t\t\tfile.write(content)\n\t\t\tfile.close()\n\treturn content", "def changeDate(names, date, ctlFunc = lambda s, d: True): \n\n # parse date\n try:\n day, month, year = re.fullmatch(\"(\\d\\d)(\\d\\d)(\\d\\d\\d\\d)\", date).groups()\n except AttributeError as e:\n raise\n \n # convert strings to ints\n day = int(day)\n month = int(month)\n year = int(year)\n \n for name in names:\n\n if ctlFunc(name, \"*DATE*\"):\n\n # get HH MM SS from file\n p_timestamp = os.path.getmtime(name)\n mdt = datetime.datetime.fromtimestamp(p_timestamp)\n \n # construct new datetime object with file time and provided date\n mdt = datetime.datetime(year, month, day, mdt.hour, mdt.minute, mdt.second)\n\n # change to new file timestamp by passing in datetime.timestamp() \n os.utime(name, (mdt.timestamp(), mdt.timestamp()))", "def update(snippet_original, filename):\n## THIS IS APPENDING, NOT REPLACING\n\tlogging.info(\"Searching for {} in {}\".format(snippet_original, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"r+\") as f:\n\t\treader = csv.reader(f)\n\t\twriter = csv.writer(f)\n\t\tlogging.debug(\"Searching for '{}'\".format(snippet_original))\n\t\tin_file = False\n\t\tfor row in reader:\n\t\t\tif str(row[1]) == snippet_original:\n\t\t\t\tin_file = True\n\t\t\t\tprint row\n\t\t\t\tnew_text = raw_input(\"Insert new snippet text: \")\n\t\t\t\trow = writer.writerow([str(row[0]), new_text])\n\t\t\t\tprint row\n\t\tif in_file == False:\n\t\t\tprint \"That's not in this file\"\n\tlogging.debug(\"Search complete\")\n\treturn snippet_original, filename", "def update_date(dest=dest):\n for root, _, files in os.walk(dest):\n ignore = [\"README.md\",\"SUMMARY.md\"]\n _ = [edit_files(root + \"/\" + file) for file in files if (file not in ignore and file.endswith(\".md\"))]", "def _update_pyrex_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % str(self.VersionTuple)\n else:\n print \"No 
version string found in %s\" % filename\n return (lines, found_version_line)", "def update_file(file_to_write, content):\n if read_file(file_to_write, stop_if_failure = False, split_to_lines = False) != content:\n if debug:\n print file_to_write + \" was updated!\"\n with open(file_to_write,\"w\") as targetfile:\n targetfile.write(content)\n return True\n else:\n if debug:\n print file_to_write + \" remains the same.\"\n return False", "def update_files(regex_replace_list, values, is_release=True):\n # Copy the regex replace list, but update the replace strings to include\n # the supplied values.\n regex_replace_list = [(reg, repl.format(**values)) for (reg, repl) in regex_replace_list]\n filens = get_update_file_list(values[\"calico-version\"])\n for filen in filens:\n old_lines = load_file(filen)\n new_lines = []\n include = True\n master_block = False\n for line in old_lines:\n if is_release:\n if line.startswith(BLOCK_INDICATOR_MASTER_START):\n assert not master_block, \"<!--- start indicator with no end in file %s\" % filen\n master_block = True\n include = False\n continue\n if line.startswith(BLOCK_INDICATOR_MASTER_ELSE):\n assert master_block, \"<!--- else indicator with no start in file %s\" % filen\n include = True\n continue\n if line.startswith(BLOCK_INDICATOR_MASTER_END):\n assert master_block, \"<!--- end indicator with no start in file %s\" % filen\n include = True\n master_block = False\n continue\n if include:\n for regex, replace in regex_replace_list:\n line = regex.sub(replace, line)\n new_lines.append(line)\n assert not master_block, \"<!--- start indicator with no end in file %s\" % filen\n replace_file(filen, new_lines)", "def format_finder(files):\r\n\r\n file_format = ['%Y', '%m', '%d']\r\n year = ''\r\n month = ''\r\n da = ''\r\n\r\n for file in files:\r\n\r\n index = file.find('_')\r\n date = file[: index]\r\n if '-' in date:\r\n separator = '-'\r\n else:\r\n separator = '_'\r\n date = date.split(separator)\r\n\r\n for d in range(len(date)):\r\n\r\n # If the date doesn't contain decimal (Eg: August) then it would return None\r\n if not date[d].isdecimal():\r\n return None\r\n\r\n # If the element in the date is of length greater then 2 then it would be a year (Eg: 2020)\r\n # And that value is set as the index of year\r\n if len(date[d]) > 2:\r\n year = d\r\n\r\n # If the integer of element in the date is of length greater then 12 then it would be a date (Eg: 25)\r\n # And that value is set as the index of date\r\n elif int(date[d]) > 12:\r\n da = d\r\n\r\n # If Both year and date are set, then the correct index for the month would be 3- (year+date)\r\n # Eg: 3 -(0+1)\r\n if (year != '') and (da != ''):\r\n month = 3 - (year + da)\r\n break\r\n\r\n # If Month is set, then we change the format according to their set value\r\n # Eg: format = ['%Y', '%m', '%d'], and year = 1, da = 0, month = 2\r\n # Then format[year=1] = '%Y'\r\n # Then format[da=0] = '%d'\r\n # Then format[month=2] = '%m'\r\n # format = ['%d', '%Y', '%m']\r\n if month:\r\n file_format[year] = '%Y'\r\n file_format[month] = '%m'\r\n file_format[da] = '%d'\r\n break\r\n else:\r\n # The script executes this only if none of the files had an date element( Which is not year)\r\n # That was greater than 12, Eg: 2020-06-10\r\n # Meaning that we cannot know for sure which element represents the date/month\r\n # Hence we arbitrarily assign one element as date and another as month\r\n if year != 0:\r\n # If the index of year is zero, we let the format to be same as it was assigned first\r\n # Else we 
arbitrarily assign '0' th index to month\r\n file_format[year] = '%Y'\r\n file_format[0] = '%m'\r\n file_format[3 - year] = '%d'\r\n return f'{file_format[0]}-{file_format[1]}-{file_format[2]}'", "def _update_python_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % self.Version\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def creation_year(path_to_file):\n if platform.system() == 'Windows':\n print(\"last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n modtime = time.ctime(os.path.getmtime(path_to_file))\n \n print(\"created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n modtime = datetime.datetime.strptime(modtime, \"%a %b %d %H:%M:%S %Y\")\n modtime = datetime.datetime.strftime(modtime, \"%Y\")\n return modtime", "def append_date_file(filename, search_str=\"----\", append_time=True, include_second=False,\n prepend=None):\n filename_final = \"\"\n if filename.find(search_str) >= 0:\n str_date, str_time = calc_date_time(include_sec=include_second)\n if append_time:\n if prepend and isinstance(prepend, str):\n filename_final = filename.replace(\n search_str, \"_\".join([prepend, str_date, str_time]))\n else:\n filename_final = filename.replace(\n search_str, \"_\".join([str_date, str_time]))\n else:\n if prepend and isinstance(prepend, str):\n filename_final = filename.replace(\n search_str, \"_\".join([prepend, str_date]))\n else:\n filename_final = filename.replace(\n search_str, str_date)\n else:\n filename_final = filename\n\n return filename_final", "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, filename).replace(\"\\\\\",\"/\")\n\n update_if_different(filename1, filename2)", "def saveInGit(file_content, file_name, report_date):\n file_path = \"/\".join([crs_reports_dir,file_name])\n existed = os.path.isfile(file_path) \n if existed:\n # TODO Check that this specific version of this file isn't already\n # in the comment history\n pass\n with open(file_path, 'w') as f: \n f.write(file_content)\n f.close()\n gitAdd(file_name, crs_reports_dir)\n if existed:\n # TODO Set the commit date to be the CRS release date\n gitCommit(file_name, crs_reports_dir, '%s was updated' % file_name,\n report_date)\n else:\n gitCommit(file_name, crs_reports_dir, 'Added %s' % file_name,\n report_date)\n \n \n \n # 1.) If file_name exists:\n # 1.)overwrite it, \n # 2.) Commit an update to the file_name\n # else:\n # 1.) Create and save a new file\n # 2.) 
Commit the new file", "def copyright(files = 0):\n\n header = __getCopyrightHeader().split(\"\\n\") + ['', '']\n \n # Add\n files = files.split(\" \") if not files==0 else list_js_files()\n for file in files:\n name = file\n buf = open(file, \"r\").read()\n \n f = open(file,\"w\")\n f.write(\"\\n\".join(header))\n f.write(re.compile(\"^\\s*((\\/\\*\\!(.*?)\\*\\/)\\s*)*\",re.DOTALL).sub(\"\",buf))\n f.close()\n \n print fabric.colors.green(\"COPYRIGHTED \", True) + name", "def update_metadata_csv(self, source):\n timestamp = os.path.getmtime(source)\n filedate = datetime.datetime.fromtimestamp(timestamp)\n return self.update_metadata_date(filedate)", "def _determines_copyright_dates() -> str:\n this_year = datetime.now().year\n copyright_start_date = configuration.get_value(ConfigurationVariable.COPYRIGHT_START_DATE)\n return _to_copyright_date_string(copyright_start_date, this_year)", "def _determines_copyright_dates() -> str:\n this_year = datetime.now().year\n copyright_start_date = configuration.get_value(ConfigurationVariable.COPYRIGHT_START_DATE)\n return _to_copyright_date_string(copyright_start_date, this_year)", "def change_line(file_path, included_strings, excluded_strings, replacement):\n if not os.path.isfile(file_path):\n print file_path+\" file not found!\"\n return\n temp_path = file_path+\"_temp\"\n temp_file = open(temp_path, 'w')\n with open(file_path, 'r') as f:\n for line in f:\n if all([x in line for x in included_strings]) and \\\n all([x not in line for x in excluded_strings]):\n temp_file.write(replacement)\n else:\n temp_file.write(line)\n temp_file.close()\n os.system(\"mv \"+temp_path+\" \"+file_path)\n return", "def update_version_files (component):\n\n vprint (\"Updating version files for \" + component)\n\n retval = []\n\n ## Update component/VERSION.txt\n path = get_path(component, \"VERSION.txt\")\n with open (path, \"r+\") as version_file:\n new_version = re.sub (component + \" version .*\",\n \"%s version %s, released %s\" % (component,\n comp_versions[component + \"_version\"],\n release_date),\n version_file.read ())\n if opts.take_action:\n version_file.seek (0)\n version_file.truncate (0)\n version_file.write (new_version)\n else:\n print (\"New version file for \" + component)\n print (new_version)\n\n vprint (\"Updating Version.h for \" + component)\n\n retval.append(path)\n\n ## Update COMPONENT/component/Version.h\n comp_l = len(component + \"_\")\n parts = {k[comp_l:]:v for (k, v) in comp_versions.items() if k.startswith(component)}\n parts[\"comp\"] = component\n version_header = \"\"\"\n// -*- C++ -*-\n// This is file was automatically generated by $ACE_ROOT/bin/make_release.py\n\n#define {comp}_MAJOR_VERSION {major}\n#define {comp}_MINOR_VERSION {minor}\n#define {comp}_MICRO_VERSION {micro}\n#define {comp}_VERSION \\\"{version}\\\"\n#define {comp}_VERSION_CODE 0x{code:x}\n#define {comp}_MAKE_VERSION_CODE(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n\"\"\".format(**parts)\n\n path = get_path(component, component.lower (), \"Version.h\")\n if opts.take_action:\n with open (path, 'w+') as version_h:\n version_h.write (version_header)\n else:\n print (\"New Version.h for \" + component)\n print (version_header)\n\n retval.append(path)\n\n # Update component/PROBLEM-REPORT-FORM\n vprint (\"Updating PRF for \" + component)\n\n version_line_re = re.compile (r\"^\\s*(\\w+) +VERSION ?:\")\n path = get_path(component, \"PROBLEM-REPORT-FORM\")\n\n with open (path, 'r+') as prf:\n new_prf = \"\"\n for line in prf.readlines ():\n match = version_line_re.search 
(line)\n if match is not None:\n vprint (\"Found PRF Version for \" + match.group (1))\n new_version = comp_versions[match.group(1) + \"_version\"]\n line = version_re.sub (new_version, line)\n\n new_prf += line\n\n if opts.take_action:\n prf.seek (0)\n prf.truncate (0)\n prf.writelines (new_prf)\n else:\n print (\"New PRF for \" + component)\n print (\"\".join (new_prf))\n\n retval.append(path)\n\n return retval", "def process_file(self, filepath, only_if_updated=True):\n raise NotImplementedError()", "def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. \n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide", "def needs_refreshing(filepath):\n today = datetime.date.today()\n year = today.year - 2000 # Obviously does not work prior to 2000\n if today.month <= 6:\n current_season = str(year - 1) + str(year)\n else:\n current_season = str(year) + str(year + 1)\n return (current_season in filepath and\n last_modified_date(filepath) != today)" ]
[ "0.604624", "0.5977958", "0.5909992", "0.5897362", "0.58621687", "0.58343244", "0.5745788", "0.5733377", "0.55461645", "0.5482797", "0.5452656", "0.5441414", "0.5390062", "0.53553164", "0.5338147", "0.528025", "0.52801585", "0.5274388", "0.5259699", "0.5258172", "0.5230019", "0.5206075", "0.5202175", "0.5177515", "0.5177515", "0.5150398", "0.5145545", "0.5133074", "0.5130559", "0.5129895" ]
0.7235687
0
Update the version number in the source files. The directory list and file extensions are defined in main().
def update_source_files(source_directory_list, source_extension_list):
    # get source files in the directory list
    source_total = 0
    for unused, source_directory in enumerate(source_directory_list):
        source_files_list = []
        get_requested_files(source_directory, source_extension_list, source_files_list)
        # update the files with shared object references
        for unused, source_file in enumerate(source_files_list):
            updated_file = []
            file_changed = modify_input_file(source_file, updated_file)
            if file_changed:
                filepath = get_printble_filepath(source_file)
                print(filepath)
                source_total += 1
                if __file_update:
                    write_output_file(updated_file, source_file)
    print("Total Files", source_total)
    print()
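An illustrative driver for the routine above; the two lists stand in for the values the docstring says main() supplies and are assumptions.

# hypothetical stand-ins for the lists built in main()
source_directory_list = ["./src", "./include"]
source_extension_list = [".cpp", ".h", ".hpp"]
update_source_files(source_directory_list, source_extension_list)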
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_version_files (component):\n\n vprint (\"Updating version files for \" + component)\n\n retval = []\n\n ## Update component/VERSION.txt\n path = get_path(component, \"VERSION.txt\")\n with open (path, \"r+\") as version_file:\n new_version = re.sub (component + \" version .*\",\n \"%s version %s, released %s\" % (component,\n comp_versions[component + \"_version\"],\n release_date),\n version_file.read ())\n if opts.take_action:\n version_file.seek (0)\n version_file.truncate (0)\n version_file.write (new_version)\n else:\n print (\"New version file for \" + component)\n print (new_version)\n\n vprint (\"Updating Version.h for \" + component)\n\n retval.append(path)\n\n ## Update COMPONENT/component/Version.h\n comp_l = len(component + \"_\")\n parts = {k[comp_l:]:v for (k, v) in comp_versions.items() if k.startswith(component)}\n parts[\"comp\"] = component\n version_header = \"\"\"\n// -*- C++ -*-\n// This is file was automatically generated by $ACE_ROOT/bin/make_release.py\n\n#define {comp}_MAJOR_VERSION {major}\n#define {comp}_MINOR_VERSION {minor}\n#define {comp}_MICRO_VERSION {micro}\n#define {comp}_VERSION \\\"{version}\\\"\n#define {comp}_VERSION_CODE 0x{code:x}\n#define {comp}_MAKE_VERSION_CODE(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n\"\"\".format(**parts)\n\n path = get_path(component, component.lower (), \"Version.h\")\n if opts.take_action:\n with open (path, 'w+') as version_h:\n version_h.write (version_header)\n else:\n print (\"New Version.h for \" + component)\n print (version_header)\n\n retval.append(path)\n\n # Update component/PROBLEM-REPORT-FORM\n vprint (\"Updating PRF for \" + component)\n\n version_line_re = re.compile (r\"^\\s*(\\w+) +VERSION ?:\")\n path = get_path(component, \"PROBLEM-REPORT-FORM\")\n\n with open (path, 'r+') as prf:\n new_prf = \"\"\n for line in prf.readlines ():\n match = version_line_re.search (line)\n if match is not None:\n vprint (\"Found PRF Version for \" + match.group (1))\n new_version = comp_versions[match.group(1) + \"_version\"]\n line = version_re.sub (new_version, line)\n\n new_prf += line\n\n if opts.take_action:\n prf.seek (0)\n prf.truncate (0)\n prf.writelines (new_prf)\n else:\n print (\"New PRF for \" + component)\n print (\"\".join (new_prf))\n\n retval.append(path)\n\n return retval", "def set_version(self, bundle, ctx, filename, version):", "def updateCodeFiles(self):\n # if this annoying slow, could probably drop to bash or soemthing\n # for a search/replace\n for filename, filetype in self._get_code_files():\n lines = open(filename).readlines()\n found_version_line = False\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename)\n elif filetype is 'PyRex':\n lines, write_out = self._update_pyrex_file(lines, filename)\n elif filetype is 'C':\n lines, write_out = self._update_c_file(lines, filename)\n else:\n raise TypeError, \"Unknown code file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def set_version(self, version):\n\n def update_version(version, filepath):\n with open(filepath, \"r\") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents_version(contents, version)\n assert contents != new_contents\n with open(filepath, \"w\") as stream:\n stream.write(new_contents)\n\n update_version(version, os.path.join(\".\", \"package.json\"))\n update_version(version, os.path.join(\".\", \"src\", \"setup.py\"))\n update_version(\n version, os.path.join(\".\", \"src\", 
\"robocorp_code\", \"__init__.py\")\n )", "def update(src):", "def run(self):\n\n version_str = (\n get_git_version(here))\n\n version_uniparser_dict = (\n get_uniparser_version())\n\n if (version_str is not None or\n version_uniparser_dict is not None):\n\n with open(\n os.path.join(here, 'lingvodoc', 'version.py'), 'w',\n encoding = 'utf-8') as version_py_file:\n\n version_py_file.write(\n self.version_py_template.format(\n repr(version_str),\n repr(version_uniparser_dict)))\n\n # Continuing with setup.\n\n super().run()", "def test_changeVersionInFile(self):\n # The version numbers are arbitrary, the name is only kind of\n # arbitrary.\n packageName = 'foo'\n oldVersion = Version(packageName, 2, 5, 0)\n file = self.makeFile('README',\n \"Hello and welcome to %s.\" % oldVersion.base())\n\n newVersion = Version(packageName, 7, 6, 0)\n _changeVersionInFile(oldVersion, newVersion, file.path)\n\n self.assertEqual(file.getContent(),\n \"Hello and welcome to %s.\" % newVersion.base())", "def overwrite_version_in_package(base_dir: PathOrStrType, version: str):\n base_init = os.path.join(str(base_dir), '__init__.py')\n with open(base_init, 'r+') as f:\n content = f.read()\n version_regex = re.compile(r'^__version__ = .+$', flags=re.MULTILINE)\n new_content = version_regex.sub(_version_line(version), content)\n f.seek(0)\n f.write(new_content)\n f.truncate()", "def _testVersionChanging(self, major, minor, micro, prerelease=None):\n versionUpdates = []\n def myVersionChanger(sourceTree, versionTemplate):\n versionUpdates.append((sourceTree, versionTemplate))\n versionChanger = ChangeVersionsScript()\n versionChanger.changeAllProjectVersions = myVersionChanger\n version = \"%d.%d.%d\" % (major, minor, micro)\n if prerelease is not None:\n version += \"pre%d\" % (prerelease,)\n versionChanger.main([version])\n self.assertEquals(len(versionUpdates), 1)\n self.assertEquals(versionUpdates[0][0], FilePath(\".\"))\n self.assertEquals(versionUpdates[0][1].major, major)\n self.assertEquals(versionUpdates[0][1].minor, minor)\n self.assertEquals(versionUpdates[0][1].micro, micro)\n self.assertEquals(versionUpdates[0][1].prerelease, prerelease)", "def version():\n with cd(settings.SRC_PATH()):\n new_version = prompt('New version number?')\n run('echo \"window.version=\\'{0}\\';\" > app/static/js/version.js'\n .format(new_version))", "def _update_python_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % self.Version\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def _update_pyrex_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % str(self.VersionTuple)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def test_updateVersion(self):\n project = self.makeProject(Version(\"bar\", 2, 1, 0))\n newVersion = Version(\"bar\", 3, 2, 9)\n project.updateVersion(newVersion)\n self.assertEquals(project.getVersion(), newVersion)\n self.assertEquals(\n 
project.directory.child(\"topfiles\").child(\"README\").getContent(),\n \"3.2.9\")", "def rsIncrementOutputVersion(args):\n\n lyr = rsUtility.activeLayer.name()\n versions = rsRenderOutput.getVersions(lyr)\n\n global rsRenderOutput\n rsRenderOutput = renderOutput.RenderOutput()\n\n if not versions:\n print '# Unable to increment version. No versions folders exists (yet).'\n return\n\n versions = [int(re.sub('[^0-9]', '', f)) for f in versions]\n if not versions:\n return\n\n incremented_version_string = 'v{0}'.format(str(max(versions) + 1).zfill(3))\n rsRenderOutput.addVersionDir(lyr, incremented_version_string)\n rsRenderOutput.setVersion(incremented_version_string)\n _outputTemplate()\n _updatePathText()", "def ReadAndUpdateVersion(version_filename, update_position=None):\n if os.path.exists(version_filename):\n current_version = open(version_filename).readlines()[0]\n numbers = current_version.split('.')\n if update_position:\n numbers[update_position] = '%02d' % (int(numbers[update_position]) + 1)\n if update_position < -1:\n numbers[update_position + 1:] = ['00'] * -(update_position + 1)\n version = '.'.join(numbers)\n else:\n version = FIRST_VERSION\n with open(version_filename, 'w') as fout:\n fout.write(version)\n print('\\n'.join(['Version %s' % version]))\n return version", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def increment_version(path):\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(path)\n if not match:\n raise ValueError(\"%s does not contain a version number\" % path)\n version = match.group(\"version\")\n version = \"v\" + str(int(version) + 1).zfill(3)\n return regex.sub(version, path)", "def _update_version(self) -> None:\n # Implement in child class.\n raise NotImplementedError", "def update_versions(new_py_version: str) -> None:\n current_py_version = get_current_py_version()\n current_py_version_without_suffix = replace_dev_suffix_with(current_py_version, \"\")\n\n # Python\n replace_occurrences(\n files=[Path(\"mlflow\", \"version.py\")],\n pattern=re.escape(current_py_version),\n repl=new_py_version,\n )\n # JS\n replace_occurrences(\n files=[\n Path(\n \"mlflow\",\n \"server\",\n \"js\",\n \"src\",\n \"common\",\n \"constants.tsx\",\n )\n ],\n pattern=re.escape(current_py_version),\n repl=new_py_version,\n )\n\n # Java\n for java_extension in [\"xml\", \"java\"]:\n replace_occurrences(\n files=Path(\"mlflow\", \"java\").rglob(f\"*.{java_extension}\"),\n pattern=rf\"{re.escape(current_py_version_without_suffix)}(-SNAPSHOT)?\",\n repl=replace_dev_suffix_with(new_py_version, \"-SNAPSHOT\"),\n )\n\n # R\n replace_occurrences(\n files=[Path(\"mlflow\", \"R\", \"mlflow\", \"DESCRIPTION\")],\n pattern=f\"Version: {re.escape(current_py_version_without_suffix)}\",\n repl=f\"Version: {replace_dev_suffix_with(new_py_version, '')}\",\n )", "def get_and_update_versions ():\n\n try:\n get_comp_versions (\"ACE\")\n get_comp_versions (\"TAO\")\n\n if opts.update:\n files = []\n files += update_version_files (\"ACE\")\n files += update_version_files (\"TAO\")\n files += create_changelog (\"ACE\")\n files += create_changelog (\"TAO\")\n files += update_spec_file ()\n files += update_debianbuild ()\n\n commit (files)\n\n except:\n print (\"Fatal error in get_and_update_versions.\")\n raise", "def main(args: argparse.Namespace) -> None:\n if args.is_rc and args.is_dev:\n raise ValueError(\"A release version cannot be both RC and dev.\")\n if args.is_rc:\n assert 
args.rc is not None, \"rc field must be specified if is_rc is specified\"\n assert args.rc >= 1, \"RC version must start from 1.\"\n else:\n assert args.rc is None, \"is_rc must be specified in order to specify rc field\"\n update_cmake(args.major, args.minor, args.patch)\n update_pypkg(\n args.major,\n args.minor,\n args.patch,\n is_rc=args.is_rc,\n is_dev=args.is_dev,\n rc_ver=args.rc,\n )", "def bumpversion(path=\"setup.cfg\"):\n config = ConfigParser()\n config.read(path)\n cfg = open(path, 'w')\n new_version = \"0.0.0\"\n if config.has_option('metadata', 'version'):\n old_version = config.get('metadata', 'version')\n major, minor, patch = old_version.split(\".\")\n new_version = \"%s.%s.%s\" % (major, minor, int(patch) + 1)\n if not config.has_section('metadata'):\n config.add_section('metadata')\n config.set('metadata', 'version', new_version)\n config.write(cfg)\n cfg.close()\n return new_version", "def main():\n start = time.time()\n path, recursive, old, new, undo = get_args()\n # if it is an undo, exit afterwards\n if undo:\n revert()\n exit()\n file_list = get_files(recursive, path)\n change_extension(path, file_list, old, new)\n runtime = time.time() - start\n print('complete \\nran for ' + str(runtime) + ' seconds.')", "def statusupdate(filepath):\n pass", "def _main(args):\n if args.files:\n _update_files()\n\n if args.templates:\n _update_template(args.template_definition)", "def update_version(self, new_version):\n if new_version is not None:\n self.version_details = json.loads(new_version)\n\n # Update port file.\n http_port = self.version_details['appscaleExtensions']['httpPort']\n version_key = VERSION_PATH_SEPARATOR.join(\n [self.project_id, self.service_id, self.version_id])\n port_file_location = os.path.join(\n CONFIG_DIR, 'port-{}.txt'.format(version_key))\n with open(port_file_location, 'w') as port_file:\n port_file.write(str(http_port))\n\n logger.info('Updated version details: {}'.format(version_key))\n if self.callback is not None:\n self.callback()", "def version_update(component_version, job_type):\n version_update_list = {\n \"validation_parameter.sample\": \"validation_parameter.yaml\",\n \"machine_detail.sample\": \"machine_detail.yaml\",\n }\n sample_file_data = Common.validation_param_detail(\n \"validation_parameter.sample\", \"jenkins_job_details\"\n )\n # The purpose of this line to check the subversion of the job and update\n # their supported subversion in the validation_parameter.yaml\"\n try:\n if sample_file_data[f\"{job_type}\"][\n \"sub_version_count\"\n ] != component_version.count(\".\"):\n component_version = component_version[\n 0 : int(sample_file_data[f\"{job_type}\"][\"sub_version_count\"]) + 2\n ]\n Common.logger.info(f\"Supported Component version: {component_version}\")\n command = f\"sed -i 's/<component-version>/{component_version}/'\"\n except Exception as ex:\n Common.logger.error(\n f\"Component Version: {component_version} and Job Type \"\n f\"{job_type} is not supported and dump error {ex}\"\n )\n\n for tmp_file in version_update_list:\n if os.path.isfile(f\"{Common.config_path}/{version_update_list[tmp_file]}\"):\n os.remove(f\"{Common.config_path}/{version_update_list[tmp_file]}\")\n status = os.popen(\n f\"cp {Common.config_path}/{tmp_file} {Common.config_path}/\"\n f\"{version_update_list[tmp_file]}\"\n )\n status.close()\n file_name = f\"{Common.config_path}/{version_update_list[tmp_file]}\"\n status = os.popen(f\"{command} {file_name}\")\n status.close()\n Common.logger.info(\n \"Version updated successfully in runtime 
gernated validation yaml file\"\n )", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)", "def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError" ]
[ "0.67035234", "0.64823806", "0.63954556", "0.6380026", "0.6160189", "0.61480564", "0.6115521", "0.6089588", "0.60198855", "0.600825", "0.6003172", "0.5936001", "0.5881396", "0.58597773", "0.5844964", "0.5829816", "0.57968193", "0.577143", "0.57502824", "0.5740181", "0.5700411", "0.56697726", "0.5648129", "0.5647241", "0.5617972", "0.56057656", "0.5602698", "0.56021225", "0.5585313", "0.557668" ]
0.6538263
1
Write the updated file to disk. Used by both the project files and the source files. Rename the current file by appending a ".orig" extension. Write a new file to replace the .orig file.
def write_output_file(updated_file, file_path):
    orig_file = file_path + ".orig"
    # remove an existing .orig file
    if os.path.isfile(orig_file):
        os.remove(orig_file)
    # rename the current file
    os.rename(file_path, orig_file)
    # write the new file
    with open(file_path, mode='w', encoding='utf-8', newline='') as file_out:
        for line in updated_file:
            file_out.write(line)
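A minimal sketch of flushing the buffered lines back to disk with the routine above; the path and line contents are placeholders.

# after modify_input_file() has filled updated_file and reported a change
updated_file = ["// Copyright 2020 Example Corp.\n", "int main() { return 0; }\n"]
write_output_file(updated_file, "project/main.cpp")
# the previous contents now live in project/main.cpp.orig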
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace(file,original_text,replacement_text):\n with open(file, \"rt\") as fin:\n with open(str(file+\"temp\"), \"wt\") as fout:\n for line in fin:\n fout.write(line.replace(original_text,replacement_text))\n os.rename(str(file+\"temp\"),file)\n return", "def updateFile(filename, content):\n\tfilename = adaptPath(filename)\n\tif filename != None:\n\t\ttry:\n\t\t\toldContent = open(filename, \"r\").read()\n\t\texcept IOError:\n\t\t\toldContent = \"\"\n\t\tif oldContent != content:\n\t\t\tfile = open (filename, \"w\")\n\t\t\tfile.write(content)\n\t\t\tfile.close()\n\treturn content", "def UpdateFile(self, modID = None):\n if modID is None:\n modID = self.modActive\n\n source = self.modules[modID][1]\n filename = self.modules[modID][2]\n\n try:\n file = open(filename, \"wt\")\n file.write(source)\n finally:\n file.close()", "def update_file(this_file, new_lines):\r\n file_format = get_file_format(this_file)\r\n return new_write_file(this_file, new_lines, file_format=file_format)", "def _update_ondisk(self):\n with open(self.orig_path, \"w\") as f:\n f.write(self.content)", "def overwrite_file(self):\n\n new_file = open(self.temp_filename, 'r')\n file = open(self.filename, 'w')\n file.writelines(new_file.readlines())\n new_file.close()\n file.close()\n os.remove(self.temp_filename)", "def write(self, forced=False):\n if not self.modified:\n if not forced:\n return\n else:\n self.update_source()\n\n with codecs.open(self.filename, 'w', 'utf8') as f:\n f.write(self.source)\n self.modified = False", "def replace_in_file(path, old, new):\n with open(path) as fp:\n content = fp.read()\n\n lpf.ensure_removed(path)\n with open(path, 'w') as fp:\n fp.write(content.replace(old, new))", "def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)", "def rewrite_all_file(self, data):\r\n with open(self.file_name, 'w', encoding='utf-8') as self.file:\r\n self.file.write(data)", "def filewrite(self, filename):\n io.write(self, filename)", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))", "def RenameFile(self, oldname: str, newname: str) -> None:\n ...", "def saveFile(self,newfile=None):\n if newfile == None:\n shutil.move(self.filename,self.filename+'~')\n self.handler = open(self.filename,'w')\n else:\n self.handler = open(newfile,'w')\n self.handler.writelines(self.content)\n self.handler.close()", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def _overwrite(self, filename, s):\r\n if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.\r\n with open(os.path.join(self._html_dir, filename), 'w') as f:\r\n f.write(s)", "def update_file(file_to_write, content):\n if read_file(file_to_write, stop_if_failure = False, split_to_lines = False) != content:\n if debug:\n print file_to_write + \" was updated!\"\n with open(file_to_write,\"w\") as targetfile:\n targetfile.write(content)\n return True\n else:\n if debug:\n print file_to_write + \" remains the same.\"\n return False", "def 
RewriteFile(start, end, original_dir, original_filename, snippet,\n outdir=None):\n original_path = GetPath(os.path.join(original_dir, original_filename))\n original = file(original_path, 'r')\n original_content = original.read()\n original.close()\n if outdir:\n outpath = os.path.join(outdir, original_filename)\n else:\n outpath = original_path\n out = file(outpath, 'w')\n rx = re.compile(r'%s\\n.*?%s\\n' % (re.escape(start), re.escape(end)),\n re.DOTALL)\n new_content = re.sub(rx, '%s\\n%s%s\\n' % (start, snippet, end),\n original_content)\n out.write(new_content)\n out.close()\n print 'Output ' + os.path.normpath(outpath)", "def replace_file(new_content, current_location):\r\n\tif should_replace(new_content, current_location):\r\n\t\tabs_path = os.path.abspath(current_location)\r\n\t\tcurrent_dir, filename = os.path.split(abs_path)\r\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\r\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\r\n\r\n\t\ttry:\r\n\t\t\twith open(tmp_path, 'w') as tmp:\r\n\t\t\t\ttmp.write(new_content.getvalue())\r\n\t\t\tos.rename(tmp_path, abs_path)\t\r\n\t\texcept IOError:\r\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\r\n\t\t\treturn False\r\n\t\treturn True\r\n\treturn False", "def replace_file(new_content, current_location):\n\tif should_replace(new_content, current_location):\n\t\tabs_path = os.path.abspath(current_location)\n\t\tcurrent_dir, filename = os.path.split(abs_path)\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\n\n\t\ttry:\n\t\t\twith open(tmp_path, 'w') as tmp:\n\t\t\t\ttmp.write(new_content.getvalue())\n\t\t\tos.rename(tmp_path, abs_path)\t\n\t\texcept IOError:\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\n\t\t\treturn False\n\t\treturn True\n\treturn False", "def write_to_file(self, filename: str) -> None:", "def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def overwrite_original_file(self, value):\n self.__overwrite_original_file = value", "def rename_file(path, old_name, new_name):\n \n old_file = os.path.join(path, old_name)\n new_file = os.path.join(path, new_name)\n os.rename(old_file, new_file)", "def write(self, fname):\n pass", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def edit_files(i_file):\n a_file = open(i_file, \"r\")\n content = a_file.readlines()\n\n content[3] = f\"years: {datetime.now().year}\\n\"\n content[4] = f'lastupdated: \"{date.today()}\"\\n'\n a_file = open(i_file, \"w\") #open the same file and overrite line3 & 4\n a_file.writelines(content)\n\n a_file.close()", "def write_to_file(self, line):\r\n self.file.write(line)\r\n self.file.write(NEW_LINE)", "def renewFile(filename):\n\n\tfileRepo = repertoire + filename + extension # Position du fichier\n\n\t# Ouvre en ecriture et l'ecrase\n\t# La methode with ferme le fichier automatiquement\n\twith open(fileRepo, \"w\") as robFile:\n\t\trobFile.write(filename + \"\\n\") # Ecrit le nom du fichier au debut" ]
[ "0.6319781", "0.6288311", "0.6250029", "0.6249721", "0.6226605", "0.6155845", "0.6119171", "0.6113191", "0.60984194", "0.605242", "0.60221267", "0.6005523", "0.5968745", "0.5966853", "0.59334344", "0.59334344", "0.5924986", "0.58883196", "0.5884015", "0.5861894", "0.5781545", "0.5757236", "0.5752444", "0.5743696", "0.5737481", "0.56825024", "0.5676641", "0.5674033", "0.5672615", "0.5672466" ]
0.7659451
0
Returns n + reversed(n).
def reverse_and_add(n): return n + int(str(n)[::-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rev(n):\n return int(''.join(reversed(str(n))))", "def reverse(n):\n return(int(str(n)[::-1]))", "def reverse_last(vals, n):\n if len(vals)<=n:\n return vals[::-1]\n else:\n old=vals[0:len(vals)-n]\n new=vals[-1:len(vals)-n-1:-1]\n return old+new", "def reverseTheList(n):\n print(n[::-1])\n return(n[::-1])", "def reverse_slice(n):\n return n[::-1]", "def zeropad_to_length(self, n):\n oldn = self.nt\n assert n >= oldn\n return self.zeropad(0, n - oldn)", "def sum(n):\n if n == 0:\n return 0\n return sum(n - 1) + n", "def reversed_of_string(n):\n return ''.join(reversed(n))", "def decrement(self, n=1):\n self.increment(-n)", "def swapfirstlast(n):\n print (n[-1:] + n[1:-1] + n[:1])\n return (n[-1:] + n[1:-1] + n[:1])", "def num_reverse(num):\r\n return int(str(num)[::-1])", "def reverse(x):\n return x[::-1]", "def mirror(n):\n return (n % 10)*10 + (n // 10)", "def move(self, n: int) -> \"Linked[T]\":\n out = self\n if n >= 0:\n for _ in range(n):\n out = out.forward\n else:\n for _ in range(-n):\n out = out.backward\n return out", "def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret", "def invertNumber( n ):\n\tsign = +1\t\t\t\t\t\t\t\t# We need to preserve the sign.\n\tif n < 0:\n\t\tsign = -1\n\tn = abs( n )\n\tpower = int( math.log10( n ) )\t\t\t# We need no know the positional value of the left-most digit.\n\tresult = 0\n\twhile n > 0:\n\t\tdigit = n % 10\n\t\tresult += digit * ( 10 ** power )\t# The right-most digit is multiplied by the highest positional value of the\n\t\tn //= 10\t\t\t\t\t\t\t# original number. Then, we move to the next digit (to the left), and reduce the\n\t\tpower -= 1\t\t\t\t\t\t\t# power of 10 we have to use with it. We proceed iteratively until n is depleted.\n\n\treturn sign * result", "def skip_add(n):\n \n\n\n\n if n ==0:\n return 0\n if n ==1:\n return 1\n else:\n return n + skip_add(n-2)", "def tribonacci(self, n: int) -> int:\n # Solution 1 - 24 ms\n # Solution 2 - 12 ms\n if n in self.mem:\n return self.mem[n]\n self.mem[n] = self.tribonacci(n - 1) + self.tribonacci(n - 2) + self.tribonacci(n - 3)\n return self.mem[n]", "def lucas_recur(n):\n if n == 0:\n return 2\n elif n == 1:\n return 1\n return lucas_recur(n - 1) + lucas_recur(n - 2)", "def sum_to_n(n):\n total = 0\n for i in range(1,n+1):\n total += i\n return total", "def fibonacci_series_to(n):\r\n l = [0, 1] \r\n for i in range(n - 1):\r\n l = [*l, l[-1] + l[-2]]\r\n return l[:n]", "def sum_to(n):\n the_sum = 0\n for counter in range(n+1):\n the_sum = the_sum + counter\n return the_sum", "def value_n_from_end(self, n):\n # check the validity of the input\n if n > self.n-1:\n print(f\"Error; n is greater than the length of the list = {self.n-1}\") \n return\n \n temp_node = self.head # store head\n for _ in range((self.n-1) - n):\n temp_node = temp_node.next # traverse the list\n return temp_node.val", "def reverseInt(self, x):\n reverse = 0\n while x:\n reverse *= 10\n reverse += x % 10\n x //= 10\n return reverse", "def question_25(list_num: int) -> int:\n list_num.reverse()\n return list_num", "def triangular_number(n):\n return n*(n+1) / 2", "def fib_r(n):\r\n if n < 0:\r\n raise ValueError(\"Must use positive integers\")\r\n if n == 0:\r\n return n\r\n elif n == 1:\r\n return n\r\n else:\r\n return FibSeq.fib_r(n-1) + FibSeq.fib_r(n-2)", "def triangle(n):\n\n accumulator = 0\n\n for i in range(1,n+1):\n accumulator += i\n\n return accumulator", "def reverse(Number):\r\n L=list(reversed([a for a in str(Number)]))\r\n L=''.join(L)\r\n return (int(L))", "def 
reverse(seq):\n return seq[::-1]" ]
[ "0.7608247", "0.75295496", "0.728547", "0.6924487", "0.68271923", "0.6680292", "0.66388965", "0.6602577", "0.6486767", "0.6343752", "0.63146275", "0.6301133", "0.62174916", "0.616755", "0.61604625", "0.6144047", "0.6141926", "0.6127323", "0.6058644", "0.6056239", "0.6048981", "0.6046169", "0.60408354", "0.6040287", "0.6034187", "0.6003931", "0.59908247", "0.59848225", "0.597698", "0.5974978" ]
0.8104129
0
Iterator indexing control points of a Bezier simplex.
def BezierIndex(dim, deg): def iterate(c, r): if len(c) == dim - 1: yield c + (r, ) else: for i in range(r, -1, -1): yield from iterate(c + (i, ), r - i) yield from iterate((), deg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n for point in self.points:\n yield point", "def get_controlpoints(self, index):\n if index < 2: # is index in very beginning\n current_controlpoints = self.controlpoints[0:4] # use first points\n elif index > len(self.controlpoints) - 2: # is index in very end\n current_controlpoints = self.controlpoints[-4:] # use last points\n else:\n current_controlpoints = self.controlpoints[index - 2:index + 2]\n return current_controlpoints", "def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i", "def bezierPoly(ctrlP):\n n = len(ctrlP) - 1 #degree of the polynomial\n first = True\n for t in np.linspace(0.0, 1.0, 5 * n):\n point = bezierFunc(ctrlP, t)\n if first: # Initialize list of points in the polynomial\n bezierPointsList = np.copy(point)\n first = False\n else:\n bezierPointsList = np.append(bezierPointsList, point, axis=0)\n return bezierPointsList", "def bezier(t, points):\n n = len(points) - 1\n x = y = 0\n for i, pos in enumerate(points):\n bern = bernstein(t, i, n)\n x += pos[0] * bern\n y += pos[1] * bern\n return x, y", "def __iter__(self):\n for idx in range(0, self.Npoints):\n position = self.start + (self.end-self.start)/self.Npoints*idx\n yield position\n raise StopIteration()", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def __next__(self):\n if self.iterator < len(self.points):\n self.iterator += 1\n return self.points[self.iterator-1]\n else:\n raise StopIteration", "def __iter__(self):\n return self.points.__iter__()", "def __next__(self):\n if self.iterator < len(self.points):\n iterator = self.iterator\n self.iterator += 1\n return self.points[iterator]\n else:\n raise StopIteration", "def set_control_points(self, control_points):\r\n for curve_index, points in enumerate(control_points):\r\n self.curves[curve_index].control_points = points", "def iterator(self):\n return _osgAnimation.VertexList_iterator(self)", "def __iter__(self):\n for p in self.positions(): # use same order as positons()\n yield p.element() # but yield each element", "def __iter__(self):\n for i in range(len(self.ks)):\n yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\\\n [self.ks[i]], self.iss", "def iter_points(self):\n for x in range(self.left, self.right + 1):\n for y in range(self.top, self.bottom + 1):\n yield Point(x, y)", "def bezier_curve_range(n, points):\n for i in xrange(n):\n t = i / float(n - 1)\n yield bezier(t, points)", "def __init__(self, P):\n self._n = len(P) # control point iterator\n self._P = P\n self._X, self._Y, self._Z, self._W = self.sep() \n self._bc = self._bn()", "def __iter__(self):\n for p in self.positions():\n yield p.element()", "def iterator(self):\n return _osgAnimation.mapVertexInfluence_iterator(self)", "def get_bezier_indexes(path_points: list):\n\n if len(path_points) < 2:\n raise ValueError(f\"path_points must contain at least 2 points, got {len(path_points)} instead\")\n\n last_non_zigzag_idx = 0\n for i in range(len(path_points)):\n if math.isclose(path_points[i][1], 0):\n raise ValueError(f\"POINT {i} HAS 0 SPEED!\")\n elif path_points[i][1] > 0:\n last_non_zigzag_idx = i\n else:\n break\n\n a_non_bezier_indexes = [0] # A points\n b_non_bezier_indexes = [1] # B points\n while last_non_zigzag_idx > b_non_bezier_indexes[-1]:\n a_non_bezier_indexes.append(a_non_bezier_indexes[-1] + config.NUMBER_OF_BEZIER_POINT)\n b_non_bezier_indexes.append(b_non_bezier_indexes[-1] + config.NUMBER_OF_BEZIER_POINT)\n\n non_bezier_indexes = a_non_bezier_indexes + b_non_bezier_indexes\n bezier_indexes 
= []\n for i in range(last_non_zigzag_idx + 1):\n if i not in non_bezier_indexes:\n bezier_indexes.append(i)\n\n return bezier_indexes", "def __iter__(self):\n start_times = (start for start, end in self.tss)\n names = (name.rstrip() for name in self.inps)\n for ind, (c, t) in enumerate(zip(names, start_times)):\n yield (c, t, ind)", "def vertex_iterator(self):\n for X in self.fe.L:\n for x in self.K.unit_group:\n yield (X, x)", "def __iter__(self):\n for coord in self.position:\n yield coord", "def _iterCoordsets(self):\n\n for i in range(self._n_csets):\n yield self._coords[i]", "def __iter__(self):\n yield self._x\n yield self._y", "def value_iterator(self):\n return _osgAnimation.mapVertexInfluence_value_iterator(self)", "def __iter__(self):\n return iter(self.vertices.values())", "def __iter__(self) -> Iterable[Tuple[float, float]]:\n return iter([self.x, self.y])", "def __iter__(self):\n return iter(self.adjacent)", "def generator(self) -> Iterator[Tuple[int, int, complex]]:\n for inda in range(self._core.lena()):\n alpha_str = self._core.string_alpha(inda)\n for indb in range(self._core.lenb()):\n beta_str = self._core.string_beta(indb)\n yield alpha_str, beta_str, self.coeff[inda, indb]" ]
[ "0.6138841", "0.60835415", "0.59836525", "0.59470624", "0.59358245", "0.59015346", "0.5879152", "0.58778214", "0.5873675", "0.58443034", "0.58370155", "0.5782884", "0.5742098", "0.56580085", "0.56258917", "0.5603704", "0.55990475", "0.55845714", "0.557098", "0.5552596", "0.55366474", "0.5530336", "0.55067533", "0.55042636", "0.5494575", "0.54596645", "0.5432638", "0.5423521", "0.54204303", "0.5419772" ]
0.6090857
1
Count the number of elements whose value is not 0.
def count_nonzero(a): return (np.count_nonzero(a))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_of_nonzero_elements(my_vector):\n counter = 0\n for element in my_vector:\n if element != 0:\n counter += 1\n return counter", "def get_zeros(self):\n return self.serie.isin([0]).sum()", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def count_nonzero(tensor):\n raise NotImplementedError", "def check_none_zero_values(self):\n hits = -1\n if (len(self.data.shape) == 3):\n hits = 0\n frames = self.data.shape[0]\n pixels = self.data.shape[1]\n bins = self.data.shape[2]\n for i in range(frames):\n for j in range(pixels):\n for k in range(bins):\n value = self.data[i][j][k]\n if value > 0:\n hits += 1\n print(\"self.data[{}][{}][{}]; Sum so far = {}\".format(i, j, k, hits))\n return hits", "def count_ge_one(array):\r\n return numpy.count_nonzero(array >= 1)", "def count(x):\n return sum(np.asarray(x).astype(bool))", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def countZeroes(arr):\n counter = 0\n #sort the array\n arr.sort(reverse=True)\n print(arr)\n n = len(arr)\n print(n)\n\n # Find index of first zero in given array\n first = firstZero(arr, 0, n - 1)\n \n # If 0 is not present at all, return 0\n if (first == -1):\n return 0\n\n for i in range(first,len(arr)):\n if (arr[i] == 0):\n counter += 1\n else:\n break\n\n return counter", "def isZero(self):\n return self.count == 0", "def check_zero(col):\n return np.sum(col == 0.0)", "def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)", "def observed_species(counts):\n return (counts!=0).sum()", "def __len__(self):\n return len(np.where(np.logical_not(self.data.mask))[0])", "def count_placeholders(series):\n count = 0\n\n for i in range(series.size-1, -1, -1):\n if pd.isnull(series[i]) or series[i] == 0:\n count += 1\n else:\n break\n\n return count", "def get_zeros(self):\n zero_values = self.df[self.col_name].isin([0]).sum(axis=0)\n return zero_values", "def test_count_0(self):\n self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')", "def count_null(self): \n print('Null Counts:', self.X.isnull().sum()[self.X.isnull().sum() > 0])", "def nnz(self):\n return len(self.value)", "def _find0(self):\n for index in range(0, self.size):\n if self.elements[index] == 0:\n return index\n return self.size", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def find_empty(counts): \n for index,count in enumerate(counts):\n if count == 0:\n return index\n return None", "def count_ones(value):\n return bin(value).count('1')", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def get_num_nonzero_betas(betas, genesets, threshold=1e-6):\n total_nonzeros = 0\n total_genesets = 0\n for idx, b in enumerate(betas):\n geneset_nonzeros = sum(np.greater(b, threshold))\n total_nonzeros += geneset_nonzeros\n if geneset_nonzeros > 0:\n total_genesets += 1\n print \"geneset found\", genesets[idx], \"nonzeros\", geneset_nonzeros, \"total genes\", b.size\n return total_nonzeros[0,0], total_genesets", "def has_zeros(tensor, verbose=True):\n tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_zero = np.argwhere(tensor_numpy == 0.0)\n\n zero_count = len(where_zero)\n zero = zero_count != 0\n\n if verbose and zero:\n print(f\"Encountered {zero_count} zeros\")\n\n return zero", "def count(self, elem):\n if not self.step:\n return _coconut.float(\"inf\") if elem == self.start else 0\n return int(elem in self)", "def 
count(self):\n return sum(1 for _ in self)", "def count_paths_with_zero_intervals(self):\n zeros = []\n for path in self.paths:\n # print(\"Checking path {}\".format(path))\n has_zero = 0\n for arc in path:\n # lb = self.arc_info[arc][\"lower_bound\"]\n # ub = self.arc_info[arc][\"upper_bound\"]\n # print(\"{} {} interval\".format(lb,ub))\n if (self.arc_info[arc][\"upper_bound\"] -\n self.arc_info[arc][\"lower_bound\"]) == 0:\n has_zero = 1\n zeros.append(has_zero)\n print(zeros)\n return(sum(zeros))", "def osd(counts):\n return (counts!=0).sum(), (counts==1).sum(), (counts==2).sum()" ]
[ "0.7499776", "0.74260056", "0.7189956", "0.70507324", "0.7043536", "0.6923293", "0.6901537", "0.68947256", "0.6805576", "0.67249465", "0.67243916", "0.6666576", "0.66371304", "0.6594162", "0.65321916", "0.65197927", "0.6511688", "0.65069556", "0.64964545", "0.6477194", "0.64498985", "0.6437244", "0.64245695", "0.639161", "0.63603455", "0.6338596", "0.63183016", "0.6264146", "0.62507284", "0.6183806" ]
0.75625044
0
Get an index with nonzero element.
def nonzero_indices(a): return (np.nonzero(a)[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find0(self):\n for index in range(0, self.size):\n if self.elements[index] == 0:\n return index\n return self.size", "def non_zero_idx_val(seq):\n return [(i, v) for i, v in enumerate(seq) if v > 0]", "def find_empty(counts): \n for index,count in enumerate(counts):\n if count == 0:\n return index\n return None", "def index_of_least_significant_zero_bit(self, value):\n\n index = 1\n while (value & 1) != 0:\n value >>= 1\n index += 1\n return index", "def get_zero_nonzero_idx(spins):\n idx_zero = spins.index(0)\n idx_nonzero = tuple([idx for idx, spin in enumerate(spins) if spin != 0])\n if len(idx_nonzero) + 1 != len(spins):\n raise ValueError(\"`spins` must contain exactly one zero.\")\n return idx_zero, idx_nonzero", "def zero_indexed(array):\n if all(dl == 0 for dl in array.datashape.dim_low):\n return array\n if any(dl < 0 for dl in array.datashape.dim_low):\n raise ValueError(\"Cannot zero_index array: one or more \"\n \"dimensions start < 0\")\n\n ds = array.datashape.copy()\n ds.dim_low = [0] * ds.ndim\n return array.redimension(ds.schema)", "def nonzero(self):\n\t\t_x = self.__seqvector.vec.nonzero()[1]\n\t\t_x = list(set(_x)) # uniquify them\n\t\t_x.sort() # sort positions\n\t\treturn _x", "def innulo(self):\n for i in range(self.n):\n if not comozero(self[i]):\n return i\n return None", "def _unit_vector_or_zeros(index, size):\n u = np.zeros(size, int)\n if index != -1:\n u[index] = 1\n return u", "def get_empty_pos(arr):\n\n\tpos = []\n\tfor i in range(len(arr)):\n\t\tif arr[i] == 0:\n\t\t\tpos.append(i)\n\n\treturn pos", "def nonzero_values(a):\r\n return a.flatten()[flatnonzero(a)]", "def count_nonzero(a):\n return (np.count_nonzero(a))", "def _find_zero(board):\n for r_index, row in enumerate(board):\n for c_index, num in enumerate(row):\n if num == 0:\n return r_index, c_index", "def nonzero_first(arr, *, axis):\n def nonzero_first_1d(arr):\n try:\n return np.nonzero(arr)[0][0]\n except IndexError:\n return -1\n return np.apply_along_axis(nonzero_first_1d, axis, arr)", "def first_element(\n x: torch.Tensor,\n element: Union[int, float],\n dim: int = 1,\n) -> torch.Tensor:\n mask = x == element\n found, indices = ((mask.cumsum(dim) == 1) & mask).max(dim)\n indices[(~found) & (indices == 0)] = x.shape[dim]\n return indices", "def first_nz_mask(values, index):\n mask = np.full(values.size, True)\n for idx, value in enumerate(values):\n if value == 0:\n mask[idx] = False\n else:\n break\n return mask", "def find_zeros(spectra):\n zero_idx = np.where((np.max(spectra, axis=-1) == 0) &\n (np.sum(spectra, axis=-1) == 0))[0]\n if len(zero_idx) > 0:\n return zero_idx", "def nonzero(x, /):\n\n if isinstance(x, dpnp_array) or isinstance(x, dpt.usm_ndarray):\n dpt_array = x.get_array() if isinstance(x, dpnp_array) else x\n return tuple(\n dpnp_array._create_from_usm_ndarray(y)\n for y in dpt.nonzero(dpt_array)\n )\n\n return call_origin(numpy.nonzero, x)", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def filter_zeros(X):\n\tnoNonzeros = np.count_nonzero(X, axis=1)\n\tmask = np.where(noNonzeros > 0)\n\treturn X[mask[0], :]", "def find_empty(bo):\n for i in range(len(bo)):\n for j in range(len(bo[0])):\n if bo[i][j] == 0:\n return (i, j)\n\n return None", "def flatnonzero(a):\r\n if a.ndim == 0:\r\n raise ValueError('Nonzero only supports non-scalar arrays.')\r\n return nonzero(a.flatten(), return_matrix=True)[0]", "def count_nonzero(tensor):\n raise NotImplementedError", "def 
get_0_pos(grid):\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 0:\n return i, j\n return -1, -1", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[0]", "def _find_nonzero_runs(values):\n\n error_checking.assert_is_numpy_array_without_nan(values)\n error_checking.assert_is_numpy_array(values, num_dimensions=1)\n\n zero_flags = numpy.concatenate((\n [True], numpy.equal(values, 0), [True]\n ))\n\n nonzero_flags = numpy.invert(zero_flags)\n differences = numpy.abs(numpy.diff(nonzero_flags))\n index_matrix = numpy.where(differences == 1)[0].reshape(-1, 2)\n\n return index_matrix[:, 0], index_matrix[:, 1] - 1", "def find_zero(self, t, y):\n return y[0]", "def __nonzero__(self):\n return _uhd_swig.range_vector_t___nonzero__(self)", "def first_nonzero_bin(h,bmin=1):\n for i in xrange(bmin+1,h.GetNbinsX()+1):\n if h.GetBinContent(i-1)>0 and h.GetBinContent(i)>0:\n return i-1 # i # set to i to skip the first bin, too!\n print 'WARNING: failed to find a non-zero bin in first_nonzero_bin. Proceeding starting with first bin...'\n return bmin", "def _index(self) -> int:\n return -1" ]
[ "0.7123152", "0.69450545", "0.6875251", "0.66805685", "0.6558049", "0.64854866", "0.64763135", "0.6397523", "0.6385339", "0.63798714", "0.63608164", "0.6356379", "0.63298357", "0.627602", "0.6251115", "0.62202823", "0.61571443", "0.61149204", "0.60853887", "0.60822195", "0.6061934", "0.60534763", "0.6032935", "0.5980475", "0.5935942", "0.5912687", "0.58863854", "0.5856469", "0.5851569", "0.5841538" ]
0.78467286
0
Run county and state level projections for a specific intervention.
def run_projections( state_input_file, county_input_file, intervention: Intervention, run_validation=True ) -> Tuple[DodInterventionResult, DodInterventionResult]: states_key_name = f"states.{intervention.name}" states_df = build_processed_dataset.get_usa_by_states_df( state_input_file, intervention.value ) if run_validation: validate_results.validate_states_df(states_key_name, states_df) states_shp, states_shx, states_dbf = generate_shapefiles.get_usa_state_shapefile( states_df ) if run_validation: validate_results.validate_states_shapefile( states_key_name, states_shp, states_shx, states_dbf ) logger.info(f"Generated state shape files for {intervention.name}") # Run County level projections counties_key_name = f"counties.{intervention.name}" counties_df = build_processed_dataset.get_usa_by_county_with_projection_df( county_input_file, intervention.value ) if run_validation: validate_results.validate_counties_df(counties_key_name, counties_df) ( counties_shp, counties_shx, counties_dbf, ) = generate_shapefiles.get_usa_county_shapefile(counties_df) if run_validation: validate_results.validate_counties_shapefile( counties_key_name, counties_shp, counties_shx, counties_dbf ) state_results = DodInterventionResult( states_key_name, states_df, (states_shp, states_shx, states_dbf) ) county_results = DodInterventionResult( counties_key_name, counties_df, (counties_shp, counties_shx, counties_dbf) ) return state_results, county_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_projections(\n input_file, run_validation=True\n) -> TopCountiesPipelineProjectionResult:\n # Run County level projections\n intervention = Intervention.SELECTED_MITIGATION\n\n counties_key_name = f\"counties.{intervention.name}\"\n # note i think build_processed_dataset should porbably be renamed?\n counties_df = build_processed_dataset.get_usa_by_county_with_projection_df(\n input_file, intervention.value\n )\n if run_validation:\n validate_results.validate_counties_df(counties_key_name, counties_df)\n\n county_results = TopCountiesPipelineProjectionResult(counties_df)\n\n return county_results", "def run_scenario(self):\n self.initialize_random_map()\n self.visualize_environment('initial')\n self.get_tower_target_coverages()\n self.solve_environment()\n self.visualize_environment('solved')", "def run_projections(self):\n self.projection_collection100 = DonorCollection()\n self.projection_collection50 = DonorCollection()\n double_over_100 = dict(list(\n (name, list(map(lambda x: x * 2, donations.donations))) for name, donations in self.donors.items()))\n triple_under_50 = dict(list(\n (name, list(map(lambda x: x * 3, donations.donations))) for name, donations in self.donors.items()))\n for donor, donations in double_over_100.items():\n self.projection_collection100.add(donor, donations)\n for donor, donations in triple_under_50.items():\n self.projection_collection50.add(donor, donations)\n return self.projection_collection50, self.projection_collection100", "def main():\n \n \"\"\" Download and load data\"\"\"\n dfs = get_data()\n \n \"\"\" Preprocess data, combine rows for country provinces\"\"\"\n combine_list = [\"Australia\", \"US\", \"Canada\", \"Mainland China\", \"China\"]\n for key in dfs.keys():\n dfs[key] = preprocess(df=dfs[key], combine_list=combine_list)\n \n \"\"\" Compute additional variables\"\"\"\n dfs = compute_deaths_over_closed(dfs)\n dfs = compute_active_cases(dfs)\n dfs = compute_death_rate(dfs)\n dfs = compute_df_reindexed(dfs, \"active_cases\")\n dfs = compute_df_reindexed(dfs, \"death_rate\")\n \n \"\"\"Remove 0 and 1 from rate variables\"\"\"\n for keys in [\"death_rate\", \"death_rate_reindexed\", \"deaths_over_closed\"]:\n dfs[keys] = remove_corner_values(dfs[keys])\n \n \"\"\" Set parameters for plotting\"\"\"\n titles = {\"active_cases\": \"COVID-19 Active Cases\", \"active_cases_reindexed\": \"COVID-19 Active Cases (Days from the Start of the Outbreak)\", \"deaths_over_closed\": \"COVID-19 Deaths over (Deaths + Recovered)\", \"death_rate\": \"COVID-19 Death Rate\", \"death_rate_reindexed\": \"COVID-19 Death Rate (Days from the Start of the Outbreak)\"}\n filenames = {\"active_cases\": \"covid19_active.png\", \"active_cases_reindexed\": \"covid19_active_ri.png\", \"deaths_over_closed\": \"covid19_death_over_closed.png\", \"death_rate\": \"covid19_death_rate.png\", \"death_rate_reindexed\": \"covid19_death_rate_ri.png\"}\n row_inclusion_index_threasholds = {\"active_cases\": 770, \"active_cases_reindexed\": 500, \"deaths_over_closed\": 770, \"death_rate\": 770, \"death_rate_reindexed\": 500}\n row_inclusion_indices = {}\n #row_inclusion_indices.get(x) is None:\n # row_inclusion_indices = dfs[\"cases\"].iloc[:,-1] > x\n\n \"\"\" Plot\"\"\"\n for key in row_inclusion_index_threasholds.keys():\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-1] > row_inclusion_index_threasholds[key]\n if key in [\"active_cases_reindexed\", \"death_rate_reindexed\"]:\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-5] > row_inclusion_index_threasholds[key]\n 
plot(dfs[key], row_inclusion_indices.get(key), titles[key], filenames[key])", "def main():\n\n # Log messages to stdout\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n stream=sys.stdout,\n )\n\n # Load the sample dataset: the US states and their corresponding population number.\n # (data from https://www.census.gov/)\n us_states_path = os.path.join(os.getcwd(), \"sample_data\", \"cb_2018_us_state_5m.shp\")\n us_pop_path = os.path.join(os.getcwd(), \"sample_data\", \"nst-est2019-01.xlsx\")\n us_states = gpd.read_file(us_states_path)\n us_inhab = pd.read_excel(us_pop_path, skiprows=3, engine=\"openpyxl\").add_prefix(\n \"pop_\"\n )\n # Tidy up rows and column names\n us_inhab.rename(columns={us_inhab.columns[0]: \"NAME\"}, inplace=True)\n us_inhab.NAME = us_inhab.NAME.str.replace(\".\", \"\")\n # Join population numbers and us state geometries.\n us_states = us_states.merge(us_inhab, on=\"NAME\").reset_index()\n # Inspect the data\n print(us_states.info())\n\n # Initialize a circle style cartogram for inhabitants per state in 2019.\n circle_cg = CircleCartogram(\n gdf=us_states,\n size_column=\"pop_2019\",\n mode=2,\n time_limit=60, # The total amount of seconds the model is allowed to run. Useful for working with mode 3.\n )\n square_cg = SquareCartogram(\n gdf=us_states,\n size_column=\"pop_2019\",\n mode=1,\n time_limit=60, # The total amount of seconds the model is allowed to run. Useful for working with mode 3.\n )\n square2_cg = SquareCartogram(\n gdf=us_states,\n size_column=\"pop_2019\",\n mode=4,\n time_limit=60, # The total amount of seconds the model is allowed to run. Useful for working with mode 3.\n )\n\n # Calculate the cartogram geometries.\n circle_cg.calculate()\n square_cg.calculate()\n square2_cg.calculate()\n\n # Plot both the original map and the cartogram side by side.\n gdfs = [us_states, circle_cg.gdf, square_cg.gdf, square2_cg.gdf]\n m = Map(\n gdfs=gdfs,\n title=\"Population per US State in 2019\",\n column=\"pop_2019\",\n labels=\"STUSPS\",\n )\n m.ax[0][0].set_xlim(-150, -60)\n m.plot()\n plt.show()", "def main():\n region = 'Kanto'\n year = 2000\n # callParallelGA(region)\n callParallelReducedGA(region)\n \n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n \n region = 'Kansai'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)", "def process_actors(actapi: act.api.Act, args: Config, falcon: Intel) -> None:\n\n # Get ISO3166 map from country code -> name\n countries: Dict[str, str] = {}\n\n debug(f\"Fetching ISO-3166 data (proxy={args.proxy_string})\")\n\n for country in worker.fetch_json(\n COUNTRY_REGIONS, args.proxy_string, args.http_timeout\n ):\n countries[country[\"alpha-2\"].upper()] = country[\"name\"]\n\n for actor in crowdstrike_intel.get_actors(falcon):\n handle_actor(actapi, actor, countries, args.output_format)", "def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()", "def preprocess(input_dir, output_dir, crs, resolution, country, overwrite):\n # Set data directories if 
not provided and create them if necessary\n if not input_dir:\n input_dir = os.path.join(os.curdir, \"Data\", \"Input\")\n if not output_dir:\n output_dir = os.path.join(os.curdir, \"Data\", \"Intermediary\")\n input_dir, output_dir = Path(input_dir), Path(output_dir)\n for p in (input_dir, output_dir):\n p.mkdir(parents=True, exist_ok=True)\n\n # Create raster grid from CLI options\n geom = country_geometry(country)\n dst_crs = CRS.from_string(crs)\n transform, shape, bounds = create_grid(geom, dst_crs, resolution)\n args = {\n \"dst_crs\": dst_crs,\n \"dst_bounds\": bounds,\n \"dst_res\": resolution,\n \"overwrite\": overwrite,\n \"geom\": geom,\n }\n\n raw = Raw(input_dir)\n preprocess_land_cover(\n src_files=raw.land_cover,\n dst_raster=output_dir.joinpath(\"land_cover.tif\").as_posix(),\n **args,\n )\n preprocess_elevation(src_files=raw.elevation, dst_dir=output_dir, **args)\n preprocess_osm(\n src_file=raw.openstreetmap[0],\n dst_dir=output_dir,\n dst_crs=dst_crs,\n dst_shape=shape,\n dst_transform=transform,\n geom=geom,\n overwrite=overwrite,\n )\n preprocess_surface_water(\n src_files=raw.surface_water,\n dst_raster=output_dir.joinpath(\"surface_water.tif\").as_posix(),\n **args,\n )\n\n log.info(\"Writing area of interest to disk.\")\n with open(output_dir.joinpath(\"area_of_interest.geojson\"), \"w\") as f:\n json.dump(geom.__geo_interface__, f)", "def projected():\n # pull updated list of donors(class)\n projected_list = create_donors_list()\n print('''Welcome to the Projection Option. Here you can run projections for contributions. \n Help Companies structure their matching donations based on past contribution amounts.\n Simply enter the minumum and maximum donation range that will be matched and see the total contribution:''')\n try:\n minimum_input = float(\n input('Enter a minimum donation amount (0 if none): '))\n maximum_input = float(\n input('Enter a maximum donation amount (0 if none): '))\n factor = float(\n input('Please enter the factor you wish to multiply these donations by >> '))\n except ValueError:\n print('Please follow instructions and enter a number only')\n\n projections = projection(projected_list, factor,\n minimum_input, maximum_input)\n print('\\nProjected contribution value: ${:,.2f}'.format(projections))", "def workflow(save_dir):\n year = 2016\n month_series = range(1, 13)\n total_potential_biomass_multiplier = 48.8\n total_standing_biomass_multiplier = 45.25\n biomass_jitter = 3.\n diet_sufficiency_multiplier = 0.28\n diet_sufficiency_jitter = 0.01\n avg_animal_density = 0.0175\n animal_density_jitter = 0.005\n\n # twelve months of precipitation rasters covering the study area\n precip_basename_list = [\n 'chirps-v2.0.{}.{:02d}.tif'.format(year, month) for month in\n month_series]\n\n # reclassify 0 to NoData in CHIRPS rasters\n output_precip_dir = os.path.join(save_dir, 'precip')\n if not os.path.exists(output_precip_dir):\n os.makedirs(output_precip_dir)\n for bn in precip_basename_list:\n base_raster = os.path.join(PRECIP_DIR, bn)\n target_raster = os.path.join(output_precip_dir, bn)\n pygeoprocessing.raster_calculator(\n [(base_raster, 1)], zero_to_nodata, target_raster,\n gdal.GDT_Float32, _IC_NODATA)\n\n # generate outputs\n for month in month_series:\n precip_raster = os.path.join(\n output_precip_dir, 'chirps-v2.0.{}.{:02d}.tif'.format(year, month))\n\n total_potential_biomass_path = os.path.join(\n save_dir, 'potential_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') 
for path in [\n total_potential_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_potential_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n total_standing_biomass_path = os.path.join(\n save_dir, 'standing_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_standing_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_standing_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n diet_sufficiency_path = os.path.join(\n save_dir, 'diet_sufficiency_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n diet_sufficiency_multiplier,\n diet_sufficiency_jitter]],\n precip_to_correlated_output, diet_sufficiency_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n animal_density_path = os.path.join(\n save_dir, 'animal_density_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n avg_animal_density,\n animal_density_jitter]],\n precip_to_animal_density, animal_density_path,\n gdal.GDT_Float32, _IC_NODATA)", "def prep_projected(self):\n\n self.config.logger.info(\"Preparing projected land use data...\")\n\n # set start time\n t0 = time.time()\n\n if self.config.gcamwrapper_df is not None:\n\n self.config.logger.info(f\"Using projected GCAM data from `gcamwrapper` data frame\")\n projected_land_cover_file = proc.format_gcam_data(self.config.gcamwrapper_df,\n f_out='',\n start_year=self.config.start_year,\n through_year=self.config.end_year,\n region_name_field='gcam_region_name',\n region_id_field='gcam_region_id',\n basin_name_field='glu_name',\n basin_id_field='basin_id',\n output_to_csv=False)\n\n elif self.config.gcam_database is not None:\n\n self.config.logger.info(f\"Using projected GCAM data from: {self.config.gcam_database}\")\n projected_land_cover_file = rdr.read_gcam_land(self.config.gcam_database_dir,\n self.config.gcam_database_name,\n self.config.gcam_query, self.d_bsnnm_id,\n self.config.metric, self.config.crop_type)\n\n\n else:\n self.config.logger.info(f\"Using projected GCAM data from: {self.config.projected_lu_file}\")\n projected_land_cover_file = self.config.projected_lu_file\n\n # extract and process data contained from the land allocation GCAM output file\n gcam_data = rdr.read_gcam_file(projected_land_cover_file,\n self.gcam_landclasses,\n start_yr=self.config.start_year,\n end_yr=self.config.end_year,\n timestep=self.config.timestep,\n scenario=self.config.scenario,\n region_dict=self.d_regnm_id,\n agg_level=self.config.agg_level,\n area_factor=self.config.proj_factor,\n metric_seq=self.metric_sequence_list,\n logger=self.config.logger)\n\n # unpack variables\n self.user_years, self.gcam_ludata, self.gcam_aez, self.gcam_landname, self.gcam_regionnumber, self.allreg, \\\n self.allregnumber, self.allregaez, self.allaez, self.metric_id_array, self.sequence_metric_dict = gcam_data\n\n self.config.logger.info('PERFORMANCE: Projected landuse data prepared in {0} seconds'.format(time.time() - t0))", "def createTerritoryGeometries(config, start_time):\n # get the correct names for all of the provinces within each territory\n file_name = config['shape_files_path'] + config['county_shape_file_name']\n names_df = gpd.read_file(file_name)\n names_df.rename(columns={'NAMELSAD':'NAME'})\n names_df = names_df[['GEOID', 'NAME']]\n\n df_holder = []\n # read in block files for the 4 excluded US territories\n for 
territory in ['60','66','69','78']:\n try:\n temp_time = time.localtime()\n # open the appropriate block file for the given territory\n file_name = config['shape_files_path'] +\\\n \"block/tl_%s_%s_tabblock%s.shp\" %\\\n (config['census_vintage'],territory,config['census_vintage'][2:])\n temp_df = gpd.read_file(file_name)\n # modify the column names so they match what we expect in the tract and \n # county geojson files\n change_columns = { 'STATEFP%s' % config['census_vintage'][2:]:'state_fips', \n 'COUNTYFP%s' % config['census_vintage'][2:]: 'county_fips',\n 'GEOID%s' % config['census_vintage'][2:]:'block_fips',\n 'ALAND%s' % config['census_vintage'][2:]:'aland'}\n temp_df.rename(columns=change_columns, inplace=True)\n\n # create the tract file for the given territory\n tract_df = temp_df[['block_fips', 'aland', 'geometry']]\n tract_df['GEOID'] = tract_df['block_fips'].str[:11]\n tract_df['NAME']=tract_df['GEOID'].str[5:11]\n tract_df['NAME'] = np.where(tract_df['NAME'].str[4:6] != '00', \n tract_df['NAME'].str[:4] + \".\" + tract_df['NAME'].str[4:6], \n tract_df['NAME'].str[:4])\n\n # dissolve the blocks into tract level detail\n tract_df=tract_df[['GEOID', 'NAME', 'geometry']].loc[tract_df['aland']>0].dissolve(by='GEOID')\n tract_df.reset_index(inplace=True)\n\n # save the newly created tracts for the territory into a shape file\n # for later use by processes\n file_name = config['shape_files_path'] +\\\n \"tract/gz_%s_%s_140_00_500k.shp\" %\\\n (config['census_vintage'],territory)\n tract_df.to_file(file_name)\n\n # provide status or data processing\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - FINISHED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False\n\n try:\n # create the dataframe for capturing county level data\n temp_time = time.localtime()\n county_df = temp_df[['state_fips', 'county_fips', 'aland', 'geometry']]\n county_df['GEOID'] = county_df['state_fips'] + county_df['county_fips']\n\n # merge the block level data at the county level to get the geometry\n county_df=county_df[['GEOID', 'geometry']].loc[county_df['aland']>0].dissolve(by='GEOID')\n\n # the county records for US states include names. The names cannot\n # be easily constructed following a set of rules, so instead we just\n # merge the names of the territories that are listed in the tiger line\n # files with the geometries we just calculated. 
This ends up giving\n # us the information we need to create the equivalent of a fully \n # populated 2010 county cartographic file that includes territories\n county_df = county_df.merge(names_df, left_on='GEOID', right_on='GEOID')\n county_df = county_df[['GEOID', 'NAME', 'geometry']]\n\n # append the information to a list that we will process later\n df_holder.append(county_df)\n\n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - PROCESSED COUNTY DATA FOR\n US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED PROCESSING COUNTY DATA\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False \n\n try:\n # now that we have the county level data for the territories, we need to merge\n # it with the US county data and create a single file for subsequent processing\n # open the county cartographic bounday file\n file_name = config['shape_files_path'] + config['county_cb_shape_file_name']\n county = gpd.read_file(file_name)\n\n # the cartographic boundary files do not have full names, so concatenate the \n # name and lsad columns and overwrite the original name\n county['NAME']=county['NAME'] + ' ' + county['LSAD']\n\n # extract the county fips from the non-standard county fips identifier in the\n # 2010 cartographic boundary file and then preserve only the necessary columns\n county['GEOID']=county['GEO_ID'].str[9:]\n county = county[['GEOID', 'NAME','geometry']]\n\n # append the county data to the list to be used to build the single file\n df_holder.append(county)\n\n # merge all of the dataframes into a single dataframe, sort it, and then \n # write the file out as a shape file so it can be used later for subsequent\n # data processing\n counties = pd.concat([x for x in df_holder])\n counties.sort_values(by='GEOID',inplace=True)\n file_name = config['shape_files_path'] + config['county_gzm_shape_file_name']\n counties.to_file(file_name)\n \n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - COMPLETED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time))) \n return True \n\n except:\n # there was an error in processing. 
Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False", "def run(layers):\n\n # Value above which people are regarded affected\n # For this dataset, 0 is no data, 1 is cloud, 2 is normal water level\n # and 3 is overflow.\n threshold = 0\n\n # Identify hazard and exposure layers\n inundation = get_hazard_layer(layers)\n\n [population] = get_exposure_layers(layers)\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Scale the population layer\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n # Assume an evenly distributed population for Gender\n G = 0.5\n pregnant_ratio = 0.024 # 2.4% of women are estimated to be pregnant\n\n # Calculate breakdown\n P_female = P * G\n P_male = P - P_female\n P_pregnant = P_female * pregnant_ratio\n\n I_female = I * G\n I_male = I - I_female\n I_pregnant = I_female * pregnant_ratio\n\n # Generate text with result for this study\n total = str(int(sum(P.flat) / 1000))\n count = str(int(sum(I.flat) / 1000))\n\n total_female = str(int(sum(P_female.flat) / 1000))\n total_male = str(int(sum(P_male.flat) / 1000))\n total_pregnant = str(int(sum(P_pregnant.flat) / 1000))\n\n affected_female = str(int(sum(I_female.flat) / 1000))\n affected_male = str(int(sum(I_male.flat) / 1000))\n affected_pregnant = str(int(sum(I_pregnant.flat) / 1000))\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'total': total, 'count': count,\n 'total_female': total_female, 'affected_female': affected_female,\n 'total_male': total_male, 'affected_male': affected_male,\n 'total_pregnant': total_pregnant, 'affected_pregnant': affected_pregnant,\n })\n return R", "def main():\n # Return needed Data Frames to analyze\n data_frame, seasons, col, labels, stats, kaggle = load_frames()\n\n # Create the maps now\n create_shot_maps(data_frame,seasons)\n create_scenario_map()\n \n # Create the Plots\n plot_season_graphs(stats)\n plot_pie_charts(kaggle)\n plot_shot_timings(kaggle)\n plot_radar(stats, col, labels)", "def get_glevel_ori_agency(county_cens_file, crime_df, filename, cens_year, city_cens_file=False):\n\n \"\"\"\n 1. Append cities census file to counties census file\n \"\"\"\n national_census_df = pd.read_csv(county_cens_file)\n\n \"\"\"\n Checking for city census file coz we need to first append city census file to the bottom of county census file for 2000 and 2010.\n And city census file is passed only for 2000 and 2010 since for 1990 city and county census data is already together.\n \"\"\"\n if city_cens_file:\n cities_df = pd.read_csv(city_cens_file)\n national_census_df = national_census_df.append([cities_df])\n\n # Drop duplicates\n national_census_df = national_census_df.drop_duplicates(['STATEFP', 'place_fips'])\n national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/Census_{cens_year}_Unique.csv', index=False)\n\n\n \"\"\"\n 2.\n Merge census unique files with Crime_Major_Gov_Fips to get the correct cgovtype, CNTY based on fips state, fips place. \n Also obtain ORI, Agency columns from crime file. 
\n \"\"\"\n national_census_df = national_census_df.merge(crime_df, on=['STATEFP', 'place_fips'], how='right')\n\n\n \"\"\"\n 3. Create final Govt_level = Govt_level_y column which has govt_level values from crime file and get rid of _x and _y columns \n \"\"\"\n national_census_df['Govt_level'] = national_census_df['Govt_level_y']\n national_census_df['CNTY'] = national_census_df['CNTY_y']\n national_census_df.drop(['Govt_level_x', 'Govt_level_y', 'CNTY_x', 'CNTY_y'], axis=1, inplace=True)\n\n \"\"\"\n Add the year column to have year for even the missing census rows for certain ORIs\n \"\"\"\n national_census_df['YEAR'] = cens_year\n\n \"\"\"\n 4. Rearrange columns so that ORI, AGENCY, Govt_level are at the beginning\n \"\"\"\n cols = list(national_census_df.columns.values)\n cols.pop(cols.index('ORI'))\n cols.pop(cols.index('AGENCY'))\n cols.pop(cols.index('Govt_level'))\n cols.pop(cols.index('CNTY'))\n cols.pop(cols.index('YEAR'))\n\n national_census_df = national_census_df[['ORI', 'AGENCY', 'Govt_level', 'CNTY', 'YEAR'] + cols]\n #national_census_df = national_census_df[['ORI', 'AGENCY', 'YEAR'] + cols]\n\n # write the final df with updated govt_level, ori, agency etc. to a csv\n national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/{filename}.csv', index=False)", "def main():\n for city in CITIES:\n fetchNewseumImage(city)", "def main():\n # start_time = time.time()\n\n # city: (latitude, longtitude) +/- 0.5 degree\n # geocenter for EU: 50 9\n cities = {\n 'Vienna': (48, 16),\n 'Brussels': (51, 4),\n 'Sofia': (43, 23),\n 'Zagreb': (46, 16),\n 'Nicosia': (35, 33),\n 'Prague': (50, 14),\n 'Copenhagen': (55, 13),\n 'Tallinn': (59, 25),\n 'Helsinki': (60, 25),\n 'Paris': (49, 2),\n 'Berlin': (53, 13),\n 'Athens': (38, 24),\n 'Budapest': (48, 19),\n 'Dublin': (53, -6),\n 'Rome': (42, 13),\n 'Riga': (57, 24),\n 'Vilnius': (55, 25),\n 'Luxembourg': (50, 6),\n 'Valletta': (36, 15),\n 'Amsterdam': (52, 5),\n 'Warsaw': (52, 21),\n 'Lisbon': (39, -9),\n 'Bucharest': (44, 26),\n 'Bratislava': (48, 17),\n 'Ljubljana': (46, 15),\n 'Madrid': (40, -4),\n 'Stockholm': (59, 18),\n 'London': (52, 0)\n }\n\n cities = OrderedDict(sorted(cities.items(), key=lambda t: t[0]))\n cities_indices = [x for x in range(len(cities))]\n cities_names = [key for key in cities.keys()]\n\n for key, value in cities.items():\n nu_v = hf.equirectangular_projection(\n value[0], value[1], phi_0=50, l_0=9)\n cities[key] = nu_v\n\n decoder = {value: key for (key, value) in cities.items()}\n\n ga.cities = cities\n # ga.cities_names = cities_names\n # ga.cities_indices = cities_indices\n param_names = ['v1', 'v2', 't', 'n', 'pm', 'pc', 'tournsize', 'size']\n f = open('params.txt', 'r')\n param_values = [float(l) if '.' 
in l else int(l) for l in f]\n f.close()\n params = dict(zip(param_names, param_values))\n\n ga.Salesman.diploid = True\n starters = ga.mfp(params['size'])\n v1 = params['v1'] # velocity 1 in Poland\n v2 = params['v2'] # velocity 2 in Poland\n t = params['t'] # period of change of velocity in Poland\n n = params['n'] # number of generations\n pm = params['pm'] # probabilty of mutation (per gene)\n pc = params['pc'] # probability of crossover\n tournsize = params['tournsize']\n\n start_time = time.time()\n salesmen = starters\n ga.Salesman.velocity_pol = v1\n path_s = ga.findbest(salesmen).fitness\n print('first population best: ' + str(round(1 / path_s, 2)) + ' hours')\n\n results = [[0, path_s]]\n counter = 0\n for i in range(n):\n if counter == t // 2 - 1:\n ga.Salesman.velocity_pol = v1 if ga.Salesman.velocity_pol == v2 \\\n else v2\n counter = 0\n counter += 1\n salesmen = ga.evolution(salesmen, pm, pc, tournsize)\n path = ga.findbest(salesmen).fitness\n results.append([i + 1, path])\n\n path_d = ga.findbest(salesmen).fitness\n path_d_seq = ga.findbest(salesmen).best_seq\n print(str(n) + '-th population best (diploidal): ' +\n str(round(1 / path_d, 2)) + ' hours')\n print([decoder[x] for x in path_d_seq])\n print(\"Time elapsed: \" + str(time.time() - start_time) + 's')\n\n start_time = time.time()\n salesmen = starters\n ga.Salesman.diploid = False\n ga.Salesman.velocity_pol = v1\n\n results2 = [[0, path_s]]\n counter = 0\n for i in range(n):\n if counter == t // 2 - 1:\n ga.Salesman.velocity_pol = v1 if ga.Salesman.velocity_pol == v2 \\\n else v2\n counter = 0\n counter += 1\n salesmen = ga.evolution(salesmen, pm, pc, tournsize)\n path = ga.findbest(salesmen).fitness\n results2.append([i + 1, path])\n\n path_h = ga.findbest(salesmen).fitness\n path_h_seq = ga.findbest(salesmen).city_seq\n print(str(n) + '-th population best (haploidal): ' +\n str(round(1 / path_h, 2)) + ' hours')\n print([decoder[x] for x in path_h_seq])\n print(\"Time elapsed: \" + str(time.time() - start_time) + 's')\n\n # plot fitnesses:\n results = np.asarray(results)\n results2 = np.asarray(results2)\n plt.plot(results[:, 0], results[:, 1], 'b-', label='diploidal')\n plt.plot(results2[:, 0], results2[:, 1], 'g-', label='haploidal')\n plt.legend(loc=4)\n plt.show()\n\n # plot paths:\n fig, ax = plt.subplots(1)\n\n starters_best_seq = ga.findbest(starters).city_seq\n starters_best_seq += [starters_best_seq[0]] # close the loop\n starters_best_seq = np.asarray(starters_best_seq)\n plt.plot(starters_best_seq[:, 0], starters_best_seq[:, 1], 'r-', alpha=0.2)\n\n labels = cities_indices\n cities = np.asarray(list(ga.cities.values()))\n\n plt.scatter(cities[:, 0], cities[:, 1], color='r')\n for label, x, y in zip(labels, cities[:, 0], cities[:, 1]):\n plt.annotate(label, xy=(x, y), xytext=(-6, -12),\n textcoords='offset points')\n poland_c = hf.equirectangular_projection(52, 19, 50, 9)\n poland = plt.Circle(poland_c, .047, color='r', alpha=0.3)\n ax.add_artist(poland)\n\n path_d_seq = path_d_seq + [path_d_seq[0]]\n path_d_seq = np.asarray(path_d_seq)\n\n path_h_seq = path_h_seq + [path_h_seq[0]]\n path_h_seq = np.asarray(path_h_seq)\n\n plt.plot(path_h_seq[:, 0],\n path_h_seq[:, 1], 'g-', label='haploidal')\n plt.plot(path_d_seq[:, 0],\n path_d_seq[:, 1], 'b-', label='diploidal')\n\n legend = \"Legend:\\n\"\n legend += \"\\n\".join([str(ii) + ': ' + name\n for ii, name in enumerate(cities_names)])\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(-0.15, 0.95, legend,\n transform=ax.transAxes,\n 
fontsize=14, verticalalignment='top', bbox=props)\n\n plt.axis('off')\n plt.legend(loc=4)\n plt.show()", "def main():\n data = pd.read_csv('countries.csv')\n # import_data_pandas(data)\n # continent_data(data)\n # continent_data_le(data)\n continent_data_gdp_growth(data)", "def compute(self):\n self.find_n()\n\n # call hotspot field plots\n for scenario in self.scenarios:\n fields_dict = {}\n ancestor_files = []\n for filename in io.get_all_ancestor_files(self.cfg,\n pattern='hotspot_*.nc'):\n key = os.path.basename(os.path.dirname(filename))\n splitname = os.path.basename(filename).split(\"_\")\n if key.split(\"_\")[-1] == scenario:\n fields_dict[(\n f\"{splitname[-1].split('.nc')[0]}_\"\n f\"{splitname[1]}_{key}\")] = iris.load_cube(filename)\n ancestor_files.append(filename)\n fields_dict[\"scenario\"] = scenario\n fields_dict[\"ancestors\"] = ancestor_files\n self.hotspot_fields_plot(fields_dict)\n\n # call scatter plots\n for season in self.seasons:\n timeseries_dict = {\"large_scale\": {}, \"regional\": {}}\n for region, value in timeseries_dict.items():\n for filename in io.get_all_ancestor_files(\n self.cfg,\n pattern=f'rolling_mean_{region}_{season}.nc'):\n value[os.path.basename(os.path.dirname(filename))] = (\n iris.load_cube(filename))\n value[os.path.basename(\n os.path.dirname(filename))] = (filename)\n for var_combination in self.var_combinations:\n self.timeseries_scatter_plot(deepcopy(timeseries_dict), season,\n var_combination)", "def run_calib(projector=OPTOMA_HD33()):\n w, h = (0.2160, 0.2794)\n obj_points = np.array([[-w/2, h/2, 0], [w/2, h/2, 0],\n [-w/2, 0, 0], [w/2, 0, 0],\n [-w/2, 0, h/2], [w/2, 0, h/2]])\n\n global img_points, going\n img_points = []\n\n try:\n window = Window()\n window.MoveXY(1600,0)\n window.ShowFullScreen(True)\n going = True\n\n @window.eventx\n def EVT_MOUSE_EVENTS(evt):\n global going, img_points\n if evt.ButtonUp(wx.MOUSE_BTN_LEFT):\n img_points.append(evt.Position)\n print('Picked point %d of 6' % (len(img_points)))\n if len(img_points) == len(obj_points):\n print \"Done\"\n going = False\n\n print(\"\"\"[Extrinsic Calibration] \n\nThere should be 6 points marked on the table and backdrop. 
\nMoving the mouse over the projected display, click each of the points\nin order:\n (left top, on the backdrop),\n (right top, on the backdrop),\n (left center, on the crease),\n (right center, on the crease),\n (left bottom, on the table),\n (right bottom, on the table)\n\nFollow along with this illustration: http://imgur.com/asfsfd.jpg\n\nClick the six points:\n\"\"\")\n\n while going: cv.WaitKey(10)\n\n finally:\n window.Close()\n\n img_points = np.array(img_points, 'f')\n projector.calibrate_extrinsic(img_points, obj_points)\n\n np.save('%s/config/projector' % (newest_folder), (projector.KK, projector.RT))\n print('OK')", "def _run(erosion_distance_metres, output_file_name):\n\n error_checking.assert_is_geq(erosion_distance_metres, 0.)\n file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)\n\n latitudes_deg, longitudes_deg = conus_boundary.read_from_netcdf()\n\n if erosion_distance_metres > 0:\n latitudes_deg, longitudes_deg = conus_boundary.erode_boundary(\n latitudes_deg=latitudes_deg, longitudes_deg=longitudes_deg,\n erosion_distance_metres=erosion_distance_metres\n )\n\n figure_object, axes_object, basemap_object = (\n plotting_utils.create_lambert_conformal_map(\n min_latitude_deg=MIN_PLOT_LATITUDE_DEG,\n max_latitude_deg=MAX_PLOT_LATITUDE_DEG,\n min_longitude_deg=MIN_PLOT_LONGITUDE_DEG,\n max_longitude_deg=MAX_PLOT_LONGITUDE_DEG,\n resolution_string='i'\n )\n )\n\n plotting_utils.plot_coastlines(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR, line_width=BORDER_WIDTH\n )\n plotting_utils.plot_countries(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR, line_width=BORDER_WIDTH\n )\n plotting_utils.plot_states_and_provinces(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR, line_width=BORDER_WIDTH\n )\n plotting_utils.plot_parallels(\n basemap_object=basemap_object, axes_object=axes_object,\n num_parallels=NUM_PARALLELS, line_width=BORDER_WIDTH\n )\n plotting_utils.plot_meridians(\n basemap_object=basemap_object, axes_object=axes_object,\n num_meridians=NUM_MERIDIANS, line_width=BORDER_WIDTH\n )\n\n x_coords_metres, y_coords_metres = basemap_object(\n longitudes_deg, latitudes_deg\n )\n axes_object.plot(\n x_coords_metres, y_coords_metres,\n color=CONUS_COLOUR, linestyle='solid', linewidth=CONUS_LINE_WIDTH\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(output_file_name))\n figure_object.savefig(\n output_file_name, dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)", "def main():\n logfile = setup_log(os.path.join(os.environ['decor'], 'logs',\n 'transform_corr'))\n logfile.info('Started 9.transform_corr.py')\n\n subj_list = ['RSDE', 'VREA']\n for subject in subj_list:\n os.chdir(os.path.join(os.environ['decor'], subject, '6mmblur_results'))\n for m in ['AV', 'A', 'V', 'lowlev']:\n tcorr_suf = '6mmblur_tcorr_out_spearman'\n setnames_call_funcs(logfile, subject, m, tcorr_suf)", "def main():\n\takpPoints,chpPoints = extractSupporterCities(\"Data/PreprocessedAkpTweets.csv\",\n\t\t\t\t\t\t\t\t\t\t\t \"Data/PreprocessedChpTweets.csv\")\n\tgenerateMapPoints(akpPoints,chpPoints)\n\tgenerateCitySentimentData(akpPoints,chpPoints)\n\tgenerateChoroplethMap(\"Data/tr_cities_modified.json\",\"Data/city_ratio.csv\")", "def test_conus():\n sat = gini.GINIZFile(get_test_file(\"TIGN02\", fponly=True))\n assert sat.archive_filename() == \"GOES_SUPER_IR_201509281745.png\"\n assert sat.awips_grid() == 0\n assert 
sat.metadata[\"map_projection\"] == 5", "def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()", "def func(self):\n account = self.account\n city_name = 'Phoenix' if not self.args else self.args\n a = Astral()\n a.solar_depression = 'civil'\n city = a[city_name]\n if not city:\n return\n timezone = city.timezone\n sun = city.sun(date=datetime.date.today(), local=True)\n\n account.msg('Information for %s/%s\\n' % (city_name, city.region))\n account.msg('Timezone: %s' % timezone)\n account.msg('Latitude: %.02f; Longitude: %.02f' % (city.latitude, city.longitude))\n account.msg('Dawn: %s' % str(sun['dawn']))\n account.msg('Sunrise: %s' % str(sun['sunrise']))\n account.msg('Noon: %s' % str(sun['noon']))\n account.msg('Sunset: %s' % str(sun['sunset']))\n account.msg('Dusk: %s' % str(sun['dusk']))", "def main():\n\n region = 'Kanto'\n year = 2000\n callParallelReducedGAwithP_AVR(region)\n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGAwithP_AVR(region)\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGAwithP_AVR(region)\n\n region = 'Kansai'\n year = 2000\n callParallelReducedGAwithP_AVR(region)", "def import_national_boundaries(self, name):\n print \"\\n4.3- importa shape con confini nazionali ISTAT\"\n countrySHP = os.path.join(\"boundaries\", \"italy_2011_WGS84.shp\")\n countrySQL = os.path.join(\"boundaries\", \"italy_%s.sql\" % name)\n if os.path.isfile(countrySQL):\n 
call(\"rm %s\" % countrySQL, shell=True)\n cmd = \"shp2pgsql -s 4326 -W 'LATIN1' %s italy %s > %s\" % (countrySHP, name, countrySQL)\n print cmd\n call(cmd, shell=True)\n call(\"psql -h localhost -U %s -d %s -f %s\" % (self.user, name, countrySQL), shell=True)\n call(\"rm %s\" % countrySQL, shell=True)\n call(\"echo 'CREATE INDEX ON italy USING GIST (geom);'| psql -U %s -d %s\" % (self.user, name), shell=True)\n call(\"echo 'ANALYZE italy;'| psql -U %s -d %s\" % (self.user, name), shell=True)", "def main() -> None:\n # What is each dataset's coverage of arXiv?\n arxiv = pd.read_gbq(f'select * from {DATASET}.dataset_arxiv_coverage', project_id='gcp-cset-projects')\n write_latest(arxiv, OUTPUT_DIR / 'arxiv_coverage.csv')\n\n for x in CITATION_PERCENTILES:\n for arxiv_only in [True, False]:\n table = f'country_share_{x - 1}th{\"_arxiv\" if arxiv_only else \"\"}'\n df = pd.read_gbq(f'select * from {DATASET}.{table}', project_id='gcp-cset-projects')\n write_latest(df, OUTPUT_DIR / f'{table}.csv')\n plot_country_shares(df, OUTPUT_DIR / f'{table}.png')\n country_share_min = pd.read_gbq(f'select * from {DATASET}.country_share_99th_arxiv_min',\n project_id='gcp-cset-projects')\n write_latest(country_share_min, OUTPUT_DIR / f'country_share_99th_arxiv_min.csv')\n plot_country_shares(country_share_min, OUTPUT_DIR / f'country_share_99th_arxiv_min.png')\n # Without any citation threshold for inclusion\n country_shares = pd.read_gbq(f'select * from {DATASET}.country_shares', project_id='gcp-cset-projects')\n write_latest(country_shares, OUTPUT_DIR / f'country_shares.csv')\n plot_country_shares(country_shares, OUTPUT_DIR / f'country_shares.png')\n # DS/MAG/WOS only\n for dataset in ['ds', 'mag', 'wos']:\n dataset_country_shares = pd.read_gbq(f'select * from {DATASET}.country_share_99th_{dataset}',\n project_id=PROJECT_ID)\n write_latest(dataset_country_shares, OUTPUT_DIR / f'country_shares_{dataset}.csv')\n plot_country_shares(dataset_country_shares, OUTPUT_DIR / f'country_shares_{dataset}.png')\n\n df = pd.read_gbq(f'select * from {DATASET}.mag_replication', project_id='gcp-cset-projects')\n df = df.query('country != \"Other\"')\n import plotly.express as px\n fig = px.line(df, x='Year', y='proportion', color='country', range_y=(0, .5))\n fig.show()\n\n # Summarize overlap between predictions by method\n overlap_counts = calculate_overlap('summary')\n write_latest(overlap_counts, OUTPUT_DIR / 'overlap_counts.csv')\n overlap_1pct_counts = calculate_overlap('summary_1pct', columns=[\n 'keyword_hit', 'elsevier_hit', 'subject_hit', 'arxiv_scibert_hit', 'arxiv_scibert_cl_hit',\n 'arxiv_scibert_cv_hit', 'arxiv_scibert_ro_hit'])\n write_latest(overlap_1pct_counts, OUTPUT_DIR / 'overlap_arxiv_99th_counts.csv')\n overlap_arxiv_1pct_min_counts = calculate_overlap('summary_arxiv_1pct_min')\n write_latest(overlap_arxiv_1pct_min_counts, OUTPUT_DIR / 'overlap_arxiv_99th_min_counts.csv')\n\n # Assess divergence between methods/models by subject\n # Keyword hits alone\n kw_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is true and elsevier_hit is false and scibert_hit is false '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(kw_only_subjects, OUTPUT_DIR / 'divergence_subjects_keywords.csv')\n # Elsevier alone\n elsevier_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is 
false and elsevier_hit is true and scibert_hit is false '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(elsevier_only_subjects, OUTPUT_DIR / 'divergence_subjects_elsevier.csv')\n # SciBERT hits alone\n scibert_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is false and elsevier_hit is false and scibert_hit is true '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(scibert_only_subjects, OUTPUT_DIR / 'divergence_subjects_scibert.csv')\n # SciBERT hits alone with arXiv coverage\n arxiv_scibert_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is false and elsevier_hit is false and arxiv_scibert_hit is true '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(arxiv_scibert_only_subjects, OUTPUT_DIR / 'divergence_subjects_arxiv_scibert.csv')\n\n mag_ai = calculate_overlap('mag_ai_fields_overlap', columns=['has_mag_id', 'mag_ai_hit', 'arxiv_scibert_hit'])\n mag_ai['label'] = mag_ai['label'].str.replace('Has_Mag_Id', 'MAG')\n mag_ai['label'] = mag_ai['label'].str.replace('Mag_Ai', 'MAG AI')\n mag_ai['label'] = mag_ai['label'].str.replace('Arxiv_Scibert', 'SciBERT')\n write_latest(mag_ai, OUTPUT_DIR / 'mag_ai_overlap.csv')\n\n # Ancillary table: summarize overlap across datasets\n dataset_overlap = calculate_overlap('dataset_overlap', columns=['in_wos', 'in_ds', 'in_mag'])\n dataset_overlap['label'] = dataset_overlap['label'].str.replace('In_', '').str.upper()\n write_latest(dataset_overlap, OUTPUT_DIR / 'dataset_overlap.csv')\n\n # Summarize overlap across datasets, by whether articles were predicted positive by SciBERT\n do_scibert = calculate_overlap('dataset_overlap_by_prediction',\n columns=['scibert_hit', 'in_wos', 'in_ds', 'in_mag'])\n # This requires some cleanup, because calculate_overlap wasn't written to do overlap + group-by\n do_scibert['label'] = do_scibert['label'].str.replace('In_', '').str.upper()\n do_scibert['label'] = do_scibert['label'].str.replace('SCIBERT . ', '').str.upper()\n do_scibert = do_scibert.query(\"label != 'SCIBERT'\")\n do_scibert = do_scibert.pivot_table(index=['label', 'in_ds', 'in_wos', 'in_mag'], columns='scibert_hit')\n do_scibert = do_scibert.sort_values(['in_wos', 'in_ds', 'in_mag'])\n # Recalculate percentages calculate_overlap did cell count / n, but we want column percentages for easy comparison\n # of overlap between positive and negative predictions\n for pred in [True, False]:\n do_scibert[('Pct', pred)] = do_scibert[('Count', pred)] / do_scibert[('Count', pred)].sum()\n write_latest(do_scibert, OUTPUT_DIR / 'dataset_overlap_by_prediction.csv', index=True)" ]
[ "0.6431515", "0.5800134", "0.574178", "0.54861444", "0.5438259", "0.53394425", "0.5185603", "0.518323", "0.51795524", "0.5169798", "0.51546836", "0.51012677", "0.5046388", "0.5035656", "0.5010574", "0.50065094", "0.5000971", "0.499779", "0.49879894", "0.49829248", "0.4957002", "0.49474823", "0.4944454", "0.49178767", "0.4909603", "0.4906762", "0.48872238", "0.488703", "0.48836517", "0.48549217" ]
0.73377126
0
Deploys results from an intervention to the specified output directory.
def deploy_results(intervention_result: DodInterventionResult, output: str): dataset_deployer.upload_csv( intervention_result.key, intervention_result.projection_df.to_csv(), output ) dataset_deployer.deploy_shape_files( output, intervention_result.key, *intervention_result.shapefile_data ) logger.info(f"Generated state shape files")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deploy_results(result: TopCountiesPipelineResult, key: str, output: str):\n dataset_deployer.upload_json(key, result.api.json(), output)", "def get_output(self, output_dir=\"tools_output\"):\n\n output_dir = self.project_dir / output_dir / self.name\n # create output directory if didn't exist\n if not output_dir.exists():\n os.makedirs(output_dir)\n logger.info(f\"Created {output_dir}\")\n\n for outfile in self.output:\n outfile = self.project_dir / outfile\n if outfile.exists():\n src = os.fspath(outfile)\n dst = os.fspath(output_dir / outfile.name)\n shutil.move(src, dst)\n logger.info(f\"Moved {outfile.name} to {output_dir}\")\n else:\n msg = f\"File not found: {outfile} - did you execute run() before?\"\n logger.error(msg)\n raise FileNotFoundError(msg)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print \"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )", "def Do(self, input_dict: Dict[str, List[types.Artifact]],\n output_dict: Dict[str, List[types.Artifact]],\n exec_properties: Dict[str, Any]) -> None:\n for output_key, artifact_list in output_dict.items():\n for idx, artifact in enumerate(artifact_list):\n dest = artifact.uri\n src = os.path.join(self._test_data_dir, self._component_id, output_key,\n str(idx))\n if not fileio.exists(src):\n raise FileNotFoundError(\"{} does not exist\".format(src))\n io_utils.copy_dir(src, dest)\n logging.info(\"Finished copying from %s to %s\", src, dest)", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. 
Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def main(input_dir, output_dir):\n\n process(input_dir, output_dir)", "def _store_test_result(ptfhost):\n logger.info(\"Copying file from folder: {0} to folder: {1}\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))\n ptfhost.shell(\"cp {0}/*.* {1}/\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))", "def __setup_output_directory(self):\n print('Setting up output directory')\n time_stamp = datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")\n self.output_path = os.path.join(self.output_base_path, '%s_%s' % (self.execution_name, time_stamp))\n print('- Creating output directory: %s' % self.output_path)\n os.makedirs(self.output_path)\n print('- Output directory created')", "def execute(self, log_out, log_err):\n EventGenerator.execute(self, log_out, log_err)\n if 'moller' not in self.name:\n src = os.path.join(self.rundir, 'brems.stdhep')\n dest = os.path.join(self.rundir, self.output_files()[0])\n logger.debug(\"Copying '%s' to '%s'\" % (src, dest))\n shutil.copy(src, dest)", "def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)", "def deploy():\n build()\n copy()\n install()", "def run(self) -> None:\n self.destination_directory.mkdir(parents=True, exist_ok=True)\n repos = self.GetNextBatchOfResults()\n while not self.IsDone(repos):\n num_remaining = (self.repo_query.max_results - self.i)\n repos = repos[:num_remaining]\n self.MakeRepositoryMetas(repos)\n repos = self.GetNextBatchOfResults()", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def upload_result(result_hdf5_path, vodir):\n cmd = 'vcp {0} {2}/{1}'.format(\n result_hdf5_path, os.path.basename(result_hdf5_path),\n vodir)\n print cmd\n subprocess.call(cmd, shell=True)", "def _process_task_output(self):\n # pylint: disable=too-many-branches\n directory = self._executor.out_dir\n if not os.path.exists(directory):\n return\n try:\n for root, _dirs, files in os.walk(directory):\n for name in files:\n filepath = os.path.join(root, name)\n # the name should match what is in the db!\n\n if name == 'output.json':\n log.debug(\"POSTRO FOUND 
output.json\")\n # parse and compare/update with the tasks output ports from db\n output_ports = dict()\n with open(filepath) as f:\n output_ports = json.load(f)\n task_outputs = self._task.output\n for to in task_outputs:\n if to['key'] in output_ports.keys():\n to['value'] = output_ports[to['key']]\n log.debug(\"POSTRPO to['value]' becomes %s\", output_ports[to['key']])\n flag_modified(self._task, \"output\")\n _session = self._db.Session()\n try:\n _session.commit()\n except exc.SQLAlchemyError as e:\n log.debug(e)\n _session.rollback()\n finally:\n _session.close()\n else:\n # we want to keep the folder structure\n if not root == directory:\n rel_name = os.path.relpath(root, directory)\n name = rel_name + \"/\" + name\n\n object_name = str(self._task.project_id) + \"/\" + self._task.node_id + \"/\" + name\n success = False\n ntry = 3\n trial = 0\n while not success and trial < ntry:\n log.debug(\"POSTRO pushes to S3 %s try %s from %s\", object_name, trial, ntry)\n success = self._s3.client.upload_file(self._s3.bucket, object_name, filepath)\n trial = trial + 1\n\n except (OSError, IOError) as _e:\n logging.exception(\"Could not process output\")", "def write_to_output(output):\n try:\n # changing current directory to script directory\n OutputWrite.change_to_script_directory(__file__)\n # writing the output a file\n timestamp_in_secs = time.time()\n time_stamp_readable = datetime.datetime.fromtimestamp(\n timestamp_in_secs).strftime(\"%Y_%m_%d-%Ih_%Mm_%Ss_%p\")\n try:\n if not os.path.isdir('../results'):\n os.chdir('..')\n print('Current directory {0}'.format(os.getcwd()))\n os.mkdir('./results')\n OutputWrite.change_to_script_directory(__file__)\n except OSError as _ex_:\n print(\"Unable to create results directory {0}\".format(_ex_))\n abspath = os.path.abspath('..')\n print('abspath of ..', abspath)\n path = OutputWrite.create_dir_structure()\n file_name = os.path.join(path, 'output_' +\n time_stamp_readable)\n print('The file name after joining', file_name)\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n\n except FileNotFoundError as err:\n print('Unable write the test results into the file {0}'.\n format(err))", "def report(self, output_dir):", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_script(program_folder)\n return None", "def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)", "def perform_upload(path):\n subprocess.call(\n ['twine', 'upload', path + '/dist/*'])", "def append(cls, output_dir, result):\n aggregator = cls.load(output_dir)\n aggregator.append_result(result)", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_CaVEMan_scripts(\n program_folder, self.path2exe, self.ref_fai, self.file1, self.file2,\n 
self.config_file, self.qsub_dir, self.mstep_script, self.merge_script, self.estep_script\n )\n return None", "def main(base_dir: str, output_dir: str) -> None:\n base_path = pathlib.Path(base_dir)\n output_path = pathlib.Path(output_dir).expanduser()\n\n stage_copy_images(base_path, output_path)\n stage_extract_videos(base_path, output_path)", "def run(self):\n out_fd = self.output()\n out_dir = os.path.join(self.LOCAL_ROOT, self.SHARED_RELATIVE_PATH)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n in_fd = self.input()\n\n with in_fd.open('r') as fd:\n result = fd.read()\n with out_fd.open('w') as o_fd:\n o_fd.write(result)", "def output():\n\n print(\"\\n*****************************************************************\")\n print(\"\\nAll transfer data is saved in 'All_transfer_frequencies.csv'\")\n print(\"\\nThe most likely transfers are saved in 'likely_transfers.csv'\")\n\n os.mkdir(\"Transfer_results\")\n os.system(\"mv *.csv Transfer_results\")\n\n print(\"\\nBoth results are saved in the 'Transfer_results' directory\")\n print(\"\\nScript finished running\")\n print(\"\\n*****************************************************************\")", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)" ]
[ "0.65361625", "0.60336024", "0.5989197", "0.58939755", "0.5884008", "0.58762157", "0.58761215", "0.5833462", "0.57733375", "0.576358", "0.57186383", "0.56984663", "0.5694056", "0.56569403", "0.5604466", "0.55966806", "0.559395", "0.5547209", "0.55382", "0.5537242", "0.5533864", "0.55130583", "0.54807377", "0.5471503", "0.5458212", "0.5457301", "0.54511636", "0.54104316", "0.54045844", "0.5401839" ]
0.7408499
0
fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filelike) elements for data to be uploaded as files. Yields the body's chunks as bytes.
def iter(self, fields, files): encoder = codecs.getencoder('utf-8') for key, value in fields.iteritems(): key = self.u(key) yield encoder('--{}\r\n'.format(self.boundary)) yield encoder(self.u( 'Content-Disposition: form-data; name="{}"\r\n').format(key)) yield encoder('\r\n') if isinstance(value, int) or isinstance(value, float): value = str(value) yield encoder(self.u(value)) yield encoder('\r\n') for key, value in files.iteritems(): key = self.u(key) filename = self.u(value.name) yield encoder('--{}\r\n'.format(self.boundary)) yield encoder(self.u('Content-Disposition: form-data; name="{}"; filename="{}"\r\n').format(key, filename)) yield encoder('Content-Type: {}\r\n'.format(mimetypes.guess_type(filename)[0] or 'application/octet-stream')) yield encoder('\r\n') buff = value.read() yield (buff, len(buff)) yield encoder('\r\n') yield encoder('--{}--\r\b'.format(self.boundary))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_multipart_formdata(self, fields, files):\r\n if files is None:\r\n files = []\r\n if fields is None:\r\n fields = {}\r\n\r\n readers = []\r\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\r\n CRLF = '\\r\\n'\r\n L1 = []\r\n for key in fields:\r\n L1.append('--' + BOUNDARY)\r\n L1.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L1.append('')\r\n L1.append(fields[key])\r\n b1 = CRLF.join(L1)\r\n readers.append(b1)\r\n\r\n for file_info in files:\r\n L = []\r\n L.append('')\r\n L.append('--' + BOUNDARY)\r\n disposition = \"Content-Disposition: form-data;\"\r\n filename = _qiniu_escape(file_info.get('filename'))\r\n L.append('%s name=\"file\"; filename=\"%s\"' % (disposition, filename))\r\n L.append('Content-Type: %s' %\r\n file_info.get('mime_type', 'application/octet-stream'))\r\n L.append('')\r\n L.append('')\r\n b2 = CRLF.join(L)\r\n readers.append(b2)\r\n\r\n data = file_info.get('data')\r\n readers.append(data)\r\n\r\n L3 = ['', '--' + BOUNDARY + '--', '']\r\n b3 = CRLF.join(L3)\r\n readers.append(b3)\r\n\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, MultiReader(readers)", "def __encode_multipart_formdata(self, fields, files):\n BOUNDARY = fogbugz._make_boundary()\n\n if len(files) > 0:\n fields['nFileCount'] = str(len(files))\n\n crlf = '\\r\\n'\n buf = fogbugz.BytesIO()\n\n for k, v in fields.items():\n vcall = str\n if isinstance(v, unicode):\n vcall = unicode\n if fogbugz.DEBUG:\n print(\"field: %s: %s\"% (repr(k), repr(v)))\n lines = [\n '--' + BOUNDARY,\n 'Content-disposition: form-data; name=\"%s\"' % k,\n '',\n vcall(v),\n '',\n ]\n buf.write(crlf.join(lines).encode('utf-8'))\n\n n = 0\n for f, h in files.items():\n n += 1\n lines = [\n '--' + BOUNDARY,\n 'Content-disposition: form-data; name=\"File%d\"; '\n 'filename=\"%s\"' % (n, f),\n '',\n ]\n buf.write(crlf.join(lines).encode('utf-8'))\n lines = [\n 'Content-type: application/octet-stream',\n '',\n '',\n ]\n buf.write(crlf.join(lines).encode('utf-8'))\n buf.write(h.read())\n buf.write(crlf.encode('utf-8'))\n\n buf.write(('--' + BOUNDARY + '--' + crlf).encode('utf-8'))\n content_type = \"multipart/form-data; boundary=%s\" % BOUNDARY\n return content_type, buf.getvalue()", "def encode_multipart_formdata(self,fields, files, BOUNDARY = '-----'+mimetools.choose_boundary()+'-----'):\n\n CRLF = '\\r\\n'\n L = []\n if isinstance(fields, dict):\n fields = fields.items()\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n filetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % filetype)\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY # what if no files are encoded\n return content_type, body", "def _encode_multipart_formdata(self, fields, files=[]):\n boundary=_generate_boundary()\n crlf = '\\r\\n'\n\n l = []\n for k, v in fields:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"' % k)\n l.append('')\n l.append(v)\n for (k, f, v) in files:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (k, f))\n l.append('Content-Type: %s' % 
self._get_content_type(f))\n l.append('')\n l.append(v)\n l.append('--' + boundary + '--')\n l.append('')\n body = crlf.join(l)\n return boundary, body", "def encode_multipart_formdata(cls, fields, files):\n boundary = '----------ThIs_Is_tHe_bouNdaRY_$'\n lines = []\n for (key, value) in fields:\n lines.append('--' + boundary)\n lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\n lines.append('')\n lines.append(str(value))\n for (key, filename, value, content_type) in files:\n filename = filename + mimetypes.guess_extension(content_type)\n lines.append('--' + boundary)\n lines.append(\n 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (\n key, filename\n )\n )\n lines.append('Content-Type: %s' % content_type)\n lines.append('')\n lines.append(value)\n lines.append('--' + boundary + '--')\n lines.append('')\n body = b'\\r\\n'.join(map(lambda x: x.encode('utf8') if isinstance(x, str) else x, lines))\n content_type = 'multipart/form-data; boundary=%s' % boundary\n return content_type, body", "def encode_multipart_formdata(fields, files):\r\n # changed the boundary to be more similar to the perl script written by\r\n # Andreas\r\n BOUNDARY = 'xYzZY'\r\n CRLF = '\\r\\n'\r\n L = []\r\n for (key, value) in fields:\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L.append('')\r\n L.append(value)\r\n for (key, filename, value) in files:\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\r\n (key, filename))\r\n L.append('Content-Type: %s' % get_content_type(filename))\r\n L.append('')\r\n L.append(value)\r\n L.append('--' + BOUNDARY + '--')\r\n L.append('')\r\n body = CRLF.join(L)\r\n content_type = 'multipart/form-data'\r\n return content_type, body", "def encode_multipart_formdata(fields, files=()):\r\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\r\n CRLF = '\\r\\n'\r\n L = []\r\n for key, value in fields.items():\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L.append('')\r\n L.append(value)\r\n for (key, filename, value) in files:\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\r\n (key, filename))\r\n content_type = mimetypes.guess_type(filename)[0] or DEFAULT_TYPE\r\n L.append('Content-Type: %s' % content_type)\r\n L.append('')\r\n L.append(value)\r\n L.append('--' + BOUNDARY + '--')\r\n L.append('')\r\n body = CRLF.join(L)\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, body", "def encode_multipart(fields, files, boundary=None):\n\n def escape_quote(s):\n return s.replace('\"', '\\\\\"')\n\n if boundary is None:\n boundary = ''.join(random.choice(_BOUNDARY_CHARS) for i in range(30))\n\n lines = []\n\n for name, value in fields.items():\n lines.extend((\n f'--{boundary}',\n f'Content-Disposition: form-data; name=\"{escape_quote(name)}\"',\n '',\n value,\n ))\n\n for name, value in files.items():\n filename = value['filename']\n mimetype = (value.get('mimetype') or\n mimetypes.guess_type(filename)[0] or\n 'application/octet-stream')\n name, filename = escape_quote(name), escape_quote(filename)\n\n lines.extend((\n f'--{boundary}',\n f'Content-Disposition: form-data; name=\"{name}\"; filename=\"{filename}\"',\n f'Content-Type: {mimetype}',\n '',\n value['content'],\n ))\n\n lines.extend((\n f'--{boundary}--',\n '',\n ))\n body = '\\r\\n'.join(lines)\n\n headers = {\n 'Content-Type': 
f'multipart/form-data; boundary={boundary}',\n 'Content-Length': str(len(body)),\n }\n\n return (body, headers)", "def encode_multipart_formdata(fields, files):\n BOUNDARY = mimetools.choose_boundary()\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n #print key,value\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n #print key, filename\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n content_type = get_content_type(filename)\n #content_type='text/plain; charset=ascii'\n #content_type='application/octet-stream'\n L.append('Content-Type: %s' % content_type)\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def _encode_multipart_formdata(self, fields, files):\r\n BOUNDARY = mimetools.choose_boundary()\r\n content = []\r\n\r\n fields = fields or {}\r\n files = files or {}\r\n\r\n for key in fields:\r\n content.append('--' + BOUNDARY + '\\r\\n')\r\n content.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % key)\r\n content.append('\\r\\n')\r\n content.append(fields[key])\r\n content.append('\\r\\n')\r\n\r\n for key in files:\r\n filename = files[key]['filename']\r\n value = files[key]['content']\r\n content.append('--' + BOUNDARY + '\\r\\n')\r\n content.append('Content-Disposition: form-data; name=\"%s\"; ' % key)\r\n content.append('filename=\"%s\"\\r\\n' % filename)\r\n content.append('\\r\\n')\r\n content.append(value)\r\n content.append('\\r\\n')\r\n\r\n content.append('--')\r\n content.append(BOUNDARY)\r\n content.append('--\\r\\n')\r\n content.append('\\r\\n')\r\n\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n\r\n return content_type, ''.join(map(str, content))", "def multipart_stream(self, metadata, source_path):\n\n boundary = self.MULTIPART_BOUNDARY\n\n yield str.encode('--%s\\r\\nContent-Disposition: form-data; '\n 'name=\"metadata\"\\r\\n\\r\\n' % boundary +\n '%s\\r\\n' % json.dumps(metadata) +\n '--%s\\r\\n' % boundary)\n yield b'Content-Disposition: form-data; name=\"content\"; filename=\"i_love_backups\"\\r\\n'\n yield b'Content-Type: application/octet-stream\\r\\n\\r\\n'\n\n with source_path.open() as stream:\n while True:\n f = stream.read(DEFAULT_BUFFER_SIZE)\n if f:\n yield f\n else:\n break\n\n yield str.encode('\\r\\n--%s--\\r\\n' % boundary +\n 'multipart/form-data; boundary=%s' % boundary)", "def encode_multipart_formdata(fields, files):\r\n BOUNDARY = \"------8f8289fwur280hfoit9073u89428h\"\r\n CRLF = '\\r\\n'\r\n L = []\r\n for (key, value) in fields.items():\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L.append('')\r\n L.append(value)\r\n for (filename, content) in files.items():\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (filename, filename))\r\n L.append('Content-Type: %s' % get_content_type(filename))\r\n L.append('')\r\n L.append(content)\r\n L.append('--' + BOUNDARY + '--')\r\n L.append('')\r\n body = CRLF.join(L)\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, body", "def encode_multipart_formdata(fields, boundary=None):\n body = io.BytesIO()\n if boundary is None:\n boundary = choose_boundary()\n\n for fieldname, value in 
iter_fields(fields):\n body.write(b'--%s\\r\\n' % (boundary))\n\n if isinstance(value, tuple):\n filename, data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"; '\n 'filename=\"%s\"\\r\\n' % (fieldname, filename))\n body.write(b'Content-Type: %s\\r\\n\\r\\n' %\n (get_content_type(filename)))\n else:\n data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"\\r\\n'\n % (fieldname))\n body.write(b'Content-Type: text/plain\\r\\n\\r\\n')\n\n if isinstance(data, int):\n data = str(data) # Backwards compatibility\n\n if isinstance(data, unicode):\n writer(body).write(data)\n else:\n body.write(data)\n\n body.write(b'\\r\\n')\n\n body.write(b'--%s--\\r\\n' % (boundary))\n\n content_type = b'multipart/form-data; boundary=%s' % boundary\n\n return body.getvalue(), content_type", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; 
boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields):\n\tBOUNDARY = '---------------------------473995594142710163552326102'\n\tCRLF = '\\r\\n'\n\tL = []\n\tfor (key, filename, value) in fields:\n\t\tL.append('--' + BOUNDARY)\n\t\tif filename is None:\n\t\t\tL.append('Content-Disposition: form-data; name=\"%s\"' % key)\n\t\telse:\n\t\t\tL.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n\t\t\tL.append('Content-Type: %s' % get_content_type(filename))\n\t\tL.append('')\n\t\tL.append(value)\n\tL.append('--' + BOUNDARY + '--')\n\tL.append('')\n\tbody = CRLF.join(L)\n\tcontent_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n\treturn content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = b'\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = b''\n for l in L:\n if len(body) > 0:\n body = body + CRLF\n if isinstance(l, str):\n body = body + bytes(l, 'utf-8')\n else:\n body = body + l\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def call_with_multipart(self, path, fields=None, files=None):\r\n content_type, mr = self.encode_multipart_formdata(fields, files)\r\n return self.call_with(path, mr, content_type, mr.length())", "def EncodeMultipartFormData(fields, files):\r\n BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'\r\n CRLF = '\\r\\n'\r\n lines = []\r\n for (key, value) in fields:\r\n lines.append('--' + BOUNDARY)\r\n lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n lines.append('')\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n lines.append(value)\r\n for (key, filename, value) in files:\r\n lines.append('--' + BOUNDARY)\r\n lines.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\r\n (key, filename))\r\n lines.append('Content-Type: %s' % GetContentType(filename))\r\n lines.append('')\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n lines.append(value)\r\n lines.append('--' + BOUNDARY + '--')\r\n lines.append('')\r\n body = CRLF.join(lines)\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, body", "def build_body(self):\n # Build a list of lists, each containing \"lines\" of the\n # request. 
Each part is separated by a boundary string.\n # Once the list is built, return a string where each\n # line is separated by '\\r\\n'.\n parts = []\n part_boundary = '--' + self.boundary\n\n # Add the form fields\n parts.extend(\n [bytes(part_boundary.encode(self.charset)),\n bytes(('Content-Disposition: form-data; name=\"%s\"' % name).encode(self.charset))\n if PYTHON_VERSION_3 else ('Content-Disposition: form-data; name=\"%s\"' % name),\n bytes(('Content-Type: text/plain; charset=%s' % self.charset).encode(self.charset)),\n bytes(''.encode(self.charset)),\n bytes(value.encode(self.charset)) if PYTHON_VERSION_3 else value\n ]\n for name, value in self.form_fields\n )\n\n # Add the files to upload\n parts.extend(\n [bytes(part_boundary.encode(self.charset)),\n bytes(('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n (field_name, filename)).encode(self.charset)) if PYTHON_VERSION_3 else\n ('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (field_name, filename)),\n bytes(('Content-Type: %s' % content_type).encode(self.charset)),\n bytes('Content-Transfer-Encoding: binary'.encode(self.charset)),\n bytes(''.encode(self.charset)),\n body,\n ]\n for field_name, filename, content_type, body in self.files\n )\n\n # Flatten the list and add closing boundary marker,\n # then return CR+LF separated data\n flattened = list(itertools.chain(*parts))\n flattened.append(bytes(('--' + self.boundary + '--').encode(self.charset)))\n flattened.append(bytes(''.encode(self.charset)))\n return bytes('\\r\\n'.encode(self.charset)).join(flattened)", "def EncodeMultipartFormData(fields, files):\n BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'\n CRLF = '\\r\\n'\n lines = []\n for (key, value) in fields:\n lines.append('--' + BOUNDARY)\n lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\n lines.append('')\n if type(value) == unicode:\n value = value.encode(\"utf-8\")\n lines.append(value)\n for (key, filename, value) in files:\n if type(filename) == unicode:\n filename = filename.encode(\"utf-8\")\n if type(value) == unicode:\n value = value.encode(\"utf-8\")\n lines.append('--' + BOUNDARY)\n lines.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n (key, filename))\n lines.append('Content-Type: %s' % GetContentType(filename))\n lines.append('')\n lines.append(value)\n lines.append('--' + BOUNDARY + '--')\n lines.append('')\n body = CRLF.join(lines)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def parse_multipart(request):\n\n # This code will process each non-file field in the form\n fields = {}\n data = request.form.to_dict()\n for field in data:\n fields[field] = data[field]\n print(\"Processed field: %s\" % field)\n\n # This code will process each file uploaded\n files = request.files.to_dict()\n for file_name, file in files.items():\n # Note: GCF may not keep files saved locally between invocations.\n # If you want to preserve the uploaded files, you should save them\n # to another location (such as a Cloud Storage bucket).\n file.save(get_file_path(file_name))\n print(\"Processed file: %s\" % file_name)\n\n # Clear temporary directory\n for file_name in files:\n file_path = get_file_path(file_name)\n os.remove(file_path)\n\n return \"Done!\"", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "def _request_generator(request, data_handler):\n # First, the request header.\n yield 
data_handler.request_to_bytes(request)\n\n # Then, for the body. The body can be bytes or an iterator, but that's it.\n # The iterator is the more general case, so let's transform the bytes into\n # an iterator via my friend the list.\n if isinstance(request.body, bytes):\n body = [request.body]\n else:\n body = request.body\n\n for data_chunk in body:\n yield data_handler.body_chunk_to_bytes(data_chunk)\n\n yield data_handler.end_of_body()", "def iter_fields(self, named=None, **kwargs): # pylint: disable=W0613\n # Note: using 'with' here is better than making a shell copy\n if named is not None:\n for name in named:\n with self.fields[name] as f:\n yield f\n else:\n for fld in self.fields:\n with fld as f:\n yield f", "def get_multipart_request_body(query, variables, file, file_name):\n return {\n 'operations': json.dumps({'query': query, 'variables': variables}),\n 'map': json.dumps({file_name: ['variables.file']}), file_name: file}", "def __iter__(self):\n\n # TODO: handle chunked encoding delimited by marker instead\n # of content-length.\n\n while True:\n length = None\n #if line == (self._boundary+\"\\r\\n\").encode('ascii'):\n while True:\n l = self.response.fp.readline()\n # print(\"Chunk start\")\n # l = self.response.fp.readline()\n # l = self.response.fp.readline()\n # print(l)\n\n if l.startswith(b\"Content-Length:\"):\n length = int(l.split(b\" \")[1])\n # print(\"found length\", length)\n\n if length is not None and l== b\"\\r\\n\":\n break\n\n yield self.response.fp.read(length)\n \n # Look for an empty line, signifying the end of the headers." ]
[ "0.6703004", "0.6487768", "0.6286864", "0.62665135", "0.6258376", "0.6140374", "0.60724163", "0.6047501", "0.5997188", "0.5991683", "0.59880644", "0.59826344", "0.59681565", "0.5966974", "0.5966974", "0.5966974", "0.5966974", "0.5918873", "0.59077895", "0.5839803", "0.58249885", "0.5804433", "0.5785088", "0.5782284", "0.5681026", "0.5681026", "0.5583003", "0.54855067", "0.54605544", "0.54599565" ]
0.8044235
0
Makes a batched request against the Facebook Ads API endpoint.
def make_batch_request(self, batch): args = {} args['access_token'] = self.access_token args['batch'] = json.dumps(batch) args = {k.encode('utf-8'): unicode(v).encode('utf-8') for k, v in args.items()} logger.info('Making a batched request with %s' % args) try: f = urllib2.urlopen(self.api_root, urllib.urlencode(args)) data = json.load(f) # For debugging self.data = data for idx, val in enumerate(data): data[idx] = json.loads(val['body']) return data except urllib2.HTTPError as e: logger.info('%s' % e) return json.load(e) except urllib2.URLError as e: logger.warn('URLError: %s' % e.reason)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)", "def _dispatch_batches(self, base_url, endpoint, item_list, prep_args, dataset_id=None, dataset_version=None):\n pool = ThreadPool(processes=self.pool_size)\n batch = []\n\n # Decide which _prep function to use based on the endpoint\n if endpoint == 'import' or endpoint == 'import-events':\n prep_function = Mixpanel._prep_event_for_import\n elif endpoint == 'engage' or endpoint == 'import-people':\n prep_function = Mixpanel._prep_params_for_profile\n else:\n Mixpanel.LOGGER.warning(\n 'endpoint must be \"import\", \"engage\", \"import-events\" or \"import-people\", found: ' + str(endpoint))\n return\n\n if base_url == self.BETA_IMPORT_API:\n batch_size = 1000\n else:\n batch_size = 50\n\n for item in item_list:\n if prep_args is not None:\n # Insert the given item as the first argument to be passed to the _prep function determined above\n prep_args[0] = item\n params = prep_function(*prep_args)\n if params:\n batch.append(params)\n else:\n batch.append(item)\n\n if len(batch) == batch_size:\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n batch = []\n\n # If there are fewer than batch_size updates left ensure one last call is made\n if len(batch):\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n pool.close()\n pool.join()", "def batch(self):\n return self._client.batch()", "def batch_query(url, headers=None, timeout=299):\n\n offset = 0\n count = 0\n\n proxies = {\n 'http': ARGS.proxy_string,\n 'https': ARGS.proxy_string\n }\n\n options = {\n \"headers\": headers,\n \"verify\": False,\n \"timeout\": timeout,\n \"proxies\": proxies,\n \"params\": {}\n }\n\n while True: # do - while offset < count\n options[\"params\"][\"offset\"] = offset\n req = requests.get(url, **options)\n\n if not req.status_code == 200:\n errmsg = \"status_code: {0.status_code}: {0.content}\"\n raise UnknownResult(errmsg.format(req))\n\n res = req.json()\n data = res[\"data\"]\n count = res.get(\"count\", 0)\n\n yield from data\n\n offset += len(data)\n\n if offset >= count:\n break", "def _http_post(\n self, batched_event_list, validation_hit=False, postpone=False, date=None\n ):\n self._check_date_not_in_future(date)\n status_code = None # Default set to know if batch loop does not work and to bound status_code\n\n # set domain\n domain = self._base_domain\n if validation_hit is True:\n domain = self._validation_domain\n logger.info(f\"Sending POST to: {domain}\")\n\n # loop through events in batches of 25\n batch_number = 1\n for batch in batched_event_list:\n url = f\"{domain}?measurement_id={self.measurement_id}&api_secret={self.api_secret}\"\n request = {\"client_id\": self.client_id, \"events\": batch}\n self._add_user_props_to_hit(request)\n\n # make adjustments for 
postponed hit\n request[\"events\"] = (\n {\"name\": batch[\"name\"], \"params\": batch[\"params\"]}\n if (postpone)\n else batch\n )\n\n if date is not None:\n logger.info(f\"Setting event timestamp to: {date}\")\n assert (\n postpone is False\n ), \"Cannot send postponed historical hit, ensure postpone=False\"\n\n ts = self._datetime_to_timestamp(date)\n ts_micro = self._get_timestamp(ts)\n request[\"timestamp_micros\"] = int(ts_micro)\n logger.info(f\"Timestamp of request is: {request['timestamp_micros']}\")\n\n if postpone:\n # add timestamp to hit\n request[\"timestamp_micros\"] = batch[\"_timestamp_micros\"]\n\n req = urllib.request.Request(url)\n req.add_header(\"Content-Type\", \"application/json; charset=utf-8\")\n jsondata = json.dumps(request)\n json_data_as_bytes = jsondata.encode(\"utf-8\") # needs to be bytes\n req.add_header(\"Content-Length\", len(json_data_as_bytes))\n result = urllib.request.urlopen(req, json_data_as_bytes)\n\n status_code = result.status\n logger.info(f\"Batch Number: {batch_number}\")\n logger.info(f\"Status code: {status_code}\")\n batch_number += 1\n\n return status_code", "def ExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def ExecuteBatch(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def BeginExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _batch(self, batch_request_entries):\n necessary_keys = [\"id\", \"version\", \"method\", \"params\"]\n\n results = []\n\n for (idx, request) in enumerate(batch_request_entries):\n error = None\n result = None\n\n # assert presence of important details\n for necessary_key in necessary_keys:\n if not necessary_key in request.keys():\n raise FakeBitcoinProxyException(\"Missing necessary key {} for _batch request number {}\".format(necessary_key, idx))\n\n if isinstance(request[\"params\"], list):\n method = getattr(self, request[\"method\"])\n result = method(*request[\"params\"])\n else:\n # matches error message received through python-bitcoinrpc\n error = {\"message\": \"Params must be an array\", \"code\": -32600}\n\n results.append({\n \"error\": error,\n \"id\": request[\"id\"],\n \"result\": result,\n })\n\n return results", "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "def BeginExecuteBatch(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def Batch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def get_locations(addresses):\n # Construct the URL to do the batch request\n query_string = urlencode({\"apiKey\": YOUR_API_KEY})\n url = f\"{GEOCODING_BATCH_API}?{query_string}\"\n\n # Build the JSON payload for the batch POST request\n data = json.dumps(addresses)\n\n # And use Content-Type: application/json in the headers\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n\n # Make the POST request to the API\n async with aiohttp.ClientSession() as session:\n async with session.post(url, data=data, headers=headers) as response:\n response_json = await 
response.read()\n response_data = json.loads(response_json)\n\n # The API can return a dict with a pending status if it needs more\n # time to complete. Poll the API until the result is ready.\n while isinstance(response_data, dict) and response_data.get(\"status\") == \"pending\":\n # Wait a bit before calling the API\n await asyncio.sleep(0.1)\n\n # Query the result to see if it's ready yet\n request_id = response_data.get(\"id\")\n async with aiohttp.ClientSession() as session:\n async with session.get(url + f\"&id={request_id}\") as response:\n response_json = await response.read()\n response_data = json.loads(response_json)\n\n # Gather the results into a dictionary of address -> (lat, lon)\n locations = {}\n for result in response_data:\n address = result[\"query\"][\"text\"]\n coords = result[\"lat\"], result[\"lon\"]\n locations[address] = coords\n\n return locations", "async def send(data, config):\n headers = {'Content-Type': 'application/octet-stream'}\n timeout = aiohttp.ClientTimeout(total=config.TIMEOUT)\n try:\n async with aiohttp.ClientSession(timeout=timeout) as session:\n async with session.post(f\"http://{config.REST_API_URL}/batches\", data=data, headers=headers) as response:\n data = await response.read()\n except Exception as e:\n logging.error(\"Blockchain rest-api is unreachable, Please fix it dude\")\n raise ApiInternalError(\"Blockchain rest-api is unreachable, Please fix it dude\")\n return data", "def fetchBatchAccounts(config, start, limit): \n config['params']['from'] = start\n config['params']['limit'] = limit\n url = config['domain']\n r = requests.get(url, headers=config['headers'], params=config['params']).json()\n print(\"Downloading From: \", config['params']['from'], ' To: ', config['params']['from'] + config['params']['limit'], '| Limit: ', config['params']['limit'])\n return r", "def get_batches(auth, base_url='https://api.cratejoy.com/v1/'):\n \n batch_endpoint = '{}shipment_batches/'.format(base_url)\n\n resp = requests.get(\n batch_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(batch_endpoint,\n resp.status_code))\n print(resp.content)", "def ExecuteBatchQueue(self):\n\t\tself.client.ExecuteBatch(self.batch_queue, 'https://www.google.com/m8/feeds/contacts/default/full/batch')\n\t\tself.ClearBatchQueue();", "def _send_batch(self, base_url, endpoint, batch, dataset_id=None, dataset_version=None, retries=0):\n try:\n params = {'data': base64.b64encode(json.dumps(batch).encode()).decode()}\n if dataset_id:\n params['dataset_id'] = dataset_id\n params['token'] = self.token\n if dataset_version:\n params['dataset_version'] = dataset_version\n response = self.request(base_url, [endpoint], params, 'POST')\n msg = \"Sent \" + str(len(batch)) + \" items on \" + time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"!\"\n Mixpanel.LOGGER.debug(msg)\n return response\n except BaseException as be:\n Mixpanel.LOGGER.debug('Exception in _send_batch')\n Mixpanel.LOGGER.debug(be)\n Mixpanel.LOGGER.warning(\"Failed to import batch, dumping to file import_backup.txt\")\n with open('import_backup.txt', 'a+') as backup:\n json.dump(batch, backup)\n backup.write('\\n')", "def _batchRequest(self, updateEntry, deleteEntry):\n\n print 'Executing batch request to insert, update and delete entries.'\n # feed that holds all the batch rquest entries\n request_feed = gdata.calendar.data.CalendarEventFeed()\n\n # creating an event entry to insert\n insertEntry = gdata.calendar.data.CalendarEventEntry()\n insertEntry.title = 
atom.data.Title(text='Python: batch insert')\n insertEntry.content = atom.data.Content(text='my content')\n start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime())\n end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 3600))\n insertEntry.when.append(gdata.calendar.data.When(start=start_time,\n end=end_time))\n insertEntry.batch_id = gdata.data.BatchId(text='insert-request')\n\n # add the insert entry to the batch feed\n request_feed.AddInsert(entry=insertEntry)\n\n if updateEntry:\n updateEntry.batch_id = gdata.data.BatchId(text='update-request')\n updateEntry.title = atom.data.Title(text='Python: batch update')\n # add the update entry to the batch feed\n request_feed.AddUpdate(entry=updateEntry)\n\n if deleteEntry:\n deleteEntry.batch_id = gdata.data.BatchId(text='delete-request')\n # add the delete entry to the batch feed\n request_feed.AddDelete(entry=deleteEntry)\n\n # submit the batch request to the server\n response_feed = self.cal_client.ExecuteBatch(request_feed, gdata.calendar.client.DEFAULT_BATCH_URL)\n\n # iterate the response feed to get the operation status\n for entry in response_feed.entry:\n print '\\tbatch id: %s' % (entry.batch_id.text,)\n print '\\tstatus: %s' % (entry.batch_status.code,)\n print '\\treason: %s' % (entry.batch_status.reason,)\n if entry.batch_id.text == 'insert-request':\n insertEntry = entry\n elif entry.batch_id.text == 'update-request':\n updateEntry = entry\n\n return (insertEntry, updateEntry)", "def _batch_request(self, jobs):\n return generate_batch_request(jobs, self._batch_request_size)", "def batch(self, request):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/batch\" % self.url_index_name, self.client.timeout, request)", "def batch(self, reqs):\n return self.connection.batch_(reqs)", "def get_activities_response(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0,\n etag=None, min_id=None, cache=None,\n fetch_replies=False, fetch_likes=False,\n fetch_shares=False, fetch_events=False):\n if activity_id:\n # Sometimes Facebook requires post ids in USERID_POSTID format; sometimes\n # it doesn't accept that format. 
I can't tell which is which yet, so try\n # them all.\n ids_to_try = [activity_id]\n if '_' in activity_id:\n user_id_prefix, activity_id = activity_id.split('_', 1)\n ids_to_try.insert(0, activity_id)\n if user_id:\n ids_to_try.append('%s_%s' % (user_id, activity_id))\n\n for id in ids_to_try:\n try:\n posts = [json.loads(self.urlopen(API_OBJECT_URL % id).read())]\n break\n except urllib2.URLError, e:\n logging.warning(\"Couldn't fetch object %s: %s\", id, e)\n else:\n posts = []\n\n if posts == [False]: # FB returns false for \"not found\"\n posts = []\n\n else:\n url = API_SELF_POSTS_URL if group_id == source.SELF else API_HOME_URL\n url = url % (user_id if user_id else 'me', start_index)\n if count:\n url = util.add_query_params(url, {'limit': count})\n headers = {'If-None-Match': etag} if etag else {}\n try:\n resp = self.urlopen(url, headers=headers)\n etag = resp.info().get('ETag')\n posts = json.loads(resp.read()).get('data', [])\n except urllib2.HTTPError, e:\n if e.code == 304: # Not Modified, from a matching ETag\n posts = []\n else:\n raise\n\n activities = [self.post_to_activity(p) for p in posts]\n response = self._make_activities_base_response(activities)\n response['etag'] = etag\n return response", "def start_requests(self):\n authors_pandas = conf.read_from_data('authors.json')\n author_link_list = list(\n map(lambda obj: (obj['keyUrl'], conf.gd_base_url + obj['article_url'], obj['article_url']),\n authors_pandas))\n for link in author_link_list:\n yield Request(url=link[1])", "async def _get_data(self):\n coros = []\n results = []\n for series_ids in self.series_ids:\n response = self._post(data={\"series_id\": series_ids})\n coros.append(response)\n if len(coros) == 5: # throttle at 5\n _ = await asyncio.gather(*coros)\n results.extend(_)\n coros = [] # Reset accumulator\n if coros:\n results.extend(await asyncio.gather(*coros))\n\n return filter(None, results)", "def download_data():\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n FacebookAdsApi.init(config.app_id(),\n config.app_secret(),\n config.access_token())\n ad_accounts = _get_ad_accounts()\n target_accounts = list(filter(None, config.target_accounts().split(',')))\n if len(target_accounts) > 0:\n logging.info('the app can see %s accounts but the configuration specified only %s target accounts: %s', len(ad_accounts), len(target_accounts), ', '.join(target_accounts))\n ad_accounts = [ad_account for ad_account in ad_accounts if ad_account['account_id'] in config.target_accounts()]\n logging.info('after filtering %s accounts will be downloaded: %s', len(target_accounts), ', '.join(target_accounts))\n download_data_sets(ad_accounts)", "def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request 
side\")", "def _do_request(self, endpoint, params=None):\n\n resp = requests.get(\n urljoin(API_BASE_URL, endpoint), headers=self.headers, params=params\n )\n print(urljoin(API_BASE_URL, endpoint))\n if resp.status_code == 404:\n return []\n if resp.status_code == 429:\n period_remaining = int(\n re.match(r\"\\D*(\\d+)\\D*\", resp.json()[\"message\"]).group(1)\n )\n raise RateLimitException(\n message=resp.json()[\"message\"], period_remaining=period_remaining\n )\n resp.raise_for_status()\n return resp.json()", "def get_events_batch() -> PayloadDictList:\n ...", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response" ]
[ "0.60785455", "0.58264667", "0.5737969", "0.5705558", "0.5633526", "0.5589942", "0.55709636", "0.555063", "0.54238087", "0.5399167", "0.53891563", "0.53837866", "0.53830326", "0.53726614", "0.5364764", "0.5341414", "0.5318793", "0.5305902", "0.52968866", "0.5242328", "0.5241959", "0.5237799", "0.5237119", "0.5211229", "0.5197889", "0.51772743", "0.5167802", "0.51583636", "0.515648", "0.514099" ]
0.6579774
0
Makes a batched request with label against the Facebook Ads API.
def make_labeled_batch_request(self, batch): try: labels = batch.keys() queries = batch.values() data = self.make_batch_request(queries) # For debugging self.data = data return dict(zip(labels, data)) except urllib2.HTTPError as e: print '%s' % e return json.load(e) except urllib2.URLError as e: print 'URLError: %s' % e.reason
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_batch_request(self, batch):\n args = {}\n args['access_token'] = self.access_token\n args['batch'] = json.dumps(batch)\n args = {k.encode('utf-8'): unicode(v).encode('utf-8')\n for k, v in args.items()}\n logger.info('Making a batched request with %s' % args)\n try:\n f = urllib2.urlopen(self.api_root, urllib.urlencode(args))\n data = json.load(f)\n # For debugging\n self.data = data\n for idx, val in enumerate(data):\n data[idx] = json.loads(val['body'])\n return data\n except urllib2.HTTPError as e:\n logger.info('%s' % e)\n return json.load(e)\n except urllib2.URLError as e:\n logger.warn('URLError: %s' % e.reason)", "def Batch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def post(self):\n url = self.request.get('url')\n try:\n response = urlfetch.fetch(url)\n if response.status_code == 200:\n items = simplejson.loads(response.content)\n key = Batch(pickled_items=pickle.dumps(items)).put()\n if key:\n taskqueue.Task(\n url='/tasks/etl',\n params={'batch_id': key.id()}\n ).add('etl')\n else:\n logging.info(\"Fetch failed, got response %d\" % response.status_code)\n except urlfetch_errors.DownloadError, e:\n logging.info(\"Twitter responded too slowly. %s\" % e.message)", "def ExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _batch(self, batch_request_entries):\n necessary_keys = [\"id\", \"version\", \"method\", \"params\"]\n\n results = []\n\n for (idx, request) in enumerate(batch_request_entries):\n error = None\n result = None\n\n # assert presence of important details\n for necessary_key in necessary_keys:\n if not necessary_key in request.keys():\n raise FakeBitcoinProxyException(\"Missing necessary key {} for _batch request number {}\".format(necessary_key, idx))\n\n if isinstance(request[\"params\"], list):\n method = getattr(self, request[\"method\"])\n result = method(*request[\"params\"])\n else:\n # matches error message received through python-bitcoinrpc\n error = {\"message\": \"Params must be an array\", \"code\": -32600}\n\n results.append({\n \"error\": error,\n \"id\": request[\"id\"],\n \"result\": result,\n })\n\n return results", "def get_events_batch() -> PayloadDictList:\n ...", "def ExecuteBatch(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "def BeginExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def make_foursquare_requests(self, tids, client, limitor):\n if len(tids) == 0:\n return\n failed = lambda x: isinstance(x, foursquare.FoursquareException) or \\\n 'checkin' not in x or 'venue' not in x['checkin']\n go, waiting = limitor.more_allowed(client)\n if not go:\n time.sleep(waiting + 3)\n print('do batch')\n try:\n answers = [r['checkin']['venue']['id']\n for r in client.multi() if not failed(r)]\n for tid, lid in zip(tids, answers):\n self.queue.put((tid, lid))\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n print(sys.exc_info()[1])\n finally:\n del tids[:]", "def SampleBatch(self, request, context):\n 
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def get_locations(addresses):\n # Construct the URL to do the batch request\n query_string = urlencode({\"apiKey\": YOUR_API_KEY})\n url = f\"{GEOCODING_BATCH_API}?{query_string}\"\n\n # Build the JSON payload for the batch POST request\n data = json.dumps(addresses)\n\n # And use Content-Type: application/json in the headers\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n\n # Make the POST request to the API\n async with aiohttp.ClientSession() as session:\n async with session.post(url, data=data, headers=headers) as response:\n response_json = await response.read()\n response_data = json.loads(response_json)\n\n # The API can return a dict with a pending status if it needs more\n # time to complete. Poll the API until the result is ready.\n while isinstance(response_data, dict) and response_data.get(\"status\") == \"pending\":\n # Wait a bit before calling the API\n await asyncio.sleep(0.1)\n\n # Query the result to see if it's ready yet\n request_id = response_data.get(\"id\")\n async with aiohttp.ClientSession() as session:\n async with session.get(url + f\"&id={request_id}\") as response:\n response_json = await response.read()\n response_data = json.loads(response_json)\n\n # Gather the results into a dictionary of address -> (lat, lon)\n locations = {}\n for result in response_data:\n address = result[\"query\"][\"text\"]\n coords = result[\"lat\"], result[\"lon\"]\n locations[address] = coords\n\n return locations", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def RunBatchJob(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _http_post(\n self, batched_event_list, validation_hit=False, postpone=False, date=None\n ):\n self._check_date_not_in_future(date)\n status_code = None # Default set to know if batch loop does not work and to bound status_code\n\n # set domain\n domain = self._base_domain\n if validation_hit is True:\n domain = self._validation_domain\n logger.info(f\"Sending POST to: {domain}\")\n\n # loop through events in batches of 25\n batch_number = 1\n for batch in batched_event_list:\n url = f\"{domain}?measurement_id={self.measurement_id}&api_secret={self.api_secret}\"\n request = {\"client_id\": self.client_id, \"events\": batch}\n self._add_user_props_to_hit(request)\n\n # make adjustments for postponed hit\n request[\"events\"] = (\n {\"name\": batch[\"name\"], \"params\": batch[\"params\"]}\n if (postpone)\n else batch\n )\n\n if date is not None:\n logger.info(f\"Setting event timestamp to: {date}\")\n assert (\n postpone is False\n ), \"Cannot send postponed historical hit, ensure postpone=False\"\n\n ts = self._datetime_to_timestamp(date)\n ts_micro = self._get_timestamp(ts)\n request[\"timestamp_micros\"] = int(ts_micro)\n logger.info(f\"Timestamp of request is: {request['timestamp_micros']}\")\n\n if postpone:\n # add timestamp to hit\n request[\"timestamp_micros\"] = batch[\"_timestamp_micros\"]\n\n req = urllib.request.Request(url)\n req.add_header(\"Content-Type\", \"application/json; charset=utf-8\")\n jsondata = json.dumps(request)\n json_data_as_bytes = 
jsondata.encode(\"utf-8\") # needs to be bytes\n req.add_header(\"Content-Length\", len(json_data_as_bytes))\n result = urllib.request.urlopen(req, json_data_as_bytes)\n\n status_code = result.status\n logger.info(f\"Batch Number: {batch_number}\")\n logger.info(f\"Status code: {status_code}\")\n batch_number += 1\n\n return status_code", "def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def batch(self, request):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/batch\" % self.url_index_name, self.client.timeout, request)", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def BatchAnnotateImages(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def batch(self):\n return self._client.batch()", "def BeginExecuteBatch(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def call(self):\n for _ in range(len(self._shared_list)):\n # label_kwargs are the counter.labels(...)\n # func_call is the call to be made after the labels e.g. inc for increase\n # func_call_args are the args for the func call, e.g. 1 to\n (label_kwargs, (func_call, *func_call_args)) = self._shared_list.pop(0)\n\n # the below is the same as: `counter.labels(x=1, y=2, z=3).inc(2)`\n self.counter.labels(**label_kwargs).__getattribute__(func_call)(*func_call_args)", "def run(self, batch):\n response = self.post(batch)\n log.info(\"< Discarding batch response\")\n response.close()", "def request_datasets(\n self, requests_batch: Optional[Any] = None, unique_classes: bool = True\n ) -> Tuple[Tuple[DatasetRequest], FeedList]:", "def labels(self, request, *args, **kwargs):\n http_status = status.HTTP_400_BAD_REQUEST\n # pylint: disable=attribute-defined-outside-init\n self.object = instance = self.get_object()\n\n if request.method == \"POST\":\n add_tags_to_instance(request, instance)\n http_status = status.HTTP_201_CREATED\n\n tags = instance.tags\n label = kwargs.get(\"label\")\n\n if request.method == \"GET\" and label:\n data = [tag[\"name\"] for tag in tags.filter(name=label).values(\"name\")]\n\n elif request.method == \"DELETE\" and label:\n count = tags.count()\n tags.remove(label)\n\n # Accepted, label does not exist hence nothing removed\n http_status = (\n status.HTTP_200_OK\n if count > tags.count()\n else status.HTTP_404_NOT_FOUND\n )\n\n data = list(tags.names())\n else:\n data = list(tags.names())\n\n if request.method == \"GET\":\n http_status = status.HTTP_200_OK\n\n setattr(self, \"etag_data\", data)\n\n return Response(data, status=http_status)", "def _batchRequest(self, updateEntry, deleteEntry):\n\n print 'Executing batch request to insert, update and delete entries.'\n # feed that holds all the batch rquest entries\n request_feed = gdata.calendar.data.CalendarEventFeed()\n\n # creating an event entry to insert\n insertEntry = gdata.calendar.data.CalendarEventEntry()\n insertEntry.title = atom.data.Title(text='Python: batch insert')\n insertEntry.content = atom.data.Content(text='my content')\n start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime())\n end_time = 
time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 3600))\n insertEntry.when.append(gdata.calendar.data.When(start=start_time,\n end=end_time))\n insertEntry.batch_id = gdata.data.BatchId(text='insert-request')\n\n # add the insert entry to the batch feed\n request_feed.AddInsert(entry=insertEntry)\n\n if updateEntry:\n updateEntry.batch_id = gdata.data.BatchId(text='update-request')\n updateEntry.title = atom.data.Title(text='Python: batch update')\n # add the update entry to the batch feed\n request_feed.AddUpdate(entry=updateEntry)\n\n if deleteEntry:\n deleteEntry.batch_id = gdata.data.BatchId(text='delete-request')\n # add the delete entry to the batch feed\n request_feed.AddDelete(entry=deleteEntry)\n\n # submit the batch request to the server\n response_feed = self.cal_client.ExecuteBatch(request_feed, gdata.calendar.client.DEFAULT_BATCH_URL)\n\n # iterate the response feed to get the operation status\n for entry in response_feed.entry:\n print '\\tbatch id: %s' % (entry.batch_id.text,)\n print '\\tstatus: %s' % (entry.batch_status.code,)\n print '\\treason: %s' % (entry.batch_status.reason,)\n if entry.batch_id.text == 'insert-request':\n insertEntry = entry\n elif entry.batch_id.text == 'update-request':\n updateEntry = entry\n\n return (insertEntry, updateEntry)", "def ExecuteBatch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ExecuteBatch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def request_api(\r\n student_comments: pd.Series, \r\n url: str, \r\n chunk_size: int = 50\r\n ) -> pd.Series:\r\n \r\n for i, chunk in enumerate(chunks(student_comments, chunk_size)):\r\n print(f'Processing batch {i} of size {len(chunk)}')\r\n \r\n response = chunk.apply(lambda x: requests.get(f'{url}&q={x}') if x is not None else None)\r\n response.to_pickle(Path.cwd().joinpath('OUTPUT').joinpath(f'luis_result_{str(i).zfill(4)}'))", "def search(self):\n\n if (self.latitude is None or self.longitude is None):\n raise Exception('Please specify both a latitude and longitude')\n\n if (self.access_token == '' or self.access_token is None):\n raise Exception('Please specify a valid access token')\n\n # Book-keeping\n id_limit = 50 # Only 50 per /?ids= call allowed by FB\n curr_time = int(round(time.time()))\n venues_count = 0\n events_count = 0\n\n # Initial places request info\n place_params = {\n 'type': 'place',\n 'q': self.query,\n 'center': str(self.latitude) + ',' + str(self.longitude),\n 'distance': self.distance,\n 'limit': 1000,\n 'fields': 'id',\n 'access_token': self.access_token\n }\n place_url = ('https://graph.facebook.com/' + self.version + '/search?' 
+\n urllib.urlencode(place_params))\n\n # Grab places and prepare to get events\n\n places_data = r.get(place_url).json()['data']\n venues_count = len(places_data)\n\n # Batch places based on FB id_limit\n ids = []\n temp_lst = []\n for place in places_data:\n temp_lst.append(place['id'])\n if len(temp_lst) >= id_limit:\n ids.append(temp_lst)\n temp_lst = []\n if len(ids) == 0:\n ids.append(temp_lst)\n\n # Inner function to convert a list of\n # ids to a request url for events\n def ids_to_url(id_lst):\n events_fields = [\n 'id',\n 'type',\n 'name',\n 'cover.fields(id,source)',\n 'picture.type(large)',\n 'description',\n 'start_time',\n 'end_time',\n 'category',\n 'attending_count',\n 'declined_count',\n 'maybe_count',\n 'noreply_count'\n ]\n\n fields = [\n 'id',\n 'name',\n 'about',\n 'emails',\n 'cover.fields(id,source)',\n 'picture.type(large)',\n 'location',\n 'events.fields(' + ','.join(events_fields) + ')'\n ]\n\n timing = ('.since(' + str(self.since) + ')' +\n ('' if self.until is None else '.until(' + str(self.until) + ')'))\n\n events_params = {\n 'ids': ','.join(id_lst),\n 'access_token': self.access_token,\n 'fields': ','.join(fields) + timing\n }\n\n events_url = ('https://graph.facebook.com/' + self.version + '/?' +\n urllib.urlencode(events_params))\n\n return r.get(events_url).json()\n\n # Event results\n results = [ids_to_url(id_lst) for id_lst in ids]\n\n # Inner function to convert a list of\n # of venue result events to a list of\n # well-formatted events\n def venue_to_events(venue):\n venue_events = []\n if 'events' in venue and len(venue['events']['data']) > 0:\n for event in venue['events']['data']:\n event_r = dict()\n event_r['id'] = event['id']\n event_r['name'] = event['name']\n event_r['type'] = event['type']\n event_r['cover_picture'] = event['cover']['source'] if 'cover' in event else None\n event_r['profile_picture'] = event['picture']['data']['url'] if 'picture' in event else None\n event_r['description'] = event['description'] if 'description' in event else None\n event_r['start_time'] = event['start_time'] if 'start_time' in event else None\n event_r['end_time'] = event['end_time'] if 'end_time' in event else None\n event_r['time_from_now'] = self.calculate_start_time_diff(curr_time, event['start_time'])\n event_r['category'] = event['category'] if 'category' in event else None\n event_r['distance'] = (self.haversine_distance([venue['location']['latitude'],\n venue['location']['longitude']],\n [self.latitude, self.longitude]) * 1000\n if 'location' in venue else None)\n\n event_r['stats'] = {\n 'attending': event['attending_count'],\n 'declined': event['declined_count'],\n 'maybe': event['maybe_count'],\n 'noreply': event['noreply_count']\n }\n\n event_r['venue'] = {\n 'id': venue['id'],\n 'name': venue['name'],\n 'about': venue['about'] if 'about' in venue else None,\n 'emails': venue['emails'] if 'emails' in venue else None,\n 'cover_picture': venue['cover']['source'] if 'cover' in venue else None,\n 'profile_picture': venue['picture']['data']['url'] if 'picture' in venue else None,\n 'location': venue['location'] if 'location' in venue else None\n }\n\n venue_events.append(event_r)\n return venue_events\n\n # Grab the events\n events = []\n for result in results:\n for venue_id in result.keys():\n events.extend(venue_to_events(result[venue_id]))\n events_count = len(events)\n\n # Sort if specified\n if self.sort is not None:\n events.sort(self.allowed_sorts[self.sort])\n\n # Return events w/metadata\n return {\n 'events': events,\n 'metadata': { 
'venues': venues_count, 'events': events_count }\n }" ]
[ "0.60108954", "0.52968585", "0.51935077", "0.51130617", "0.49954018", "0.49781558", "0.49417585", "0.49284583", "0.49239787", "0.49023873", "0.48961797", "0.48732227", "0.4869587", "0.48607367", "0.4852175", "0.48043475", "0.47995743", "0.47840145", "0.47726312", "0.4725853", "0.4721541", "0.47191396", "0.4715708", "0.4712265", "0.47118092", "0.47018257", "0.46974567", "0.46974567", "0.46859547", "0.46734008" ]
0.6057076
0
Returns debug information about the given token.
def debug_token(self, token): path = 'debug_token' args = { 'input_token': token, 'access_token': '%s|%s' % (self.app_id, self.app_secret) } return self.make_request(path, 'GET', args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_device_token_info(self, device_token):\n url = DEVICE_TOKEN_URL + device_token\n status, response = self._request('GET', None, url)\n if status == 404:\n return None\n elif status != 200:\n raise AirshipFailure(status, response)\n return json.loads(response)", "def print_token(self):\n\n log.success(\"Your token : [{}]\".format(self.get_token()))", "def debug_access_token(self, token, app_id, app_secret):\n args = {\n \"input_token\": token,\n \"access_token\": \"{0}|{1}\".format(app_id, app_secret),\n }\n return self.request(self.version + \"/\" + \"debug_token\", args=args)", "def token(self):\n return self[\"token\"]", "def debug(self, auth_token, message):\n\n self._log(auth_token, logging.DEBUG, message)", "def get_info(self, token):\n\n openid_resp = get_remote(get_config(\"login.qq.openid_url\") + token)\n self.log.debug(\"get access_token from qq:\" + token)\n info = json.loads(openid_resp[10:-4])\n\n if info.get(\"error\") is not None:\n raise Exception(info)\n\n return info", "def get_token_info_remote(self, token_info_url):", "def print_token(self, token_node_index):\n err_msg = \"The given node is not a token node.\"\n assert isinstance(self.nodes[token_node_index], TokenNode), err_msg\n onset = self.nodes[token_node_index].onset\n offset = self.nodes[token_node_index].offset\n return self.text[onset:offset]", "def _GetDebugTokens(self, cli):\n return [(token.Token.Text, c + ' ') for c in cli.debug.contents()]", "def output_debug_info(self):", "def inspect(self) -> TokenInspection:\n return TokenInspection._load(self._get(\"/api/v1/token/inspect\").json())", "def DebugInfo( self, request_data ):\n pass", "def token(self):\r\n return self._token", "def token(self) -> str:", "def token(self):\n print(\"getter of token called\")\n return self._token", "def _pretty_print_token(self, token):\n INLINE = 0\n BOL = 1\n extended_print = ('ID', 'INT', 'FLOAT', 'STRING')\n next_line_tokens = ('NEWLINE', 'INDENT', 'DEDENT')\n\n if self.printer_state == BOL:\n self.printer_state = INLINE\n\n print(str(token.lineno) + self.level * \" \", end=' ')\n\n if token is None:\n pass\n elif token.type in next_line_tokens:\n if token.type == \"INDENT\":\n self.level += 1\n elif token.type == \"DEDENT\":\n self.level -= 1\n\n print(token.type + '\\n', end=' ')\n self.printer_state = BOL\n elif token.type in extended_print:\n print('(' + token.type + ', ' + str(token.value) + ')', end=' ')\n else:\n print(token.type, end=' ')", "def token(self):\n token = self.lex.token()\n if token is not None:\n print(token)\n return token", "def debug(node):\n print \"%r\" % node", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def LookupToken(self, dmtoken):\n self.ReadClientStateFile()\n return self._registered_tokens.get(dmtoken, None)", "def __str__(self):\n return self.token", "def get_debug():\n return _DEBUG", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def debug():", "def dump_token_info(w):\r\n out = []\r\n lexer.input(w)\r\n while True:\r\n tok = lexer.token()\r\n if not tok: break\r\n out.append(tok)\r\n return out", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return 
debug_info", "def token(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"token\")" ]
[ "0.61889523", "0.6077173", "0.60240054", "0.59475166", "0.5887552", "0.5765328", "0.571191", "0.5630604", "0.5609732", "0.55547845", "0.5550004", "0.55489063", "0.5496969", "0.54789287", "0.5475456", "0.5461978", "0.54539835", "0.5444571", "0.5423897", "0.5423897", "0.5423897", "0.53990394", "0.53988063", "0.5393059", "0.538118", "0.53694046", "0.53608173", "0.5354884", "0.5354884", "0.5347603" ]
0.75205034
0
Returns the users of the given ad account.
def get_adusers(self, account_id, batch=False): path = 'act_%s/users' % account_id return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users(self, account_name=None, account_id=None, path=None,\n user_name=None, user_id=None, search=False ):\n userlist=[]\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n for account in accounts:\n #if account['account_id'] == self.account_id:\n # access =self.get_users_from_account()\n #else:\n if account.get('account_id') == self.eucarc.account_id:\n delegate_account = None\n else:\n delegate_account = account['account_name']\n users = self.get_users_from_account(path=path,\n user_name=user_name,\n user_id=user_id,\n delegate_account=delegate_account,\n search=search)\n for user in users:\n user['account_name']=account['account_name']\n user['account_id']=account['account_id']\n userlist.append(user)\n return userlist", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()", "def get_all_users():", "def get_users_from_account(self, path=None, user_name=None, user_id=None,\n delegate_account=None, search=False):\n self.log.debug('Attempting to fetch all access matching- user_id:' +\n str(user_id) + ' user_name:' + str(user_name) + \" acct_name:\" +\n str(delegate_account))\n retlist = []\n params = {}\n if search:\n re_meth = re.search\n else:\n re_meth = re.match\n if delegate_account:\n params['DelegateAccount'] = delegate_account \n response = self.get_response_items('ListUsers', params, item_marker='users',\n list_marker='Users')\n for user in response:\n if path is not None and not re_meth(path, user['path']):\n continue\n if user_name is not None and not re_meth(user_name, user['user_name']):\n continue\n if user_id is not None and not re_meth(user_id, user['user_id']):\n continue\n retlist.append(user)\n return retlist", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def get_users():\n return db.fetch_users()", "def users(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def get_user_list():\r\n session = tables.get_session()\r\n if session is None:\r\n return {'success': False, 'reason': 'failed'}\r\n try:\r\n user_account = UserAccount()\r\n user_account.find_all_user(session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user details failed: %s', err)\r\n return {'success': False, 'reason': 'failed'}\r\n finally:\r\n session.close()\r\n return {'success': True}", "def listUsers(self):\n return self._client.listUsers()", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def get_users(self, query_args={}):\n endpoint = '/v3/educator/users'\n result = self.request(endpoint, query_args)\n\n users = []\n for data in result.response:\n user = User(data)\n 
users.append(user)\n\n return users", "def get_ldap_users(conn, searchfilter, attrs):\n\n base_dn = conn.server.info.other['DefaultNamingContext'][0]\n conn.search(search_base=base_dn, search_filter=searchfilter, attributes=attrs)\n return conn.entries", "def get_all_users(db):\n return list(db['user'].find())", "def get_users(self):\n return self.get_all_dbusers()", "def getUsers(self) -> List[bbUser.bbUser]:\n return list(self.users.values())", "def get_users(filter, api_site_parameter, page = 1, pagesize = 30, sort = 'reputation'):\n path = \"users\"\n results = __fetch_results(path, api_site_parameter, inname= filter, page = page, pagesize = pagesize, sort = sort)\n return results", "def get_users(self, *args, **kwargs):\n\n users_data = api.get_users(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.User(creds=self.__creds, **user_data) for user_data in users_data]", "def list_users(BrokerId=None, MaxResults=None, NextToken=None):\n pass", "def get_user_list():\n users_tuple = db_session.query(Chat.chatID).all()\n users_list = [user for user, in users_tuple]\n return users_list", "def get_users():\n users = functions.users()\n return users", "def _get_userlist_by_userright(self, userright):\n params = {\n \"action\": \"query\",\n \"list\": \"allusers\",\n \"format\": \"json\",\n \"augroup\": userright,\n \"aulimit\": \"500\",\n }\n r = self.session.get(ENWIKI_API, params=params)\n data = r.json()\n return [u[\"name\"] for u in data[\"query\"][\"allusers\"]]", "def list_users(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"users\", \"id\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )", "def get_users(self):\n query = \"\"\"SELECT firstname,lastname,othernames,email,phonenumber,\\\n username,public_id,isadmin,isactive,registered\\\n from users ORDER BY registered ASC\"\"\"\n conn = self.db\n cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows", "def fetch_users(self):\n data = self._make_request()\n return data['result']", "def get_users(self, email):\n active_users = User.objects.filter(\n email__iexact=email,\n is_active=True\n )\n return (u for u in active_users)", "def _get_users_list(self):\n return self.users['user_id'].tolist()" ]
[ "0.7507129", "0.7396333", "0.7098", "0.7009831", "0.70055866", "0.69502074", "0.6932174", "0.68302804", "0.68110144", "0.67669076", "0.67633724", "0.67538893", "0.67271054", "0.6725447", "0.669157", "0.6640427", "0.6639801", "0.6636435", "0.6625716", "0.6609267", "0.6600872", "0.65843415", "0.6537155", "0.65336865", "0.65334976", "0.65244263", "0.65088403", "0.6489295", "0.6487848", "0.6481793" ]
0.8482868
0
Returns the fields of the given ad account.
def get_adaccount(self, account_id, fields=None, batch=False): path = 'act_%s' % account_id args = {'fields': fields} if fields else {} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def _get_fields(self):\n return self._fields", "def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()", "def get_account_details(self):\n pass", "def get_fields(self):\r\n return self.fields", "def get_account_columns():\n return ar.get_columns()", "def describe_account_attributes():\n pass", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n\n\t\treturn self.__fields", "def listFields(self):\n return self.get_json('/field')", "def get_fields(cls):\n return cls.fields.values()", "def all_fields(item):\n return scom.all_fields(item)", "def fields() -> Dict[str, models.Field]:\n return dict(\n (field.name, field)\n for field in AccountTier._meta.get_fields()\n if field.name not in [\"id\"]\n )", "def getFieldNumbers():\n return _getCampaignDict()[\"field_numbers\"]", "def fields(self):\r\n return self._by_name.iteritems()", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def get_returnable_fields(result, verbose=False):\n check_result(result)\n result_info = get_result(result)\n returnable_fields = result_info[\"returnable_fields\"]\n if verbose:\n pprint(returnable_fields)\n return returnable_fields", "def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info", "def get_fields(self):\n return list(self.metadata.keys())", "def get_ad_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get ad data for account {}'.format(ad_account['account_id']))\n ads = ad_account.get_ads(\n fields=['id',\n 'name',\n 'adset_id',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for ad in ads:\n result[ad['id']] = {'name': ad['name'],\n 'ad_set_id': ad['adset_id'],\n 'attributes': parse_labels(ad.get('adlabels', []))}\n return result", "def get_fields(self):\n \n return self.metadata.keys()", "def get_adcreatives(self, account_id, fields, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]" ]
[ "0.64900297", "0.64633363", "0.6435302", "0.6278439", "0.6271584", "0.6231578", "0.61347127", "0.61293006", "0.6110465", "0.60895866", "0.6078529", "0.6078529", "0.6067266", "0.6041993", "0.60249627", "0.59713835", "0.5970587", "0.59226525", "0.5899934", "0.58937836", "0.58563364", "0.5834959", "0.58260286", "0.58154404", "0.57975066", "0.5790694", "0.5759758", "0.5745378", "0.5737365", "0.5733911" ]
0.66040295
0
Return the fields for the given ad campaign group.
def get_adcampaign_group(self, campaign_group_id, fields, batch=False): path = '%s' % campaign_group_id args = {'fields': fields} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adgroup(self, adgroup_id, fields=None, batch=False):\n path = '%s' % adgroup_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def get_adcreatives_by_adgroup(self, adgroup_id, fields, batch=False):\n path = '{0}/adcreatives'.format(adgroup_id)\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def customer_group_get(group_id=None):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n\n if group_id:\n query += \"\"\"\n AND `group_id` = \\\"%s\\\"\n \"\"\" % (group_id)\n\n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def required_fields(self, gid):\n r = self.get(\"/groups/{g:d}/fields\".format(g=gid))\n return r.json()", "def record_fields(self):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n 
record_fields_dict = {}\n for group_name, dset in record_fields_grp.items():\n record_fields_dict[group_name] = list(dset.asstr())\n\n return record_fields_dict", "def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()", "def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):\n if groupby and groupby[0] == \"state\":\n # Default result structure\n # states = self._get_state_list(cr, uid, context=context)\n states = [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')]\n read_group_all_states = [{\n '__context': {'group_by': groupby[1:]},\n '__domain': domain + [('state', '=', state_value)],\n 'state': state_value,\n 'state_count': 0,\n } for state_value, state_name in states]\n # Get standard results\n read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)\n # Update standard results with default results\n result = []\n for state_value, state_name in states:\n res = filter(lambda x: x['state'] == state_value, read_group_res)\n if not res:\n res = filter(lambda x: x['state'] == state_value, read_group_all_states)\n res[0]['state'] = [state_value, state_name]\n result.append(res[0])\n return result\n else:\n return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` \n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def group_fields(r, group_name, fields):\n g = {}\n for f in fields.keys():\n g[fields[f]] = r.pop(f, None)\n r[group_name] = g\n return r", "def get_grouped_data(self, field_name):\n pass", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def GetGroupMembers(self, group):\n return []", "def get_group_details(self, group_id):\n url = self.groups_url + \"/\" + group_id\n return requests.get(url, headers=self.headers)", "def get_fields(maps_dg):\n fields = []\n for mapi in maps_dg:\n fields.append(nmt.NmtField(des_mask, [mapi]))\n\n return fields", "def _iter_field_paths(grp):\n field_paths = []\n for field_name in grp:\n if isinstance(grp[field_name], h5py.Group):\n for subfield in grp[field_name]:\n\n # if it is a sparse field don't do the subfields since\n # they will be _sparse_idxs and data which are not\n # what we want here\n if field_name not in grp.file['_settings/sparse_fields']:\n field_paths.append(field_name + '/' + 
subfield)\n else:\n field_paths.append(field_name)\n return field_paths", "def get_fields(self):\n\t\tlogging.debug(\"Beginning\")\n\t\toptions=dict(api_key = self.apiKey, results = 0)\n\t\turl = '{ts}channels/{id}/feeds.json'.format(\n\t\t\tts=self.tsRUL,\n\t\t\tid=self.channel\n\t\t)\n\t\ttry:\n\t\t\tresults = requests.get(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The URL didn't return a 200\")\n\t\t\t\treturn\n\t\texcept:\n\t\t\tlogging.error(\"Error calling the thingspeak URL\")\n\t\t\treturn\n\t\tresultsJson = results.json()\n\t\tchannelsJson = resultsJson['channel']\n\t\tfields = dict()\n\t\tfor i in range(1,8):\n\t\t\tif 'field'+str(i) in channelsJson:\n\t\t\t\tfields['field'+str(i)] = channelsJson['field'+str(i)]\n\t\treturn fields", "def get_fields(ds):\n\n # Get layer\n layer = ds.GetLayer(0)\n # feature.GetFieldCount()\n layer_defn = layer.GetLayerDefn()\n field_names = [layer_defn.GetFieldDefn(i).GetName() for i in range(layer_defn.GetFieldCount())]\n\n return field_names", "def get_adgroups_by_adcampaign(self, campaign_id, fields=None,\n status_fields=None, batch=False):\n path = '%s/adgroups' % campaign_id\n args = {'fields': fields} if fields else {}\n if status_fields:\n args['adgroup_status'] = status_fields\n return self.make_request(path, 'GET', args, batch=batch)", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def _get_fields(self):\n return self._fields" ]
[ "0.68681467", "0.6605877", "0.62355703", "0.6148244", "0.6107268", "0.60888064", "0.59329355", "0.5819793", "0.5795829", "0.57690173", "0.5762033", "0.57099044", "0.5699368", "0.56639004", "0.5526836", "0.5524331", "0.54867035", "0.54537773", "0.54388326", "0.5430076", "0.5427567", "0.53581536", "0.5336417", "0.5287742", "0.52775943", "0.52761406", "0.5275775", "0.5258339", "0.5233385", "0.5223667" ]
0.7202808
0
Returns the fields of all ad campaign groups from the given ad account.
def get_adcampaign_groups(self, account_id, fields, batch=False): path = 'act_%s/adcampaign_groups' % account_id args = { 'fields': fields, 'limit': self.DATA_LIMIT } return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adgroups_by_adaccount(self, account_id, fields=None,\n status_fields=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {'fields': fields} if fields else {}\n if status_fields:\n args['adgroup_status'] = status_fields\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adgroups_by_adcampaign(self, campaign_id, fields=None,\n status_fields=None, batch=False):\n path = '%s/adgroups' % campaign_id\n args = {'fields': fields} if fields else {}\n if status_fields:\n args['adgroup_status'] = status_fields\n return self.make_request(path, 'GET', args, batch=batch)", "def get_all_groups(self, account_name=None, account_id=None, path=None, group_name=None,\n group_id=None, search=False ):\n grouplist=[]\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n for account in accounts:\n groups = self.get_groups_from_account(path=path,\n group_name=group_name,\n group_id=group_id,\n delegate_account=account['account_name'],\n search=search)\n for group in groups:\n group['account_name']=account['account_name']\n group['account_id']=account['account_id']\n grouplist.append(group)\n return grouplist", "def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def getAdGroupIds(self):\n query = \"\"\"\n select adgroups.id as adgroup_id from adgroups \n join campaigns on campaigns.id = adgroups.campaign_id\n where adgroups.account_id = '%s'\n and campaigns.status = 'enabled'\n and adgroups.status = 'enabled'\n \n \"\"\" % (self.account_id)\n\n df = pd.read_sql(query, Database().createEngine())\n ids = list(df.adgroup_id.values)\n return ids", "def 
get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def getGroups():\r\n return Group.getGroups()", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def groups(self):\n return self.get_data(\"groups\")", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def get_adgroup(self, adgroup_id, fields=None, batch=False):\n path = '%s' % adgroup_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_all_access_groups():\n\treturn {\"access_groups\": [ag.serialize for ag in AccessGroup.query.all()]}, 200", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def get_groups(self):\n result = self.conn.usergroup.get(status=0, output='extend', selectUsers=\"extend\")\n groups = {group[\"name\"]: Group(\n name=group[\"name\"],\n id=group[\"usrgrpid\"],\n members=group[\"users\"],\n ) for group in result}\n return groups", "def GetGroupList(setting):\n groups = set()\n\n for name in setting:\n dev = setting[name]\n format_, group = GetFieldDef(dev, fields=\"format_, group\")\n if group is not None and len(group) > 0:\n groups.add(group.title())\n if isinstance(format_, dict):\n subgroups = GetGroupList(format_)\n if subgroups is not None and len(subgroups) > 0:\n for group in subgroups:\n groups.add(group.title())\n\n groups=list(groups)\n groups.sort()\n return groups", "def get_all_as_groups(as_connection):\n as_groups_list = []\n get_as_groups = as_connection.get_all_groups()\n as_groups_list.extend(get_as_groups)\n\n token = get_as_groups.next_token\n while token is not None:\n get_as_groups = as_connection.get_all_groups(\n next_token=token)\n as_groups_list.extend(get_as_groups)\n token = get_as_groups.next_token\n print \"Processed {0} AutoScaling Group\"\\\n .format(len(as_groups_list))\n return as_groups_list", "def list_groups(self):\n return self.get_admin(\"groups\")", "def get_list_groups(self):\n list_response = 
requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)" ]
[ "0.72821957", "0.6975432", "0.6340183", "0.6284819", "0.61755216", "0.6174087", "0.6126248", "0.60715556", "0.5999946", "0.592391", "0.5749097", "0.5744151", "0.5610665", "0.55707353", "0.5536677", "0.55062884", "0.5487591", "0.5441287", "0.54341173", "0.5427679", "0.54129875", "0.53985727", "0.53946596", "0.5393789", "0.5391575", "0.53798294", "0.5379178", "0.53697157", "0.5366988", "0.53637373" ]
0.80460984
0
Delete specific campaign group.
def delete_adcampaign_group(self, campaign_group_id, batch=False): path = '%s' % campaign_group_id return self.make_request(path, 'DELETE', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n q = \"\"\"delete from contact_groups where id=%s\"\"\"\n await dbcon.operation(q, (contact_group_id,))", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() 
{}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def do_del_group(dbsync, group):\n pass", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n\n if not click.confirm(\"Are sure you want to delete experiment group `{}`\".format(_group)):\n click.echo('Existing without deleting experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.delete_experiment_group(\n user, project_name, _group)\n # Purge caching\n GroupManager.purge()\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not delete experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 204:\n Printer.print_success(\"Experiment group `{}` was delete successfully\".format(_group))", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def customer_group_delete(group_id):\n result = {\"success\" : 1, \"message\" : \"Customer can not be Deleted\"}\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n \n #clean up the user id\n group_id = db.escape_string(group_id)\n \n query = \"\"\"\n DELETE FROM `groups`\n WHERE `groups`.`group_id` = \"%s\"\n \"\"\" %(group_id)\n cursor = db.cursor()\n try:\n if (cursor.execute(query)) != 0:\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n except Exception as customer_exp:\n result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n finally:\n cursor.close()\n db.close()\n return result", "def delete_group(id, createdby):\n query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n cur.execute(query)", "def del_group(self, group_id, group_type):\n self._mod_group(\n 
command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def rpc_campaign_delete(self, campaign_id):\n\t\tsession = db_manager.Session()\n\t\tsession.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id))\n\t\tsession.commit()\n\t\tsession.close()\n\t\treturn", "async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s and contact_id=%s\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)" ]
[ "0.7501498", "0.74689204", "0.744864", "0.7417873", "0.737822", "0.7363642", "0.7334851", "0.7320734", "0.72889686", "0.7264399", "0.7235754", "0.7199511", "0.7180974", "0.71728706", "0.7139698", "0.7083039", "0.70539457", "0.70402956", "0.7021091", "0.70121497", "0.69845533", "0.69392157", "0.68951714", "0.687174", "0.68704385", "0.68258834", "0.6787721", "0.67502904", "0.6748094", "0.67393607" ]
0.8175089
0
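The positive document for the record above issues a plain DELETE against the campaign group id. As a minimal, self-contained sketch (the standalone helper, its name, and the placeholder id are assumptions added for illustration and are not part of the dataset or the original client class), the request it builds reduces to:

```python
# Hypothetical standalone sketch of the request built by delete_adcampaign_group
# in the record above; the helper name and placeholder id are illustrative only.
def build_delete_adcampaign_group_request(campaign_group_id):
    """Return the (path, http_method) pair the client would hand to make_request."""
    path = '%s' % campaign_group_id  # the API node path is just the group id
    return path, 'DELETE'


if __name__ == '__main__':
    # Placeholder id; a real call would use an actual campaign group id.
    print(build_delete_adcampaign_group_request('120330000000000000'))
```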
Returns the fields for the given ad campaign.
def get_adcampaign(self, campaign_id, fields, batch=False): path = '%s' % campaign_id args = {'fields': fields} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()", "def get_adcampaign_detail(self, account_id, campaign_id, date_preset):\n campaign_fields = [\n 'name', 'campaign_status', 'daily_budget', 'lifetime_budget',\n 'start_time', 'end_time']\n campaign_data_columns = [\n 'campaign_name', 'reach', 'frequency', 'clicks',\n 'actions', 'total_actions', 'ctr', 'spend']\n adgroup_data_columns = [\n 'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',\n 'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',\n 'spend']\n demographic_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']\n placement_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']\n campaign_filters = [{\n 'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaign(campaign_id, campaign_fields, batch=True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', campaign_data_columns,\n campaign_filters, ['action_type'], True),\n self.get_adreport_stats(\n account_id, date_preset, 1, campaign_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', adgroup_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', demographic_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', placement_data_columns,\n campaign_filters, None, True),\n ]\n return self.make_batch_request(batch)", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def _get_fields(self):\n return self._fields", "def getFieldNumbers():\n return _getCampaignDict()[\"field_numbers\"]", "def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)", "def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_campaign(self, campaign_id: str) -> dict:\n return self.http_request(\"GET\", f'/campaign/{campaign_id}')", "def get_adcreative(self, creative_id, fields, batch=False):\n path = '%s' % creative_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n 
return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def _extend_record(self, campaign, fields, pull_ads):\n campaign_out = campaign.api_get(fields=fields).export_all_data()\n if pull_ads:\n campaign_out[\"ads\"] = {\"data\": []}\n ids = [ad[\"id\"] for ad in campaign.get_ads()]\n for ad_id in ids:\n campaign_out[\"ads\"][\"data\"].append({\"id\": ad_id})\n return campaign_out", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def get_fields(self):\r\n return self.fields", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcreatives(self, account_id, fields, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_campaign_info(self, id):\n logger.info(\"Function call: get_campaign_info from: {}\".format(id, ))\n return self.__handle_error(\"Empty campaign id\") if not id else self.__handle_result(self.__send_request('campaigns/{}'.format(id, )))", "def get_fields(cls):\n return cls.fields.values()", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def get_fields(self):\n\n\t\treturn self.__fields", "def get_fields(self):\n\t\tlogging.debug(\"Beginning\")\n\t\toptions=dict(api_key = self.apiKey, results = 0)\n\t\turl = '{ts}channels/{id}/feeds.json'.format(\n\t\t\tts=self.tsRUL,\n\t\t\tid=self.channel\n\t\t)\n\t\ttry:\n\t\t\tresults = requests.get(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The URL didn't return a 200\")\n\t\t\t\treturn\n\t\texcept:\n\t\t\tlogging.error(\"Error calling the thingspeak URL\")\n\t\t\treturn\n\t\tresultsJson = results.json()\n\t\tchannelsJson = resultsJson['channel']\n\t\tfields = dict()\n\t\tfor i in range(1,8):\n\t\t\tif 'field'+str(i) in channelsJson:\n\t\t\t\tfields['field'+str(i)] = channelsJson['field'+str(i)]\n\t\treturn fields", "def get_fields(self):\n \n return self.metadata.keys()", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def listFields(self):\n return self.get_json('/field')", "def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n 
campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data", "def get_fields(self):\n return list(self.metadata.keys())" ]
[ "0.65898985", "0.6338148", "0.6232564", "0.60172665", "0.59051216", "0.58895314", "0.58822185", "0.5873872", "0.5861341", "0.5854177", "0.5851545", "0.58459795", "0.57753587", "0.576402", "0.5692089", "0.5692089", "0.56912607", "0.56907666", "0.5686218", "0.56728125", "0.5672243", "0.5658981", "0.5650722", "0.56174713", "0.55808", "0.5577632", "0.554685", "0.55449986", "0.55355036", "0.54976034" ]
0.711426
0
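For the campaign lookup in the record above, the campaign id becomes the request path and the requested fields travel as a single 'fields' argument. A minimal sketch under the same caveats (the standalone helper and placeholder values are assumptions, not from the dataset):

```python
# Hypothetical sketch mirroring get_adcampaign from the record above.
def build_get_adcampaign_request(campaign_id, fields):
    """Return the (path, http_method, args) triple passed on to make_request."""
    path = '%s' % campaign_id
    args = {'fields': fields}  # e.g. 'id, name, campaign_status'
    return path, 'GET', args


if __name__ == '__main__':
    # Placeholder id and field list for illustration only.
    print(build_get_adcampaign_request('120330000000000001',
                                       'id, name, campaign_status'))
```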
Return the fields of all adcampaigns from the given adcampaign group.
def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields, batch=False): path = '%s/adcampaigns' % campaign_group_id args = {'fields': fields} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcreatives_by_adgroup(self, adgroup_id, fields, batch=False):\n path = '{0}/adcreatives'.format(adgroup_id)\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adgroup(self, adgroup_id, fields=None, batch=False):\n path = '%s' % adgroup_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)", "def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_detail(self, account_id, campaign_id, date_preset):\n campaign_fields = [\n 'name', 'campaign_status', 'daily_budget', 'lifetime_budget',\n 'start_time', 'end_time']\n campaign_data_columns = [\n 'campaign_name', 'reach', 'frequency', 'clicks',\n 'actions', 'total_actions', 'ctr', 'spend']\n adgroup_data_columns = [\n 'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',\n 'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',\n 'spend']\n demographic_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']\n placement_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']\n campaign_filters = [{\n 'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaign(campaign_id, campaign_fields, batch=True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', campaign_data_columns,\n campaign_filters, ['action_type'], True),\n self.get_adreport_stats(\n account_id, date_preset, 1, campaign_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', adgroup_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', demographic_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', placement_data_columns,\n campaign_filters, None, True),\n ]\n return 
self.make_batch_request(batch)", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def get_adgroups_by_adcampaign(self, campaign_id, fields=None,\n status_fields=None, batch=False):\n path = '%s/adgroups' % campaign_id\n args = {'fields': fields} if fields else {}\n if status_fields:\n args['adgroup_status'] = status_fields\n return self.make_request(path, 'GET', args, batch=batch)", "def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns", "async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from contact_group_contacts, contacts\n where contact_group_contacts.contact_group_id = %s\n and contact_group_contacts.contact_id = contacts.id\"\"\"\n return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (contact_group_id,))]", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def _extend_record(self, campaign, fields, pull_ads):\n campaign_out = campaign.api_get(fields=fields).export_all_data()\n if pull_ads:\n campaign_out[\"ads\"] = {\"data\": []}\n ids = [ad[\"id\"] for ad in campaign.get_ads()]\n for ad_id in ids:\n campaign_out[\"ads\"][\"data\"].append({\"id\": ad_id})\n return campaign_out", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n 
campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def parse_groups(self):\n\n data = []\n ads_by_data = []\n for date_time, group in self.groups:\n for ad in group:\n ads_by_data.append({\"ad\": ad})\n date_key = self.date_to_string(date_time)\n data.append({date_key: ads_by_data})\n ads_by_data = []\n\n return data", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def get_adcreatives(self, account_id, fields, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "async def get_garages_by_garage_group_id(self,garage_group_id,account_id ) ->[Garage]:\r\n async with self._db.acquire() as conn:\r\n if not self._user.is_superuser and garage_group_id =='all':\r\n sql = \"\"\"select * from garage where garage_id in (\r\n\t select garage_id from map_garage_to_garage_group where garage_group_id in(\r\n\t\t select garage_group_id from map_garage_group_to_account where account_id=:account_id \r\n\t )\r\n )\r\n \"\"\"\r\n queryResult= [dict(row.items()) async for row in await conn.execute(text(sql),{'account_id':account_id})]\r\n return queryResult\r\n elif garage_group_id == 'all' and self._user.is_superuser:\r\n sql = \"\"\"select * from garage\"\"\"\r\n queryResult= [dict(row.items()) async for row in await conn.execute(sql)]\r\n else:\r\n sql = \"select * from garage where garage_id in (select garage_id from map_garage_to_garage_group where garage_group_id=:garage_group_id)\"\r\n queryResult= [dict(row.items()) async for row in await conn.execute(text(sql),{'garage_group_id':garage_group_id})]\r\n \r\n return queryResult", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def get_campaign(self, campaign_id: str) -> dict:\n return self.http_request(\"GET\", f'/campaign/{campaign_id}')", "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])" ]
[ "0.75774086", "0.67639214", "0.670902", "0.6692576", "0.64996964", "0.64950144", "0.63905364", "0.6336975", "0.60845774", "0.6055715", "0.59464204", "0.5822739", "0.57892954", "0.5702668", "0.5640149", "0.5562213", "0.5470797", "0.54013616", "0.53902537", "0.5338464", "0.53319997", "0.530421", "0.52917904", "0.52325535", "0.5221813", "0.5177148", "0.5175168", "0.51746124", "0.51681787", "0.5157931" ]
0.8035114
0
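Listing the campaigns under a campaign group, as in the record above, only changes the path: the group id is suffixed with the /adcampaigns edge while the fields argument stays the same. A short sketch of that path construction (the helper name and id below are assumptions):

```python
# Hypothetical sketch of the edge path used by get_adcampaigns_of_campaign_group.
def build_campaign_group_edge(campaign_group_id, fields):
    path = '%s/adcampaigns' % campaign_group_id  # group node + adcampaigns edge
    return path, 'GET', {'fields': fields}


if __name__ == '__main__':
    # Placeholder id; real calls would use an existing campaign group.
    print(build_campaign_group_edge('120330000000000002', 'id, name'))
```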
Returns the fields for the given ad group.
def get_adgroup(self, adgroup_id, fields=None, batch=False): path = '%s' % adgroup_id args = {'fields': fields} if fields else {} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def get_fields(maps_dg):\n fields = []\n for mapi in maps_dg:\n fields.append(nmt.NmtField(des_mask, [mapi]))\n\n return fields", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def get_fields(ds):\n\n # Get layer\n layer = ds.GetLayer(0)\n # feature.GetFieldCount()\n layer_defn = layer.GetLayerDefn()\n field_names = [layer_defn.GetFieldDefn(i).GetName() for i in range(layer_defn.GetFieldCount())]\n\n return field_names", "def _iter_field_paths(grp):\n field_paths = []\n for field_name in grp:\n if isinstance(grp[field_name], h5py.Group):\n for subfield in grp[field_name]:\n\n # if it is a sparse field don't do the subfields since\n # they will be _sparse_idxs and data which are not\n # what we want here\n if field_name not in grp.file['_settings/sparse_fields']:\n field_paths.append(field_name + '/' + subfield)\n else:\n field_paths.append(field_name)\n return field_paths", "def required_fields(self, gid):\n r = self.get(\"/groups/{g:d}/fields\".format(g=gid))\n return r.json()", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def _get_fields(self):\n return self._fields", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def get_fields(self):\n\n\t\treturn self.__fields", "def record_fields(self):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n record_fields_dict = {}\n for group_name, dset in record_fields_grp.items():\n record_fields_dict[group_name] = list(dset.asstr())\n\n return record_fields_dict", "def get_adcreatives_by_adgroup(self, adgroup_id, fields, batch=False):\n path = '{0}/adcreatives'.format(adgroup_id)\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_fields(self):\n \n fields = []\n for img in self.img_lst:\n fields += img.get_fields()\n \n fields = list(set(fields))\n \n return fields", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def GetGroupMembers(self, group):\n return []", "def get_grouped_data(self, field_name):\n pass", "def get_fields(self):\r\n return self.fields", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_fields(self):\n fields = []\n for items in self.order_items:\n fields += items.get_fields()\n \n fields = list(set(fields))\n \n field_order = ['recordId', 'orderId', 'itemId', 
'collectionId']\n \n out_fields = field_order\n \n for f in fields:\n if f not in field_order:\n out_fields.append(f)\n \n return out_fields", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def fields(self):\r\n return self._by_name.iteritems()", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def group_fields(r, group_name, fields):\n g = {}\n for f in fields.keys():\n g[fields[f]] = r.pop(f, None)\n r[group_name] = g\n return r", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()" ]
[ "0.6449476", "0.62611914", "0.625818", "0.62575144", "0.62036747", "0.6120664", "0.6064907", "0.5937128", "0.5926256", "0.5881843", "0.5869837", "0.5859543", "0.5796039", "0.57387525", "0.5731148", "0.5718497", "0.5698123", "0.56732863", "0.56500703", "0.56445265", "0.5642179", "0.56362", "0.5620909", "0.5613633", "0.5600126", "0.5599426", "0.5587685", "0.5587685", "0.5585773", "0.5576069" ]
0.6635314
0
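Unlike the campaign lookup, the ad group lookup in the record above treats fields as optional and only includes the argument when one is given. A small sketch of that branch (the helper and placeholder id are illustrative assumptions):

```python
# Hypothetical sketch of the optional-fields handling in get_adgroup above.
def build_get_adgroup_request(adgroup_id, fields=None):
    # Omit 'fields' entirely when the caller does not request specific fields.
    args = {'fields': fields} if fields else {}
    return '%s' % adgroup_id, 'GET', args


if __name__ == '__main__':
    print(build_get_adgroup_request('120330000000000003'))             # no fields
    print(build_get_adgroup_request('120330000000000003', 'id,name'))  # explicit fields
```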
Returns the fields of all ad groups from the given ad account.
def get_adgroups_by_adaccount(self, account_id, fields=None, status_fields=None, batch=False): path = 'act_%s/adgroups' % account_id args = {'fields': fields} if fields else {} if status_fields: args['adgroup_status'] = status_fields return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_all_groups(self, account_name=None, account_id=None, path=None, group_name=None,\n group_id=None, search=False ):\n grouplist=[]\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n for account in accounts:\n groups = self.get_groups_from_account(path=path,\n group_name=group_name,\n group_id=group_id,\n delegate_account=account['account_name'],\n search=search)\n for group in groups:\n group['account_name']=account['account_name']\n group['account_id']=account['account_id']\n grouplist.append(group)\n return grouplist", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def get_adgroups_by_adcampaign(self, campaign_id, fields=None,\n status_fields=None, batch=False):\n path = '%s/adgroups' % campaign_id\n args = {'fields': fields} if fields else {}\n if status_fields:\n args['adgroup_status'] = status_fields\n return self.make_request(path, 'GET', args, batch=batch)", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def show_all_groups(self, account_name=None, account_id=None, path=None,\n group_name=None, group_id=None, search=False, print_table=True):\n pt = PrettyTable(['ACCOUNT:', 'GROUPNAME:', 'GROUP_ID:'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_groups(account_name=account_name, account_id=account_id,\n path=path, group_name=group_name, group_id=group_id,\n search=search)\n for group in list:\n pt.add_row([group['account_name'], group['group_name'], group['group_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def getGroups():\r\n return Group.getGroups()", "def get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def GetGroupList(setting):\n groups = set()\n\n for name in setting:\n dev = setting[name]\n format_, group = GetFieldDef(dev, fields=\"format_, group\")\n if group is not None and len(group) > 0:\n groups.add(group.title())\n if isinstance(format_, dict):\n subgroups = GetGroupList(format_)\n if subgroups is not None and len(subgroups) > 0:\n for group in subgroups:\n groups.add(group.title())\n\n groups=list(groups)\n groups.sort()\n return groups", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def groups(self):\n return self.get_data(\"groups\")", "def get_all_access_groups():\n\treturn {\"access_groups\": [ag.serialize for ag in AccessGroup.query.all()]}, 200", "def getAdGroupIds(self):\n query = \"\"\"\n select adgroups.id as 
adgroup_id from adgroups \n join campaigns on campaigns.id = adgroups.campaign_id\n where adgroups.account_id = '%s'\n and campaigns.status = 'enabled'\n and adgroups.status = 'enabled'\n \n \"\"\" % (self.account_id)\n\n df = pd.read_sql(query, Database().createEngine())\n ids = list(df.adgroup_id.values)\n return ids", "def get_groups(self):\n result = self.conn.usergroup.get(status=0, output='extend', selectUsers=\"extend\")\n groups = {group[\"name\"]: Group(\n name=group[\"name\"],\n id=group[\"usrgrpid\"],\n members=group[\"users\"],\n ) for group in result}\n return groups", "def get_all_nda(self):\n return list(self._groups_groupby_NDA.groups.keys())", "def list_groups(self):\n return self.get_admin(\"groups\")", "def get_memberships(self, kwargs):\n account = kwargs[\"account\"]\n recursive = kwargs.get(\"recursive\", False)\n\n already_printed = set()\n\n def lookup_groups(dn, leading_sp, already_treated):\n results = self.engine.query(self.engine.DISTINGUISHED_NAME(dn), [\"memberOf\", \"primaryGroupID\"])\n for result in results:\n if \"memberOf\" in result:\n for group_dn in result[\"memberOf\"]:\n if group_dn not in already_treated:\n print(\"{g:>{width}}\".format(g=group_dn, width=leading_sp + len(group_dn)))\n already_treated.add(group_dn)\n lookup_groups(group_dn, leading_sp + 4, already_treated)\n\n if \"primaryGroupID\" in result and result[\"primaryGroupID\"]:\n pid = result[\"primaryGroupID\"]\n results = list(self.engine.query(self.engine.PRIMARY_GROUP_ID(pid)))\n if results:\n already_treated.add(results[0][\"dn\"])\n\n return already_treated\n\n results = self.engine.query(self.engine.ACCOUNT_IN_GROUPS_FILTER(account), [\"memberOf\", \"primaryGroupID\"])\n for result in results:\n if \"memberOf\" in result:\n for group_dn in result[\"memberOf\"]:\n print(group_dn)\n if recursive:\n already_printed.add(group_dn)\n s = lookup_groups(group_dn, 4, already_printed)\n already_printed.union(s)\n\n # for some reason, when we request an attribute which is not set on an object,\n # ldap3 returns an empty list as the value of this attribute\n if \"primaryGroupID\" in result and result[\"primaryGroupID\"] != []:\n pid = result[\"primaryGroupID\"]\n results = list(self.engine.query(self.engine.PRIMARY_GROUP_ID(pid)))\n if results:\n print(results[0][\"dn\"])", "def get_all_groups(self):\n return self.groups + ['all']", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def list_group(self, groupname):\n return self.get_admin(\"groups/{}\".format(groupname))", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def get_nested_groups(self, conn, group: str) -> typing.List[str]:\n nested_groups = list()\n conn.search(\n search_base=self.group_search_base,\n search_filter=self.group_search_filter.format(group=group),\n search_scope=ldap3.SUBTREE)\n if conn.response:\n for nested_group in conn.response:\n 
if 'dn' in nested_group:\n nested_groups.extend([nested_group['dn']])\n groups = self.get_nested_groups(conn, nested_group['dn'])\n nested_groups.extend(groups)\n nested_groups = list(set(nested_groups))\n return nested_groups", "def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]", "def get_all_as_groups(as_connection):\n as_groups_list = []\n get_as_groups = as_connection.get_all_groups()\n as_groups_list.extend(get_as_groups)\n\n token = get_as_groups.next_token\n while token is not None:\n get_as_groups = as_connection.get_all_groups(\n next_token=token)\n as_groups_list.extend(get_as_groups)\n token = get_as_groups.next_token\n print \"Processed {0} AutoScaling Group\"\\\n .format(len(as_groups_list))\n return as_groups_list", "def get_group_names(self):\r\n return self.groups.keys()" ]
[ "0.73521173", "0.6695194", "0.63296837", "0.6296121", "0.60746205", "0.58177465", "0.57922953", "0.5769778", "0.5761587", "0.5732374", "0.57210445", "0.5718868", "0.571857", "0.5706689", "0.5658657", "0.5629515", "0.5623619", "0.55995756", "0.557363", "0.55527174", "0.5529558", "0.551751", "0.5512683", "0.55062324", "0.54940253", "0.5489056", "0.54705024", "0.5468669", "0.54660785", "0.5461239" ]
0.72566795
1
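The account-level listing in the record above prefixes the numeric account id with 'act_' and can additionally filter on ad group status. A sketch of how those arguments are assembled (the helper name and sample values are assumptions, not taken from the dataset):

```python
# Hypothetical sketch mirroring get_adgroups_by_adaccount from the record above.
def build_account_adgroups_request(account_id, fields=None, status_fields=None):
    path = 'act_%s/adgroups' % account_id        # ad accounts are addressed as act_<id>
    args = {'fields': fields} if fields else {}
    if status_fields:
        args['adgroup_status'] = status_fields   # e.g. ['ACTIVE', 'PAUSED'] (illustrative)
    return path, 'GET', args


if __name__ == '__main__':
    # Placeholder account id, fields and status list.
    print(build_account_adgroups_request('10150000000000', 'id,name', ['ACTIVE']))
```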
Returns the fields of all ad groups from the given ad campaign.
def get_adgroups_by_adcampaign(self, campaign_id, fields=None, status_fields=None, batch=False): path = '%s/adgroups' % campaign_id args = {'fields': fields} if fields else {} if status_fields: args['adgroup_status'] = status_fields return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adgroups_by_adaccount(self, account_id, fields=None,\n status_fields=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {'fields': fields} if fields else {}\n if status_fields:\n args['adgroup_status'] = status_fields\n return self.make_request(path, 'GET', args, batch=batch)", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def parse_groups(self):\n\n data = []\n ads_by_data = []\n for date_time, group in self.groups:\n for ad in group:\n ads_by_data.append({\"ad\": ad})\n date_key = self.date_to_string(date_time)\n data.append({date_key: ads_by_data})\n ads_by_data = []\n\n return data", "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_all_groups(self, account_name=None, account_id=None, path=None, group_name=None,\n group_id=None, search=False ):\n grouplist=[]\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n for account in accounts:\n groups = self.get_groups_from_account(path=path,\n group_name=group_name,\n group_id=group_id,\n delegate_account=account['account_name'],\n search=search)\n for group in groups:\n group['account_name']=account['account_name']\n group['account_id']=account['account_id']\n grouplist.append(group)\n return grouplist", "def get_adgroup(self, adgroup_id, fields=None, batch=False):\n path = '%s' % adgroup_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def groups(self):\n return self.get_data(\"groups\")", "def getGroups():\r\n return Group.getGroups()", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names", "def get_adcreatives_by_adgroup(self, adgroup_id, fields, batch=False):\n path = '{0}/adcreatives'.format(adgroup_id)\n args 
= {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)", "def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data", "def GetGroupList(setting):\n groups = set()\n\n for name in setting:\n dev = setting[name]\n format_, group = GetFieldDef(dev, fields=\"format_, group\")\n if group is not None and len(group) > 0:\n groups.add(group.title())\n if isinstance(format_, dict):\n subgroups = GetGroupList(format_)\n if subgroups is not None and len(subgroups) > 0:\n for group in subgroups:\n groups.add(group.title())\n\n groups=list(groups)\n groups.sort()\n return groups", "def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def getAdGroupIds(self):\n query = \"\"\"\n select adgroups.id as adgroup_id from adgroups \n join campaigns on campaigns.id = adgroups.campaign_id\n where adgroups.account_id = '%s'\n and campaigns.status = 'enabled'\n and adgroups.status = 'enabled'\n \n \"\"\" % (self.account_id)\n\n df = pd.read_sql(query, Database().createEngine())\n ids = list(df.adgroup_id.values)\n return ids", "def get_all_groups(self):\n return self.groups + ['all']", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def get_groups(self, customer_id='my_customer'):\n try:\n paged_results = self.repository.groups.list(customer=customer_id)\n flattened_results = api_helpers.flatten_list_results(\n paged_results, 'groups')\n LOGGER.debug('Getting all the groups for customer_id = %s,'\n ' flattened_results = %s',\n customer_id, flattened_results)\n return flattened_results\n except RefreshError as e:\n # Authentication failed, log before raise.\n LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)\n raise e\n except (errors.HttpError, HttpLib2Error) as e:\n raise api_errors.ApiExecutionError('groups', e)", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n 
fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result" ]
[ "0.7530305", "0.65122443", "0.6437828", "0.6331688", "0.6045154", "0.5912933", "0.5782121", "0.56986874", "0.5694998", "0.56798816", "0.56371003", "0.56278396", "0.5623327", "0.56128645", "0.5500975", "0.54885703", "0.54868734", "0.54770875", "0.54585457", "0.5445233", "0.54437536", "0.5442808", "0.5431799", "0.5404907", "0.5390356", "0.5377168", "0.5351628", "0.5350834", "0.5315412", "0.53074056" ]
0.73259014
1
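The campaign-scoped variant in the record above differs from the account-scoped one only in its path prefix; the optional fields and status filter are assembled the same way. A two-line sketch of the path difference, with placeholder ids as assumptions:

```python
# Hypothetical comparison: the campaign-scoped listing uses the bare campaign id,
# the account-scoped listing uses 'act_<account_id>', both with the same edge.
print('%s/adgroups' % '6001230000000')    # campaign-scoped path (placeholder id)
print('act_%s/adgroups' % '10150000000')  # account-scoped path (placeholder id)
```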
Returns the fields for the given ad creative.
def get_adcreative(self, creative_id, fields, batch=False): path = '%s' % creative_id args = {'fields': fields} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()", "def meta_fields(item):\n return scom.meta_fields(item)", "def deal_fields(self):\r\n return deals.DealFields(self)", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def _get_fields(self):\n return self._fields", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def get_fields(self, dm_name):\n dm = self.get_dm(dm_name)\n return dm['mdmFields']", "def get_fields(self):\r\n return self.fields", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def all_fields(item):\n return scom.all_fields(item)", "def get_adcreatives(self, account_id, fields, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def list_meta_fields():\n ret = {}\n status, result = _query(action=\"meta\", command=\"fields\")\n root = ET.fromstring(result)\n for field in root:\n field_id = None\n field_ret = {\"name\": field.text}\n for item in field.items():\n field_ret[item[0]] = item[1]\n if item[0] == \"id\":\n field_id = item[1]\n ret[field_id] = field_ret\n return ret", "def get_fields(self):\n\n\t\treturn self.__fields", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def get_fields(self):\n \n fields = []\n for img in self.img_lst:\n fields += img.get_fields()\n \n fields = list(set(fields))\n \n return fields", "def get_fields(self):\n return list(self.metadata.keys())", "def _get_fields(self, album, extra):\n # Start with the configured base fields.\n if album:\n fields = self.config['albumfields'].as_str_seq()\n else:\n fields = self.config['itemfields'].as_str_seq()\n\n # Add the requested extra fields.\n if extra:\n fields += extra\n\n # Ensure we always have the `id` field for identification.\n fields.append('id')\n\n return set(fields)", "def get_fields(ds):\n\n # Get layer\n layer = ds.GetLayer(0)\n # feature.GetFieldCount()\n layer_defn = layer.GetLayerDefn()\n field_names = [layer_defn.GetFieldDefn(i).GetName() for i in range(layer_defn.GetFieldCount())]\n\n return field_names", "def get_fields(self):\n \n return self.metadata.keys()", "def listFields(self):\n return self.get_json('/field')", "def get_fields(dgid, metadata=None, computed_columns=None):\n # NOTE: metadata does not contain computed_columns yet\n if metadata is None:\n conn = get_database_connection(dgid)\n metadata = get_metadata(conn)\n\n # Used to evaluate computed columns\n unify_computed_columns(computed_columns)\n columns = list(metadata.keys())\n select_expr_as = [get_field_name(column, metadata) for column in columns]\n databases = [\"datagrid\"]\n\n if computed_columns:\n # Only passed in when calling from endpoint\n update_state(computed_columns, metadata, databases, columns, select_expr_as)\n # Now metadata has computed columns\n\n fields = {}\n for column in 
metadata:\n datatype = metadata[column][\"type\"]\n field_name = get_field_name(column, metadata)\n qbtype = datatype_to_qbtype(datatype)\n if qbtype is None:\n continue\n\n if datatype in [\"FLOAT\", \"INTEGER\", \"ROW_ID\"]:\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n # name, datatype, min, max, avg, variance, total, stddev, other\n if (metadata[column][\"minimum\"] is not None) and (\n metadata[column][\"minimum\"] is not None\n ):\n min_value = metadata[column][\"minimum\"]\n max_value = metadata[column][\"maximum\"]\n fields[field_name][\"fieldSettings\"] = {\n \"min\": min_value,\n \"max\": max_value,\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"DATETIME\":\n field_exp = \"datetime(%s, 'unixepoch')\" % field_name\n fields[field_exp] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n if (metadata[column][\"minimum\"] is not None) and (\n metadata[column][\"minimum\"] is not None\n ):\n min_value = metadata[column][\"minimum\"]\n max_value = metadata[column][\"maximum\"]\n fields[field_exp][\"fieldSettings\"] = {\n \"min\": min_value,\n \"max\": max_value,\n # \"dateFormat\": \"DD-MM-YYYY\",\n # \"timeFormat\":\n # \"valueFormat\":\n }\n fields[field_exp][\"valueSources\"] = [\n \"value\",\n \"field\",\n \"func\",\n ] # adds Now, and Relative\n\n elif datatype == \"BOOLEAN\":\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n fields[field_name][\"fieldSettings\"] = {\n \"labelYes\": \"True\",\n \"labelNo\": \"False\",\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"TEXT\":\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"JSON\":\n # Asset metadata columns are named\n # 'COLUMN_NAME.metadata' or 'COLUMN_NAME--metadata'\n fields[field_name] = {\n \"label\": column.replace(\".metadata\", \"\").replace(\"--metadata\", \"\"),\n \"field\": field_name,\n \"tooltip\": \"The '%s' column (type 'JSON') from the data grid\"\n % (column,),\n \"type\": \"!struct\",\n \"subfields\": {},\n }\n subfields = ast.literal_eval(metadata[column][\"other\"])\n # Only filterable keys are in subfields\n for key in subfields:\n # Query Builder filter types: \"text\", \"number\", \"boolean\", or \"list-of-text\"\n qbtype = subfields[key][\"type\"]\n if qbtype == \"list-of-text\":\n field_exp = \"json_extract(%s, '$.%s')\" % (field_name, key)\n fields[field_name][\"subfields\"][field_exp] = {\n \"type\": \"text\",\n \"label\": key,\n \"field\": field_name,\n \"tableName\": \"1\", # special signal for JSON queries in our QueryBuilder\n \"operators\": [\"like\"],\n }\n else:\n field_exp = \"json_extract(%s, '$.%s')\" % (field_name, key)\n fields[field_name][\"subfields\"][field_exp] = {\n \"type\": qbtype,\n \"label\": key,\n \"field\": field_name,\n \"tableName\": \"1\", # special signal for JSON queries in our QueryBuilder\n }\n if \"values\" in subfields[key]:\n fields[field_name][\"subfields\"][field_exp][\"type\"] = 
\"select\"\n fields[field_name][\"subfields\"][field_exp][\"fieldSettings\"] = {\n \"listValues\": sorted(subfields[key][\"values\"])\n }\n\n return fields", "def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()", "def get_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ('nwr_rev', 'description', 'works')", "def fields(self) -> List[str]:\n return FRAME_FIELDS[self.header.format] + CALCULATED_FIELDS", "def extract_fields(self, json_dict):\n raise NotImplementedError()", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise" ]
[ "0.6293726", "0.58557326", "0.5828929", "0.57436365", "0.5670117", "0.56427336", "0.56157863", "0.55968785", "0.55868536", "0.5586053", "0.5585029", "0.5538533", "0.5538533", "0.55381703", "0.55337745", "0.5524702", "0.5506261", "0.54799724", "0.54741216", "0.54603386", "0.5459506", "0.54592705", "0.5416563", "0.541382", "0.5380885", "0.53587884", "0.53463703", "0.5317924", "0.5306788", "0.5285659" ]
0.7481938
0
Returns the ad images for the given ad account.
def get_adimages(self, account_id, hashes=None, batch=False): path = 'act_%s/adimages' % account_id args = {} if hashes is not None: args = {'hashes': hashes} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_list(self, account):\n images = self.driver(account).list_images()\n return [image.name for image in images]", "def cmd_account_images(client, args):\n account_images = client.get_account_images(args.username, args.page)\n data = [item.__dict__ for item in account_images]\n generate_output({'account_images': data}, args.output_file)", "def list_amis(self):\n images = self._driver.list_images(ex_owner=self.account_id)\n return images", "def cmd_account_image_ids(client, args):\n account_image_ids = client.get_account_image_ids(args.username, args.page)\n generate_output({'account_image_ids': account_image_ids})", "def get_ads_pixels(self, account_id, fields=None, batch=False):\n path = 'act_%s/adspixels' % account_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_images(self, limit=None):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"images/{1}\".format(self.name, '{}'))\n resp = self._imgur._send_request(url, limit=limit)\n return [Image(img, self._imgur) for img in resp]", "def get_all_images_for_user(username):\n\n user = get_user_by_username(username)\n return user.images", "def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)", "def get_apartment_images(self, soup, apartment_dict):\n\n image_urls = []\n images_container = soup.find('div', class_='photos')\n images_container = images_container.find('div')\n\n # Iterate over images in gallery\n for image_container in images_container.find_all('div'):\n anchor_tag = image_container.find('a')\n if anchor_tag:\n image_urls.append(self.base_url + anchor_tag['href'])\n apartment_dict['image_urls'] = image_urls", "def images(self):\n return self.gameimage_set.all()", "def get_images(self, page_number):", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def _get_images(self, fuzzable_request):\n res = []\n\n try:\n response = self._uri_opener.GET(fuzzable_request.get_uri(),\n cache=False)\n except:\n om.out.debug('Failed to retrieve the page for finding captchas.')\n else:\n # Do not use parser_cache here, it's not good since CAPTCHA implementations\n # *might* change the image name for each request of the HTML\n #dp = parser_cache.dpc.get_document_parser_for( response )\n try:\n document_parser = DocumentParser.DocumentParser(response)\n except BaseFrameworkException:\n return []\n \n image_path_list = document_parser.get_references_of_tag('img')\n\n GET = self._uri_opener.GET\n sha1 = hashlib.sha1\n \n result_iter = self.worker_pool.imap_unordered(GET, image_path_list)\n \n for image_response in result_iter:\n if image_response.is_image():\n img_src = image_response.get_uri()\n img_hash = sha1(image_response.get_body()).hexdigest()\n res.append((img_src, img_hash, response))\n\n return res", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def get_images(self):\n \n return self.img_lst", "def getimgs():", "def 
list_images():\n return json_response(list_manifests())", "def get_image_links(queries):\n images = []\n\n for query in queries:\n url = 'http://www.bing.com/images/search?q=' + urllib.quote_plus(query) + '&FORM=HDRSC2'\n soup = BeautifulSoup(requests.get(url).text, 'lxml')\n links = [a['src'] for a in soup.find_all('img', {'src': re.compile('mm.bing.net')})]\n images.extend(links)\n time.sleep(5) # wait 5 seconds before next scrape\n\n return images", "def get_images(self, ctx, page):\n is_imgur = 'source' in page.meta and page.meta['source'] == 'imgur'\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n images = []\n if is_imgur:\n pp.pprint(page.meta)\n # bind to template via json\n images = self.get_imgur_album_images(page)\n self.albums[album['slug']] = images\n else:\n # get paths of all of the images in the album\n srcs = []\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n imgs = glob.glob(\n GALLERY_DIR + album['slug'] + '/*.' + file_type\n )\n\n for img in imgs:\n img_rel_path = (\n REL_GALLERY_DIR +\n album['slug'] + '/' + img.split('/')[-1]\n )\n srcs.append(img_rel_path)\n\n # split full srcs and thumb srcs from srcs into two lists\n images = []\n thumb_srcs = filter(\n lambda src: src.split('/')[-1].startswith(THUMB_PREFIX),\n srcs\n )\n for thumb_src in thumb_srcs:\n src = thumb_src.replace(THUMB_PREFIX, '')\n thumb_width, thumb_height = self.calc_img_hw(thumb_src)\n width, height = self.calc_img_hw(src)\n images.append({\n 'thumb_src': thumb_src,\n 'thumb_width': thumb_width,\n 'thumb_height': thumb_height,\n\n 'src': src,\n 'width': width,\n 'height': height,\n })\n self.albums[album['slug']] = images", "def images(self, details=True, **query):\n img = _image.ImageDetail if details else _image.Image\n return list(self._list(img, paginated=True, **query))", "def images(self, **kwargs):\n return self.get_list(self.cloudman.compute.images(),\n kind=\"image\")", "def get_images(self):\n \n images = []\n for order in self.order_lst:\n o_items = order.get_items()\n images.append(o_items.get_image())\n \n return images", "async def fetch_all_images(sess: Session = Depends(get_db)):\n image_list = utils_com.get_com_image_list(sess)\n return image_list", "def get_images():\n images = {}\n for k, v in DB.IMAGES.iteritems():\n images[k] = v.__dict__\n return images", "def images(self):\n return self._data[\"images\"]", "def get_all_images(access_token):\n url = 'http://interview.agileengine.com/images'\n headers = {\n 'Authorization': 'Bearer ' + access_token\n }\n images = []\n try:\n logging.info(\"Fetching all the images\")\n response = requests.get(\n url,\n headers=headers\n )\n if response.ok: \n total_pages = response.json().get('pageCount')\n images = response.json().get('pictures')\n logging.info(f\"fetched 1 of {total_pages}\")\n for i in range(2,total_pages + 1):\n paginated_url = f'http://interview.agileengine.com/images?page={i}'\n response = requests.get(\n paginated_url,\n headers=headers\n )\n images += response.json().get('pictures')\n logging.info(f\"fetched {i} of {total_pages}\")\n \n detailed_images = []\n for image in images:\n detail_url = f\"http://interview.agileengine.com/images/{image.get('id')}\"\n \n logging.info(f\"Retrieving detail of {image['id']}\")\n response = requests.get(\n detail_url,\n headers=headers\n )\n if response.ok:\n detailed_images.append(response.json())\n return detailed_images\n except requests.exceptions.HTTPError:\n logging.exception('HTTP error')\n except 
requests.exceptions.ConnectionError:\n logging.exception('Connection error')\n except requests.exceptions.Timeout:\n logging.exception('Timeout error')\n except requests.exceptions.RequestException as e:\n logging.exception('Unexpected error')", "def get_images(self):\n if not hasattr(self, '_BasePublication__images_cache'):\n self.__images_cache = self.images.all()\n return self.__images_cache", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def get_images():\n return _IMAGES", "def get_images(self):\n return self._get_brains(\"Image\")" ]
[ "0.7314663", "0.71794915", "0.65721077", "0.64426255", "0.6411373", "0.6164322", "0.602651", "0.5982048", "0.59179264", "0.5905286", "0.58921134", "0.5881818", "0.58739316", "0.5863717", "0.58315206", "0.58247125", "0.5811616", "0.5795393", "0.5752705", "0.573772", "0.5731795", "0.5714331", "0.57123935", "0.5683858", "0.56809837", "0.56632143", "0.56611115", "0.56569993", "0.5634543", "0.56136835" ]
0.7258551
1
Returns the stats for a Facebook campaign group.
def get_stats_by_adcampaign_group( self, campaign_group_id, fields=None, filters=None, batch=False, start_time=None, end_time=None): args = {} if fields: args['fields'] = json.dumps(fields) if filters: args['filters'] = json.dumps(filters) if start_time: args['start_time'] = self.__parse_time(start_time) if end_time: args['end_time'] = self.__parse_time(end_time) path = '%s/stats' % campaign_group_id return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getGrpStats(group):\n return {'min': group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean(), 'sum':group.sum()}", "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data", "def ListGroupStats(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_stats_by_adgroup(\n self, account_id, adgroup_ids=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adgroupstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n 
return group_stats", "def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data", "def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_conversion_stats(self, adgroup_id, batch=False):\n path = '%s/conversions' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def stats(self) -> Sequence['outputs.GetSystemGroupsGroupStatResult']:\n return pulumi.get(self, \"stats\")", "def get_groups(self):\n url = \"https://m.facebook.com/groups/?seemore\"\n groups = dict()\n self.get(url)\n br = self.find_elements_by_class_name(\"br\")\n for b in br:\n try:\n notis = int(b.text[-2:])\n group_name = b.text[:-2]\n except ValueError:\n group_name = b.text\n notis = 0\n try:\n link = b.find_element_by_tag_name(\"a\").get_attribute('href')\n groups[group_name] = (mfacebookToBasic(link), notis)\n except Exception as e:\n log.error(\"Can't get group link\")\n return groups", "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_conversion_stats_by_adgroup(self, account_id, adgroup_ids=None,\n include_deleted=False, batch=False):\n path = 'act_%s/adgroupconversions' % account_id\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n return self.make_request(path, 'GET', args, batch=batch)", "def get_all_teams(group):\n base_url = 'http://worldcup.kimonolabs.com/api/teams'\n url = (base_url + '?apikey={key}&group={group}&sort={sort}'\n .format(group=group,\n key='KERbxAUfDYovbQnn9pR3pbLWEMRp47AQ',\n sort='groupRank'))\n r = requests.get(url)\n return r.json()", "def get_clan_aggregate_stats_get(self, groupId, modes):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Stats/AggregateClanStats/{groupId}/\"))", "def get_batch_stats(self, batch):\n\t\t\n\t\treturn self.batch_stats[batch]", "def 
DescribeGroupAndStatisticsProxy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeGroupAndStatisticsProxy\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeGroupAndStatisticsProxyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def group1_stats(self):\n return self._group1_stats", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def DescribeProxyGroupStatistics(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeProxyGroupStatistics\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeProxyGroupStatisticsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def list_group_stats(\n self,\n project_name,\n time_range,\n group_id=None,\n service_filter=None,\n timed_count_duration=None,\n alignment=None,\n alignment_time=None,\n order=None,\n page_size=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"list_group_stats\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"list_group_stats\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.list_group_stats,\n default_retry=self._method_configs[\"ListGroupStats\"].retry,\n default_timeout=self._method_configs[\"ListGroupStats\"].timeout,\n client_info=self._client_info,\n )\n\n request = error_stats_service_pb2.ListGroupStatsRequest(\n project_name=project_name,\n time_range=time_range,\n group_id=group_id,\n service_filter=service_filter,\n timed_count_duration=timed_count_duration,\n alignment=alignment,\n alignment_time=alignment_time,\n order=order,\n page_size=page_size,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"project_name\", project_name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n iterator = google.api_core.page_iterator.GRPCIterator(\n client=None,\n method=functools.partial(\n self._inner_api_calls[\"list_group_stats\"],\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n ),\n request=request,\n items_field=\"error_group_stats\",\n request_token_field=\"page_token\",\n response_token_field=\"next_page_token\",\n )\n return iterator", "def customer_group_get(group_id=None):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n 
`group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n\n if group_id:\n query += \"\"\"\n AND `group_id` = \\\"%s\\\"\n \"\"\" % (group_id)\n\n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def test_get_scaling_group_info(self):\n def view_manifest(with_policies, with_webhooks, get_deleting):\n self.assertEqual(with_policies, False)\n self.assertEqual(with_webhooks, False)\n self.assertEqual(get_deleting, True)\n return succeed(manifest)\n\n manifest = {}\n self.group.view_manifest.side_effect = view_manifest\n info = self.perform_with_group(\n Effect(GetScalingGroupInfo(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(info, (self.group, manifest))", "def get_stats_by_adcampaign(self, account_id, campaign_ids=None,\n batch=False, start_time=None, end_time=None):\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adcampaignstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_email_statistic_by_campaigns(self, email):\n logger.info(\"Function call: get_email_statistic_by_campaigns for '{}'\".format(email, ))\n return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('emails/{}/campaigns'.format(email, )))", "def get_table_stats(campaign_id, username):\n # participantids for this campaign\n campaign_user_names = []\n # We are querrying all the users who participate in the campaign\n contribs_for_campaign = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_contribution in contribs_for_campaign:\n campaign_user_names.append(campaign_contribution.username)\n # we get the unique ids so as not to count an id twice\n campaign_user_names_set = set(campaign_user_names)\n # We then re-initialize the ids array\n campaign_user_names = []\n # We now obtain the ranking for all the users in the system and their files improved\n all_camapaign_users_list = []\n # We iterate the individual participants id in a campaign and get the user info\n for user_name in campaign_user_names_set:\n user = User.query.filter_by(username=user_name).first()\n all_camapaign_users_list.append(user)\n \n # We get the users and their contribution data\n all_contributors_data = get_all_users_contribution_data_per_campaign(all_camapaign_users_list, campaign_id)\n current_user_rank = get_user_ranking(all_contributors_data, username)\n\n # We add rank to all contributor's data\n for user_data in all_contributors_data:\n user_data['rank'] = get_user_ranking(all_contributors_data, user_data['username'])\n\n # We get all the campaign coountry sorted data\n all_campaign_country_statistics_data = get_campaign_country_data(campaign_id)\n\n campaign_table_stats = {}\n campaign_table_stats['all_contributors_data'] = 
all_contributors_data\n campaign_table_stats['all_campaign_country_statistics_data'] = all_campaign_country_statistics_data\n campaign_table_stats['current_user_rank'] = current_user_rank\n campaign_table_stats['campaign_editors'] = len(campaign_user_names_set)\n return campaign_table_stats", "def get_feed_group_data(\n self,\n feed: str,\n group: str,\n since: Optional[datetime.datetime] = None,\n next_token: str = None,\n ) -> GroupData:\n try:\n listing_json, record = self._get_feed_group_data()\n if record.content_type != \"application/x-tar\":\n raise UnexpectedMIMEType(record.content_type)\n return GroupData(\n data=record.content,\n next_token=None,\n since=since,\n record_count=1,\n response_metadata={\n \"checksum\": listing_json.get(\"checksum\"),\n \"built\": listing_json.get(\"built\"),\n \"version\": listing_json.get(\"version\"),\n },\n )\n except (HTTPStatusException, json.JSONDecodeError, UnicodeDecodeError) as e:\n logger.debug(\"Error executing grype DB data download: %s\", e)\n raise e" ]
[ "0.6227779", "0.60927224", "0.60745287", "0.60659075", "0.5944774", "0.5805374", "0.575456", "0.5705138", "0.56683", "0.5656494", "0.5629444", "0.5595925", "0.552353", "0.546793", "0.54674387", "0.53961784", "0.53895855", "0.5380818", "0.53679377", "0.53511876", "0.53357524", "0.5308348", "0.5272481", "0.52490944", "0.5240966", "0.5211687", "0.5210435", "0.52081174", "0.520437", "0.51802284" ]
0.6892386
0
Returns the stats for a Facebook campaign by adgroup.
def get_stats_by_adgroup( self, account_id, adgroup_ids=None, batch=False, start_time=None, end_time=None): args = {} if adgroup_ids is not None: args['adgroup_ids'] = json.dumps(adgroup_ids) if start_time: args['start_time'] = self.__parse_time(start_time) if end_time: args['end_time'] = self.__parse_time(end_time) path = 'act_%s/adgroupstats' % account_id return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_conversion_stats_by_adgroup(self, account_id, adgroup_ids=None,\n include_deleted=False, batch=False):\n path = 'act_%s/adgroupconversions' % account_id\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n return self.make_request(path, 'GET', args, batch=batch)", "def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data", "def get_keyword_stats(self, 
adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def get_conversion_stats(self, adgroup_id, batch=False):\n path = '%s/conversions' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def getGrpStats(group):\n return {'min': group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean(), 'sum':group.sum()}", "def get_stats_by_adcampaign(self, account_id, campaign_ids=None,\n batch=False, start_time=None, end_time=None):\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adcampaignstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def get_adcampaign_detail(self, account_id, campaign_id, date_preset):\n campaign_fields = [\n 'name', 'campaign_status', 'daily_budget', 'lifetime_budget',\n 'start_time', 'end_time']\n campaign_data_columns = [\n 'campaign_name', 'reach', 'frequency', 'clicks',\n 'actions', 'total_actions', 'ctr', 'spend']\n adgroup_data_columns = [\n 'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',\n 'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',\n 'spend']\n demographic_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']\n placement_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']\n campaign_filters = [{\n 'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaign(campaign_id, campaign_fields, batch=True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', campaign_data_columns,\n campaign_filters, ['action_type'], True),\n self.get_adreport_stats(\n account_id, date_preset, 1, campaign_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', adgroup_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', demographic_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', placement_data_columns,\n campaign_filters, None, True),\n ]\n return self.make_batch_request(batch)", "def get_email_statistic_by_campaigns(self, email):\n logger.info(\"Function call: get_email_statistic_by_campaigns for '{}'\".format(email, ))\n return self.__handle_error('Empty email') if not email else 
self.__handle_result(self.__send_request('emails/{}/campaigns'.format(email, )))", "def get_conversion_stats_by_adcampaign(\n self, account_id, campaign_ids=None, include_deleted=False,\n start_time=None, end_time=None, aggregate_days=None,\n by_impression_time=True, batch=False):\n path = 'act_%s/adcampaignconversions' % account_id\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n if start_time is not None:\n args['start_time'] = start_time\n if end_time is not None:\n args['end_time'] = end_time\n if aggregate_days is not None:\n args['aggregate_days'] = aggregate_days\n if not by_impression_time:\n args['by_impression_time'] = 'false'\n return self.make_request(path, 'GET', args, batch=batch)", "def updateAdGroups(self):\n adgroup_ids = self.getAdGroupIds()\n for adgroup_id in adgroup_ids:\n message = \"\"\n df = self.getAdvertPerformanceDf(adgroup_id)\n\n ad_count = df.shape[0]\n\n if ad_count == 0:\n self.writeToDatabase(0, self.getPriority(0, False), \"no_ads\", adgroup_id)\n continue\n\n eta_ad_count = df[df.ad_type == 'Expanded text ad'].shape[0]\n\n has_winners = \"winning\" in df.ctr_message.values or \"winning\" in df.conversion_rate_message.values\n\n def getMessage():\n if eta_ad_count == 0:\n return \"no_expanded_text_ads\" # we can only show ad groups which at least 1 ETA (for the placeholder text)\n\n if has_winners:\n return \"has_winners\"\n\n if ad_count < 2:\n return \"too_few_ads\"\n self.writeToDatabase(ad_count, self.getPriority(df.shape[0], has_winners), getMessage(), adgroup_id)", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def getAdvertPerformanceDf(self, adgroup_id):\n query = \"\"\"\n SELECT advert_performance.advert_id,adverts.ad_type,adverts.adgroup_id,advert_performance.clicks, advert_performance.conversion_rate_message,advert_performance.ctr_message\n FROM advert_performance\n join adverts\n on adverts.id = advert_performance.advert_id\n where advert_performance.account_id = '%s'\n and 
advert_performance.date_range = '%s' \n and adgroup_id = '%s'\n and adverts.status = 'enabled'\n \"\"\" % (self.account_id, self.date_range, adgroup_id)\n\n # need to update this (and all) queries to match the date_range we've set\n return pd.read_sql_query(query, Database().createEngine())", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def get_adgroup(self, adgroup_id, fields=None, batch=False):\n path = '%s' % adgroup_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_stats_by_adaccount(self, account_id, batch=False, start_time=None, end_time=None):\n args = {}\n start_time = start_time or 0\n path = 'act_{0}/stats/{1}'.format(account_id, self.__parse_time(start_time))\n if end_time:\n path = path + '/{0}'.format(self.__parse_time(end_time))\n return iterate_by_page(self.make_request(path, 'GET', args, batch=batch))", "def get_conversion_stats_by_adaccount(self, account_id, batch=False):\n path = 'act_%s/conversions' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def parse_groups(self):\n\n data = []\n ads_by_data = []\n for date_time, group in self.groups:\n for ad in group:\n ads_by_data.append({\"ad\": ad})\n date_key = self.date_to_string(date_time)\n data.append({date_key: ads_by_data})\n ads_by_data = []\n\n return data", "def get_table_stats(campaign_id, username):\n # participantids for this campaign\n campaign_user_names = []\n # We are querrying all the users who participate in the campaign\n contribs_for_campaign = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_contribution in contribs_for_campaign:\n campaign_user_names.append(campaign_contribution.username)\n # we get the unique ids so as not to count an id twice\n campaign_user_names_set = set(campaign_user_names)\n # We then re-initialize the ids array\n campaign_user_names = []\n # We now obtain the ranking for all the users in the system and their files improved\n all_camapaign_users_list = []\n # We iterate the individual participants id in a campaign and get the user info\n for user_name in campaign_user_names_set:\n user = User.query.filter_by(username=user_name).first()\n all_camapaign_users_list.append(user)\n \n # We get the users and their contribution data\n all_contributors_data = get_all_users_contribution_data_per_campaign(all_camapaign_users_list, campaign_id)\n current_user_rank = get_user_ranking(all_contributors_data, username)\n\n # We add rank to all contributor's data\n for user_data in all_contributors_data:\n user_data['rank'] = get_user_ranking(all_contributors_data, user_data['username'])\n\n # We get all the campaign coountry sorted data\n all_campaign_country_statistics_data = get_campaign_country_data(campaign_id)\n\n campaign_table_stats = {}\n campaign_table_stats['all_contributors_data'] = all_contributors_data\n campaign_table_stats['all_campaign_country_statistics_data'] = all_campaign_country_statistics_data\n campaign_table_stats['current_user_rank'] = current_user_rank\n campaign_table_stats['campaign_editors'] = len(campaign_user_names_set)\n return campaign_table_stats", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 
'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns", "def ListGroupStats(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_batch_stats(self, batch):\n\t\t\n\t\treturn self.batch_stats[batch]" ]
[ "0.6947843", "0.6364444", "0.62626386", "0.60540676", "0.59857357", "0.59130126", "0.5794092", "0.57710516", "0.57229096", "0.56759286", "0.5656072", "0.5655446", "0.5593035", "0.5501304", "0.5412198", "0.53732365", "0.53527164", "0.534602", "0.53379744", "0.53250647", "0.5315681", "0.53025043", "0.5249177", "0.5248525", "0.52412486", "0.5235472", "0.5213301", "0.5203855", "0.5201416", "0.51610875" ]
0.651328
1
Returns the ad report stats for the given account.
def get_adreport_stats(self, account_id, date_preset, time_increment, data_columns, filters=None, actions_group_by=None, batch=False): path = 'act_%s/reportstats' % account_id args = { 'date_preset': date_preset, 'time_increment': time_increment, 'data_columns': json.dumps(data_columns), } if filters is not None: args['filters'] = json.dumps(filters) if actions_group_by is not None: args['actions_group_by'] = actions_group_by return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stats_by_adaccount(self, account_id, batch=False, start_time=None, end_time=None):\n args = {}\n start_time = start_time or 0\n path = 'act_{0}/stats/{1}'.format(account_id, self.__parse_time(start_time))\n if end_time:\n path = path + '/{0}'.format(self.__parse_time(end_time))\n return iterate_by_page(self.make_request(path, 'GET', args, batch=batch))", "def get_conversion_stats_by_adaccount(self, account_id, batch=False):\n path = 'act_%s/conversions' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def get_statistics(self):\n url = \"https://api.imgur.com/3/account/{0}/stats\".format(self.name)\n return self._imgur._send_request(url, needs_auth=True)", "def get_adreport_stats2(self, account_id, data_columns, date_preset=None,\n date_start=None, date_end=None,\n time_increment=None, actions_group_by=None,\n filters=None, async=False, batch=False, offset=None,\n sort_by=None, sort_dir=None, summary=None,\n limit=None):\n if date_preset is None and date_start is None and date_end is None:\n raise AdsAPIError(\"Either a date_preset or a date_start/end \\\n must be set when requesting a stats info.\")\n path = 'act_%s/reportstats' % account_id\n args = {\n 'data_columns': json.dumps(data_columns),\n }\n if date_preset:\n args['date_preset'] = date_preset\n if offset:\n args['offset'] = offset\n if date_start and date_end:\n args['time_interval'] = \\\n self.get_time_interval(date_start, date_end)\n if time_increment:\n args['time_increment'] = time_increment\n if filters:\n args['filters'] = json.dumps(filters)\n if actions_group_by:\n args['actions_group_by'] = json.dumps(actions_group_by)\n if sort_by:\n args['sort_by'] = sort_by\n if sort_dir:\n args['sort_dir'] = sort_dir\n if summary is not None:\n args['summary'] = summary\n if limit:\n args['limit'] = limit\n if async:\n args['async'] = 'true'\n return self.make_request(path, 'POST', args=args, batch=batch)\n return self.make_request(path, 'GET', args=args, batch=batch)", "def get_stats_by_adcampaign(self, account_id, campaign_ids=None,\n batch=False, start_time=None, end_time=None):\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adcampaignstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_ad_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get ad data for account {}'.format(ad_account['account_id']))\n ads = ad_account.get_ads(\n fields=['id',\n 'name',\n 'adset_id',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for ad in ads:\n result[ad['id']] = {'name': ad['name'],\n 'ad_set_id': ad['adset_id'],\n 'attributes': parse_labels(ad.get('adlabels', []))}\n return result", "def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)", "def stats(self):\n url = client.build_url('stats')\n _, res_json = client.get(url, headers=self.headers)\n\n return res_json", "def get_stats(self):\n return self.manager.get_stats(self)", "def get_stats(self):\n return self.stats", "def get_agents_stats(self, account_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents/stats', filters)", "def get_stats(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/stats\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Update object\n 
self.stats = _response.json()", "def get_report(analytics):\n return analytics.reports().batchGet(\n body={\n 'reportRequests': [\n {\n 'viewId': VIEW_ID,\n 'pageSize': 1000000,\n 'dateRanges': [{'startDate': startDate, 'endDate': endDate}],\n 'metrics': metrics_dimensions_list[i][0],\n 'dimensions': metrics_dimensions_list[i][1],\n 'includeEmptyRows': 'true'\n } for i in range(len(metrics_dimensions_list))\n ]\n }\n ).execute()", "def getReportMetrics(self):\n return self.__unwrapResults().reportMetrics", "def GetStats(self):\r\n\t\tArg1 = self.href\r\n\t\treturn self._execute('GetStats', payload=locals(), response_object=None)", "def get_report(analytics):\n logging.info(\"Fetching reports from google analytics\")\n\n return analytics.reports().batchGet(\n body={\n 'reportRequests': [\n {\n 'viewId': VIEW_ID,\n 'dateRanges': [{'startDate': '30daysAgo', 'endDate': 'today'}],\n 'metrics': [{'expression': 'ga:pageviews'}, {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'}, {'expression': 'ga:bounces'}, {'expression': 'ga:entrances'}, {'expression': 'ga:exits'}],\n 'dimensions': [{'name': 'ga:pagePath'}],\n 'orderBys': [{'fieldName': 'ga:pageviews', 'sortOrder': 'DESCENDING'}],\n 'pageSize': MAX_SIZE\n }]\n }\n ).execute()", "def get_stats(cls, contract_month_start_day=1):\n # I could do this in SQL with date_trunc, but eventually this'll need\n # to be contract-month, so like the 7th-7th or something, which AFAIK\n # can't be done in SQL (and certainly not in Django). So just do this\n # by hand. There are only a few hundred reports/month right now, so this\n # should be OK.\n stats = {}\n\n reports = cls.objects.filter(days_until_triage__isnull=False)\n for report in reports:\n first_day, last_day = dates.contract_month(report.created_at, contract_month_start_day)\n if first_day not in stats:\n stats[first_day] = {\n 'count': 0,\n 'triaged_accurately': 0,\n 'false_negatives': 0,\n 'triaged_within_one_day': 0,\n 'last_day': last_day,\n\n }\n\n stats[first_day]['count'] += 1\n stats[first_day]['triaged_accurately'] += report.is_accurate\n stats[first_day]['false_negatives'] += report.is_false_negative\n if report.days_until_triage <= 1:\n stats[first_day]['triaged_within_one_day'] += 1\n\n stats[\"totals\"] = {\n key: sum(month_stats[key] for month_stats in stats.values()) if stats else 0\n for key in ('count', 'triaged_accurately', 'false_negatives', 'triaged_within_one_day')\n }\n\n return stats", "def get_report(analytics):\n # https://developers.google.com/analytics/devguides/reporting/core/v4/rest/\n # https://analyticsreporting.googleapis.com/$discovery/rest?version=v4\n # https://developers.google.com/analytics/devguides/reporting/core/v4/basics\n return analytics.reports().batchGet(\n prettyPrint=True,\n body={\n 'reportRequests': [\n dict(\n viewId=PROJECT_VIEW_ID,\n dateRanges=[\n {\n 'startDate': start_date,\n 'endDate': 'yesterday',\n },\n ],\n metrics=[\n {'expression': 'ga:users'}, # ,\n # {'expression': 'ga:newUsers'},\n ], dimensions=[\n {'name': 'ga:date'},\n ], orderBys=[\n {'fieldName': 'ga:date', 'sortOrder': 'DESCENDING'},\n ], pageSize=max_results,\n ),\n ],\n },\n ).execute()", "def get_account_summary(self):\r\n return self.get_object('GetAccountSummary', {}, SummaryMap)", "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n 
\n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data", "def get_stats(self):\n return scales.getStats()[self.stats_name]", "def get_report(analytics):\n \n #define what window of time you are most interested in\n today = datetime.date.today()\n #im only getting one day in the past\n timeDelta = today - datetime.timedelta(days=1)\n print \"Checking dates \",timeDelta,\" to \",today\n\n #where to get dimensions \n #https://developers.google.com/analytics/devguides/reporting/core/dimsmets\n return analytics.reports().batchGet(\n body={\n 'reportRequests': [\n {\n 'viewId': VIEW_ID,\n 'dateRanges': [{'startDate': str(timeDelta), 'endDate': str(today)}],\n 'metrics': [{'expression': 'ga:sessions'},\n {'expression':'ga:pageviews'},\n {'expression':'ga:avgSessionDuration'}],\n #'dimensions': [{'name': 'ga:country'}]\n }]\n }\n ).execute()", "def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info", "def get_stats() -> dict:\n\n url = f\"{CONFIG.POSTGREST}/app_about_stats\"\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n except (requests.ConnectionError, requests.exceptions.HTTPError) as e:\n APP.logger.error(f'API request for db stats returned: {e}')\n else:\n results = json.loads(response.text)\n # APP.logger.debug(results)\n return results", "def get_account_ad_performance_for_single_day(ad_account: adaccount.AdAccount,\n single_date: datetime) -> adsinsights.AdsInsights:\n logging.info('download Facebook ad performance of act_{ad_account_id} on {single_date}'.format(\n ad_account_id=ad_account['account_id'],\n single_date=single_date.strftime('%Y-%m-%d')))\n\n ad_insights = ad_account.get_insights(\n # https://developers.facebook.com/docs/marketing-api/insights/fields\n fields=['date_start',\n 'ad_id',\n 'impressions',\n 'actions',\n 'spend',\n 'action_values'],\n # https://developers.facebook.com/docs/marketing-api/insights/parameters\n params={'action_attribution_windows': ['28d_click'],\n # https://developers.facebook.com/docs/marketing-api/insights/action-breakdowns\n 'action_breakdowns': ['action_type'],\n # https://developers.facebook.com/docs/marketing-api/insights/breakdowns\n 'breakdowns': ['impression_device'],\n 'level': 'ad',\n 'limit': 1000,\n 'time_range': {'since': 
single_date.strftime('%Y-%m-%d'),\n 'until': single_date.strftime('%Y-%m-%d')},\n # By default only ACTIVE campaigns get considered.\n 'filtering': [{\n 'field': 'ad.effective_status',\n 'operator': 'IN',\n 'value': ['ACTIVE',\n 'PAUSED',\n 'PENDING_REVIEW',\n 'DISAPPROVED',\n 'PREAPPROVED',\n 'PENDING_BILLING_INFO',\n 'CAMPAIGN_PAUSED',\n 'ARCHIVED',\n 'ADSET_PAUSED']}]})\n\n return ad_insights", "def scanstats(self):\n assert 'masscan' in self._scan_result, 'Do a scan before trying to get result !'\n assert 'scanstats' in self._scan_result['masscan'], 'Do a scan before trying to get result !'\n\n return self._scan_result['masscan']['scanstats']", "def stats(self, **kwargs):\n return stats.stats(self._host, self._session, **kwargs)", "def get_profile_stats():\n return p_stats", "def stats(self) -> Dict:\n return self._stats", "def get_spend_by_account_custom_daterange(self, account_id, start_date, end_date):\n try:\n account = Client.objects.get(id=account_id)\n except Client.DoesNotExist:\n return\n\n spend_sum = 0\n adwords_accounts = account.adwords.all()\n for adwords_account in adwords_accounts:\n client = get_client()\n client.client_customer_id = adwords_account.dependent_account_id\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n ],\n 'dateRange': {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n }\n\n try:\n campaign_exclusion = CampaignExclusions.objects.get(account=account)\n excluded_campaign_ids = [campaign.campaign_id for campaign in campaign_exclusion.aw_campaigns.all()]\n if len(excluded_campaign_ids) > 0:\n campaign_report_selector['predicates'].append({\n 'field': 'CampaignId',\n 'operator': 'NOT_IN',\n 'values': excluded_campaign_ids\n })\n except CampaignExclusions.DoesNotExist:\n pass\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n # This is the cost for this timerange\n cost = int(campaign_row['cost']) / 1000000\n spend_sum += cost\n\n return spend_sum" ]
[ "0.7248535", "0.6493395", "0.6449132", "0.64028156", "0.59609824", "0.59464306", "0.593626", "0.5906503", "0.5861286", "0.5833227", "0.5813863", "0.58119303", "0.58100086", "0.58036923", "0.5796423", "0.57952833", "0.5760015", "0.57439005", "0.5737804", "0.56976837", "0.5684599", "0.56789017", "0.5673046", "0.5666566", "0.5647232", "0.56013155", "0.55898213", "0.5582044", "0.5578135", "0.55775213" ]
0.6803735
1
Returns completed result of the given async job
def get_async_job_result(self, account_id, job_id, batch=False): path = 'act_%s/reportstats' % account_id args = { 'report_run_id': job_id } return self.make_request(path, 'GET', args=args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def my_job_async(mustfail):\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'job well done',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }", "def get_result(self, timeout):\n\n backend = self.parallel._backend\n\n if backend.supports_retrieve_callback:\n # We assume that the result has already been retrieved by the\n # callback thread, and is stored internally. It's just waiting to\n # be returned.\n return self._return_or_raise()\n\n # For other backends, the main thread needs to run the retrieval step.\n try:\n if backend.supports_timeout:\n result = self.job.get(timeout=timeout)\n else:\n result = self.job.get()\n outcome = dict(result=result, status=TASK_DONE)\n except BaseException as e:\n outcome = dict(result=e, status=TASK_ERROR)\n self._register_outcome(outcome)\n\n return self._return_or_raise()", "def get_result(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetResult', self.handle))", "def get_async_response(job_uuid, request, xform, count=0):\n\n def _get_response():\n export = get_object_or_404(Export, task_id=job_uuid)\n return export_async_export_response(request, export)\n\n try:\n job = AsyncResult(job_uuid)\n if job.state == \"SUCCESS\":\n resp = _get_response()\n else:\n resp = async_status(celery_state_to_status(job.state))\n\n # append task result to the response\n if job.result:\n result = job.result\n if isinstance(result, dict):\n resp.update(result)\n else:\n resp.update({\"progress\": str(result)})\n except (OperationalError, ConnectionError) as e:\n report_exception(\"Connection Error\", e, sys.exc_info())\n if count > 0:\n raise ServiceUnavailable from e\n\n return get_async_response(job_uuid, request, xform, count + 1)\n except BacklogLimitExceeded:\n # most likely still processing\n resp = async_status(celery_state_to_status(\"PENDING\"))\n\n return resp", "def view_result(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n if job.successful():\n result = job.result\n return jsonify({'job_id': job_id, 'result': job.result})\n else:\n result = 'job was not finished or was not successful'\n return jsonify({'job_id': job_id, 'result': result})", "def wait_for_job_complete(self, job):\n res, tasks = None, None\n if job['status'].lower() == SUCCEEDED:\n try:\n res, tasks = job['result'], job['task']\n except KeyError:\n pass\n return 0, res, job['status'], tasks\n\n def _wait_for_job_complete():\n \"\"\"Called at an interval until the job is finished.\"\"\"\n retries = kwargs['retries']\n try:\n kwargs['retries'] = retries + 1\n if not kwargs['wait_for_job_called']:\n is_complete, result, rc, status, task = (\n self._is_job_finished(job_id))\n if is_complete is True:\n kwargs['wait_for_job_called'] = True\n kwargs['rc'], kwargs['status'] = rc, status\n kwargs['result'], kwargs['task'] = result, task\n else:\n kwargs['status'], kwargs['task'] = status, task\n except Exception as error:\n exception_message = 'Issue encountered waiting for job.'\n LOG.exception(exception_message)\n raise exception.VolumeBackendAPIException(\n data=exception_message) from error\n\n return kwargs\n\n job_id = job['jobId']\n kwargs = {'retries': 0, 'wait_for_job_called': False,\n 'rc': 0, 'result': None}\n\n while not kwargs['wait_for_job_called']:\n time.sleep(self.interval)\n kwargs = _wait_for_job_complete()\n if kwargs['retries'] > self.retries:\n LOG.error('_wait_for_job_complete failed after {cnt} '\n 'tries.'.format(cnt=kwargs['retries']))\n kwargs['rc'], kwargs['result'] = -1, 
kwargs['result']\n break\n\n LOG.debug('Return code is: {rc}. Result is {res}.'.format(\n rc=kwargs['rc'], res=kwargs['result']))\n return (kwargs['rc'], kwargs['result'],\n kwargs['status'], kwargs['task'])", "async def get_task_result(task_id: TaskId):", "def get(self):\n if not self.finished():\n self.wait()\n return self._result", "def result(self):\n with self.__lock:\n assert(self.__complete)\n return self.__result", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def get_async_job_status(self, job_id, batch=False):\n path = '%s' % job_id\n return self.make_request(path, 'GET', batch=batch)", "def _retrieve_result(self, out):\n try:\n result = self.parallel._backend.retrieve_result_callback(out)\n outcome = dict(status=TASK_DONE, result=result)\n except BaseException as e:\n # Avoid keeping references to parallel in the error.\n e.__traceback__ = None\n outcome = dict(result=e, status=TASK_ERROR)\n\n self._register_outcome(outcome)\n return outcome['status'] != TASK_ERROR", "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "def result():\n # Retrieve JSON parameters data.\n data = request.get_json() or {}\n data.update(dict(request.values))\n tid = data.get(\"tid\")\n if not tid:\n raise abort(400, \"missing 'tid' data\")\n\n # Get the result (if exists and finished).\n result = tasks.process_message.AsyncResult(tid)\n # Return status and result if available.\n resp = {\n \"status\": result.status,\n \"result\": None,\n }\n if result.ready():\n resp[\"result\"] = result.get()\n return resp", "async def result_info(self) -> Optional[JobResult]:\n v = await self._redis.get(result_key_prefix + self.job_id)\n if v:\n return deserialize_result(v, deserializer=self._deserializer)\n else:\n return None", "def result(self, job):\n\n assert isinstance(job, six.string_types)\n\n try:\n response = requests.get('{}/api/v1/result/{}'.format(self.URL, job))\n except (Timeout, ConnectionError):\n raise ServiceError('Service unavailable: timeout.', 4)\n\n result = self._validate(response)\n data = result.get('state')\n state = State.from_dict(data) if data else None\n\n if state is not None:\n self.__previous_job = self.__current_job\n self.__current_job = None\n\n return result.get('status'), state", "def exec_job(self, job: Job, eval_args: Tuple[Tuple, dict]) -> Promise:\n self.events_queue.put(lambda: self._exec_job(job, eval_args))\n return job.result_promise", "def result(self):\n assert(self.__complete)\n return self.__result", "def get_job_output(job_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobResult]:\n ...", "def wait_for_job(self, operation, status_code, job):\n task = None\n if status_code == STATUS_202:\n rc, result, status, task = self.wait_for_job_complete(job)\n if rc != 0:\n exception_message = (\n 'Error {op}. Status code: {sc}. Error: {err}. 
'\n 'Status: {st}.'.format(\n op=operation, sc=rc, err=six.text_type(result),\n st=status))\n LOG.error(exception_message)\n raise exception.VolumeBackendAPIException(\n data=exception_message)\n return task", "async def my_job_async(my_job_manager):\n\n @my_job_manager.job_manager_class.job()\n async def my_job_async(mustfail):\n \"\"\"Async uncancelable job function.\"\"\"\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'job well done',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }\n\n my_job_manager.job_orig = my_job_async\n my_job_manager.job = channels.db.database_sync_to_async(my_job_async)\n return my_job_manager", "def get(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n if state == states.STARTED:\n return { 'id':result_task.task_id, 'status': state }, 200\n # task still pending or unknown\n elif state == states.PENDING:\n return { 'id':result_task.task_id, 'status': state }, 200\n elif state == states.SUCCESS:\n return { 'id':result_task.task_id, 'status': state }, 303, {'Location': api.url_for(MathJobResult,id=result_task.task_id)}\n else:\n return error(result_task)", "def complete_job(self, job, token, status, error, res, context=None):\n return self._client.call_method(\n 'UserAndJobState.complete_job',\n [job, token, status, error, res], self._service_ver, context)", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "async def get_result(request):\n job_id = request.match_info['job_id']\n r = redis.Redis(\n host=os.environ['REDIS_HOST'],\n port=6379,\n decode_responses=True,\n )\n if not r.exists(job_id):\n return web.HTTPNotFound(text='Results are unavailable.')\n output_id = r.get(job_id)\n filename = output_id + '.json'\n try:\n with open(os.path.join(CACHE_DIR, filename), 'r') as f:\n response = json.load(f)\n except FileNotFoundError:\n # Redis is out-of-sync with file system. 
Remove the offending key.\n r.delete(job_id)\n return web.HTTPNotFound(text='Results are unavailable.')\n return web.json_response(response, dumps=functools.partial(json.dumps, indent=4))", "def get_result(self, wait=-1):\n\n if not self.is_done():\n\n if wait >= 0:\n self.thread.join(wait)\n\n else:\n raise Asynchronous.NotYetDoneException(\n 'the call has not yet completed its task'\n )\n\n if self.result is None:\n self.result = self.queue.get()\n\n return self.result", "async def test_job_async_done(my_job_async):\n\n # Set up callback to get notifications when job state changes.\n job = None\n job_update_counter = 0\n\n def on_job_update(_job):\n \"\"\"The callback updates `job` and `job_update_counter`.\"\"\"\n nonlocal job, job_update_counter\n job = _job\n job_update_counter += 1\n # Assert that the job is reported as cancelable only when it is\n # in the `PENDING` or `CANCELING` state.\n if job.state in ['PENDING', 'CANCELING']:\n assert job.is_cancelable, ('Job is not cancelable when it '\n 'must be cancelable!')\n else:\n assert not job.is_cancelable, ('Job is cancelable when it '\n 'must not be cancelable!')\n\n my_job_async.set_on_update(on_job_update)\n\n # Submit a job which must finish OK.\n new_job = await my_job_async.job(mustfail=False)\n\n # Check job is cancelable until it is started.\n assert new_job.is_cancelable, ('Job instance states that just submitter '\n 'job is not cancelable!')\n\n # Process ASGI messages and wait for the job to finish.\n await my_job_async.process_jobs()\n\n # Check job state when job is done.\n assert job.state == 'DONE', f'Finished job has wrong state `{job.state}`!'\n\n assert not job.is_cancelable, ('Job instance states that uncancelable '\n 'job can be canceled!')\n\n # Check that job update callback has been called three times:\n # 1. job is submitted\n # 2. job switches to the working state\n # 3. 
job finishes\n assert job_update_counter == 3, 'Incorrect number of job updates detected!'", "def _get_job_results(query=None):\n if not query:\n raise CommandExecutionError(\"Query parameters cannot be empty.\")\n\n response = __proxy__[\"panos.call\"](query)\n\n # If the response contains a job, we will wait for the results\n if \"result\" in response and \"job\" in response[\"result\"]:\n jid = response[\"result\"][\"job\"]\n\n while get_job(jid)[\"result\"][\"job\"][\"status\"] != \"FIN\":\n time.sleep(5)\n\n return get_job(jid)\n else:\n return response", "def _return_result(self, done):\n chain_future(done, self._running_future)\n\n self.current_future = done\n self.current_index = self._unfinished.pop(done)", "def wait_until_complete(self):\n self._log.debug(\"waiting for upload job %s to complete\", self._job_id)\n xpath = ManoProject.prefix_project(\"D,/rw-image-mgmt:upload-jobs/\" +\n \"rw-image-mgmt:job[rw-image-mgmt:id={}]\".\n format(quoted_key(str(self._job_id))),\n project=self._project,\n log=self._log)\n\n while True:\n query_iter = yield from self._dts.query_read(xpath)\n job_status_msg = None\n for fut_resp in query_iter:\n job_status_msg = (yield from fut_resp).result\n break\n\n if job_status_msg is None:\n raise UploadJobError(\"did not get a status response for job_id: %s\",\n self._job_id)\n\n if job_status_msg.status == \"COMPLETED\":\n msg = \"upload job %s completed successfully\" % self._job_id\n self._log.debug(msg)\n return\n\n elif job_status_msg.status == \"FAILED\":\n msg = \"upload job %s as not successful: %s\" % (self._job_id, job_status_msg.status)\n self._log.error(msg)\n raise UploadJobFailed(msg)\n\n elif job_status_msg.status == \"CANCELLED\":\n msg = \"upload job %s was cancelled\" % self._job_id\n self._log.error(msg)\n raise UploadJobCancelled(msg)\n\n yield from asyncio.sleep(.5, loop=self._loop)" ]
[ "0.699001", "0.6720908", "0.668794", "0.6670894", "0.6610265", "0.64277935", "0.64068246", "0.6380871", "0.62620497", "0.6219399", "0.6183571", "0.61668086", "0.610857", "0.60896164", "0.60810596", "0.60508716", "0.6036827", "0.6029165", "0.6024251", "0.60072297", "0.6003556", "0.6003257", "0.5994473", "0.593562", "0.5890696", "0.5882811", "0.58570886", "0.58509296", "0.5834833", "0.58180004" ]
0.6900781
1
Returns the aggregated conversion stats for the given ad account.
def get_conversion_stats_by_adaccount(self, account_id, batch=False): path = 'act_%s/conversions' % account_id return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_conversion_stats(self, adgroup_id, batch=False):\n path = '%s/conversions' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def get_conversion_stats_by_adgroup(self, account_id, adgroup_ids=None,\n include_deleted=False, batch=False):\n path = 'act_%s/adgroupconversions' % account_id\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n return self.make_request(path, 'GET', args, batch=batch)", "def get_conversion_stats_by_adcampaign(\n self, account_id, campaign_ids=None, include_deleted=False,\n start_time=None, end_time=None, aggregate_days=None,\n by_impression_time=True, batch=False):\n path = 'act_%s/adcampaignconversions' % account_id\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n if start_time is not None:\n args['start_time'] = start_time\n if end_time is not None:\n args['end_time'] = end_time\n if aggregate_days is not None:\n args['aggregate_days'] = aggregate_days\n if not by_impression_time:\n args['by_impression_time'] = 'false'\n return self.make_request(path, 'GET', args, batch=batch)", "def get_stats_by_adaccount(self, account_id, batch=False, start_time=None, end_time=None):\n args = {}\n start_time = start_time or 0\n path = 'act_{0}/stats/{1}'.format(account_id, self.__parse_time(start_time))\n if end_time:\n path = path + '/{0}'.format(self.__parse_time(end_time))\n return iterate_by_page(self.make_request(path, 'GET', args, batch=batch))", "def get_ad_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get ad data for account {}'.format(ad_account['account_id']))\n ads = ad_account.get_ads(\n fields=['id',\n 'name',\n 'adset_id',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for ad in ads:\n result[ad['id']] = {'name': ad['name'],\n 'ad_set_id': ad['adset_id'],\n 'attributes': parse_labels(ad.get('adlabels', []))}\n return result", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. 
\n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data", "def get_account_ad_performance_for_single_day(ad_account: adaccount.AdAccount,\n single_date: datetime) -> adsinsights.AdsInsights:\n logging.info('download Facebook ad performance of act_{ad_account_id} on {single_date}'.format(\n ad_account_id=ad_account['account_id'],\n single_date=single_date.strftime('%Y-%m-%d')))\n\n ad_insights = ad_account.get_insights(\n # https://developers.facebook.com/docs/marketing-api/insights/fields\n fields=['date_start',\n 'ad_id',\n 'impressions',\n 'actions',\n 'spend',\n 'action_values'],\n # https://developers.facebook.com/docs/marketing-api/insights/parameters\n params={'action_attribution_windows': ['28d_click'],\n # https://developers.facebook.com/docs/marketing-api/insights/action-breakdowns\n 'action_breakdowns': ['action_type'],\n # https://developers.facebook.com/docs/marketing-api/insights/breakdowns\n 'breakdowns': ['impression_device'],\n 'level': 'ad',\n 'limit': 1000,\n 'time_range': {'since': single_date.strftime('%Y-%m-%d'),\n 'until': single_date.strftime('%Y-%m-%d')},\n # By default only ACTIVE campaigns get considered.\n 'filtering': [{\n 'field': 'ad.effective_status',\n 'operator': 'IN',\n 'value': ['ACTIVE',\n 'PAUSED',\n 'PENDING_REVIEW',\n 'DISAPPROVED',\n 'PREAPPROVED',\n 'PENDING_BILLING_INFO',\n 'CAMPAIGN_PAUSED',\n 'ARCHIVED',\n 'ADSET_PAUSED']}]})\n\n return ad_insights", "def get_statistics(self):\n url = \"https://api.imgur.com/3/account/{0}/stats\".format(self.name)\n return self._imgur._send_request(url, needs_auth=True)", "def get_stats_by_adcampaign(self, account_id, campaign_ids=None,\n batch=False, start_time=None, end_time=None):\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adcampaignstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adreport_stats(self, account_id, date_preset, time_increment,\n data_columns, filters=None, actions_group_by=None,\n batch=False):\n path = 'act_%s/reportstats' % account_id\n args = {\n 'date_preset': date_preset,\n 'time_increment': time_increment,\n 'data_columns': json.dumps(data_columns),\n }\n if filters is not None:\n args['filters'] = json.dumps(filters)\n if actions_group_by is not None:\n args['actions_group_by'] = actions_group_by\n return self.make_request(path, 'GET', args, batch=batch)", "def get_account_summary(self):\r\n return self.get_object('GetAccountSummary', {}, SummaryMap)", "def get_stats(self):\n return scales.getStats()[self.stats_name]", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def scanstats(self):\n assert 'masscan' in self._scan_result, 'Do a scan before trying to get result !'\n assert 'scanstats' in self._scan_result['masscan'], 'Do a scan before trying to get result !'\n\n return self._scan_result['masscan']['scanstats']", "def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % 
account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + \"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}", "def handle_chart_of_accounts(self, chart_of_accounts):\n chart_grouped = chart_of_accounts.groupby([\"account\"]).sum()\n return chart_grouped", "def get_stats(self):\n return self.stats", "def get_stats(cls, contract_month_start_day=1):\n # I could do this in SQL with date_trunc, but eventually this'll need\n # to be contract-month, so like the 7th-7th or something, which AFAIK\n # can't be done in SQL (and certainly not in Django). So just do this\n # by hand. There are only a few hundred reports/month right now, so this\n # should be OK.\n stats = {}\n\n reports = cls.objects.filter(days_until_triage__isnull=False)\n for report in reports:\n first_day, last_day = dates.contract_month(report.created_at, contract_month_start_day)\n if first_day not in stats:\n stats[first_day] = {\n 'count': 0,\n 'triaged_accurately': 0,\n 'false_negatives': 0,\n 'triaged_within_one_day': 0,\n 'last_day': last_day,\n\n }\n\n stats[first_day]['count'] += 1\n stats[first_day]['triaged_accurately'] += report.is_accurate\n stats[first_day]['false_negatives'] += report.is_false_negative\n if report.days_until_triage <= 1:\n stats[first_day]['triaged_within_one_day'] += 1\n\n stats[\"totals\"] = {\n key: sum(month_stats[key] for month_stats in stats.values()) if stats else 0\n for key in ('count', 'triaged_accurately', 'false_negatives', 'triaged_within_one_day')\n }\n\n return stats", "def sum_stats(stats_data):\n t_bounces = 0\n t_complaints = 0\n t_delivery_attempts = 0\n t_rejects = 0\n for dp in stats_data:\n t_bounces += int(dp['Bounces'])\n t_complaints += int(dp['Complaints'])\n t_delivery_attempts += int(dp['DeliveryAttempts'])\n t_rejects += int(dp['Rejects'])\n\n return {\n 'Bounces': t_bounces,\n 'Complaints': t_complaints,\n 'DeliveryAttempts': t_delivery_attempts,\n 'Rejects': t_rejects,\n }", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def get_stats(self):\n return self.manager.get_stats(self)", "def get_adreport_stats2(self, account_id, data_columns, date_preset=None,\n date_start=None, date_end=None,\n time_increment=None, actions_group_by=None,\n filters=None, async=False, batch=False, offset=None,\n sort_by=None, sort_dir=None, summary=None,\n limit=None):\n if date_preset is None and date_start is None and date_end is None:\n raise AdsAPIError(\"Either a date_preset or a date_start/end \\\n must be set when requesting a stats info.\")\n path = 'act_%s/reportstats' % account_id\n args = {\n 'data_columns': 
json.dumps(data_columns),\n }\n if date_preset:\n args['date_preset'] = date_preset\n if offset:\n args['offset'] = offset\n if date_start and date_end:\n args['time_interval'] = \\\n self.get_time_interval(date_start, date_end)\n if time_increment:\n args['time_increment'] = time_increment\n if filters:\n args['filters'] = json.dumps(filters)\n if actions_group_by:\n args['actions_group_by'] = json.dumps(actions_group_by)\n if sort_by:\n args['sort_by'] = sort_by\n if sort_dir:\n args['sort_dir'] = sort_dir\n if summary is not None:\n args['summary'] = summary\n if limit:\n args['limit'] = limit\n if async:\n args['async'] = 'true'\n return self.make_request(path, 'POST', args=args, batch=batch)\n return self.make_request(path, 'GET', args=args, batch=batch)", "def getStats(self):\n\n raise NotImplementedError", "def stats(self) -> Dict:\n return self._stats", "def get_account_balance(account):\n balance = 0\n\n for address in get_addresses_by_account(account):\n balance += get_address_balance(address)\n\n return float(balance)", "def get_spend_by_account_custom_daterange(self, account_id, start_date, end_date):\n try:\n account = Client.objects.get(id=account_id)\n except Client.DoesNotExist:\n return\n\n spend_sum = 0\n adwords_accounts = account.adwords.all()\n for adwords_account in adwords_accounts:\n client = get_client()\n client.client_customer_id = adwords_account.dependent_account_id\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n ],\n 'dateRange': {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n }\n\n try:\n campaign_exclusion = CampaignExclusions.objects.get(account=account)\n excluded_campaign_ids = [campaign.campaign_id for campaign in campaign_exclusion.aw_campaigns.all()]\n if len(excluded_campaign_ids) > 0:\n campaign_report_selector['predicates'].append({\n 'field': 'CampaignId',\n 'operator': 'NOT_IN',\n 'values': excluded_campaign_ids\n })\n except CampaignExclusions.DoesNotExist:\n pass\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n # This is the cost for this timerange\n cost = int(campaign_row['cost']) / 1000000\n spend_sum += cost\n\n return spend_sum", "def get_trading_stats(self):\r\n method = self.public_endpoints['trading_stats']['method']\r\n url = self.base_url + self.public_endpoints['trading_stats']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))" ]
[ "0.7043421", "0.6315266", "0.6203622", "0.60176706", "0.5397881", "0.5308747", "0.5242094", "0.52374476", "0.5203921", "0.5127736", "0.50927025", "0.5086971", "0.5065482", "0.5055341", "0.5028451", "0.49794018", "0.49695638", "0.4959153", "0.49582964", "0.49520278", "0.49387485", "0.48994023", "0.48861656", "0.486155", "0.4846963", "0.48439336", "0.48260844", "0.4806491", "0.47982413", "0.47981033" ]
0.8317373
0
Returns the conversion stats for a single ad group.
def get_conversion_stats(self, adgroup_id, batch=False): path = '%s/conversions' % adgroup_id return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_conversion_stats_by_adgroup(self, account_id, adgroup_ids=None,\n include_deleted=False, batch=False):\n path = 'act_%s/adgroupconversions' % account_id\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n return self.make_request(path, 'GET', args, batch=batch)", "def get_conversion_stats_by_adaccount(self, account_id, batch=False):\n path = 'act_%s/conversions' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def getGrpStats(group):\n return {'min': group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean(), 'sum':group.sum()}", "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def group1_stats(self):\n return self._group1_stats", "def stats(self) -> Sequence['outputs.GetSystemGroupsGroupStatResult']:\n return pulumi.get(self, \"stats\")", "def get_stats_by_adgroup(\n self, account_id, adgroup_ids=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adgroupstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def updateAdGroups(self):\n adgroup_ids = self.getAdGroupIds()\n for adgroup_id in adgroup_ids:\n message = \"\"\n df = self.getAdvertPerformanceDf(adgroup_id)\n\n ad_count = df.shape[0]\n\n if ad_count == 0:\n self.writeToDatabase(0, 
self.getPriority(0, False), \"no_ads\", adgroup_id)\n continue\n\n eta_ad_count = df[df.ad_type == 'Expanded text ad'].shape[0]\n\n has_winners = \"winning\" in df.ctr_message.values or \"winning\" in df.conversion_rate_message.values\n\n def getMessage():\n if eta_ad_count == 0:\n return \"no_expanded_text_ads\" # we can only show ad groups which at least 1 ETA (for the placeholder text)\n\n if has_winners:\n return \"has_winners\"\n\n if ad_count < 2:\n return \"too_few_ads\"\n self.writeToDatabase(ad_count, self.getPriority(df.shape[0], has_winners), getMessage(), adgroup_id)", "def get_stats(self):\n return scales.getStats()[self.stats_name]", "def metrics_group():", "def get_clan_aggregate_stats_get(self, groupId, modes):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Stats/AggregateClanStats/{groupId}/\"))", "def get_conversion_stats_by_adcampaign(\n self, account_id, campaign_ids=None, include_deleted=False,\n start_time=None, end_time=None, aggregate_days=None,\n by_impression_time=True, batch=False):\n path = 'act_%s/adcampaignconversions' % account_id\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n if start_time is not None:\n args['start_time'] = start_time\n if end_time is not None:\n args['end_time'] = end_time\n if aggregate_days is not None:\n args['aggregate_days'] = aggregate_days\n if not by_impression_time:\n args['by_impression_time'] = 'false'\n return self.make_request(path, 'GET', args, batch=batch)", "def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def get_batch_stats(self, batch):\n\t\t\n\t\treturn self.batch_stats[batch]", "def _adc_info_first_pass(\n self, adc_name: str, config_group: h5py.Group\n ) -> Tuple[Tuple[int, Tuple[int, ...], Dict[str, Any]], ...]:\n # 'Raw data + config/SIS 3301' group has only one possible\n # adc ('SIS 3301')\n # adc_info = (\n # int, # board number\n # (int, ...), # connected channel numbers\n # {'bit': 14, # bit resolution\n # 'clock rate': <Quantity 100.0 MHz>,\n # 'nshotnum': int,\n # 'shot average (software)': int,\n # 'sample average (hardware)': int})\n #\n # initialize\n adc_info = []\n\n # conns is a tuple of tuples where each tuple is a seed for the\n # elements of `adc_info`\n conns = self._find_adc_connections(adc_name, config_group)\n\n for conn in conns:\n # define 'bit' and 'clock rate'\n conn[2][\"bit\"] = 14\n conn[2][\"clock rate\"] = u.Quantity(100.0, unit=\"MHz\")\n\n # add 'shot average (software)' to dict\n if \"Shots to average\" in config_group.attrs:\n shtave = config_group.attrs[\"Shots to average\"]\n if shtave == 0 or shtave == 1:\n shtave = None\n else:\n shtave = None\n conn[2][\"shot average (software)\"] = shtave\n\n # add 'sample average (hardware)' to dict\n splave = None\n avestr = \"\"\n find_splave = False\n if \"Samples to average\" in config_group.attrs:\n avestr = config_group.attrs[\"Samples to average\"]\n avestr = _bytes_to_str(avestr)\n find_splave = True\n elif \"Unnamed\" in config_group.attrs:\n avestr = config_group.attrs[\"Unnamed\"]\n try:\n avestr = _bytes_to_str(avestr)\n find_splave = True\n except TypeError:\n avestr = \"\"\n find_splave = False\n\n if find_splave:\n if avestr != \"No averaging\":\n _match = re.fullmatch(\n 
r\"(\\bAverage\\s)(?P<NAME>.+)(\\sSamples\\b)\", avestr\n )\n if bool(_match):\n try:\n # splave = int(avestr.split()[1])\n splave = int(_match.group(\"NAME\"))\n\n if splave == 0 or splave == 1:\n splave = None\n except ValueError:\n warn(\n f\"Found sample averaging of '{_match.group('NAME')}' \"\n f\"but can not convert to int...using a value of \"\n f\"None instead\"\n )\n conn[2][\"sample average (hardware)\"] = splave\n\n # append info\n adc_info.append(conn)\n\n return tuple(adc_info)", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def get_stats(self):\n return self.stats", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def getStats(self):\n\n raise NotImplementedError", "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data", "def _process_stats_data(self, graph_rule, trans_stats_data):\n formatted_data = {}\n translation_data = {}\n if graph_rule:\n formatted_data['graph_rule'] = graph_rule\n if trans_stats_data.get('branch'):\n formatted_data['branch'] = trans_stats_data['branch']\n ticks = trans_stats_data.get('ticks')\n labels = {}\n [labels.update({index: val}) for index, val in ticks]\n graph_data = trans_stats_data.get('graph_data', {})\n for unit in graph_data:\n temp_stat = {}\n stat = unit.get('data', [])\n for index, val in stat:\n temp_stat.update({labels.get(index): val})\n translation_data[unit.get('label', 'label')] = temp_stat\n formatted_data['translation_stats'] = translation_data\n return formatted_data", "def ListGroupStats(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get(self, request):\n query = Stats()\n stats = query.get_format_stats()\n return Response(stats)", "def getAdvertPerformanceDf(self, adgroup_id):\n query = \"\"\"\n SELECT advert_performance.advert_id,adverts.ad_type,adverts.adgroup_id,advert_performance.clicks, advert_performance.conversion_rate_message,advert_performance.ctr_message\n FROM advert_performance\n join adverts\n on adverts.id = advert_performance.advert_id\n where advert_performance.account_id = '%s'\n and 
advert_performance.date_range = '%s' \n and adgroup_id = '%s'\n and adverts.status = 'enabled'\n \"\"\" % (self.account_id, self.date_range, adgroup_id)\n\n # need to update this (and all) queries to match the date_range we've set\n return pd.read_sql_query(query, Database().createEngine())", "def parse_groups(self):\n\n data = []\n ads_by_data = []\n for date_time, group in self.groups:\n for ad in group:\n ads_by_data.append({\"ad\": ad})\n date_key = self.date_to_string(date_time)\n data.append({date_key: ads_by_data})\n ads_by_data = []\n\n return data", "def get_stats(self, epg_dn):\n # Apic saves up to 95 different objects with statistic information\n traffic_list = []\n for i in range(10, -1, -1):\n traffic = self.moDir.lookupByDn(epg_dn + '/HDl2IngrBytesAg15min-%s' % str(i))\n if traffic is not None:\n traffic_list.append(traffic)\n return traffic_list", "def get_conversion_rate(self, newunit):\n if not self.unit or not self.unitgroup:\n logging.error(\"Metric %s can't be converted into %s unit. \"\n \"Please correct your config file.\" % (self.name,\n newunit))\n sys.exit(1)\n\n try:\n return self.unitgroup.get_conversion_rate(self.unit, newunit)\n except Exception as e:\n logging.error(\"Failed to convert metric %s into unit %s. \"\n \"Reason: %s. Please correct your config file.\"\n % (self.name, newunit, e.message))\n sys.exit(1)", "def GetAdGroupAd(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def stats(self):\n return self._stats" ]
[ "0.7126618", "0.6879782", "0.6557388", "0.58285874", "0.58243865", "0.581837", "0.57725024", "0.55744255", "0.5480726", "0.54243845", "0.5361287", "0.5352107", "0.5349627", "0.52375495", "0.52040744", "0.5131106", "0.5118021", "0.5033729", "0.50227857", "0.50193995", "0.50003475", "0.4957141", "0.49471247", "0.49207366", "0.49196035", "0.49187055", "0.49009883", "0.48835137", "0.48631382", "0.48517245" ]
0.82147694
0
Returns the remarketing pixel.
def get_remarketing_pixel(self, account_id, batch=False): logger.warn("This method is deprecated and is replaced with get_ads_pixels.") path = 'act_%s/remarketingpixelcode' % account_id return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_px(self):\n pass", "def getPixel(self):\r\n return self.__buffer[y][x]", "def pixel_ref(self):\n return self._pixel_ref", "def pixel_type(self):\n pass", "def gpix(self):\n return self._gpix", "def getimage(self):", "def getPixels(self):\n self._logger.debug(\"getPixels\")", "def getPixel (self, x, y):\r\n return self.image [y][x]", "def get_pixel_color(self, x, y):\n raise NotImplementedError # remove when we fix it. :)\n\n # do the Window import here because we don't want to import it at the\n # top or else we won't be able to set window properties\n from kivy.core.window import Window\n\n # convert the passed x/y to the actual x/y of the Window since it's\n # possible for the mpf-mc display size to be different than the Window\n # size\n x *= Window.width / Window.children[0].width\n y *= Window.height / Window.children[0].height\n\n return glReadPixels(x, y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE)", "def get_pixel(self, x, y):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n return self.get_led( y * 16 + x)\n else:\n return self.get_led((y-8) * 16 + (x+8))", "def getPixel(self, px, py):\n if not self.inBounds(px,py):\n return IColor()\n idx = py*self.w + px\n return self.data[idx]", "def pixel(self, x, y, color=None):\n if not 0 <= x <= 4:\n return None\n if not 0 <= y <= 6:\n return None\n return super()._pixel(x, y, color)", "def get_pixel(self, x,y):\n\t\tstructval=self.__gdal__.ReadRaster(px,py,1,1,buf_type=G.GDT_UInt16) #Assumes 16 bit int aka 'short'\n\t\treturn struct.unpack('h' , structval)[0]", "def retrieve_pixel(self, x, y, index):\n pass", "def getPixel(self,x,y):\n return color_to_rgb(self._image.get(x, y))", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def Name(cls) -> str:\n return 'pixel'", "def ppix(self):\n return self._ppix", "def get_reflect_marker(self):\r\n return Marker((255, 255, 255), self._screen)", "def getRoverImage(self):\n # Your code goes here, this code is just an example\n return 'mario.ppm'", "def epix(self):\n return self._epix", "def referencepixel(self, *args, **kwargs):\n return _coordsys.coordsys_referencepixel(self, *args, **kwargs)", "def get_display_px(self):\n return self.image.size", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 2\n lobyte, hibyte = framebuf.buf[index : index + 2]\n r = hibyte & 0xF8\n g = ((hibyte & 0x07) << 5) | ((lobyte & 0xE0) >> 5)\n b = (lobyte & 0x1F) << 3\n return (r << 16) | (g << 8) | b", "def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer", "def get_displacement_texture(self):\n return self.displacement_tex", "def get_pixel(self, x, y):\n assert self.valid_coordinates(x, y)\n return self.pixels[self.pixel_offset(x, y)]", "def getPixel(self,x,y) :\n # check the bounds to make sure we are in the correct area\n if x<0 or x>self.m_width :\n print \"error x out of bounds\\n\"\n return\n if y<0 or y>self.m_height :\n print \"error y our of bounds\\n\"\n return\n # now calculate the index into the 1D array of data\n index=(y*self.m_width*4)+x*4\n # grab the pixels\n red = self.getUcharArrayItem(self.charPixelPtr,index)\n green = self.getUcharArrayItem(self.charPixelPtr,index+1)\n blue = self.getUcharArrayItem(self.charPixelPtr,index+2)\n alpha=self.getUcharArrayItem(self.charPixelPtr,index+3)\n return (red,green,blue,alpha)", "def get_wximg(self, time_str):\n\n log(\"Getting radar imagery for {} at {}\".format(self._location, time_str))\n url = self.get_url(\n '/radar/IDR{}.T.{}.png'.format(self._radar_id, time_str)\n )\n 
return self.get_image(url)", "def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)" ]
[ "0.6252342", "0.5916868", "0.58560795", "0.5755901", "0.57525766", "0.57007796", "0.56927764", "0.5637961", "0.56269395", "0.5596106", "0.5575986", "0.5571929", "0.5563752", "0.55550534", "0.55192196", "0.5498717", "0.5465229", "0.5461728", "0.54475176", "0.5447481", "0.54397494", "0.5376221", "0.5369505", "0.5352325", "0.53478277", "0.52968186", "0.5294643", "0.5287409", "0.52741724", "0.52719253" ]
0.69339037
0
Returns the information for the given offsite pixel.
def get_offsite_pixel(self, offsite_pixel_id, batch=False): path = '%s' % offsite_pixel_id return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pixel(self, x, y):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n return self.get_led( y * 16 + x)\n else:\n return self.get_led((y-8) * 16 + (x+8))", "def retrieve_pixel(self, x, y, index):\n pass", "def get_pixel(self, x, y):\n assert self.valid_coordinates(x, y)\n return self.pixels[self.pixel_offset(x, y)]", "def get_pixel(image, x, y):\n x = in_bound(image[\"height\"], x)\n y = in_bound(image[\"width\"], y)\n \n return image['pixels'][ x * image[\"width\"] + y]", "def get_pixel(self, x,y):\n\t\tstructval=self.__gdal__.ReadRaster(px,py,1,1,buf_type=G.GDT_UInt16) #Assumes 16 bit int aka 'short'\n\t\treturn struct.unpack('h' , structval)[0]", "def get_pixel(framebuf, x, y):\n index = (y >> 3) * framebuf.stride + x\n offset = y & 0x07\n return (framebuf.buf[index] >> offset) & 0x01", "def getPixel (self, x, y):\r\n return self.image [y][x]", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) // 8\n offset = 7 - x & 0x07\n return (framebuf.buf[index] >> offset) & 0x01", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 3\n return (\n (framebuf.buf[index] << 16)\n | (framebuf.buf[index + 1] << 8)\n | framebuf.buf[index + 2]\n )", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) >> 2\n pixel = framebuf.buf[index]\n\n shift = (x & 0b11) << 1\n return (pixel >> shift) & 0b11", "def GetPixelPoint(*args, **kwargs):\n return _gdi_.DC_GetPixelPoint(*args, **kwargs)", "def pixel(self, x, y, color=None):\n if not 0 <= x <= 4:\n return None\n if not 0 <= y <= 6:\n return None\n return super()._pixel(x, y, color)", "def extract_pixels(img_):\n non_zero_pixels = np.argwhere(0 < img_)\n x = non_zero_pixels.T[0].astype(np.float32)\n y = non_zero_pixels.T[1].astype(np.float32)\n return x, y", "def getPixel(data,x,y):\n d0= data[y,x*2]\n \n if ( (d0[0]==255) and (d0[1]==127)):\n return [0.0,0.0,0.0]\n d1= data[y,x*2+1]\n test=_U()\n test.data=(c_ubyte * 6)(d0[0],d0[1],d0[2],d1[0],d1[1],d1[2])\n X=hex (test.DistXYZ.x)\n Y=hex (test.DistXYZ.y)\n Z=hex (test.DistXYZ.z)\n \n X=float(int(X,16)-int(\"0x7FFF\",16))/1000.0\n Y=float(int(Y,16)-int(\"0x7FFF\",16))/1000.0\n Z=float(int(Z,16)-int(\"0x7FFF\",16))/1000.0\n return [X,Y,Z]", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 2\n lobyte, hibyte = framebuf.buf[index : index + 2]\n r = hibyte & 0xF8\n g = ((hibyte & 0x07) << 5) | ((lobyte & 0xE0) >> 5)\n b = (lobyte & 0x1F) << 3\n return (r << 16) | (g << 8) | b", "def pixel_offset(self, x, y):\n return y * self.width + x", "def getPixel(self, px, py):\n if not self.inBounds(px,py):\n return IColor()\n idx = py*self.w + px\n return self.data[idx]", "def skyPixels(self,i, d,Feeds, selectFeature):\n\n # We store all the pointing information\n x = (d['level1/spectrometer/pixel_pointing/pixel_ra'][...])[Feeds[:,None],selectFeature]\n x = x[...,0:self.datasizes[i]].flatten()\n y = (d['level1/spectrometer/pixel_pointing/pixel_dec'][...])[Feeds[:,None],selectFeature]\n y = y[...,0:self.datasizes[i]].flatten()\n\n\n el = (d['level1/spectrometer/pixel_pointing/pixel_el'][...])[Feeds[:,None],selectFeature]\n el = el[...,0:self.datasizes[i]]\n\n\n pixels = self.getFlatPixels(x,y)\n pixels[pixels < 0] = -1\n pixels[pixels > self.naive.npix] = -1\n\n return pixels", "def getPixel(self):\r\n return self.__buffer[y][x]", "def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must 
be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def get_pixel_locations(self, pixels):\n if self._cached_point_cloud is None:\n self._cached_point_cloud = self.as_point_cloud()\n pixel_locations = [\n self._cached_point_cloud[pixel.y * self.camera_setup.width +\n pixel.x] for pixel in pixels\n ]\n return [\n pylot.utils.Location(loc[0], loc[1], loc[2])\n for loc in pixel_locations\n ]", "def retrieve_pixel_value(lon, lat, data_source):\n dataset = gdal.Open(data_source)\n\n gt = dataset.GetGeoTransform()\n the_band = dataset.GetRasterBand(1)\n px = int((lon - gt[0]) / gt[1]) # x pixel\n py = int((lat - gt[3]) / gt[5]) # y pixel\n\n buf = the_band.ReadRaster(px, py, 1, 1, buf_type=gdal.GDT_Int16)\n elev = struct.unpack(\"h\", buf)\n\n return elev[0]", "def pixelcoord(coordx: float, coordy: float) -> Tuple[int, int]:\n ox, oy = origin()\n x, y = int(round(ox+coordx)), int(round(oy-coordy))\n return (x, y)", "def get_pos_in_pixels(self):\n pixelpos = Vector(self.pos.x * 32, -self.pos.y * 32)\n return pixelpos + self.offset", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def getPixelColour(self, item, pixel):\n return item.get_at(pixel)", "def getpixels(self,x,y,dx,dy,Nx,Ny):\n \n Dx = (Nx*dx)\n Dy = (Ny*dy)\n\n # Not Nx + 1 to account for rounding\n pX = (x/dx + (Nx + 2)/2.).astype(int)\n pY = (y/dy + (Ny + 2)/2.).astype(int)\n pixels = pX + pY*Nx\n pixels[((pX < 0) | (pX >= Nx)) | ((pY < 0) | (pY >= Ny))] = -1\n\n # here we do use Nx + 1 as you want the precise float value of the pixel.\n return pixels,x/dx + (Nx + 1)/2., y/dx + (Nx + 1.)/2.", "def get_pixel(self, i, j):\n # Inside image bounds?\n if i > self.width or j > self.height:\n print(\"Pixel out of bounds\")\n return None\n\n # Get Pixel\n pixel = self.image.getpixel((i, j))\n return pixel", "def getPixels(self):\n self._logger.debug(\"getPixels\")" ]
[ "0.66749126", "0.6524832", "0.64080584", "0.6356776", "0.6308759", "0.6170481", "0.61681867", "0.6124925", "0.59940475", "0.5963207", "0.59393615", "0.59164435", "0.59158933", "0.5904558", "0.5904007", "0.5900166", "0.5849955", "0.5826816", "0.58201385", "0.58138734", "0.58109", "0.5778144", "0.576622", "0.57598144", "0.5753282", "0.5722767", "0.57111335", "0.5710013", "0.5708405", "0.5696739" ]
0.7272531
0
Returns the list of offsite pixels for the given account.
def get_offsite_pixels(self, account_id, batch=False): path = 'act_%s/offsitepixels' % account_id return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ads_pixels(self, account_id, fields=None, batch=False):\n path = 'act_%s/adspixels' % account_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def neighbour_pixels(x, y):\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y ), (x, y ), (x + 1, y ),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)\n ]", "def _build_list_of_excluded_pixels(self, exclude_zones):\n \n pixels = []\n for x, y, width, height in exclude_zones:\n for row in range(height):\n for col in range(width):\n pixels.append(Pixel(col + x, row + y))\n \n return pixels", "def get_offsite_pixel(self, offsite_pixel_id, batch=False):\n path = '%s' % offsite_pixel_id\n return self.make_request(path, 'GET', batch=batch)", "def get_pixel_locations(self, pixels):\n if self._cached_point_cloud is None:\n self._cached_point_cloud = self.as_point_cloud()\n pixel_locations = [\n self._cached_point_cloud[pixel.y * self.camera_setup.width +\n pixel.x] for pixel in pixels\n ]\n return [\n pylot.utils.Location(loc[0], loc[1], loc[2])\n for loc in pixel_locations\n ]", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours", "def getPixels(self):\n self._logger.debug(\"getPixels\")", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def get_pixel_indices(self, lats, lons):\n return self._hpx.get_pixel_indices(lats, lons)", "def get_remarketing_pixel(self, account_id, batch=False):\n logger.warn(\"This method is deprecated and is replaced with get_ads_pixels.\")\n path = 'act_%s/remarketingpixelcode' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def getpixels(self,x,y,dx,dy,Nx,Ny):\n \n Dx = (Nx*dx)\n Dy = (Ny*dy)\n\n # Not Nx + 1 to account for rounding\n pX = (x/dx + (Nx + 2)/2.).astype(int)\n pY = (y/dy + (Ny + 2)/2.).astype(int)\n pixels = pX + pY*Nx\n pixels[((pX < 0) | (pX >= Nx)) | ((pY < 0) | (pY >= Ny))] = -1\n\n # here we do use Nx + 1 as you want the precise float value of the pixel.\n return pixels,x/dx + (Nx + 1)/2., y/dx + (Nx + 1.)/2.", "def calcAllPixelsAddress(pixIDList, dimX, dimY):\n ini = True\n for pixIDs in pixIDList:\n pixs = np.zeros((pixIDs.shape[0], 3))\n szFrame = dimX*dimY\n pixs[:,2] = pixIDs // szFrame\n pixs[:,1] = (pixIDs % szFrame) // dimX\n pixs[:,0] = (pixIDs % szFrame) % dimX\n\n if ini:\n pixPoints = pixs\n ini = False\n else:\n pixPoints = np.vstack((pixPoints, pixs))\n\n return pixPoints", "def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)", "def create_offsite_pixel(self, account_id, name, tag, 
batch=False):\n path = 'act_%s/offsitepixels' % account_id\n args = {\n 'name': name,\n 'tag': tag,\n }\n return self.make_request(path, 'POST', args, batch=batch)", "def get_image_list(self, account):\n images = self.driver(account).list_images()\n return [image.name for image in images]", "def get_pixel_indices(self, lats, lons):\n raise NotImplementedError(\"MapBase.get_pixel_indices()\")", "def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, img_height):\n \n full_image = numpy.ones((img_height, img_width), dtype=uint8)\n for x, y, width, height in exclude_zones:\n \n # creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep\n exclusion = numpy.zeros((height, width), dtype=uint8)\n exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1)\n \n full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions)\n \n return full_image", "def available_healpix_pixels(self):\n return [dataset.info['healpix_pixel'] for dataset in self._datasets]", "def scan_points(val_addr,x_points,y_points):\n\n a = len(val_addr)\n pixels = np.zeros((y_points,x_points))\n lines = np.where(val_addr==234)[0]\n\n if lines.shape ==(0,):\n lines = np.array([0,a])\n else:\n lines =np.concatenate((lines,[a]))\n for i in range(y_points):\n part = val_addr[lines[i]:lines[i+1]]\n pix = np.concatenate(([0],np.where(part==233)[0],[lines[i+1]-lines[i]]))\n\n if i%2==0:\n pixels[i] = lines[i] + pix[:-1]\n else:\n pixels[i] = lines[i] + np.flip(pix[:-1])\n return(pixels,lines)", "def extract_pixels(img_):\n non_zero_pixels = np.argwhere(0 < img_)\n x = non_zero_pixels.T[0].astype(np.float32)\n y = non_zero_pixels.T[1].astype(np.float32)\n return x, y", "def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer", "def user_ip_list(uid):\r\n session = tables.get_session()\r\n res = []\r\n if session is None:\r\n return res\r\n try:\r\n ip_table = IpAddrs()\r\n res.extend(ip_table.get_ips_by_uid(uid, session))\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user ip list failed: %s', err)\r\n return []\r\n finally:\r\n session.close()\r\n return res", "def get_pixels(surface):\n pixels = []\n for y in range(surface.get_height()):\n for x in range(surface.get_width()):\n pixels.append(surface.get_at((x,y))[:3])\n return pixels", "def skyPixels(self,i, d,Feeds, selectFeature):\n\n # We store all the pointing information\n x = (d['level1/spectrometer/pixel_pointing/pixel_ra'][...])[Feeds[:,None],selectFeature]\n x = x[...,0:self.datasizes[i]].flatten()\n y = (d['level1/spectrometer/pixel_pointing/pixel_dec'][...])[Feeds[:,None],selectFeature]\n y = y[...,0:self.datasizes[i]].flatten()\n\n\n el = (d['level1/spectrometer/pixel_pointing/pixel_el'][...])[Feeds[:,None],selectFeature]\n el = el[...,0:self.datasizes[i]]\n\n\n pixels = self.getFlatPixels(x,y)\n pixels[pixels < 0] = -1\n pixels[pixels > self.naive.npix] = -1\n\n return pixels", "def get_pixel_skydirs(self):\n raise NotImplementedError(\"MapBase.get_pixel_skydirs()\")", "def calcPixelsAddress(svIDList, pixIDList, dimX, dimY):\n ini = True\n for svIDs in svIDList:\n for svID in svIDs:\n pixIDs = pixIDList[svID]\n pixs = np.zeros((pixIDs.shape[0], 3))\n szFrame = dimX*dimY\n pixs[:,2] = pixIDs // szFrame\n pixs[:,1] = (pixIDs % szFrame) // dimX\n pixs[:,0] = (pixIDs % szFrame) % dimX\n\n if ini:\n pixPoints = pixs\n ini = False\n 
else:\n pixPoints = np.vstack((pixPoints, pixs))\n\n if ini:\n return None\n else:\n return pixPoints", "def get_pixel_skydirs(self):\n\n xpix = np.linspace(0, self.npix[0] - 1., self.npix[0])\n ypix = np.linspace(0, self.npix[1] - 1., self.npix[1])\n xypix = np.meshgrid(xpix, ypix, indexing='ij')\n return SkyCoord.from_pixel(np.ravel(xypix[0]),\n np.ravel(xypix[1]), self.wcs)", "def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list", "def seek_spot_lists(self, xml_path):\n \n spot_dict = self.get_pixel_cordinate_from_xml(xml_path)\n self.spot_dict = spot_dict\n spot_list = list(spot_dict.values())\n return spot_list", "def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]" ]
[ "0.6172726", "0.5849796", "0.58011657", "0.5752363", "0.55722404", "0.54265267", "0.5412152", "0.53439766", "0.52993065", "0.52671695", "0.52521837", "0.5125096", "0.5120147", "0.51007414", "0.50539196", "0.50453466", "0.50400776", "0.5027511", "0.49755818", "0.4962378", "0.4931053", "0.49291104", "0.49231133", "0.48933977", "0.4868497", "0.4863114", "0.48557025", "0.48507053", "0.48117557", "0.47547388" ]
0.7049922
0
Returns the keyword stats for the given ad group.
def get_keyword_stats(self, adgroup_id, batch=False):
    path = '%s/keywordstats' % adgroup_id
    return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keywords_and_impressions(client, customer_id, page_size):\n ga_service = client.get_service('GoogleAdsService', version='v2')\n results = []\n\n query = ('SELECT ad_group_criterion.keyword.text, '\n 'metrics.impressions, metrics.clicks, metrics.cost_micros '\n 'FROM keyword_view WHERE segments.date DURING LAST_7_DAYS '\n 'AND ad_group.status = \\'ENABLED\\' '\n 'AND ad_group_criterion.status IN (\\'ENABLED\\', \\'PAUSED\\') '\n 'ORDER BY metrics.impressions DESC '\n 'LIMIT 100')\n response = ga_service.search(customer_id, query, page_size=page_size)\n try:\n for row in response:\n criterion = row.ad_group_criterion\n metrics = row.metrics\n results+= [criterion.keyword.text.value,\n metrics.impressions.value,\n metrics.cost_micros.value],\n return [[results],['Keyword', 'Impressions', 'Cost_Micros']]\n except GoogleAdsException as ex:\n print('Request with ID {} failed with status {} and includes the '\n 'following errors:'.format(ex.request_id, ex.error.code().name))\n return None", "def GetKeywordPlanAdGroupKeyword(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getGrpStats(group):\n return {'min': group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean(), 'sum':group.sum()}", "def get_stats_by_adgroup(\n self, account_id, adgroup_ids=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if adgroup_ids is not None:\n args['adgroup_ids'] = json.dumps(adgroup_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adgroupstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_keyword(package):\n\ttry:\n\t\tsubstr = re.search(r'(\\S+)_(\\S+)', package)\n\t\tif substr:\n\t\t\treturn substr.groups()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\t\treturn None", "def analyze_trending_keyword(keyword=\"pokemon\", count=100, keep_all=False, debug=False):\n print('analyzing keyword: {}'.format(keyword))\n tweets = get_search_tweets(query=keyword, count=count, debug=debug)\n\n return process_tweets(tweets, keep_all=keep_all, debug=debug)", "def get_stats_by_adcampaign_group(\n self, campaign_group_id, fields=None, filters=None, batch=False,\n start_time=None, end_time=None):\n args = {}\n if fields:\n args['fields'] = json.dumps(fields)\n if filters:\n args['filters'] = json.dumps(filters)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = '%s/stats' % campaign_group_id\n return self.make_request(path, 'GET', args, batch=batch)", "def get_descriptive(self, groups, key):\n group = groups.get_group(key)\n perm = group[\"perm\"].describe()\n t_test = group[\"t_test\"].describe()\n means = {\"perm\": perm, \"t_test\": t_test}\n return means", "def metrics_group():", "def _fe_keyword_match(self, sample):\n result = OrderedDict()\n\n for item in self._keywords:\n result[item + \"_kw\"] = 1 if item in sample['fqdn'] else 0\n\n return result", "def create_keyword_score(self):\n keywords_score = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n if zettel.count(word) >= self.min_keyword_freq:\n keywords_score.setdefault(word[0], 0)\n word_list = re.split(\" \", word[0])\n score = 0\n for new_word in word_list:\n score += self.word_scores[new_word]\n keywords_score[word[0]] = score\n return 
keywords_score", "def getAGroupInfo(group_id):\r\n return Group.getAGroupInfo(group_id)", "def get_lifetime_sparseness_for_group(group):\n image_responses = group.mean_response.values\n lifetime_sparseness = compute_lifetime_sparseness(image_responses)\n return pd.Series({'lifetime_sparseness': lifetime_sparseness})", "def cached_dm_find_fits_keyword(key):\n return MODEL.find_fits_keyword(key.upper(), return_result=True)", "def make_group_by_keyword(self, keyword):\r\n pass", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def fetch_keywords(training_data, num_languages, min_keyword_num=20):\n cnt_per_lang = [0] * num_languages\n cnt_per_lang_kw = [{} for _ in range(num_languages)]\n\n # Read row in training_data and count keywords in codes with langauge\n for index, (lang_id, code) in enumerate(training_data):\n cnt_per_lang[lang_id] += 1\n\n for keyword in shaman.KeywordFetcher.fetch(code):\n # if keyword exists in fetched data, add '1' to keyword data\n cnt_per_lang_kw[lang_id][keyword] = cnt_per_lang_kw[lang_id].get(\n keyword, 0) + 1\n\n print('Fetch keyword %d/%d ' %\n (index, len(training_data)), end='\\r')\n\n # Get dataset indexed by keyword\n ret = {}\n keyword_cnt = {}\n for lang_id, obj in enumerate(cnt_per_lang_kw):\n for keyword, count in obj.items():\n if keyword not in ret:\n ret[keyword] = [0] * num_languages\n\n # Record probability\n ret[keyword][lang_id] = (count / cnt_per_lang[lang_id])\n\n # Record total count of this keyword\n keyword_cnt[keyword] = keyword_cnt.get(keyword, 0) + count\n\n # Check total counts of the keyword and ignore if count is too small\n # (threshold is determined by `min_keyword_num`)\n keywords2remove = []\n for keyword in ret:\n if keyword_cnt[keyword] < min_keyword_num:\n keywords2remove.append(keyword)\n continue\n del keyword_cnt[keyword]\n\n for keyword in keywords2remove:\n del ret[keyword]\n\n print('Fetch keyword completed ')\n return ret", "def getGroupDataWeight(self, groupName):\n return self.getGroupSetting(groupName, self._dataWeightToken, 1.0)", "def grpc_iterator(client, customer_id, page_size):\n 
ga_service = client.get_service('GoogleAdsService', version='v2')\n results = []\n\n query = ('SELECT ad_group_criterion.keyword.text, '\n 'metrics.impressions, metrics.clicks, metrics.cost_micros '\n 'FROM keyword_view WHERE segments.date DURING LAST_7_DAYS '\n 'AND ad_group.status = \\'ENABLED\\' '\n 'AND ad_group_criterion.status IN (\\'ENABLED\\', \\'PAUSED\\') '\n 'ORDER BY metrics.impressions DESC '\n 'LIMIT 100')\n response = ga_service.search(customer_id, query, page_size=page_size)\n return response", "def searchGlossary(self,keyword):\n\t\twords = []\n\n\t\tfor letter in glossary:\n\t\t\tfor word in glossary[letter]:\n\t\t\t\tprint word.keys()[0]\n\t\t\t\tif keyword.lower() in word.keys()[0].lower():\n\t\t\t\t\twords.append(word)\n\n\t\treturn words", "def updateAdGroups(self):\n adgroup_ids = self.getAdGroupIds()\n for adgroup_id in adgroup_ids:\n message = \"\"\n df = self.getAdvertPerformanceDf(adgroup_id)\n\n ad_count = df.shape[0]\n\n if ad_count == 0:\n self.writeToDatabase(0, self.getPriority(0, False), \"no_ads\", adgroup_id)\n continue\n\n eta_ad_count = df[df.ad_type == 'Expanded text ad'].shape[0]\n\n has_winners = \"winning\" in df.ctr_message.values or \"winning\" in df.conversion_rate_message.values\n\n def getMessage():\n if eta_ad_count == 0:\n return \"no_expanded_text_ads\" # we can only show ad groups which at least 1 ETA (for the placeholder text)\n\n if has_winners:\n return \"has_winners\"\n\n if ad_count < 2:\n return \"too_few_ads\"\n self.writeToDatabase(ad_count, self.getPriority(df.shape[0], has_winners), getMessage(), adgroup_id)", "def get_all_criteria_names(group):\n return (\n criterion.findtext(\"name\")\n for criterion in group.findall(\"criteria/criterion\") if\n criterion.findtext(\"search_type\") != \"member of\")", "def testKeywordTrafficEstimates(self):\n selector = {\n 'campaignEstimateRequests': [{\n 'adGroupEstimateRequests': [{\n 'keywordEstimateRequests': [\n {\n 'keyword': {\n 'xsi_type': 'Keyword',\n 'matchType': 'BROAD',\n 'text': 'mars cruise'\n },\n 'maxCpc': {\n 'xsi_type': 'Money',\n 'microAmount': '1000000'\n }\n },\n {\n 'keyword': {\n 'xsi_type': 'Keyword',\n 'matchType': 'PHRASE',\n 'text': 'cheap cruise'\n },\n 'maxCpc': {\n 'xsi_type': 'Money',\n 'microAmount': '1000000'\n }\n },\n {\n 'keyword': {\n 'xsi_type': 'Keyword',\n 'matchType': 'EXACT',\n 'text': 'cruise'\n },\n 'maxCpc': {\n 'xsi_type': 'Money',\n 'microAmount': '1000000'\n }\n }\n ],\n 'maxCpc': {\n 'xsi_type': 'Money',\n 'microAmount': '1000000'\n }\n }],\n 'criteria': [\n {\n 'xsi_type': 'Location',\n 'id': '2044'\n },\n {\n 'xsi_type': 'Language',\n 'id': '1000'\n }\n ]\n }]\n }\n self.assert_(isinstance(self.__class__.service.Get(selector), tuple))", "def group_describe(self, group):\n mapped = self.map_vects(datanorm)\n mappednp= np.array(mapped)\n \n groups= mappednp[:,0]\n data['Group'] = pd.Series(groups, index=data.index)\n print(data[data['Group']==group].describe())", "def MutateKeywordPlanAdGroupKeywords(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_nested_groups_names(group):\n return (\n criterion.findtext(\"value\")\n for criterion in group.findall(\"criteria/criterion\") if\n criterion.findtext(\"name\") in (\"Computer Group\", \"Mobile Device Group\")\n and criterion.findtext(\"search_type\") == \"member of\")", "def getwords(self):\n return self._aggroWords.keys()", "def get_keywords_and_values(words):\n 
d={}\n triple_keyword_value = 5\n double_keyword_value= 3\n single_keyword_occurance_value = 1\n\n stop_words = set(stopwords.words(\"english\"))\n\n for i in range(0, len(words)-2):\n if words[i] not in stop_words and words[i].isalnum():\n d[words[i]] = d.get(words[i],0.0)+ single_keyword_occurance_value\n if words[i+1] not in stop_words and words[i+1].isalnum():\n d[words[i]+\" \"+words[i+1]] = d.get(words[i]+\" \"+words[i+1],0.0)+double_keyword_value\n if words[i + 2] not in stop_words and words[i + 2].isalnum():\n d[words[i]+\" \"+words[i+1]+\" \"+words[i+2]] = d.get(words[i]+\" \"+words[i+1]+\" \"+words[i+2],0.0)+triple_keyword_value\n\n print(i, len(words))\n\n if words[i+1] not in stop_words and words[i+1].isalnum():\n d[words[i+1]] = d.get(words[i+1],0.0)+ single_keyword_occurance_value\n if words[i+2] not in stop_words and words[i+2].isalnum():\n d[words[i+1]+\" \"+words[i+2]] = d.get(words[i+1]+\" \"+words[i+2],0.0)+double_keyword_value\n if words[i+2] not in stop_words and words[+2].isalnum():\n d[words[i+2]] = d.get(words[i+2],0.0)+ single_keyword_occurance_value\n return d", "def getKeywords(tmdbKeywords):\n \n words = []\n if \"keywords\" in tmdbKeywords:\n for keyword in tmdbKeywords[\"keywords\"]:\n words += _format(keyword[\"name\"]).split()\n else:\n raise AttributeError(\"%s instance has no attribute keywords\" % tmdbKeywords) \n return words", "def average_kappa_for_group(db, groupId):\n documents = db.documents.find({'groupId': groupId})\n kappas = []\n for document in documents:\n if document_has_annotations(db, document['_id']) and document_has_numbers(db, document['_id']):\n kappas.append(get_kappa_for_document(db, document['_id']))\n return sum(kappas)/float(len(kappas))" ]
[ "0.607016", "0.60511845", "0.5765904", "0.5403789", "0.53584504", "0.5270912", "0.51138765", "0.5043539", "0.4957441", "0.49099302", "0.48909253", "0.48906374", "0.4882828", "0.48628682", "0.48413464", "0.48088658", "0.48002428", "0.4779215", "0.47685438", "0.47670633", "0.47668236", "0.47368687", "0.4732161", "0.4729801", "0.47124594", "0.47050667", "0.46997643", "0.4698764", "0.46961594", "0.4668705" ]
0.82910204
0
Returns the rate card for Homepage Ads.
def get_ratecard(self, account_id, batch=False):
    path = 'act_%s/ratecard' % account_id
    return self.make_request(path, 'GET', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n user = get_authenticated_user()\n return get_card(user)", "def get_card (self, card):\n\t\treturn self._card", "def kpi_card4(df=data):\n total = df['passenger_count'].sum().round()\n return [\n html.H4('Total Passenger Amount', className='card-title'),\n html.P(f'{int(total):,d}', className='card-value'),\n ]", "def get_banner(self,context,request):\n ba = queryMultiAdapter((context,request), interfaces.IBanner)\n if not ba:\n return ''\n return ba()", "def get_card(self):\n return self.card", "def get_rate(parent=None):\n dialog = RateDialog(parent)\n dialog.exec_()\n rate = dialog.rate\n return rate", "def displayad(self, hash):\r\n return ads.DisplayAd(self, hash)", "def api_call(cls, currency):\n headers = {\"x-accept-version\": \"2.0.0\", \"Accept\": \"application/json\"}\n r = requests.get(cls.API_URL + currency, headers=headers)\n r.raise_for_status()\n return r.json()[\"data\"][\"rate\"]", "def get_protect_rate(soup):\n\n # up there with with route name\n grade_table = soup.h3\n \n # destroys the grade spans and looks for text\n while grade_table.span != None:\n grade_table.span.decompose()\n protect_rate = grade_table.getText()\n protect_rate = protect_rate.encode('utf8', errors = 'ignore').strip()\n \n return { 'protect_rate': protect_rate }", "def kpi_card2(df=data):\n total = df['trip_distance'].sum().round()\n return [\n html.H4('Total Trip Distance', className='card-title'),\n html.P(f'{int(total):,d}', className='card-value'),\n ]", "def getImage(cardTitle, size=\"normal\"):\n page = requests.get(\"https://api.scryfall.com/cards/named?exact=\"+name)\n page_json = json.loads(page.content)\n image_link = page_json[\"image_uris\"][size]\n image_response = requests.get(image_link)\n img = Image.open(BytesIO(image_response.content))\n return img.resize((384, 535)).convert(\"1\")", "def get_card(name_str, page=1):\r\n payload = {'name': name_str, 'page': page}\r\n response = query('https://api.magicthegathering.io/v1/cards', payload)\r\n return response.json()", "def deal_card(self):\n return self._deal(1)[0]", "def get_card(self):\n\n card = random.randint(1,13)\n return card", "def get(self, request ):\n return render(request, \"main_display_cards.html\")", "def get_cards(query_param):\n return _query_scryfall(query_param)", "def propeller_card(card):\n return card.as_html()", "def rate(self):\n return self.brate / FAC", "def coin_rate(request, coin):\n coin_obj = get_object_or_404(Coins, symbol=coin.upper())\n ratings = Rating.objects.filter(name_coin = coin_obj)\n return render(request, 'scraper/coin_rate.html', {'ratings': ratings})", "def get_reference_rates_tab_analyzed_rate(self):\n return self.get_text_from_element(self.reference_rates_tab_analyzed_tab_column_locator)", "def aces_high(card):\n if isinstance(card, Value):\n if card == Value.Ace:\n return 14\n return card.value\n\n if card.joker:\n return 15\n if card.value == Value.Ace:\n return 14\n return card.value.value", "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def ac(self):\n if self.armor:\n return self.armor.ac\n return 10 + self.dexterity", "def get(self):\n\n from advertise import Orders, Advert\n\n URL = self.request.url\n strURLlist = URL.split(\"/\")\n strDepositReference = strURLlist[len(strURLlist) - 1]\n\n # The Actual Order requested\n findRequest = Orders.query(Orders.deposit_reference == strDepositReference)\n thisOrderList = findRequest.fetch()\n\n if len(thisOrderList) > 0:\n thisOrder = thisOrderList[0]\n else:\n 
thisOrder = Orders()\n\n # Organization details of the owner of the account\n findRequest = Organization.query(Organization.strOrganizationID == thisOrder.organization_id)\n thisOrgList = findRequest.fetch()\n\n if len(thisOrgList) > 0:\n thisOrg = thisOrgList[0]\n else:\n thisOrg = Organization()\n\n # Main Account Details of the owner of the account\n findRequest = Accounts.query(Accounts.uid == thisOrder.uid)\n thisAccountList = findRequest.fetch()\n\n if len(thisAccountList) > 0:\n thisAccount = thisAccountList[0]\n else:\n thisAccount = Accounts()\n\n # The Advert being paid for\n findRequest = Advert.query(Advert.advert_id == thisOrder.advert_id)\n thisAdvertList = findRequest.fetch()\n\n if len(thisAdvertList) > 0:\n thisAdvert = thisAdvertList[0]\n else:\n thisAdvert = Advert()\n\n from advertise import Payments # This is to force the use of payments class in adverts\n # Payment details Advert\n findRequest = Payments.query(Payments.order_id == thisOrder.order_id)\n thisRelatedPaymentList = findRequest.fetch()\n\n # User Organization Payment Details\n findRequest = Payments.query(Payments.organization_id == thisOrder.organization_id)\n thisOrganizationPaymentsList = findRequest.fetch()\n\n template = template_env.get_template('templates/dashboard/payments/AdvertOrders.html')\n context = {'thisOrder': thisOrder, 'thisOrg': thisOrg, 'thisAccount': thisAccount, 'thisAdvert': thisAdvert,\n 'thisRelatedPaymentList': thisRelatedPaymentList,\n 'thisOrganizationPaymentsList': thisOrganizationPaymentsList}\n self.response.write(template.render(context))", "def kpi_card1(df=data):\n total = len(df)\n return [\n html.H4('Total Trips', className='card-title'),\n html.P(f'{int(total):,d}', className='card-value'),\n ]", "def get_cards():\n with open(\"mashape_key.txt\", \"r\") as mashape_key:\n api_key = mashape_key.read()\n print(api_key)\n url = \"https://omgvamp-hearthstone-v1.p.mashape.com/cards?collectible=1\"\n headers = {\"X-Mashape-Key\": api_key}\n response = requests.get(url, headers=headers)\n cards = json.loads(response.text)\n return cards", "def get(self, request):\n\n # get query params data\n self.from_currency = request.query_params.get('from_currency', None)\n self.to_currency = request.query_params.get('to_currency', None)\n self.date = request.query_params.get('date', None)\n\n # perform simple validation on query params\n is_not_valid = self.validate()\n if is_not_valid:\n return Response({\"data\":None, \"status\":is_not_valid}, status=status.HTTP_400_BAD_REQUEST)\n\n # try to fetch data from database if exist, else get it from external API and save it in database\n try:\n rate = Rate.objects.get(from_currency=self.from_currency, to_currency=self.to_currency, date=self.date).rate\n\n except:\n response = get(f\"https://www.frankfurter.app/{self.date}?from={self.from_currency}&to={self.to_currency}\")\n\n if response.status_code != 200:\n return Response({\"data\":None, \"status\":response.reason}, status=status.HTTP_404_NOT_FOUND)\n\n rate = response.json()[\"rates\"][self.to_currency]\n self.date = response.json()['date']\n\n # Create a record with the two currencies rate\n Rate.objects.create(from_currency=self.from_currency, to_currency=self.to_currency, date=self.date, rate=rate)\n\n return Response({\"data\":{\n \"date\":self.date, \n \"rate\":f\"1 {self.from_currency} = {rate} {self.to_currency}\"\n }, \n \"status\":\"Successful\"})", "def getAction(self, game, state):\n if self.getUnknownCard(state) is None:\n baseVal = self.getBase(state)\n counts = 
self.getCounts(state)[baseVal]\n if counts[0] > counts[1]:\n return dnUtil.Action.Higher\n elif counts[0] < counts[1]:\n return dnUtil.Action.Lower\n else:\n if dnUtil.random.random() > 50:\n return dnUtil.Action.Higher\n else:\n return dnUtil.Action.Lower\n elif self.getLoseBool(state):\n return dnUtil.Action.Exit\n else:\n nextVal = self.getUnknownCard(state)\n nextCounts = self.getCounts(state)[nextVal]\n deckSize = sum(nextCounts)\n high, low, tie = nextCounts\n\n if high > low:\n winrate = high / deckSize\n elif high < low:\n winrate = low / deckSize\n else:\n winrate = 0.5\n\n risk = game.getRisk()\n if self.expectancyPcnt(winrate) > self.expectancyPcnt(risk / 100):\n return dnUtil.Action.Continue, winrate * 100\n else:\n return dnUtil.Action.Exit, winrate * 100", "def action_hit(self) -> None:\n print(self.deal_card(self.user))", "def parse_ad_page(self, response):\n ad_base_xpath = '/html/body/section[@id=\"container\"]/main/section[@class=\"content-center\"]/section[@id=\"adview\"]'\n ad_base = response.xpath(ad_base_xpath)\n ad_base2 = ad_base.xpath(\n 'section/section/section[@class=\"properties lineNegative\"]')\n\n title = ad_base.xpath('section/header/h1/text()').extract_first()\n utag_data = response.xpath(\n '/html/body/script[4]/text()').extract_first()\n\n images = ad_base.xpath(\n 'section/section/script[2]/text()').extract_first()\n if images is None:\n images = response.xpath(\n '//*[@class=\"item_image big popin-open trackable\"]/@data-popin-content').extract_first()\n\n places = ad_base.xpath('aside/div/script/text()').extract_first()\n date_str = ad_base2.xpath('p/text()').extract_first()\n\n check_date = datetime.now()\n user_name = ad_base2.xpath(\n 'div[@class=\"line line_pro noborder\"]/p/a/text()').extract_first()\n description = ad_base2.xpath(\n 'div[@class=\"line properties_description\"]/p[@itemprop=\"description\"]/text()').extract()\n\n is_phonenumber = response.xpath(\n 'boolean(count(//button[@class=\"button-orange large phoneNumber trackable\"]))').extract_first()\n\n #lbc_ad = LeboncoinItem()\n lbc_ad = {}\n lbc_ad['ad_url'] = response.url\n lbc_ad['title'] = title\n lbc_ad['description'] = description\n lbc_ad['criterias'] = utag_data\n lbc_ad['images'] = images\n lbc_ad['user_name'] = user_name\n lbc_ad['places'] = places\n lbc_ad['upload_date'] = date_str\n lbc_ad['check_date'] = check_date\n lbc_ad['is_phonenumber'] = is_phonenumber\n\n self.logger.debug(\"ad_url, nb doc : {}\\t\\t{}\".format(\n response.url, self.nb_doc))\n self.nb_doc -= 1 # decrement cnt usefull for stop spider\n\n self.logger.debug(\"lbc_ad : {}\".format(lbc_ad))\n yield self.lbcAd.proper(lbc_ad)" ]
[ "0.55201805", "0.53813046", "0.5323483", "0.5319892", "0.5319833", "0.5310196", "0.5271338", "0.5227973", "0.52195853", "0.5211209", "0.51686454", "0.5116151", "0.5108667", "0.5050621", "0.5028366", "0.5001399", "0.4989199", "0.4986583", "0.49688375", "0.49473494", "0.49314734", "0.49288744", "0.4911829", "0.4889395", "0.48826027", "0.48743284", "0.48646888", "0.48596877", "0.48411053", "0.48400655" ]
0.64530134
0
Returns the reach estimate for the given currency and targeting.
def get_reach_estimate(self, account_id, targeting_spec, currency=None,
                       creative_action_spec=None, bid_for=None, batch=False):
    path = 'act_%s/reachestimate' % account_id
    args = {
        'targeting_spec': json.dumps(targeting_spec),
    }
    if currency is not None:
        args['currency'] = json.dumps(currency)
    if creative_action_spec is not None:
        args['creative_action_spec'] = json.dumps(creative_action_spec)
    if bid_for is not None:
        args['bid_for'] = bid_for
    return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cost(org, target, amount):\n rep, _ = target.Dominion.reputations.get_or_create(organization=org)\n base = 200\n if amount > 0:\n base -= rep.respect + rep.affection\n else:\n base += rep.respect + rep.affection\n if base < 0:\n base = 0\n return base * abs(amount)", "def get_target_coin(self):\n target_coin = self.coin_pair_dict['target']\n return target_coin", "def classic(target, pore_diameter='pore.diameter'):\n network = target.project.network\n throats = network.throats(target.name)\n cn = network['throat.conns'][throats]\n ctc_dist = ctc(target)\n value = ctc_dist - network[pore_diameter][cn].sum(axis=1) / 2\n return value", "def get_expected_cost(self):", "def estimate(values, target):\n\n # next time\n # diff(values)\n\n\n return 1.", "def insurance(self):\n insurance_cost = 0.0056 * self.input_dict['project_value_usd']\n return insurance_cost", "def get_price_data(self, source, commitment):\n pricing_category = self.pricing[source]\n if 'override' in pricing_category:\n pricing_data = pricing_category['override']\n else:\n source_commitment = commitment[source]\n pricing_data = pricing_category[str(source_commitment)]\n return Decimal(pricing_data['overage']), pricing_data['included']", "def __get_estimated_price(total_estimated_hours, complexity, discount):\n total_estimated_price = 0.0\n\n index = int(complexity)\n hourly_rate = HOURLY_PRICE[0] if total_estimated_hours < 5 else HOURLY_PRICE[1]\n complexity_rate = COMPLEXITY_RATE[index]\n\n daily_balance = (DAILY_BALANCE[index] * int(total_estimated_hours / 8) )\n\n if total_estimated_hours < 5:\n total_estimated_price = hourly_rate * complexity_rate * total_estimated_hours\n else:\n total_estimated_price = hourly_rate * complexity_rate * total_estimated_hours - daily_balance\n\n if discount:\n total_estimated_price *= (1 - (discount / 100.0))\n\n total_estimated_price = format(total_estimated_price, ',.2f')\n return total_estimated_price", "def CallValue(contract : 'Contract') -> float:\n return Option.__call_value(contract.underlyingPrice, contract.strikePrice, contract.interestRate / 100, contract.daysToExpiration / 365, contract.volatility / 100)", "def _get_reward(self):\n if self.is_game_done:\n return self.price - 1\n else:\n return 0.0", "def inflatedCost(self):\n\t\tinflated = self.cost\n\n\t\t# https://www.in2013dollars.com/Wine-at-home/price-inflation/2020-to-2021?amount=10000\n\t\tif self.acquisition.year <= 2018: # 2018-to-2019\n\t\t\tinflated *= 1.010727\n\n\t\tif self.acquisition.year <= 2019: # 2019-to-2020\n\t\t\tinflated *= 1.002446\n\n\t\tif self.acquisition.year <= 2020: # 2020-to-2021\n\t\t\tinflated *= 1.010612\n\n\t\tif self.acquisition.year <= 2021: # 2021-to-2022\n\t\t\tinflated *= 1.011850\n\n\t\treturn round(inflated, 2)", "def _get_reward(self, normalized_state, normalized_unconstrained_action, normalized_constrained_action):\n denormalized_unconstrained_charge_rate_in_W = self.denormalize_network_output(normalized_unconstrained_action)\n denormalized_constrained_charge_rate_in_W = self.denormalize_network_output(normalized_constrained_action)\n denormalized_state = normalized_state * self.energy_system.stm_train_subsequent_states_stds + self.energy_system.stm_train_subsequent_states_means\n\n cost_of_net_drawn_electricity = self._get_cost_of_net_drawn_electricity_in_euros(denormalized_state, denormalized_constrained_charge_rate_in_W)\n charge_rate_punishment = self._get_punishment_for_excessive_charge_rate(denormalized_unconstrained_charge_rate_in_W)\n soc_punishment = 
self._get_punishment_for_impossible_resulting_soc(denormalized_state, denormalized_unconstrained_charge_rate_in_W) \n reward = - cost_of_net_drawn_electricity - charge_rate_punishment - soc_punishment\n #tf.summary.scalar('cost_of_net_drawn_electricity in euros', cost_of_net_drawn_electricity) \n #tf.summary.scalar('reward', reward)\n\n return reward, cost_of_net_drawn_electricity", "def get_exchange_reward_per_euro(model):\n exchanged_euros = get_exchanged_euros(model)\n total_euros = get_total_euros(model) \n total_teos = get_total_teos(model)\n exchange_pool = (total_euros - total_teos)*model.buffer_share*model.exchange_reward_share\n if exchanged_euros == 0 or exchange_pool <= 0:\n return 0\n exchange_reward_per_euro = exchange_pool / exchanged_euros \n return round(float(exchange_reward_per_euro),4)", "def get_estimate(start_lat, start_lon, end_lat, end_lon, token):\n endpoint = \"https://api.uber.com/v1/estimates/price\"\n params = {\n 'start_latitude': start_lat,\n 'start_longitude': start_lon,\n 'end_latitude': end_lat,\n 'end_longitude': end_lon,\n 'server_token': token\n }\n r = requests.get(endpoint, params=params)\n r.raise_for_status()\n return r.json().pop('prices', [])", "def calculate_appropriate_target(self):\n pass", "def calculate_appropriate_target(self):\n pass", "def getCost(self, state, action):\n util.raiseNotDefined()", "def new_get_historical_price(base, target, date):\n if base == \"BTC\" and target == \"EUR\":\n return {\"BTC\": {\"EUR\": 10000}}\n elif base == \"EUR\" and target == \"BTC\":\n return {\"EUR\": {\"BTC\": 0.00012}}\n elif base == \"LTC\" and target == \"BTC\":\n return {\"LTC\": {\"BTC\": 0.02}}\n elif base == \"LTC\" and target == \"EUR\":\n return {\"LTC\": {\"EUR\": 250}}", "def get_reward(self):\n #original reward function: reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n thrusts = self.sim.get_propeler_thrust(self.sim.prop_wind_speed)\n linear_forces = self.sim.get_linear_forces(thrusts)\n distance = np.linalg.norm(self.target_pos - self.sim.pose[:3])\n #speed = math.sqrt(np.square(self.sim.find_body_velocity()).sum())\n #with 300x300x300m env, the max distance from one corner to another is 519\n max_distance = 519\n #Focus quadcopter on not crashing but first rewarding an upward linear force until at the height of the target\n if self.sim.pose[2] < self.target_pos[2]:\n #velocity_discount = 1/speed\n reward = np.tanh(linear_forces[2])\n #after getting to the correct z-coordinate, move to the correct y-coordinate\n elif self.sim.pose[1] < self.target_pos[1]:\n #velocity_discount = 1/speed\n reward = 1 + np.tanh(linear_forces[1])\n #finally, after getting rewards for the x and y coordinates, give reward for distance\n #at this stage, the drone will have overshot the x and y coordinates, but it would be in a better area to\n #start searching for the x coordinate\n elif distance > 1 and self.sim.pose[2] > self.target_pos[2] and self.sim.pose[1] > self.target_pos[1] :\n reward = 2 + (1-math.pow((distance/300),.04))\n elif distance < 1:\n self.success = True\n reward = 100\n #possible reward for hover: np.exp(-np.square(linear_forces[2]))\n return reward", "def actual_ico_funding_goal():\n return to_wei(0, \"ether\")", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def solve(band_sizes, band_rates, income, 
target_profit):\n # based on income, find out: 1. which band the person is in, 2. how much of\n # that band is left\n remaining_income = income\n for i in range(len(band_sizes)):\n size, rate = band_sizes[i], band_rates[i]\n if remaining_income <= size:\n left_in_band = size - remaining_income\n band = i\n break\n remaining_income -= size\n\n # now that we know what tax band the person is in, calculate how much more\n # money we need to give for the person to receive target_profit\n sizes = [left_in_band] + band_sizes[band+1:]\n rates = band_rates[band:]\n gift = 0\n left_to_get = target_profit\n for size, rate in zip(sizes, rates):\n if rate != 1: # prevent division by zero\n need_to_give = left_to_get / (1 - rate)\n if need_to_give < size:\n gift += need_to_give\n return gift\n gift += size\n left_to_get -= size * (1 - rate)", "def cost(self) -> float:", "def getValue(currency=None):", "def estimate_return(self, ob_no, re_n, hidden, masks):\n adv_n, q_n = self.compute_advantage(ob_no, re_n, hidden, masks)\n return q_n, adv_n", "def _compute_reward_(self):\n if self._target_type == \"position\":\n dist = np.linalg.norm(self._target_diff_, ord=2)\n if self._reward_type == \"linear\":\n reward_dist = -dist\n elif self._reward_type == \"precision\":\n reward_dist = -dist +\\\n np.exp( -dist**2 / 0.01)\n elif self._reward_type == \"sparse\":\n if dist < 0.05:\n reward_dist = 0\n else:\n reward_dist = -0.1\n\n elif self._target_type == \"angle\":\n dist = np.linalg.norm(self._target_diff_, ord=1)\n if self._reward_type == \"linear\":\n reward_dist = -dist\n elif self._reward_type == \"precision\":\n reward_dist = -dist +\\\n np.exp(-dist ** 2 / 0.01)\n elif self._reward_type == \"sparse\":\n raise NotImplementedError\n\n # TODO: doublecheck whether '0' or '-1' should be used as the index\n reward_vel = -self._vel_penalty * np.square(self._qd_[-1, self._joint_indices]).sum()\n\n #self.info['reward_dist'] = reward_dist\n #self.info['reward_vel'] = reward_vel\n\n return (reward_dist + reward_vel) * self._dt / 0.008", "def get_virtual_price() -> uint256:\n D: uint256 = self._get_D(self._xp(), self._A())\n # D is in the units similar to DAI (e.g. 
converted to precision 1e18)\n # When balanced, D = n * x_u - total virtual value of the portfolio\n token_supply: uint256 = ERC20(self.lp_token).totalSupply()\n return D * PRECISION / token_supply", "def cost(predictions, targets):\n # averages the error across all data points, taking the values that have not been rounded to 0 and 1.\n return np.mean( (predictions - targets)**2)", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score" ]
[ "0.6184802", "0.55034554", "0.54555136", "0.54076797", "0.53823316", "0.5301148", "0.5263448", "0.5148293", "0.5114286", "0.51138216", "0.5108673", "0.50789857", "0.5073114", "0.5045106", "0.5020766", "0.5020766", "0.50109684", "0.500113", "0.49957886", "0.49835432", "0.49826637", "0.49726182", "0.49717534", "0.4953086", "0.49438712", "0.49347508", "0.49316296", "0.48996565", "0.48861015", "0.4865745" ]
0.71005875
0
Returns the list of ad campaigns and related data.
def get_adcampaign_list(self, account_id):
    fields = 'id, name, campaign_status, start_time, end_time, ' \
        'daily_budget, lifetime_budget, budget_remaining'
    batch = [
        self.get_adaccount(account_id, ['currency'], batch=True),
        self.get_adcampaigns(account_id, fields, batch=True),
        self.get_stats_by_adcampaign(account_id, batch=True),
    ]
    return self.make_batch_request(batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns", "def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def get_adcampaign_detail(self, account_id, campaign_id, date_preset):\n campaign_fields = [\n 'name', 'campaign_status', 'daily_budget', 'lifetime_budget',\n 'start_time', 'end_time']\n campaign_data_columns = [\n 'campaign_name', 'reach', 'frequency', 'clicks',\n 'actions', 'total_actions', 'ctr', 'spend']\n adgroup_data_columns = [\n 'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',\n 'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',\n 'spend']\n demographic_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']\n placement_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']\n campaign_filters = [{\n 'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaign(campaign_id, campaign_fields, batch=True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', campaign_data_columns,\n campaign_filters, ['action_type'], True),\n self.get_adreport_stats(\n account_id, date_preset, 1, campaign_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', adgroup_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', demographic_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', placement_data_columns,\n campaign_filters, None, True),\n ]\n return self.make_batch_request(batch)", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def get_campaigns(self, uuid=None):\n params = self._build_params(uuid=uuid)\n return self._get_query('campaigns', params, Campaign)", "def get_adcampaigns_of_account(self, 
account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data", "def list_campaigns_extended(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def get(self):\n query = Campaign.query\n return paginate(Campaign.__tablename__, query, self.schema), HTTPStatus.OK", "def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names", "def test_get_existent_campaigns_returns_campaigns_list(self):\n test_campaign = return_canned_campaign()\n test_campaign.create()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response_body, {\"campaigns\": [{\"id\": 1, \"name\": \"Test Campaign\"}]}\n )", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def all(self, campaign_id, **queryparams):\n self.campaign_id = campaign_id\n self.report_id = None\n return self._mc_client._get(url=self._build_path(campaign_id, 'abuse-reports'), **queryparams)", "def list_campaigns(self, interval: str, page: str = None, limit: str = None) -> dict:\n params = remove_empty_elements({\"interval\": interval,\n \"page\": page,\n \"size\": limit,\n \"format\": \"json\"})\n return self.http_request(\"GET\", '/campaign/ids', params=params)", "def get_ads():\n return coll_ad.distinct(KEY_AD_ID)", "def get_campaigns(self, uuids=None, before=None, after=None, pager=None):\n params = self._build_params(uuid=uuids, before=before, after=after)\n return Campaign.deserialize_list(self._get_multiple('campaigns', params, pager))", "def get_campaign(self, campaign_id: str) -> dict:\n return 
self.http_request(\"GET\", f'/campaign/{campaign_id}')", "def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data", "def _extend_record(self, campaign, fields, pull_ads):\n campaign_out = campaign.api_get(fields=fields).export_all_data()\n if pull_ads:\n campaign_out[\"ads\"] = {\"data\": []}\n ids = [ad[\"id\"] for ad in campaign.get_ads()]\n for ad_id in ids:\n campaign_out[\"ads\"][\"data\"].append({\"id\": ad_id})\n return campaign_out", "def get_adcreatives(self, account_id, fields, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_conversion_stats_by_adcampaign(\n self, account_id, campaign_ids=None, include_deleted=False,\n start_time=None, end_time=None, aggregate_days=None,\n by_impression_time=True, batch=False):\n path = 'act_%s/adcampaignconversions' % account_id\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n if start_time is not None:\n args['start_time'] = start_time\n if end_time is not None:\n args['end_time'] = end_time\n if aggregate_days is not None:\n args['aggregate_days'] = aggregate_days\n if not by_impression_time:\n args['by_impression_time'] = 'false'\n return self.make_request(path, 'GET', args, batch=batch)", "def get_ad_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get ad data for account {}'.format(ad_account['account_id']))\n ads = ad_account.get_ads(\n fields=['id',\n 'name',\n 'adset_id',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for ad in ads:\n result[ad['id']] = {'name': ad['name'],\n 'ad_set_id': ad['adset_id'],\n 'attributes': parse_labels(ad.get('adlabels', []))}\n return result", "def get_adcampaign_groups(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories", "def get_ad_set_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get ad set data for account {}'.format(ad_account['account_id']))\n ad_sets = ad_account.get_ad_sets(\n fields=['id',\n 'name',\n 'campaign_id',\n 'adlabels'],\n params={'limit': 
1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for ad_set in ad_sets:\n result[ad_set['id']] = {'name': ad_set['name'],\n 'campaign_id': ad_set['campaign_id'],\n 'attributes': parse_labels(\n ad_set.get('adlabels', []))}\n return result" ]
[ "0.7791002", "0.76178706", "0.74240816", "0.7406647", "0.73380303", "0.7225367", "0.7168782", "0.7105438", "0.7103571", "0.68827987", "0.68807346", "0.6824061", "0.68058723", "0.67796344", "0.66644067", "0.6588232", "0.6488719", "0.6457609", "0.64117867", "0.6348168", "0.6261265", "0.62245405", "0.6171688", "0.6151998", "0.6073662", "0.6023222", "0.59485435", "0.5821726", "0.5805038", "0.58001924" ]
0.7850429
0
Returns the detail of an ad campaign.
def get_adcampaign_detail(self, account_id, campaign_id, date_preset):
    campaign_fields = [
        'name', 'campaign_status', 'daily_budget', 'lifetime_budget',
        'start_time', 'end_time']
    campaign_data_columns = [
        'campaign_name', 'reach', 'frequency', 'clicks',
        'actions', 'total_actions', 'ctr', 'spend']
    adgroup_data_columns = [
        'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',
        'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',
        'spend']
    demographic_data_columns = [
        'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',
        'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']
    placement_data_columns = [
        'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',
        'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']
    campaign_filters = [{
        'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]
    batch = [
        self.get_adaccount(account_id, ['currency'], batch=True),
        self.get_adcampaign(campaign_id, campaign_fields, batch=True),
        self.get_adreport_stats(
            account_id, date_preset, 'all_days', campaign_data_columns,
            campaign_filters, ['action_type'], True),
        self.get_adreport_stats(
            account_id, date_preset, 1, campaign_data_columns,
            campaign_filters, None, True),
        self.get_adreport_stats(
            account_id, date_preset, 'all_days', adgroup_data_columns,
            campaign_filters, None, True),
        self.get_adreport_stats(
            account_id, date_preset, 'all_days', demographic_data_columns,
            campaign_filters, None, True),
        self.get_adreport_stats(
            account_id, date_preset, 'all_days', placement_data_columns,
            campaign_filters, None, True),
    ]
    return self.make_batch_request(batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_campaign(self, campaign_id: str) -> dict:\n return self.http_request(\"GET\", f'/campaign/{campaign_id}')", "def get_campaign_info(self, id):\n logger.info(\"Function call: get_campaign_info from: {}\".format(id, ))\n return self.__handle_error(\"Empty campaign id\") if not id else self.__handle_result(self.__send_request('campaigns/{}'.format(id, )))", "def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def sms_get_campaign_info(self, id):\n if not id:\n return self.__handle_error(\"Empty campaign id\")\n\n logger.info(\"Function call: sms_get_campaign_info from: {}\".format(id, ))\n return self.__handle_result(self.__send_request('/sms/campaigns/info/{}'.format(id, )))", "def get(self, campaign_id):\n campaign = Campaign.query.filter_by(mailchimp_id=campaign_id).first()\n if campaign is None:\n return {\"message\": \"Campaign could not be found.\"}, HTTPStatus.NOT_FOUND\n return self.schema.dump(campaign), HTTPStatus.OK", "def get_campaign(self, campaignId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), campaignId), params=kwargs)", "def get_campaign_command(client: Client, campaign_id: str) -> CommandResults | str:\n try:\n raw_response = client.get_campaign(campaign_id)\n except ValueError:\n return 'Campaign Id not found'\n\n campaign_general_fields = ['id', 'name', 'description', 'startDate', 'notable']\n campaign_fields = ['families', 'techniques', 'actors', 'brands', 'malware']\n\n outputs = {}\n outputs['campaignMembers'] = dict_safe_get(raw_response, ['campaignMembers'])\n outputs['info'] = {key: value for key, value in raw_response.items() if key in campaign_general_fields}\n outputs.update({key: value for key, value in raw_response.items() if key in campaign_fields})\n fields_readable_output = \"\"\n for field in campaign_fields:\n fields_readable_output += \"\\n\" + tableToMarkdown(field.capitalize(),\n dict_safe_get(outputs, [field]), headers=['id', 'name'],\n headerTransform=pascalToSpace\n )\n\n campaign_info_output = tableToMarkdown('Campaign Information',\n outputs['info'],\n headers=['id', 'name', 'description', 'startDate', 'notable'],\n headerTransform=pascalToSpace\n )\n campaign_members_output = tableToMarkdown('Campaign Members',\n outputs['campaignMembers'],\n headers=['id', 'threat', 'type'],\n headerTransform=pascalToSpace\n )\n\n readable_output = campaign_info_output + \"\\n\" + campaign_members_output + fields_readable_output\n\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='Proofpoint.Campaign',\n outputs=outputs,\n outputs_key_field='id',\n raw_response=raw_response\n )", "def get_campaign(self, uuid):\n return Campaign.deserialize(self._get_single('campaigns', {'uuid': uuid}))", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def getCampaignById(service, campaignId):\n endCampaign = None\n # Again using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'CampaignGroupId', 'Status', 
'ServingStatus',\n 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetName', 'BudgetStatus', 'Amount',\n 'DeliveryMethod', 'BudgetReferenceCount', 'IsBudgetExplicitlyShared',\n 'Settings')\n .Where('Id').EqualTo(campaignId)\n .Limit(0, 1)\n .Build())\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n endCampaign = campaign\n else:\n pass\n return endCampaign", "def get_adcreative(self, creative_id, fields, batch=False):\n path = '%s' % creative_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def get(self):\n query = Campaign.query\n return paginate(Campaign.__tablename__, query, self.schema), HTTPStatus.OK", "def campaign_name(self):\n\n return self._campaign_name", "def campaign_name(self):\n\n return self._campaign_name", "def campaign_name(self):\n\n return self._campaign_name", "def test_get_campaign_by_id_passes(self):\n response = self.client.get(f\"{self.endpoint_url}{self.test_campaign.id}/\")\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"id\": CAMPAIGN_ID, \"name\": CAMPAIGN_NAME})", "def get_campaign_cost(self, id):\n logger.info(\"Function call: get_campaign_cost: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/cost'.format(id)))", "def contact_details(self):\n return self.data.get(\"contactDetails\")", "def incident_detail(cls, incident_id=None):\n # Strict mode off to avoid an XMLParseError for custom attributes that are not expected\n with cls.soap_client.settings(strict=False):\n\n incident_detail = cls.soap_client.service.incidentDetail(incidentId=incident_id)\n\n return incident_detail", "def get_adcampaign_group(self, campaign_group_id, fields, batch=False):\n path = '%s' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def getCamp(self, id):\n return self.__camps[id];", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def get_campaign_extended(self, campaignId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), campaignId), params=kwargs)", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def campaign_status(self):\n return self._campaign_status" ]
[ "0.730492", "0.7148773", "0.70886153", "0.6952887", "0.66787946", "0.6621084", "0.64367497", "0.6201446", "0.61358225", "0.6095113", "0.60102504", "0.5969368", "0.59596705", "0.59596705", "0.59596705", "0.5865227", "0.58570576", "0.58103025", "0.5659969", "0.56390685", "0.56014836", "0.56014836", "0.56014836", "0.5582836", "0.5570148", "0.55646783", "0.5538173", "0.5538173", "0.5538173", "0.5516474" ]
0.73936963
0
Returns the autocomplete data for the given query and type.
def get_autocomplete_data(self, q, type, want_localized_name=False, list=None, limit=None, batch=False): path = '%s/search' % q args = {'type': type} if want_localized_name: args['want_localized_name'] = want_localized_name if list: args['list'] = list if limit: args['limit'] = limit return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autocomplete(\n self,\n query,\n fields=None,\n operator=None,\n order_by_relevance=True,\n backend=\"default\",\n ):\n search_backend = get_search_backend(backend)\n return search_backend.autocomplete(\n query,\n self,\n fields=fields,\n operator=operator,\n order_by_relevance=order_by_relevance,\n )", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def search_for(self, q, q_type):\n cache_key = md5.new(\"{0}--{1}\".format(q, q_type)).hexdigest()\n data = self.cache.get(cache_key, [])\n\n if not data:\n if q_type == 'artist':\n pre_cache = self.artist_pre_cache\n elif q_type == 'album':\n pre_cache = self.album_pre_cache\n elif q_type == 'track':\n pre_cache = self.track_pre_cache\n else:\n raise Exception('Invalid query type')\n data = pre_cache.get(q)\n\n if not data:\n search_url = self._absolute_url('/v1/search')\n response = self._make_request(search_url, { 'q': q, 'type': q_type })\n if response.status_code != 200:\n return None\n data = response.json()\n self.cache[cache_key] = data\n return data", "def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result", "def get_autocomplete(cursor, query):\n cursor.execute(\"SELECT * FROM entities WHERE name LIKE %s ORDER BY total_occurences DESC LIMIT 9;\", [query + \"%\"])\n return_obj = {'entities':[]}\n\n for entity in cursor.fetchall():\n return_obj['entities'].append({\n 'name': entity[1],\n 'score': entity[2]\n })\n return return_obj", "def search(query, type):\n try:\n if type == \"beer\":\n return BreweryDb.search({'q': query, 'type': type, 'withBreweries': 'Y'})['data']\n else:\n return BreweryDb.search({'q': query, 'type': type})['data']\n except Exception:\n return []", "def autocomplete():\n value = str(request.args.get('q'))\n result = s.query(Genes).filter(Genes.name.like(\"%\" + value + \"%\")).all()\n data = [i.name for i in result]\n return jsonify(matching_results=data)", "def get_data(query, search_type):\n\n def filter_movies_only(entries):\n return [e for e in entries if e['media_type'] == 'movie']\n\n query = query.encode('utf-8')\n tmdb = get_tmdb(lang)\n search = tmdb.Search()\n if search_type == 'movie':\n movies = search.movie(query=query)['results']\n else:\n persons = search.person(query=query)['results']\n # We only select the first found actor/director.\n if persons:\n person_id = persons[0]['id']\n else:\n return []\n person = tmdb.People(person_id)\n person.combined_credits()\n if search_type == 'actor':\n movies = filter_movies_only(person.cast)\n else:\n movies = filter_movies_only(person.crew)\n movies = [m for m in movies if m['job'] == 'Director']\n return movies", "def get_data(query):\n par = {\"key\": str(GOOGLE_KEY), \"query\": query}\n url = \"https://maps.googleapis.com/maps/api/place/textsearch/json\"\n req = requests.get(url, params=par)\n return req.json()", "def search_autocomplete(request):\n response = HttpResponse(content_type='application/json')\n query = request.GET.get('query', None)\n if query:\n try:\n suggestions = []\n for node in 
nc.get_indexed_node(nc.graphdb.manager, 'name', query):\n suggestions.append(node['name'])\n d = {'query': query, 'suggestions': suggestions, 'data': []}\n json.dump(d, response)\n except Exception:\n pass\n return response\n return False", "def get_places_autocomplete(q: str = None, **params) -> JsonResponse:\n if params.get('page') == 'all':\n places = PlaceAutocompletePaginator(q=q, **params).all()\n else:\n places = get(f'{API_V1}/places/autocomplete', q=q, **params).json()\n\n places['results'] = convert_all_coordinates(places['results'])\n return places", "def researchbytype():\n if request.method == 'GET':\n user2 = request.args.get('type')\n data2 = {}\n data2 = Beers.find({\"Type\":user2}, {\"_id\":0})\n return fct.returning(data2)", "def get_users_autocomplete(q: str, **params) -> JsonResponse:\n response = get(f'{API_V1}/users/autocomplete', q=q, **params)\n users = response.json()\n users['results'] = convert_all_timestamps(users['results'])\n return users", "def get(self, type, q, page=None):\r\n url = '{0}/{1}'.format(self.get_url(), type)\r\n params = base.get_params(('q', 'page'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def autocomplete():\n query = '' if request.args.get('query') is None else request.args.get('query')\n\n prefixed_words = []\n close_words = []\n for f in app.preprocessed.words:\n lowered = f.lower()\n if lowered.startswith(query) and lowered != query:\n prefixed_words.append(f)\n elif levenshtein(query, lowered) <= 1:\n close_words.append(f)\n\n result = {\n 'success': True,\n 'data': {\n 'suggestions': prefixed_words + close_words\n }\n }\n return jsonify(result)", "def getResultDefs(self, type=None):\n results = self.results.values()\n\n if type:\n results = filter(lambda result: result.type == type, results)\n\n return results", "def get_search_suggestions(Resource=None, SuggestionQuery=None):\n pass", "async def getAutocompleteConfig(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getAutocompleteConfig()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, 
headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def suggestions(self, request):\n data = request.query_params\n try:\n page_size = int(data.get('page_size', 5))\n except ValueError:\n raise InvalidParameter('page_size', \"Invalid `page_size`\")\n type_qp = data.get('type', 'users,pages,tags,shouts,shout')\n country = data.get('country', '').upper()\n try:\n types = type_qp.split(',')\n except:\n raise InvalidParameter('type', _(\"Invalid `type`\"))\n\n suggestions = OrderedDict()\n\n if 'users' in types:\n users_qs = User.objects.filter(type=USER_TYPE_PROFILE, is_activated=True).order_by('-date_joined')\n if request.user.is_authenticated():\n users_qs = users_qs.exclude(id=request.user.id)\n if country:\n users_qs = users_qs.filter(profile__country=country)\n users_qs = users_qs.select_related('profile')\n users = ProfileSerializer(users_qs[:page_size], many=True, context={'request': request}).data\n suggestions['users'] = users\n if 'pages' in types:\n pages_qs = User.objects.filter(type=USER_TYPE_PAGE).order_by('-date_joined')\n if request.user.is_authenticated():\n pages_qs = pages_qs.exclude(id=request.user.id)\n if country:\n pages_qs = pages_qs.filter(page__country=country)\n pages_qs = pages_qs.select_related('page')\n pages = ProfileSerializer(pages_qs[:page_size], many=True, context={'request': request}).data\n suggestions['pages'] = pages\n if 'tags' in types:\n tag_slugs = list(Category.objects.values_list('slug', flat=True))\n random.shuffle(tag_slugs)\n tags_qs = Tag.objects.filter(slug__in=tag_slugs[:page_size])\n tags = TagDetailSerializer(tags_qs, many=True, context={'request': request}).data\n suggestions['tags'] = tags\n if 'shouts' in types or 'shout' in types:\n shouts_qs = Shout.objects.get_valid_shouts(country=country).order_by('-published_at')\n if 'shouts' in types:\n shouts = ShoutSerializer(shouts_qs[:page_size], many=True, context={'request': request}).data\n suggestions['shouts'] = shouts\n if 'shout' in types:\n shout = shouts_qs.first()\n if shout:\n shout = ShoutSerializer(shout, context={'request': request}).data\n suggestions['shout'] = shout\n return Response(suggestions)", "def get(self, *, type):\n return self.__class__(terms=[term for term in self if isinstance(term, type)])", "def search_book(self, data):\n query = f\"\"\"SELECT * from {TABLE} where \"{data['type']}\" LIKE \"%{data['text']}%\";\"\"\"\n\n self.cursor.execute(query)\n res = self.cursor.fetchall()\n\n return res", "def species_autocomplete(request, format='csv'):\n \n if request.GET.get('q'):\n q = request.GET.get('q')\n \n species = Species.objects.all().order_by('taxon_code')\n \n # split tokens by period or white space\n q_tokens = split(r'[.\\s]+', q)\n \n # prefix match for each token in the search string against genus name or species name\n for token in q_tokens:\n species = species.filter(Q(species_name__istartswith=token) | Q(genus_name__genus_name__istartswith=token))\n \n \n \n \n # empty species list if no query provided by the user\n else:\n species = []\n \n\n \n \n if format == 'csv':\n # serialize results as CSV\n return CSVResponse(\n [{'species': s.taxon_code} for s in species], \n fields=('species',) )\n \n \n else:\n # serialize results as JSON\n JSON_objects = [{'label': (s.genus_name_id + ' ' + s.species_name), 'value': 
s.taxon_code} for s in species]\n return JSONResponse({'species': JSON_objects})", "def get_queryset(self):\n\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n if search_str:\n a = Q(name__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = self.model.objects.filter(a | b).distinct()\n\n else:\n objects = OrganizationType.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def bentity_autocomplete(request, format='csv'):\n \n if request.GET.get('q'):\n q = request.GET.get('q')\n \n bentities = Bentity.objects.all().order_by('bentity')\n \n # split tokens by period or white space\n q_tokens = split(r'[.\\s]+', q)\n \n # prefix match for each token in the search string against genus name or species name\n for token in q_tokens:\n bentities = bentities.filter(bentity__icontains=token)\n \n \n else:\n bentities = []\n \n \n if format == 'csv':\n # Serislize CSV for API\n return CSVResponse(\n [{'bentity_id': b.gid, 'bentity_name': b.bentity} for b in bentities],\n ('bentity_id', 'bentity_name') )\n \n else:\n # Serialize JSON for bentity-list widget\n json_objects = [{\n 'bentity_id': b.gid,\n 'bentity_name': b.bentity,\n } for b in bentities]\n return JSONResponse({'bentities' : json_objects})", "def typeahead(self, workspace, params={}, **options):\n path = \"/workspaces/%s/typeahead\" % (workspace)\n return self.client.get_collection(path, params, **options)", "def get(self, query):\n result = self._client.execute(query)\n data = json.loads(result)\n return data", "def searchWikidata(input, type):\n # Whenever the user types something in the searchbar open a session\n if len(input) >= 1:\n # The string with API wbsearchentities to suggestions to the user input\n URL = \"https://www.wikidata.org/w/api.php?action=wbsearchentities&search=%s\" \\\n \"&format=json&limit=5&formatversion=2&language=en&type=%s\" % (input, type)\n with requests.Session() as S:\n DATA = S.post(url=URL, headers={\"user-agent\": \"magic browser\", \"Content-Type\": \"application/json\"}).json()\n\n # Whenever a search entity is returned, do something\n if len(DATA[\"search\"]) >= 1:\n # Go through the DATA.json and append an entity label, id and description to a option list\n option_list = []\n for option in DATA[\"search\"]:\n temp_str = \"\"\n\n try:\n temp_str += option[\"label\"] + \" (\"\n except Exception:\n temp_str += \"|\"\n\n try:\n temp_str += option[\"id\"] + \") | \"\n except Exception:\n temp_str += \"|\"\n\n try:\n temp_str += option[\"description\"]\n except Exception:\n \"\"\n\n option_list.append(temp_str)\n\n # Creates a list with the suggested entities\n return html.Ul([html.Li(temp_str) for temp_str in option_list])\n\n # If no results is returned do something\n else:\n return \"No results could be found\"\n\n # Do nothing when no input\n else:\n return \"\"", "async def getAutocompleteKeywordDetail(self, id=None):\n payload = {}\n \n if id:\n payload[\"id\"] = id\n \n\n # Parameter validation\n schema = CatalogValidator.getAutocompleteKeywordDetail()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/{id}/\", 
\"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"id\",\"description\":\"A `id` is a unique identifier for a particular detail. Pass the `id` of the keywords which you want to retrieve.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"id\",\"description\":\"A `id` is a unique identifier for a particular detail. Pass the `id` of the keywords which you want to retrieve.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", id=id)\n query_string = await create_query_string(id=id)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/{id}/\", id=id), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def get(self):\n\n self.counter = count(1)\n arguments = {}\n for arg_name in ('term', 'page', 'page_width', 'callback'):\n arg_value = self.get_argument(arg_name, None, True)\n if arg_value is not None:\n arguments[arg_name] = arg_value.encode('utf-8')\n\n self.jsonp_callback = arguments.pop('callback', None)\n\n if 'term' not in arguments:\n return self._empty_answer()\n arguments['term'] = self._clean_term(arguments['term'])\n if not arguments['term']:\n return self._empty_answer()\n\n http_client = AsyncHTTPClient()\n url = \"{0}/{1}/?{2}\"\n for search_type in self.response.keys():\n request = HTTPRequest(\n url.format(self.api_url, search_type, urlencode(arguments)),\n method='GET',\n request_timeout=3,\n )\n http_client.fetch(\n request,\n callback=partial(self._handle_request, search_type)\n )", "def search(self, query, type, limit=20, offset=0):\n key = generate_cache_key('search', source='spotify', params=[query, type, limit, offset])\n resp = cache.get(key)\n if not resp:\n resp = requests.get(self.BASE_URL +\n 'search?q=' + urllib.quote(query.encode('utf8')) +\n '&type=' + type +\n '&limit=' + str(limit) +\n '&offset=' + str(offset)).json()\n cache.set(key, resp, DEFAULT_CACHE_EXPIRATION)\n return resp" ]
[ "0.61557144", "0.60404307", "0.60142326", "0.5925829", "0.5909921", "0.587843", "0.58297956", "0.5817675", "0.5782266", "0.572731", "0.57246333", "0.5627573", "0.5599135", "0.55556315", "0.5546945", "0.55009896", "0.5431673", "0.54116696", "0.53997016", "0.53606945", "0.5324815", "0.5294651", "0.5278337", "0.520192", "0.5196534", "0.5172559", "0.5159668", "0.5149842", "0.513882", "0.51381147" ]
0.8000939
0
Returns the page access token for the given page.
def get_page_access_token(self, page_id, batch=False): path = '%s' % page_id args = {'fields': 'access_token'} return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def page_token(self):\n return self._properties.get(\"pageToken\")", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def get_access_token(self):\n access_token = self._auth_provider._get_auth_value()\n return access_token", "def access_token(self):\n return self._authentication.access_token", "def access_token(self):\n return self._authentication.access_token", "def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']", "def get_token(self, page, in_post=True, planet=None):\n soup = self.get_soup(page, planet)\n with open(\"log\", \"w\") as f: print(soup, file=f)\n if in_post: soup = soup.find(\"form\", {\"method\": \"POST\"})\n #print(\"\\n\\n\\n\\n\\npost:\", post)\n\n token = soup.find(\"input\", {\"name\": \"token\"})[\"value\"]\n print(\"\\n\\n\\n\\n\\n\\ntoken:\", token)\n return token", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def token(page):\n index = (page or 0) * getxy().max_results\n k = index//128 - 1\n index -= 128 * k\n f = [8, index]\n if k > 0 or index > 127:\n f.append(k+1)\n f += [16, 0]\n b64 = base64.b64encode(bytes(f)).decode('utf8')\n return b64.strip('=')", "def get_token(url, data):\n try:\n resp = requests.post(url, data)\n return resp.json()['access_token']\n except(KeyError, requests.exceptions.RequestException):\n return ''", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "def get_access_token(self, code):\n url = get_config(\"login.wechat.access_token_url\") % code\n r = self._access_wxapi_or_raise(url)\n\n return (r[\"access_token\"], r[\"openid\"])", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. 
for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def get_access_token(self, request) -> str or Exception:\n pass", "def get_access_token(self) -> Optional[Text]:\n return self.access_token", "def access_token(self):\n return self.access_token_str", "def _get_access_token(self, url):\n if self.access_token:\n return self.access_token\n data = \"client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s&scope=write\" %\\\n (self.client_id, self.client_secret, self.username, self.password)\n\n parsed = urlparse(url)\n path = urlunparse(ParseResult(parsed.scheme, parsed.netloc, \"/oauth2/access_token\", None, None, None))\n\n auth_resp = urlopen(Request(path, data), timeout=10)\n if auth_resp.getcode() != 200:\n self.logger.error(\"Error with client credentials\")\n return self.access_token\n auth_resp_data = json.loads(auth_resp.read())\n\n if \"access_token\" in auth_resp_data:\n self.access_token = auth_resp_data[\"access_token\"]\n else:\n self.logger.error(\"Error with client credentials\")\n return self.access_token", "def get_token(self, access_token):\n if access_token:\n return access_token\n elif self.default_access_token:\n return self.default_access_token\n else:\n return ''", "def next_page_token(self):\n return self._next_page_token", "def next_page_token(self):\n return self._next_page_token", "def access_token(self):\n access_token = self.session.get('component_access_token')\n if access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 60:\n return access_token\n\n self.fetch_access_token()\n return self.session.get('component_access_token')", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")" ]
[ "0.6895106", "0.6298637", "0.61150604", "0.59867823", "0.59867823", "0.5959107", "0.5922044", "0.5917908", "0.59148884", "0.5853093", "0.57995343", "0.5794838", "0.574418", "0.57328886", "0.5731778", "0.57293093", "0.56982315", "0.56935376", "0.5673225", "0.5673225", "0.5625471", "0.56191534", "0.56153446", "0.5585804", "0.5585804", "0.5585804", "0.5585804", "0.5585804", "0.5585804", "0.5585804" ]
0.8188665
0
Returns data for the given page post.
def get_page_post(self, page_post_id, fields=None, batch=False): path = '%s' % page_post_id args = {} if fields: args['fields'] = json.dumps(fields) return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getPostData(self, PostID):\n url = self.urlGen(id=str(PostID))\n XML =None\n with async_timeout.timeout(10):\n async with self.session.get(url=url) as XML:\n XML = await XML.read()\n XML = self.ParseXML(ET.XML(XML))\n data = XML['posts']['post']\n return data\n return None", "def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)", "def _get_post(self):\n return self.get_object().content_object", "def __getData(self, post,is_question=False):\n page = {'entity':'question' if is_question else 'answer', 'uri':self.currenturi}\n css_class_name = 'span' if is_question else 'div'\n if post.find('div', text='Post temporarily unavailable'):\n log.info(self.log_msg('Message Temporarily not available in url %s'%self.currenturi))\n return False\n if post.find('form', id='frm_quick_reply_submit'):\n log.info(self.log_msg('It is not a post'))\n return False\n try:\n page['et_author_name'] = stripHtml(post.find(css_class_name, 'vt_asked_by_user').renderContents())\n except:\n log.info(self.log_msg('Author name not found in %s'% self.currenturi))\n try:\n date_str = stripHtml(post.find(css_class_name,attrs={'class':re.compile('vt_.+?_timestamp')}).renderContents()).replace('replied ','').strip()\n date_str = re.sub(\"(\\d+)(st|nd|rd|th)\",r\"\\1\", date_str)\n page['posted_date'] = datetime.strftime(datetime.strptime(date_str\n , 'on %B %d, %Y'),\"%Y-%m-%dT%H:%M:%SZ\")\n except:\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%SZ\")\n log.info(self.log_msg('Date not be found in %s'% self.currenturi))\n try:\n page['et_author_category'] = stripHtml(post.find('span', 'vt_user_rank').renderContents())\n except:\n log.info(self.log_msg('Author name not found in %s'% self.currenturi))\n try:\n data_tag = post.find('div', 'vt_post_body')\n ads_tag = post.findAll('div',attrs={'class':re.compile('vt_post_body_ad_[l/r]')})\n [each.extract() for each in ads_tag]\n page['data'] = stripHtml(data_tag.renderContents())\n except:\n log.info(post)\n log.info(self.log_msg('Cannot find the Data for this Post %s'%self.currenturi))\n page['data'] = ''\n try:\n if is_question:\n page['title'] = stripHtml(str(post.find('div', 'vt_post_subject').span.next.next))\n else:\n page['title'] = 'Re: ' + self.__hierarchy[-1]\n except:\n log.info(self.log_msg('Cannot find the Data thread details for this Post %s'%self.currenturi))\n page['title'] = ''\n if not (page['data'] and page['title']):\n log.info(self.log_msg('No data found in url %s'%self.currenturi))\n return \n if len(self.__hierarchy) >= 3:\n page['et_thread_topic'] = self.__hierarchy[-1]\n page['et_thread_forum'] = self.__hierarchy[-3]\n page['et_thread_subforum'] = self.__hierarchy[-2]\n return page", "def __getData(self, post, is_question):\n page = {'entity':'question' if is_question else 'answer'}\n try:\n page['title'] = stripHtml(post.find('div', 'lia-message-subject').h1.renderContents())\n except:\n log.info(self.log_msg('Title not found'))\n page['title'] = ''\n try:\n data_tag = post.find('div', 'lia-message-body-content')\n [x.extract() for x in data_tag.findAll('blockquote')]\n page['data'] = stripHtml(data_tag.renderContents())\n except:\n log.info(self.log_msg('Data not found for the url %s'%self.currenturi))\n page['data'] = ''\n #Sometimes only Image is uploaded on the Post, in that case data will be empty\n if not (page['data'] or page['title']): \n log.info(self.log_msg(\"Data and Title are not found for %s,discarding this Post\"%(self.currenturi)))\n return False \n try:\n date_str = re.sub('\\s+', ' ', 
stripHtml(post.find('span', 'DateTime lia-message-posted-on lia-component-common-widget-date').renderContents())) \n page['posted_date'] = datetime.strftime(datetime.strptime(date_str, '%m-%d-%Y %I:%M %p'), '%Y-%m-%dT%H:%M:%SZ')\n except:\n log.info(self.log_msg('Posted date not found'))\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%SZ\")\n try:\n author_tag = post.find('div', 'lia-message-author-username')\n page['et_author_name'] = stripHtml(author_tag.renderContents())\n except:\n log.info(self.log_msg('author name not found'))\n try:\n page['et_author_category'] = stripHtml(post.find('div','lia-message-author-rank').renderContents())\n except:\n log.info(self.log_msg('author category not found'))\n try:\n page['et_author_profile'] = self.__baseuri + author_tag.a['href'].split(';')[0]\n except:\n log.info(self.log_msg('author profile not found'))\n try:\n page['ei_author_posts_count'] = int(stripHtml(post.find('div', 'lia-message-author-post-count').renderContents()).split(':')[-1].strip())\n except:\n log.info(self.log_msg('author posts count not found'))\n try:\n date_str = stripHtml(post.find('div', 'lia-message-author-registered-date').renderContents()).split(':')[-1].strip()\n page['edate_author_member_since'] = datetime.strftime(datetime.strptime(date_str, '%m-%d-%Y'), '%Y-%m-%dT%H:%M:%SZ')\n except:\n log.info(self.log_msg('author registered date not found'))\n try:\n page['ef_data_rating'] = float(stripHtml(post.find('span' ,'MessageKudosCount').renderContents()))\n except:\n log.info(self.log_msg('Rating not found'))\n try:\n page['ei_data_views_count'] = int(re.sub('[^\\d+]', '', stripHtml(post.find('div', 'lia-message-statistics').findAll('span')[-1].renderContents())))\n except:\n log.info(self.log_msg('datda views count not found')) \n if len(self.__hierarchy) >= 3:\n page['et_thread_topic'] = self.__hierarchy[-1]\n page['et_thread_forum'] = self.__hierarchy[-3]\n page['et_thread_subforum'] = self.__hierarchy[-2]\n else:\n log.info(self.log_msg('Cannot find the Data thread details'))\n return page", "def __getData(self,review,post_type):\r\n page = {'title':''}\r\n try:\r\n page['et_author_name'] = stripHtml(review.find('p','post_title').find('a').renderContents()).replace('> ','').replace('...','')\r\n except:\r\n log.info(self.log_msg('author name not found'))\r\n try:\r\n aut_info = {'ei_author_points_count':'crp_points','ei_author_posts_count':'forum_posts_count'}\r\n for each in aut_info.keys():\r\n page[each] = int(re.search('\\d+',stripHtml(review.find('p',aut_info[each]).renderContents())).group())\r\n except:\r\n log.info(self.log_msg('Author posts count not found'))\r\n try:\r\n page['et_author_membership'] = stripHtml(review.find('p','crp_level').renderContents())\r\n except:\r\n log.info(self.log_msg('Author member ship not found'))\r\n try:\r\n post_tag = review.find('div','commentbox_mid')\r\n page['title'] = stripHtml(post_tag.find('h2','post_title').renderContents())\r\n date_str = stripHtml(post_tag.find('p','post_date').renderContents()).split('|')[0].strip()\r\n page['posted_date'] = datetime.strftime(datetime.strptime(date_str,'%m-%d-%Y %I:%M %p'),\"%Y-%m-%dT%H:%M:%SZ\")\r\n remove_tags = {'div':['commentbox_nav','commentbox_sig'],'h2':['post_title'],'p':['post_date']}\r\n for each_key in remove_tags.keys():\r\n for each in remove_tags[each_key]:\r\n tag = post_tag.find(each_key,each)\r\n if tag:\r\n tag.extract()\r\n tags = review.findAll('blockquote')\r\n for each in tags:\r\n each.extract()\r\n page['data'] = 
stripHtml(post_tag.renderContents())\r\n except:\r\n log.exception(self.log_msg('title not found'))\r\n return False\r\n try:\r\n if page['title']=='':\r\n if len(page['data']) > 50:\r\n page['title'] = page['data'][:50] + '...'\r\n else:\r\n page['title'] = page['data']\r\n except:\r\n log.exception(self.log_msg('title not found'))\r\n page['title'] = ''\r\n try:\r\n page['et_data_reply_to'] = self.thread_id\r\n except:\r\n log.info(self.log_msg('data reply to is not found'))\r\n try:\r\n page['et_data_post_type'] = post_type\r\n except:\r\n log.info(self.log_msg('Page info is missing'))\r\n try:\r\n page['et_data_forum'] = self.hierarchy[0]\r\n page['et_data_subforum'] = self.hierarchy[1]\r\n page['et_data_topic'] = self.hierarchy[2]\r\n except:\r\n log.exception(self.log_msg('data forum not found'))\r\n## try:\r\n## data_str = review.find('div','threadText')\r\n##\r\n## data_tag = review.find('div','threadDetails')\r\n## [x.findParent('div') for x in data_tag.findAll('blockquote')]\r\n## for each in ['threadSubject','threadLinks']:\r\n## tag = data_tag.find('div',each)\r\n## if tag:\r\n## tag.extract()\r\n## page['data'] = stripHtml(data_tag.renderContents()).replace('______________________________________________________\\nPlease mark replies as answers if they answered your question...','').strip()\r\n## except:\r\n## log.info(self.log_msg('data not found'))\r\n## page['data'] =''\r\n return page", "def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return", "def get_post(self, postid):\n return self.execute('metaWeblog.getPost', postid, self.username, self.password)", "def get_new_data_for_page(page_arg_set):\n api, page_id, config = page_arg_set\n\n log.info( \"FACEBOOK\\tGetting new data for facebook.com/%s\" % page_id )\n \n # fetch account data so we can associate the number of likes with the account AT THAT TIME\n try:\n acct_data = api.get(page_id)\n except Exception as e:\n log.error('FACEBOOK\\t%s does not exist' % page_id)\n return None\n else:\n # determine limit\n if is_insights(page_id, config):\n if config['facebook'].has_key('insights_limit'):\n limit = config['facebook']['insights_limit']\n else:\n limit = 200\n\n else:\n if config['facebook'].has_key('page_limit'):\n limit = config['facebook']['page_limit']\n else:\n limit = 10\n\n # get last {limit} articles for this page\n page = api.get(page_id + \"/posts\", page=False, retry=5, limit=limit)\n post_arg_sets = [(api, post_data, acct_data, page_id, config) for post_data in page['data']]\n \n threaded_or_serial(post_arg_sets, insert_new_post, 30, 200)", "def page_data():\n return scrape()", "def get_data():\n pass", "def get_data(self):", "def _get_new_data(self, page_url, soup):\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data", "def data(self, **kw):\n return dict(page='data', params=kw)", "def data(self, **kw):\n return dict(page='data', params=kw)", "def get_paginate_data(self, *args, **kwargs):\n pass", "def default_post_response_data(channel, post, user):\n # For some reason, the default values are different for staff and non-staff users\n if user.is_staff:\n 
user_dependent_defaults = {\"upvoted\": False, \"num_reports\": 0}\n else:\n user_dependent_defaults = {\"upvoted\": True, \"num_reports\": None}\n\n post_obj = Post.objects.get(post_id=post.id)\n article = Article.objects.filter(post=post_obj).first()\n\n text = post.text\n\n if not text and not post.url:\n text = \"\"\n\n if article:\n plain_text = render_article_text(article.content)\n elif text:\n plain_text = markdown_to_plain_text(text)\n else:\n plain_text = None\n\n return {\n \"url\": post.url,\n \"url_domain\": urlparse(post.url).hostname if post.url else None,\n \"cover_image\": None,\n \"thumbnail\": None,\n \"text\": text,\n \"article_content\": article.content if article is not None else None,\n \"plain_text\": plain_text,\n \"post_type\": post_obj.post_type,\n \"title\": post.title,\n \"removed\": False,\n \"deleted\": False,\n \"subscribed\": False,\n \"score\": 1,\n \"author_id\": user.username,\n \"id\": post.id,\n \"slug\": get_reddit_slug(post.permalink),\n \"created\": post.created,\n \"num_comments\": 0,\n \"channel_name\": channel.name,\n \"channel_title\": channel.title,\n \"channel_type\": channel.channel_type,\n \"profile_image\": image_uri(user.profile),\n \"author_name\": user.profile.name,\n \"author_headline\": user.profile.headline,\n \"edited\": False,\n \"stickied\": False,\n **user_dependent_defaults,\n }", "def get_new_page_data(self, draft=False):\n page_data = {\n 'title': 'test page %d' % self.counter,\n 'slug': 'test-page-%d' % self.counter, 'language': 'en',\n 'sites': [1], 'status': Page.DRAFT if draft else Page.PUBLISHED,\n # used to disable an error with connected models\n 'document_set-TOTAL_FORMS': 0, 'document_set-INITIAL_FORMS': 0,\n }\n self.counter = self.counter + 1\n return page_data", "def get_data():\n return", "def api_get_post(request, post_id):\n\n post = get_object_or_404(Post, id=post_id)\n\n json = serializers.serialize(\"json\", [post], fields=(\n \"pub_time\", \"_text_rendered\", \"title\", \"text\", \"image\",\n \"image_width\", \"image_height\", \"replies\", \"tags\"\n ))\n\n return HttpResponse(content=json)", "def post_data(driver):\n post_info = {\n \"post_age\" : \"li.posted\", \n \"page_views\" : \"ul.posting-info li.views\"\n }\n for key, selector in post_info.items():\n try:\n text = driver.find_element_by_css_selector(selector).text\n if key == \"post_age\":\n post_info[key] = parse_post_age(text)\n else:\n post_info[key] = ''.join(list(filter(lambda c: c.isdigit(), text)))\n except Exception as e:\n post_info[key] = \"\"\n pass\n return post_info", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self):\r\n pass", "def post_contents(self):\r\n return self._post", "def _get_post(self):\n post_pk = self.kwargs.get('post_pk', 0)\n return get_object_or_404(Post, pk=post_pk)", "def get_json(self) -> dict:\n\n try:\n html = get(self.url, sessionid=self.sessionid)\n except HTTPError:\n raise PostIdNotFound(self.post_id)\n parser = Parser()\n parser.feed(html)\n info = parser.Data\n return info", "async def get_post(self, ctx: commands.Context, postid: int):\n async with ctx.typing():\n data = await self.get_post_by_id(ctx, postid)\n post_e = await self.post_data_to_embed(data)\n attach = await self.get_file_from_post_data(data)\n\n await ctx.send(\n data['_'][\"message_content\"] if not data['_'][\"should_embed\"] else None,\n embed=post_e,\n # file=attach,\n )", "def fetch_post(page_num):\n req = POST_API.format(page_num=page_num)\n try:\n response = requests.get(req)\n response.raise_for_status()\n 
posts = response.json()\n objects = list()\n for json_post in posts:\n fetch_author.delay(json_post.get('author'), json_post.get(\"_links\", dict()).get('authors', []))\n title = BeautifulSoup(json_post.get('title', dict()).get('rendered', \"\"), \"lxml\").text\n content = BeautifulSoup(json_post.get('content', dict()).get('rendered', \"\"), \"lxml\").text\n post = Article(id=json_post.get('id'),\n date=json_post.get('date_gmt', datetime.now()),\n modified=json_post.get('modified_gmt', datetime.now()),\n title=title,\n content=content,\n author_id=json_post.get('author')\n )\n objects.append(post)\n s = Session()\n s.bulk_save_objects(objects)\n s.commit()\n\n except requests.exceptions.HTTPError as error:\n raise Reject(error)\n except Exception as ex:\n raise Reject(ex)", "def get_post(self):\n\n if self.gotten: return\n self.get_text()\n self.get_keywords()\n self.get_poll()\n self.get_schedule()\n self.get_expiration()\n self.get_files()\n self.set_text()\n if Settings.get_performer_category() or self.hasPerformers:\n self.get_performers()\n else:\n self.performers = \"unset\"\n self.gotten = True" ]
[ "0.66914964", "0.6609694", "0.6597313", "0.65585893", "0.65549135", "0.64523876", "0.63400614", "0.63219726", "0.61648834", "0.6111833", "0.60791177", "0.60451114", "0.6033017", "0.60295993", "0.60295993", "0.6004108", "0.5989076", "0.59873974", "0.595117", "0.594713", "0.59380096", "0.5932613", "0.5932613", "0.59231895", "0.59036213", "0.58947694", "0.5887588", "0.5847342", "0.5837652", "0.5824061" ]
0.6994447
0
Creates an ad image in the given ad account.
def create_adimage(self, account_id, image_data, batch=False): path = 'act_%s/adimages' % account_id files = {image_data.name: image_data} return self.make_request(path, 'POST', None, files, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_custom_audience_pixel(self, account_id, batch=False):\n path = \"act_%s/adspixels\" % account_id\n return self.make_request(path, 'POST', batch=batch)", "def create_adcreative(self, account_id, name=None, object_story_id=None, object_story_spec=None, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {}\n if name:\n args['name'] = name\n if object_story_id:\n args['object_story_id'] = object_story_id\n if object_story_spec:\n args['object_story_spec'] = json.dumps(object_story_spec)\n\n return self.make_request(path, 'POST', args, batch=batch)", "def create_advert():\r\n advertiser, category, zone = create_objects()\r\n ad = AdBase.objects.create(\r\n title='Ad Title',\r\n url='www.example.com',\r\n advertiser=advertiser,\r\n category=category,\r\n zone=zone,\r\n )\r\n return ad", "def create_image(image_url, owner, permission=\"PRIVATE\"):\n\n image = Image(image_url=image_url,\n owner=owner,\n permission=permission)\n \n db.session.add(image)\n db.session.commit()\n return image", "def create_image(user_id, image_name, tag1, tag2, tag3):\n\n image = Image(user_id=user_id, image_name=image_name, tag1=tag1, tag2=tag2, tag3=tag3)\n\n db.session.add(image)\n db.session.commit()\n\n return image", "def image_create_and_upload(self, upload=True, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(self.__name__ + \"-image\")\n kwargs['name'] = name\n\n params = dict(kwargs)\n image = self.create_image(**params)\n self.assertEqual('queued', image['status'])\n if not upload:\n return image\n\n file_content = data_utils.random_bytes()\n image_file = io.BytesIO(file_content)\n self.client.store_image_file(image['id'], image_file)\n\n image = self.client.show_image(image['id'])\n return image", "def builder_will_create_target_image(self, builder, target, image_id, template, parameters):", "def create_one_image(attrs=None):\n attrs = attrs or {}\n\n # Set default attribute\n image_info = {\n 'id': str(uuid.uuid4()),\n 'name': 'image-name' + uuid.uuid4().hex,\n 'owner': 'image-owner' + uuid.uuid4().hex,\n 'container_format': '',\n 'disk_format': '',\n 'min_disk': 0,\n 'min_ram': 0,\n 'is_public': True,\n 'protected': False,\n 'properties': {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'},\n 'status': 'status' + uuid.uuid4().hex,\n }\n\n # Overwrite default attributes if there are some attributes set\n image_info.update(attrs)\n\n return image.Image(**image_info)", "def make_image(storage, name, width, height, format='JPEG', mode='RGB'):\n im = Image.new(mode, (width, height))\n draw = ImageDraw.Draw(im)\n draw.rectangle([0, 0, width // 2, height // 2], '#F00')\n draw.rectangle([width // 2, 0, width, height // 2], '#0F0')\n draw.rectangle([0, height // 2, width // 2, height], '#00F')\n draw.rectangle([width // 2, height // 2, width, height], '#000')\n draw.rectangle([width // 4, height // 4, 3 * width // 4, 3 * height // 4], '#FFF')\n im_bytes_io = io.BytesIO()\n im.save(im_bytes_io, format)\n im_bytes_io.seek(0)\n storage.save(name, im_bytes_io)", "def create_image_builder(Name=None, ImageName=None, ImageArn=None, InstanceType=None, Description=None, DisplayName=None, VpcConfig=None, EnableDefaultInternetAccess=None, DomainJoinInfo=None, AppstreamAgentVersion=None):\n pass", "def cli(env, identifier, account_id):\n\n image_mgr = SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was 
shared to account {account_id}.\")", "def create_and_upload_image(cls, data=None, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(\"kb-image\")\n kwargs['name'] = name\n\n params = cls._get_create_params(**kwargs)\n if data:\n # NOTE: On glance v1 API, the data should be passed on\n # a header. Then here handles the data separately.\n params['data'] = data\n\n image = cls.client.create_image(**params)\n # Image objects returned by the v1 client have the image\n # data inside a dict that is keyed against 'image'.\n if 'image' in image:\n image = image['image']\n cls.created_images.append(image['id'])\n # Upload image to glance artifactory.\n file_content = data_utils.random_bytes()\n image_file = six.BytesIO(file_content)\n cls.client.store_image_file(image['id'], image_file)\n cls.kingbird_client = kb_client.Client(\n kingbird_url=KINGBIRD_URL, auth_token=cls.client.token,\n project_id=cls.client.tenant_id)\n return image", "def _CreateImage(media_service, opener, url):\n # Note: The utf-8 decode is for 2to3 Python 3 compatibility.\n image_data = opener.open(url).read().decode('utf-8')\n image = {\n 'type': 'IMAGE',\n 'data': image_data,\n 'xsi_type': 'Image'\n }\n\n return media_service.upload(image)[0]", "def create():\n # Make `InputRequired` work on `FileField`.\n form_fields = request.form.copy()\n if request.files:\n form_fields.update(request.files)\n\n form = CreateForm(form_fields)\n\n if not form.validate():\n abort(400, 'Form validation failed.')\n\n party_id = form.party_id.data\n creator_id = form.creator_id.data\n image = request.files.get('image')\n\n party = party_service.find_party(party_id)\n if not party:\n abort(400, 'Unknown party ID')\n\n avatar = _create(party.id, creator_id, image)\n\n return avatar.url_path", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def create_asset(ocean, publisher):\n sample_ddo_path = get_resource_path(\"ddo\", \"ddo_sa_sample.json\")\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n asset = DDO(json_filename=sample_ddo_path)\n asset.metadata[\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n my_secret_store = \"http://myownsecretstore.com\"\n auth_service = ServiceDescriptor.authorization_service_descriptor(my_secret_store)\n return ocean.assets.create(asset.metadata, publisher, [auth_service])", "def create_target_image(self, builder, target, base_image, parameters):", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def new_image(self, width, height, background=None, mode=\"RGBA\"):\n self.img = PIL.Image.new(mode, (width, height), background)\n self.width,self.height = width,height\n self.drawer = aggdraw.Draw(self.img)", "def create_image(self, instance_id, name,\r\n description=None, no_reboot=False):\r\n params = {'InstanceId' : instance_id,\r\n 'Name' : name}\r\n if description:\r\n params['Description'] = description\r\n if no_reboot:\r\n params['NoReboot'] = 'true'\r\n img = self.get_object('CreateImage', params, Image, verb='POST')\r\n return img.id", "def create_image(storage, filename, size=(100, 100), image_mode='RGB', image_format='PNG'):\n data = BytesIO()\n PIL.Image.new(image_mode, size).save(data, 
image_format)\n data.seek(0)\n if not storage:\n return data\n image_file = ContentFile(data.read())\n return storage.save(filename, image_file)", "def create(self):\n\n if self.image:\n return self.image\n\n import nova_utils\n nova = nova_utils.nova_client(self.os_creds)\n image_dict = None\n try:\n # TODO/FIXME - Certain scenarios, such as when the name has whitespace,\n # the image with a given name is not found....\n image_dict = nova.images.find(name=self.image_name)\n except Exception as e:\n logger.info('No existing image found with name - ' + self.image_name)\n pass\n\n if image_dict:\n self.image = self.glance.images.get(image_dict.id)\n if self.image:\n logger.info('Found image with name - ' + self.image_name)\n return self.image\n\n self.image_file = self.__get_image_file()\n self.image = self.glance.images.create(name=self.image_name, disk_format=self.image_format,\n container_format=\"bare\")\n logger.info('Uploading image file')\n self.glance.images.upload(self.image.id, open(self.image_file.name, 'rb'))\n logger.info('Image file upload complete')\n return self.image", "def create_base_image(self, builder, template, parameters):", "def addImg(in_dict):\n img = Image(name=in_dict[\"name\"],\n b64str=in_dict[\"b64str\"],\n imgsize=in_dict[\"imgsize\"],\n processed=in_dict[\"processed\"],\n timestamp=in_dict[\"timestamp\"])\n ans = img.save()\n return ans.name", "def _createimage(self, image):\n return self.cv.create_image(0, 0, image=image)", "def test_create_image(self):\n pass", "def create_account():\n\n return render_template('account.html')", "def createAsset(assFolder, *args):\n createAssetUI(assFolder)", "def create_account():\n account = w3.eth.account.create()\n return account", "def add_image(jid, img):\n jrd.hset(_generate_job_key(jid), 'image_status', 'created')\n image_rd.hset(jid, 'image', img)" ]
[ "0.61451757", "0.6033003", "0.5863322", "0.58397895", "0.58106166", "0.56329113", "0.5490508", "0.5458526", "0.53882515", "0.5354105", "0.53367656", "0.533547", "0.53347516", "0.5307588", "0.5294122", "0.52878755", "0.5280777", "0.5263223", "0.5253173", "0.52526194", "0.5241333", "0.5207675", "0.5145843", "0.5141166", "0.5105651", "0.51025313", "0.509487", "0.50875497", "0.50865215", "0.50615805" ]
0.8006185
0
Creates a link page post on the given page.
def create_link_page_post(self, page_id, link=None, message=None, picture=None, thumbnail=None, name=None, caption=None, description=None, published=None, call_to_action=None, batch=False): page_access_token = self.get_page_access_token(page_id) if 'error' in page_access_token: return page_access_token if 'access_token' not in page_access_token: raise AdsAPIError('Could not get page access token. (Do you have manage pages permission?)') path = '%s/feed' % page_id args = { 'access_token': page_access_token['access_token'], } files = {} if link is not None: args['link'] = link if message is not None: args['message'] = message if picture is not None: args['picture'] = picture if thumbnail is not None: files['thumbnail'] = thumbnail if published is not None: args['published'] = published if name is not None: args['name'] = name if caption is not None: args['caption'] = caption if description is not None: args['description'] = description if call_to_action is not None: args['call_to_action'] = json.dumps(call_to_action) return self.make_request(path, 'POST', args, files, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newPost(self, postLink, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createNewBlogEntry()\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # publish entry\r\n atomRespEntry = self.createAtomEntry(postLink, atomEntry)\r\n return atomRespEntry", "def create_url_post(post_id, config):\n text = \"https://graph.facebook.com/\"\n text += config[\"graph_api_version\"]\n text += \"/\" + post_id + \"?fields=\"\n text += \"id, message, admin_creator, backdated_time,\"\n text += \"caption,coordinates,created_time,description,\"\n text += \"feed_targeting,from,event,icon,is_popular,link,\"\n text += \"message_tags,name,object_id,type,shares,\"\n text += \"story,source,properties,place,target,targeting,\"\n text += \"status_type,comments.fields(comment_count,from,id,like_count,\"\n text += \"permalink_url,created_time,message,comments.fields(comment_count,\"\n text += \"from,id,like_count,created_time,message,\"\n text += \"reactions.fields(id,name,type,username,profile_type)),\"\n text += \"reactions.fields(id,name,type,username,profile_type)),\"\n text += \"reactions.fields(id,name,type,username,profile_type), sharedposts\"\n text += \"&access_token=\" + config[\"access_token\"]\n\n return text", "def wordpress_new_page(slug, title, content):\n server = ServerProxy(os.environ['WORDPRESS_RPC_URL'])\n return server.wp.newPost(os.environ['WORDPRESS_BLOG_ID'],\n os.environ['WORDPRESS_USERNAME'],\n os.environ['WORDPRESS_PASSWORD'],\n {\n 'post_name': slug,\n 'post_content': content,\n 'post_title': title,\n 'post_parent':\n os.environ['WORDPRESS_PARENT_PAGE_ID'],\n 'post_type': 'page',\n 'post_status': 'publish',\n 'comment_status': 'closed',\n 'ping_status': 'closed',\n })", "def addSitePost(self, site, testo, url_articolo, links, titolo = \"\"):\n from models import SitePost\n if len(titolo) == 0:\n titolo = url_articolo\n #print titolo\n try:\n v = SitePost.objects.get(url_articolo = url_articolo)\n created = False \n except:\n v = SitePost.objects.create(\n testata = site,\n url_articolo = url_articolo,\n testo = testo,\n titolo = titolo\n )\n created = True\n \n \n tot = 0\n print \"Created: %s | URL: %s\" % (created, v.url_articolo)\n\n if created is True:\n links = [self.addPostItem(site,link,'LINK', url_articolo) for link in links if \"http\" in link and len(link)>3]\n for link in links:\n v.links.add(link)\n tot+=1\n print \"AddSitePost: Url: %s | Created? 
%s | Links created: %s\" % (v.url_articolo, created, tot)\n return created", "def new_post(self, content):\n return self.proxy.wp.newPost(self.blog_id, self.username, self.password,\n content)", "def CreatePage(url1: str) -> Page:\n page = Page(\n name=url1,\n queried=1,\n )\n db.session.add(page)\n db.session.commit()\n return page", "def create_page(self):", "def add_page(self, page): \n self.pages.append(Page(page))", "def __addPost(self, link):\n self.currenturi = link\n self.__setStoneSoupForCurrentUri()\n try:\n page = self.__getData()\n if not page:\n return True \n if checkSessionInfo(self.genre, self.session_info_out, self.currenturi,\\\n self.task.instance_data.get('update'),parent_list\\\n = [self.task.instance_data['uri']]):\n log.info(self.log_msg('Session info returns True'))\n return False\n except:\n log.exception(self.log_msg('Cannot add the post for the url %s'%\\\n self.currenturi))\n return False\n try:\n result=updateSessionInfo(self.genre, self.session_info_out, self.currenturi, \\\n get_hash( page ),'review', self.task.instance_data.get('update'),\\\n parent_list=[self.task.instance_data['uri']])\n if not result['updated']:\n log.exception(self.log_msg('Update session info returns False'))\n return True\n page['parent_path'] = [self.task.instance_data['uri']]\n page['path'] = [self.task.instance_data['uri'], self.currenturi]\n page['uri'] = self.currenturi\n page['entity'] = 'review'\n page['uri_domain'] = urlparse.urlparse(page['uri'])[1]\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n #log.info(page)\n log.info(self.log_msg('page added %s'%self.currenturi))\n return True\n except:\n log.exception(self.log_msg('Error while adding session info'))\n return False", "def create_a_post():\n subj = create_subject()\n post = Post.create(subject=subj, title=\"A great title\", body=\"Just a great day!\")\n post.save()\n return post", "def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': article_slug,\n 'article_url': article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 
'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)", "def new_post(mkp_form, request):\n newpost = Posts()\n newpost.init()\n newpost.authorid = int(request.user.id)\n newpost.title = mkp_form.cleaned_data['title']\n newpost.name = mkp_form.cleaned_data['short_title'] # 缩略名\n newpost.cover = mkp_form.cleaned_data['cover_url']\n newpost.introduction = mkp_form.cleaned_data['introduction']\n newpost.content = js_resize_img(mkp_form.cleaned_data['content'])\n newpost.status = Status.objects.get(id=2) # id为2是已发布的文章,默认为已发布,后面再改\n tagids = mkp_form.cleaned_data['tags']\n if len(tagids) != 0:\n for tagid in tagids:\n tagid = int(tagid)\n tag = Tags.objects.get(id=tagid)\n newpost.tags.add(tag)\n threadtypeid = mkp_form.cleaned_data['threadtypeid']\n newpost.threadtypeid = ThreadTypes.objects.get(id=threadtypeid)\n if mkp_form.cleaned_data['commentnotshow'] != '':\n newpost.comment_status = False\n else:\n newpost.comment_status = True\n return newpost", "def post_entry(self, body, link=None, to=None, **args):\n args.update(body=body)\n if link: args.update(link=link)\n if to: args.update(to=to)\n return self.fetch(\"/entry\", post_args=args)", "def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def 
new_post(user_id):\n user = User.query.get_or_404(user_id)\n\n tags = Tag.query.all()\n return render_template('posts/new_page.html', user=user, tags=tags)", "def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)", "def post(self, post_id=None):\n\n if post_id:\n abort(400)\n else:\n args = parsers.post_post_parser.parse_args(strict=True)\n\n new_post = Post(args['title'])\n new_post.text = args['text']\n # new_post.user = user\n\n if args['tags']:\n for item in args['tags']:\n tag = Tag.query.filter_by(name=item).first()\n # If the tag already exist, append.\n if tag:\n new_post.tags.append(tag)\n # If the tag not exist, create the new one.\n # Will be write into DB with session do.\n else:\n new_tag = Tag(item)\n new_post.tags.append(new_tag)\n db.session.add(new_post)\n db.session.commit()\n return (new_post.id, 201)", "def addPost(self,text,id,url,date):\n self.topComments.append(Post(text,id,url,date))\n return None", "def post(self):\n title = self.request.get(\"title\")\n body = self.request.get(\"body\")\n\n if title and body:\n\n # create a new Post object and store it in the database\n post = Post(\n title=title,\n body=body\n )\n post.put()\n\n # get the id of the new post, so we can render the post's page (via the permalink)\n id = post.key().id()\n self.redirect(\"/blog/%s\" % id)\n else:\n error = \"we need both a title and a body!\"\n #self.render_form(title, body, error)\n self.render(\"newpost.html\", title, body, error)", "def create_review_link(self, project_id, **kwargs):\n endpoint = '/projects/{}/review_links'.format(project_id)\n return self._api_call('post', endpoint, payload=kwargs)", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n # if user enter good subject and content, redirect them to new post page\n if subject and content:\n p = Post(parent = blog_key(), subject = subject, content = content)\n p.put() # store the post element into database\n self.redirect('/blog/%s' % str(p.key().id()))\n # otherwise, render an error page \n else:\n error = \"subject and content, please!\"\n self.render(\"newpost.html\", subject=subject, content=content, error=error)", "def create(cls, site_id, page, payload):\n result = super(PageViews, cls).create(site_id=site_id, page=page)\n\n message = {\"site_id\": site_id,\n \"page\": page }\n\n producer.send_messages(\"pageviews\", json.dumps(message))\n\n return result", "def make_new_post(user_id):\n user = User.query.get_or_404(user_id)\n tags = Tag.query.all()\n return render_template('posts/new_post.html', user=user, tags=tags)", "def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = 
post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict", "def createLink(context, title, link, exclude_from_nav=False):\n oid = idnormalizer.normalize(title, 'es')\n if not hasattr(context, oid):\n context.invokeFactory('Link', id=oid, title=title, remoteUrl=link)\n link = context[oid]\n if exclude_from_nav:\n link.setExcludeFromNav(True)\n link.reindexObject()", "def post(self):\n\n title = self.request.get(\"title\")\n blogPost = self.request.get(\"blogPost\")\n author = self.request.cookies.get('name')\n\n if title and blogPost:\n\n bp = Blogposts(parent=blog_key(), title=title,\n blogPost=blogPost, author=check_secure_val(author))\n\n bp.put()\n\n self.redirect('/%s' % str(bp.key.integer_id()))\n else:\n error = \"Please submit both a title and a blogpost!\"\n self.render(\"newpost.html\", title=title,\n blogPost=blogPost, error=error)", "def create_page_in_admin(comicsite,title,content=\"testcontent\",permission_lvl=\"\"):\n \n if permission_lvl == \"\":\n permission_lvl = Page.ALL\n \n page_admin = PageAdmin(Page,admin.site)\n page = Page.objects.create(title=title,\n comicsite=comicsite,\n html=content,\n permission_lvl=permission_lvl)\n page_admin.first_save(page)\n return page", "def test_create_post(self):\n with self.client:\n result = self.client.post('/users/spike-test/posts', data={\n \"post-title\": \"autotest title\",\n \"post-content\": \"autotest content for blog post testing\"\n }, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'autotest title', result.data)", "def page_create(request, slug, template_name='groups/pages/page_form.html'):\n group = get_object_or_404(Group, slug=slug)\n form = GroupPageForm(initial={'group': group})\n\n if request.method == 'POST':\n form = GroupPageForm(request.POST)\n if form.is_valid():\n page = form.save(commit=False)\n page.group = group\n page.save()\n return redirect(request, page)\n\n return render(request, template_name, {\n 'group': group,\n 'form': form\n })", "def pagelink(self, on, pagename='', page=None, **kw):\n FormatterBase.pagelink(self, on, pagename, page, **kw)\n if 'generated' in kw:\n del kw['generated']\n if page is None:\n page = Page(self.request, pagename, formatter=self)\n if self.request.user.show_nonexist_qm and on and not page.exists():\n self.pagelink_preclosed = True\n return (page.link_to(self.request, on=1, **kw) +\n self.text(\"?\") +\n page.link_to(self.request, on=0, **kw))\n elif not on and self.pagelink_preclosed:\n self.pagelink_preclosed = False\n return \"\"\n else:\n return page.link_to(self.request, on=on, **kw)" ]
[ "0.6794848", "0.6332408", "0.62787837", "0.62066936", "0.61507404", "0.60811317", "0.6072727", "0.60461605", "0.5974542", "0.5972283", "0.5949421", "0.5910934", "0.58736694", "0.58677185", "0.5863532", "0.5835382", "0.58146477", "0.5803325", "0.5781549", "0.5762776", "0.57156026", "0.57015705", "0.5686513", "0.56748927", "0.5672892", "0.56718874", "0.5671328", "0.5670911", "0.5648441", "0.56279194" ]
0.77158016
0
Creates an ad campaign group for the given account.
def create_adcampaign_group(self, account_id, name, campaign_group_status,
                            objective=None, batch=False):
    path = 'act_%s/adcampaign_groups' % account_id
    args = {
        'name': name,
        'campaign_group_status': campaign_group_status,
    }
    if objective is not None:
        args['objective'] = objective
    return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)", "def create_ad_group(client, customer_id, campaign_resource_name):\n ad_group_service = client.get_service(\"AdGroupService\")\n\n # Creates the ad group.\n # Note that the ad group type must not be set.\n # Since the advertising_channel_sub_type is APP_CAMPAIGN,\n # 1- you cannot override bid settings at the ad group level.\n # 2- you cannot add ad group criteria.\n ad_group_operation = client.get_type(\"AdGroupOperation\")\n ad_group = ad_group_operation.create\n ad_group.name = f\"Earth to Mars cruises {uuid4()}\"\n ad_group.status = client.enums.AdGroupStatusEnum.ENABLED\n ad_group.campaign = campaign_resource_name\n\n ad_group_response = ad_group_service.mutate_ad_groups(\n customer_id=customer_id, operations=[ad_group_operation]\n )\n\n ad_group_resource_name = ad_group_response.results[0].resource_name\n print(f'Ad Group created with resource name: \"{ad_group_resource_name}\".')\n return ad_group_resource_name", "def create_campaign(self, name, group):\n payload = self._build_params(name=name, group_uuid=group)\n return Campaign.deserialize(self._post('campaigns', None, payload))", "def create_adset(self, account_id, campaign_group_id, name,\n campaign_status, daily_budget=None, lifetime_budget=None,\n start_time=None, end_time=None,\n bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False):\n if daily_budget is None and lifetime_budget is None:\n raise AdsAPIError(\"Either a lifetime_budget or a daily_budget \\\n must be set when creating a campaign\")\n if lifetime_budget is not None and end_time is None:\n raise AdsAPIError(\"end_time is required when lifetime_budget \\\n is specified\")\n\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'campaign_group_id': campaign_group_id,\n 'name': name,\n 'campaign_status': campaign_status,\n }\n if daily_budget:\n args['daily_budget'] = daily_budget\n if lifetime_budget:\n args['lifetime_budget'] = lifetime_budget\n if start_time:\n args['start_time'] = start_time\n if end_time:\n args['end_time'] = end_time\n if bid_type:\n args['bid_type'] = bid_type\n if bid_info:\n args['bid_info'] = bid_info\n if promoted_object:\n args['promoted_object'] = json.dumps(promoted_object)\n if targeting:\n args['targeting'] = json.dumps(targeting)\n\n return self.make_request(path, 'POST', args, batch=batch)", "def add_account(self, account):\n return self._client.group_memberships.create({\n 'account': account,\n 'group': self,\n })", "def create_campaign(account, row, name, acc_type):\n country = 
None\n if acc_type == Account.COUNTRY:\n country_name = row['LOCATION']\n country = Country.objects.filter(name__iexact=country_name).first()\n if not country:\n logging.getLogger('peacecorps.sync_accounting').warning(\n \"%s: Country does not exist: %s\",\n row['PROJ_NO'], row['LOCATION'])\n return\n\n account.save()\n summary = clean_description(row['SUMMARY'])\n campaign = Campaign.objects.create(\n name=name, account=account, campaigntype=acc_type,\n description=json.dumps({\"data\": [{\"type\": \"text\",\n \"data\": {\"text\": summary}}]}),\n country=country)\n if acc_type == Account.SECTOR:\n # Make sure we remember the sector this is marked as\n SectorMapping.objects.create(pk=row['SECTOR'], campaign=campaign)", "async def create_contact_group(dbcon: DBConnection, name: str, active: bool) -> str:\n q = \"\"\"insert into contact_groups (name, active) values (%s, %s)\"\"\"\n q_args = (name, active)\n contact_group_id = await dbcon.operation(q, q_args)\n return contact_group_id", "def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")", "def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)", "def create_group(self, tenant_id, group_id):\n maas_client = self._get_maas_client()\n d = maas_client.add_notification_and_plan()\n\n def create_group_in_db((notification, notification_plan)):\n return cass.create_group(\n self._db, tenant_id, group_id, notification, notification_plan)\n d.addCallback(create_group_in_db)\n\n return d", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": 
self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)", "def create_group(self, group):\n if self.dryrun:\n self.logger.info(\"Would create group %s\", group)\n return FakeGroupId()\n result = self.conn.usergroup.create(name=group)\n groupid = result['usrgrpids'][0]\n self.logger.info(\"Create group %s with id %s\", group, groupid)\n return groupid", "def rpc_campaign_new(self, name):\n\t\tsession = db_manager.Session()\n\t\tcampaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)\n\t\tsession.add(campaign)\n\t\tsession.commit()\n\t\treturn campaign.id", "def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "async def create(\n self,\n resource_group_name: str,\n project_name: str,\n group_name: str,\n group: Optional[\"models.Group\"] = None,\n **kwargs\n ) -> \"models.Group\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.Group\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2018-06-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+$'),\n 'projectName': self._serialize.url(\"project_name\", project_name, 'str'),\n 'groupName': self._serialize.url(\"group_name\", group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n if group is not None:\n body_content = self._serialize.body(group, 'Group')\n else:\n 
body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n response_headers = {}\n if response.status_code == 200:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if response.status_code == 201:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, response_headers)\n\n return deserialized", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def test_create_account_campaign(self, create):\n \"\"\"Campaigns should be created\"\"\"\n row = {'PROJ_NAME1': 'Argentina Fund', 'PROJ_NO': '789-CFD',\n 'SUMMARY': 'Some Sum'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, name, acc_type = create.call_args[0]\n self.assertEqual(account.name, 'Argentina Fund')\n self.assertEqual(account.code, '789-CFD')\n self.assertEqual(account.category, Account.COUNTRY)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def create_group(self, group_name, group_type):\n grp_data = {\"name\": group_name, \"type\": group_type}\n return requests.post(self.groups_url, data=json.dumps(grp_data),\n headers=self.headers)", "def create_group(self, group_name, user_ids=[], role_ids=[]):\n payload = {}\n payload['name'] = group_name\n payload['user_ids'] = user_ids\n payload['role_ids'] = role_ids\n return Client._post(self, payload)", "def create_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def create_adcreative(self, account_id, name=None, object_story_id=None, object_story_spec=None, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {}\n if name:\n args['name'] = name\n if object_story_id:\n args['object_story_id'] = object_story_id\n if object_story_spec:\n args['object_story_spec'] = json.dumps(object_story_spec)\n\n return self.make_request(path, 'POST', args, batch=batch)", "def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)", "def create_app_ad(client, customer_id, ad_group_resource_name):\n # Creates the ad group ad.\n ad_group_ad_service = client.get_service(\"AdGroupAdService\")\n ad_group_ad_operation = client.get_type(\"AdGroupAdOperation\")\n ad_group_ad = ad_group_ad_operation.create\n ad_group_ad.status = client.enums.AdGroupAdStatusEnum.ENABLED\n ad_group_ad.ad_group = ad_group_resource_name\n # ad_data is a 'oneof' message so setting app_ad\n # is mutually exclusive with ad data fields such as\n # 
text_ad, gmail_ad, etc.\n ad_group_ad.ad.app_ad.headlines.extend(\n [\n create_ad_text_asset(client, \"A cool puzzle game\"),\n create_ad_text_asset(client, \"Remove connected blocks\"),\n ]\n )\n ad_group_ad.ad.app_ad.descriptions.extend(\n [\n create_ad_text_asset(client, \"3 difficulty levels\"),\n create_ad_text_asset(client, \"4 colorful fun skins\"),\n ]\n )\n # Optional: You can set up to 20 image assets for your campaign.\n # ad_group_ad.ad.app_ad.images.extend(\n # [INSERT_AD_IMAGE_RESOURCE_NAME(s)_HERE])\n\n ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(\n customer_id=customer_id, operations=[ad_group_ad_operation]\n )\n ad_group_ad_resource_name = ad_group_ad_response.results[0].resource_name\n print(\n \"Ad Group App Ad created with resource name:\"\n f'\"{ad_group_ad_resource_name}\".'\n )", "def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)", "def create_group(\n self,\n name,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"create_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_group,\n default_retry=self._method_configs[\"CreateGroup\"].retry,\n default_timeout=self._method_configs[\"CreateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.CreateGroupRequest(\n name=name, group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"create_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def create(\n self, draft: CustomerGroupDraft, *, expand: OptionalListStr = None\n ) -> CustomerGroup:\n params = self._serialize_params({\"expand\": expand}, traits.ExpandableSchema)\n return self._client._post(\n endpoint=\"customer-groups\",\n params=params,\n data_object=draft,\n response_class=CustomerGroup,\n )" ]
[ "0.76186574", "0.74259675", "0.7047342", "0.68341404", "0.6452709", "0.6438533", "0.6134367", "0.6082146", "0.60691714", "0.59890187", "0.5978779", "0.5971765", "0.5952626", "0.5949488", "0.5948537", "0.5902305", "0.5878081", "0.58696204", "0.5868034", "0.5843602", "0.5812845", "0.58055055", "0.5759436", "0.57486856", "0.5725406", "0.57171965", "0.5713802", "0.56992286", "0.56865096", "0.56821245" ]
0.7662178
0
Updates the name, status, or objective of the given ad campaign group.
def update_adcampaign_group(self, campaign_group_id, name=None,
                            campaign_group_status=None, objective=None,
                            batch=False):
    path = '%s' % campaign_group_id
    args = {}
    if name is not None:
        args['name'] = name
    if campaign_group_status is not None:
        args['campaign_group_status'] = campaign_group_status
    if objective is not None:
        args['objective'] = objective
    return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_targetgroup(self, group_id, **kwargs):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).update(**kwargs)\r\n self._db.commit()\r\n return result", "def update(self, fieldupdate='abc', condition='INVALID'):\n sql = self.generate_update_sql(fieldupdate, condition)\n self.sqlhistory.append(sql)\n return self.sql_update(sql)", "def capacitygroup_update(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_update(cmd_ctx, cpc, capacitygroup, options))", "def update(self, consistencygroup, **kwargs):\n if not kwargs:\n return\n\n body = {\"consistencygroup\": kwargs}\n\n return self._update(\"/consistencygroups/%s\" %\n base.getid(consistencygroup), body)", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = \"\"\"update contact_groups set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_group_id)\n await cur.execute(q, q_args)\n\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n await dbcon.transact(_run)", "def condition(self, condition):\n\n self._condition = condition", "def update_group(self, group_id, update_group_details, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")", "def set_group(self, data, group, intg, dq=None):\n # TODO: Include a 2-D DQ array to be combined with the GROUPDQ array\n #\n # Copy the input data to a 2-D plane for this group/intg combination.\n # NOTE: This only works if 
data array is broadcastable so the shape\n # of the data array is checked.\n #\n data = np.asarray(data, dtype=self.data.dtype)\n detector_shape = (self.rows, self.columns)\n if data.shape == detector_shape:\n self.data[intg, group, :, :] = data \n # Invalidate the averaged data\n self._data_averaged = None\n # Update the group data quality array if necessary.\n if dq is not None:\n if self.include_groupdq:\n dq = np.asarray(dq, dtype=self.groupdq.dtype) # Convert to same data type.\n self.groupdq[intg, group, :, :] |= dq\n else:\n strg = \"Incompatible arguments. A groupdq array is \"\n strg += \"provided when include_groupdq=False. \"\n strg += \"The array is ignored.\"\n LOGGER.error(strg)\n else:\n strg = \"Group data array has the wrong shape \"\n strg += \"(%s instead of %s).\" % (str(data.shape),\n str(detector_shape))\n raise TypeError(strg)", "def __init__(__self__, *,\n sku_group_condition: Optional[pulumi.Input['GoogleCloudChannelV1SkuGroupConditionArgs']] = None):\n if sku_group_condition is not None:\n pulumi.set(__self__, \"sku_group_condition\", sku_group_condition)", "def update_adgroup(self, adgroup_id, name=None, adgroup_status=None,\n bid_type=None, bid_info=None, creative_id=None,\n tracking_specs=None, view_tags=None, objective=None,\n targeting=None, conversion_specs=None,\n batch=False):\n path = \"%s\" % adgroup_id\n args = {}\n if name:\n args['name'] = name\n if bid_type:\n args['bid_type'] = bid_type\n if bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if creative_id:\n args['creative'] = json.dumps({'creative_id': creative_id})\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)", "def sku_group_condition(self) -> Optional[pulumi.Input['GoogleCloudChannelV1SkuGroupConditionArgs']]:\n return pulumi.get(self, \"sku_group_condition\")", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)", "def update_dynamic_group(self, dynamic_group_id, update_dynamic_group_details, **kwargs):\n resource_path = \"/dynamicGroups/{dynamicGroupId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_dynamic_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"dynamicGroupId\": dynamic_group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": 
kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_dynamic_group_details,\n response_type=\"DynamicGroup\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_dynamic_group_details,\n response_type=\"DynamicGroup\")", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def update_element(cls, condition=None, async=True, **kwargs):\n\n command = cls.__table__.update().values(**kwargs)\n\n if condition is not None:\n command = command.where(condition)\n\n return DBConnection.execute_command(command=command, async=async)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "def qos_policy_group_modify(self, policy_group, max_throughput=None):\n return self.request( \"qos-policy-group-modify\", {\n 'policy_group': [ policy_group, 'policy-group', [ basestring, 'None' ], False ],\n 'max_throughput': [ max_throughput, 'max-throughput', [ basestring, 'qos-tput' ], False ],\n }, {\n } )", "def update_app_policy_group(self, id, **kwargs):\n resp, body = self.put(self.get_uri(self.resource, id), json.dumps({'application_policy_group':kwargs}))\n body = json.loads(body)\n self.expected_success(http_client.OK, resp.status)\n return rest_client.ResponseBody(resp, body)", "def updateConstraintEvaluation(self, G, PD, id, condition=1):\n if condition == 1:\n if self.gtype == 'U':\n DL = computeDescriptionLength( dlmode=6, C=len(PD.lprevUpdate), gtype=self.gtype, WS=self.Data[id]['Pat'].NCount, W=self.Data[id]['SPat'].NCount, kw=self.Data[id]['SPat'].ECount, excActionType=False, l=self.l, isSimple=self.isSimple, kws=self.Data[id]['SPat'].kws )\n IG = computeInterestingness( self.Data[id]['SPat'].IC_dssg, DL, mode=self.imode )\n self.Data[id]['Pat'].setDL(DL)\n self.Data[id]['Pat'].setI(IG)\n self.Data[id]['SPat'].setDL(DL)\n self.Data[id]['SPat'].setI(IG)\n else:\n DL = computeDescriptionLength( dlmode=6, C=len(PD.lprevUpdate), 
gtype=self.gtype, WIS=self.Data[id]['Pat'].InNCount, WOS=self.Data[id]['Pat'].OutNCount, WI=self.Data[id]['SPat'].InNL, WO=self.Data[id]['SPat'].OutNL, kw=self.Data[id]['SPat'].ECount, excActionType=False, l=self.l, isSimple=self.isSimple, kws=self.Data[id]['SPat'].kws )\n IG = computeInterestingness( self.Data[id]['SPat'].IC_dssg, DL, mode=self.imode )\n self.Data[id]['Pat'].setDL(DL)\n self.Data[id]['Pat'].setI(IG)\n self.Data[id]['SPat'].setDL(DL)\n self.Data[id]['SPat'].setI(IG)\n elif condition == 2:\n self.evaluateConstraint(G, PD, id)\n return", "def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)", "def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group", "def test_update_group(self):\n pass", "def update_tag_group_acl(session, tag_id=None, group_id=None,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False, allow_read=False,\n date_modified=datetime.now(), username='system_user'\n ):\n session = validate_session(session)\n group = None\n\n if group_id and tag_id:\n group = session.query(TagGroupAccess).\\\n filter(TagGroupAccess.group_id == group_id).\\\n filter(TagGroupAccess.tag_id == tag_id).first()\n if group:\n try:\n group.allow_install = allow_install\n group.allow_uninstall = allow_uninstall\n group.allow_reboot = allow_reboot\n group.allow_schedule = allow_schedule\n group.allow_wol = allow_wol\n group.allow_snapshot_creation = allow_snapshot_creation\n group.allow_snapshot_removal = allow_snapshot_removal\n group.allow_snapshot_revert = allow_snapshot_revert\n group.allow_tag_creation = allow_tag_creation\n group.allow_tag_removal = allow_tag_removal\n group.allow_read = allow_read\n group.date_modified = date_modified\n session.commit()\n return({\n 'pass': True,\n 'message': 'ACL for Group %s was modified for Tag %s' % \\\n (group_id, tag_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to modify ACL for Group %s on Tag %s' % \\\n (group_id, tag_id)\n })\n else:\n return({\n 'pass': False,\n 'message': 'Invalid group_id %s and or tag_id' % \\\n (group_id, tag_id)\n })", "def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "def set_weather_condition(condition):\n global weather_condition\n\n weather_condition = condition\n\n # Send the new weather condition to all stations.\n if send_status_request([(STAT_CONDITION, weather_condition)]):\n print_log(\"Condition changed to {}\".format(weather_condition))", "def __update(self, condition, values):\n col = _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"update\",\n operand1=self,\n operand2=values\n )\n if condition.thisptr[\"type_\"] != \"VirtualBooleanColumn\":\n raise Exception(\"Condition for an update must be a Boolen column.\")\n col.thisptr[\"condition_\"] = condition.thisptr\n return col", "def update_record(self, collection_name, update_record, update_condition):\n try:\n self.logger.info('in update_record()')\n collection = 
self.get_db()[collection_name]\n collection.update_one(update_condition, {\"$set\": update_record})\n self.logger.info('out update_record()')\n except Exception as e:\n self.logger.error(f'Error occurred while updating record {e}')", "def customer_group_customer_put(user_id, group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n user_group_user_details = {}\n user_group_user_details[\"user_id\"] = user_id\n user_group_user_details[\"group_id\"] = group_id\n\n query = \"\"\"\n Update `users_groups`\n SET `group_id` = \\\"%(group_id)s\\\"\n WHERE `user_id` = \\\"%(user_id)s\\\" \n \"\"\" %(user_group_user_details)\n cursor = db.cursor()\n result = {\"success\" : 0, \"message\" : \"Customer's Group is not updated\"}\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer can not be updated in the Group. Error \\\"\\'%s\\'\\\" \\\n Query \\\"\\'%s\\'\\\" \" % (e, query) }\n finally:\n cursor.close()\n db.close()\n return result" ]
[ "0.6300758", "0.56560814", "0.5560324", "0.5507527", "0.5466104", "0.5375947", "0.53550124", "0.53035486", "0.52953994", "0.5244216", "0.5243708", "0.52355415", "0.51365495", "0.51355237", "0.5129476", "0.5116882", "0.51126", "0.5084277", "0.50788194", "0.503468", "0.49954182", "0.49746102", "0.4973487", "0.49445528", "0.4931052", "0.4930396", "0.49289343", "0.49181473", "0.49120903", "0.48505372" ]
0.63916516
0
Creates an adset (formerly called ad campaign) for the given account and the campaign (formerly called "campaign group").
def create_adset(self, account_id, campaign_group_id, name,
                 campaign_status, daily_budget=None, lifetime_budget=None,
                 start_time=None, end_time=None,
                 bid_type=None, bid_info=None, promoted_object=None,
                 targeting=None, batch=False):
    if daily_budget is None and lifetime_budget is None:
        raise AdsAPIError("Either a lifetime_budget or a daily_budget \
            must be set when creating a campaign")
    if lifetime_budget is not None and end_time is None:
        raise AdsAPIError("end_time is required when lifetime_budget \
            is specified")

    path = 'act_%s/adcampaigns' % account_id
    args = {
        'campaign_group_id': campaign_group_id,
        'name': name,
        'campaign_status': campaign_status,
    }
    if daily_budget:
        args['daily_budget'] = daily_budget
    if lifetime_budget:
        args['lifetime_budget'] = lifetime_budget
    if start_time:
        args['start_time'] = start_time
    if end_time:
        args['end_time'] = end_time
    if bid_type:
        args['bid_type'] = bid_type
    if bid_info:
        args['bid_info'] = bid_info
    if promoted_object:
        args['promoted_object'] = json.dumps(promoted_object)
    if targeting:
        args['targeting'] = json.dumps(targeting)

    return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_campaign(account, row, name, acc_type):\n country = None\n if acc_type == Account.COUNTRY:\n country_name = row['LOCATION']\n country = Country.objects.filter(name__iexact=country_name).first()\n if not country:\n logging.getLogger('peacecorps.sync_accounting').warning(\n \"%s: Country does not exist: %s\",\n row['PROJ_NO'], row['LOCATION'])\n return\n\n account.save()\n summary = clean_description(row['SUMMARY'])\n campaign = Campaign.objects.create(\n name=name, account=account, campaigntype=acc_type,\n description=json.dumps({\"data\": [{\"type\": \"text\",\n \"data\": {\"text\": summary}}]}),\n country=country)\n if acc_type == Account.SECTOR:\n # Make sure we remember the sector this is marked as\n SectorMapping.objects.create(pk=row['SECTOR'], campaign=campaign)", "def create_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def create_campaign(client, customer_id, budget_resource_name):\n campaign_service = client.get_service(\"CampaignService\")\n campaign_operation = client.get_type(\"CampaignOperation\")\n campaign = campaign_operation.create\n campaign.name = f\"Interplanetary Cruise App #{uuid4()}\"\n campaign.campaign_budget = budget_resource_name\n # Recommendation: Set the campaign to PAUSED when creating it to\n # prevent the ads from immediately serving. Set to ENABLED once you've\n # added targeting and the ads are ready to serve.\n campaign.status = client.enums.CampaignStatusEnum.PAUSED\n # All App campaigns have an advertising_channel_type of\n # MULTI_CHANNEL to reflect the fact that ads from these campaigns are\n # eligible to appear on multiple channels.\n campaign.advertising_channel_type = (\n client.enums.AdvertisingChannelTypeEnum.MULTI_CHANNEL\n )\n campaign.advertising_channel_sub_type = (\n client.enums.AdvertisingChannelSubTypeEnum.APP_CAMPAIGN\n )\n # Sets the target CPA to $1 / app install.\n #\n # campaign_bidding_strategy is a 'oneof' message so setting target_cpa\n # is mutually exclusive with other bidding strategies such as\n # manual_cpc, commission, maximize_conversions, etc.\n # See https://developers.google.com/google-ads/api/reference/rpc\n # under current version / resources / Campaign\n campaign.target_cpa.target_cpa_micros = 1000000\n # Sets the App Campaign Settings.\n campaign.app_campaign_setting.app_id = \"com.google.android.apps.adwords\"\n campaign.app_campaign_setting.app_store = (\n client.enums.AppCampaignAppStoreEnum.GOOGLE_APP_STORE\n )\n # Optimize this campaign for getting new users for your app.\n campaign.app_campaign_setting.bidding_strategy_goal_type = (\n client.enums.AppCampaignBiddingStrategyGoalTypeEnum.OPTIMIZE_INSTALLS_TARGET_INSTALL_COST\n )\n # Optional fields\n campaign.start_date = (datetime.now() + timedelta(1)).strftime(\"%Y%m%d\")\n campaign.end_date = (datetime.now() + timedelta(365)).strftime(\"%Y%m%d\")\n # Optional: If you select the\n # OPTIMIZE_IN_APP_CONVERSIONS_TARGET_INSTALL_COST goal type, then also\n # specify your in-app conversion types so the Google Ads API can focus\n # your campaign on people who are most likely to complete the\n # corresponding in-app actions.\n #\n # campaign.selective_optimization.conversion_actions.extend(\n # [\"INSERT_CONVERSION_ACTION_RESOURCE_NAME_HERE\"]\n # )\n\n # Submits the campaign operation and print the results.\n campaign_response = campaign_service.mutate_campaigns(\n customer_id=customer_id, operations=[campaign_operation]\n )\n resource_name = 
campaign_response.results[0].resource_name\n print(f'Created App campaign with resource name: \"{resource_name}\".')\n return resource_name", "def create_campaign(self, name, group):\n payload = self._build_params(name=name, group_uuid=group)\n return Campaign.deserialize(self._post('campaigns', None, payload))", "def get_ad_set_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get ad set data for account {}'.format(ad_account['account_id']))\n ad_sets = ad_account.get_ad_sets(\n fields=['id',\n 'name',\n 'campaign_id',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for ad_set in ad_sets:\n result[ad_set['id']] = {'name': ad_set['name'],\n 'campaign_id': ad_set['campaign_id'],\n 'attributes': parse_labels(\n ad_set.get('adlabels', []))}\n return result", "def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)", "def test_create_account_campaign(self, create):\n \"\"\"Campaigns should be created\"\"\"\n row = {'PROJ_NAME1': 'Argentina Fund', 'PROJ_NO': '789-CFD',\n 'SUMMARY': 'Some Sum'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, name, acc_type = create.call_args[0]\n self.assertEqual(account.name, 'Argentina Fund')\n self.assertEqual(account.code, '789-CFD')\n self.assertEqual(account.category, Account.COUNTRY)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def create_ad_group(client, customer_id, campaign_resource_name):\n ad_group_service = client.get_service(\"AdGroupService\")\n\n # Creates the ad group.\n # Note that the ad group type must not be set.\n # Since the advertising_channel_sub_type is APP_CAMPAIGN,\n # 1- you cannot override bid settings at the ad group level.\n # 2- you cannot add ad group criteria.\n ad_group_operation = client.get_type(\"AdGroupOperation\")\n ad_group = ad_group_operation.create\n ad_group.name = f\"Earth to Mars cruises {uuid4()}\"\n ad_group.status = client.enums.AdGroupStatusEnum.ENABLED\n ad_group.campaign = campaign_resource_name\n\n ad_group_response = ad_group_service.mutate_ad_groups(\n customer_id=customer_id, operations=[ad_group_operation]\n )\n\n ad_group_resource_name = ad_group_response.results[0].resource_name\n print(f'Ad Group created with resource name: \"{ad_group_resource_name}\".')\n return ad_group_resource_name", "def test_create_campaign(self):\n acc1 = Account.objects.create(name='acc1', code='111-111')\n row = {'PROJ_NAME1': 'China Fund', 'PROJ_NO': 'CFD-111',\n 'LOCATION': 'CHINA', 'SUMMARY': 'Ssssss'}\n 
sync.create_campaign(acc1, row, 'China Fund', Account.COUNTRY)\n campaign = Campaign.objects.filter(name='China Fund').first()\n self.assertEqual(self.china.pk, campaign.country.pk)\n\n acc2 = Account.objects.create(name='acc2', code='222-222')\n row = {'PROJ_NAME1': 'Smith Memorial Fund', 'PROJ_NO': 'SPF-222',\n 'SUMMARY': 'Ssssss'}\n sync.create_campaign(acc2, row, 'Smith Memorial Fund',\n Account.MEMORIAL)\n campaign = Campaign.objects.filter(name='Smith Memorial Fund').first()\n self.assertEqual(None, campaign.country)\n self.assertEqual(\n {\"data\": [{\"type\": \"text\", \"data\": {\"text\": \"Ssssss\"}}]},\n json.loads(campaign.description))\n acc1.delete()\n acc2.delete()", "def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result", "def create_adcampaign_group(self, account_id, name, campaign_group_status,\n objective=None, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'name': name,\n 'campaign_group_status': campaign_group_status,\n }\n if objective is not None:\n args['objective'] = objective\n return self.make_request(path, 'POST', args, batch=batch)", "def add_campaign(self, campaign):\n self._campaigns += [campaign]", "def get_adcampaign_detail(self, account_id, campaign_id, date_preset):\n campaign_fields = [\n 'name', 'campaign_status', 'daily_budget', 'lifetime_budget',\n 'start_time', 'end_time']\n campaign_data_columns = [\n 'campaign_name', 'reach', 'frequency', 'clicks',\n 'actions', 'total_actions', 'ctr', 'spend']\n adgroup_data_columns = [\n 'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',\n 'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',\n 'spend']\n demographic_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']\n placement_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']\n campaign_filters = [{\n 'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaign(campaign_id, campaign_fields, batch=True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', campaign_data_columns,\n campaign_filters, ['action_type'], True),\n self.get_adreport_stats(\n account_id, date_preset, 1, campaign_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', adgroup_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', demographic_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', placement_data_columns,\n campaign_filters, None, True),\n ]\n return self.make_batch_request(batch)", "def main(client, customer_id):\n # Creates the budget for the campaign.\n budget_resource_name = create_budget(client, customer_id)\n\n # Creates the campaign.\n campaign_resource_name = create_campaign(\n client, customer_id, budget_resource_name\n )\n\n # 
Sets campaign targeting.\n set_campaign_targeting_criteria(client, customer_id, campaign_resource_name)\n\n # Creates an Ad Group.\n ad_group_resource_name = create_ad_group(\n client, customer_id, campaign_resource_name\n )\n\n # Creates an App Ad.\n create_app_ad(client, customer_id, ad_group_resource_name)", "def get_spend_by_campaign_custom(self, budget_id, aw_account_id):\n try:\n budget = Budget.objects.get(id=budget_id)\n google_ads_account = DependentAccount.objects.get(id=aw_account_id)\n except (Budget.DoesNotExist, DependentAccount.DoesNotExist):\n return\n\n client = get_client()\n client.client_customer_id = google_ads_account.dependent_account_id\n\n aw_campaigns = budget.aw_campaigns.filter(account=google_ads_account)\n aw_campaign_ids = list(set([aw_campaign.campaign_id for aw_campaign in aw_campaigns]))\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n start_date = budget.start_date\n end_date = budget.end_date\n\n campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n\n campaign_report = Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n print(campaign_row)\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n yest_campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n yest_campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': yest_campaign_report_selector\n }\n\n start_date = budget.start_date\n yest_end_date = datetime.datetime.now() - datetime.timedelta(1)\n\n yest_campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': yest_end_date.strftime('%Y%m%d')\n }\n\n campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(yest_campaign_report_query))\n for campaign_row in campaign_report:\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = 
CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend_until_yesterday = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n # try:\n # campaign_report = \\\n # Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(yest_campaign_report_query))[0]\n # except IndexError:\n # return\n #\n # campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n # start_date=budget.start_date,\n # end_date=budget.end_date)\n #\n # campaign_spend_object.spend_until_yesterday = int(campaign_report['cost']) / 1000000\n # campaign_spend_object.save()\n\n return 'get_spend_by_campaign_custom'", "def test_create_campaign(self):\n campaign = self.campaign\n\n self.assertTrue(isinstance(campaign, Campaign))\n self.assertEqual(campaign.name, \"Test Campaign\")", "def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)", "def rpc_campaign_new(self, name):\n\t\tsession = db_manager.Session()\n\t\tcampaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)\n\t\tsession.add(campaign)\n\t\tsession.commit()\n\t\treturn campaign.id", "def write_campaign(campaign_data):\n\n campaign = Campaign(**campaign_data)\n campaign.save()\n authorization.make_campaign_public(campaign)\n\n return campaign.id", "def create_advert():\r\n advertiser, category, zone = create_objects()\r\n ad = AdBase.objects.create(\r\n title='Ad Title',\r\n url='www.example.com',\r\n advertiser=advertiser,\r\n category=category,\r\n zone=zone,\r\n )\r\n return ad", "def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)", "def create_adcreative(self, account_id, name=None, object_story_id=None, object_story_spec=None, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {}\n if name:\n args['name'] = name\n if object_story_id:\n args['object_story_id'] = object_story_id\n if object_story_spec:\n args['object_story_spec'] = json.dumps(object_story_spec)\n\n return self.make_request(path, 'POST', args, batch=batch)", "def _extend_record(self, campaign, fields, pull_ads):\n campaign_out = campaign.api_get(fields=fields).export_all_data()\n if pull_ads:\n campaign_out[\"ads\"] = {\"data\": []}\n ids = [ad[\"id\"] for ad in campaign.get_ads()]\n for ad_id in ids:\n campaign_out[\"ads\"][\"data\"].append({\"id\": ad_id})\n return campaign_out", "def test_create_new_campaign_by_admin_passes(self):\n response = self.client.post(\n self.endpoint_url,\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [self.test_org.id],\n \"url\": None,\n },\n headers={\"Authorization\": self.session_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response_body, {\"campaignId\": 2})", "def _set_campaign(self, campaign):\n if isinstance(campaign, str):\n campaign = TrackedCampaign.objects.create(name=campaign)\n\n campaign.save()\n\n self.campaign = campaign", "def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, 
batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)", "def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,\n batch=False):\n path = '%s/adcampaigns' % campaign_group_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def create(self, client_id, subject, name, from_name, from_email, reply_to, html_url,\n text_url, list_ids, segment_ids):\n body = {\n \"Subject\": subject,\n \"Name\": name,\n \"FromName\": from_name,\n \"FromEmail\": from_email,\n \"ReplyTo\": reply_to,\n \"HtmlUrl\": html_url,\n \"TextUrl\": text_url,\n \"ListIDs\": list_ids,\n \"SegmentIDs\": segment_ids}\n response = self._post(\"/campaigns/%s.json\" %\n client_id, json.dumps(body))\n self.campaign_id = json_to_py(response)\n return self.campaign_id", "def testCopyCampaigns(self):\n if self.__class__.campaign1 is None:\n self.testSaveCampaign()\n requests = [{\n 'campaignId': self.__class__.campaign1['id']\n }]\n self.assert_(isinstance(self.__class__.service.CopyCampaigns(requests),\n tuple))", "def test_get_existent_campaigns_returns_campaigns_list(self):\n test_campaign = return_canned_campaign()\n test_campaign.create()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response_body, {\"campaigns\": [{\"id\": 1, \"name\": \"Test Campaign\"}]}\n )" ]
[ "0.64609", "0.6443122", "0.6287087", "0.62106", "0.59949636", "0.597882", "0.59783816", "0.5938531", "0.5885494", "0.5864603", "0.5810597", "0.5745423", "0.57439005", "0.57156086", "0.57071507", "0.56509084", "0.56193465", "0.5615964", "0.5598836", "0.5580792", "0.55519617", "0.5542986", "0.5522703", "0.5458526", "0.5449792", "0.54300845", "0.54266083", "0.53825694", "0.538179", "0.5334163" ]
0.73807245
0
Updates the given adset.
def update_adset(self, campaign_id, name=None, campaign_status=None, daily_budget=None, lifetime_budget=None, start_time=None, end_time=None, bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False): path = '%s' % campaign_id args = {} if name: args['name'] = name if campaign_status: args['campaign_status'] = campaign_status if daily_budget: args['daily_budget'] = daily_budget if lifetime_budget: args['lifetime_budget'] = lifetime_budget if start_time: args['start_time'] = start_time if end_time is not None: args['end_time'] = end_time if bid_type: args['bid_type'] = bid_type if bid_info: args['bid_info'] = bid_info if promoted_object: args['promoted_object'] = json.dumps(promoted_object) if targeting: args['targeting'] = json.dumps(targeting) return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, sentenceset):\n self._sentences.update(sentenceset.get_sentences())", "def update(self, set, where):\n # Build set\n set_sql = ''\n set_values = []\n for key, value in set.items():\n if key == '*': # Literal expression\n set_sql = value\n else:\n set_sql += ', {} = ?'.format(key)\n set_values.append(value)\n if len(set_values) > 0:\n set_sql = set_sql[2:]\n # Build where\n where_sql, where_values, limit_sql = self._build_where(where)\n # Update\n self.execute(\"update {} set {} {} {}\".format(self.name, set_sql, where_sql, limit_sql),\n set_values + where_values)", "def update(self, contextset):\n self._contexts.update(contextset.get_contexts())", "def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)", "def update(self, dict=None, **kwargs):\n data = {}\n if dict:\n data.update(dict, **kwargs)\n else:\n data.update(**kwargs)\n self.multi_set(data)", "def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):\n raise NotImplementedError", "def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):\n raise NotImplementedError", "def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):\n raise NotImplementedError", "def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):\n raise NotImplementedError", "def update(cls, *lst, **dct):\n cls.runtime.set_set(lst, dct)\n return UpdateQuery(cls.runtime)", "def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):\n raise NotImplementedError()", "def update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n # convert to RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # merge lists\n self._ranges = RangeSet._merge_ranges(self._ranges + rng_set._ranges)", "def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):\n # Update modified\n modified_dt = iso8601.parse_date(changeset[\"modified\"]).replace(tzinfo=None)\n self.modified = modified_dt\n\n # Update text\n self.text = changeset[\"text\"]\n\n for planet_assoc in changeset[\"planet_assocs\"]:\n if not PlanetAssociation.validate_changeset(planet_assoc):\n app.logger.warning(\"Invalid changeset for planet associated with {}\\n{}\".format(self, changeset))\n else:\n author = Persona.request_persona(planet_assoc[\"author_id\"])\n pid = planet_assoc[\"planet\"][\"id\"]\n\n assoc = PlanetAssociation.filter_by(star_id=self.id).filter_by(planet_id=pid).first()\n if assoc is None:\n planet = Planet.query.get(pid)\n if planet is None:\n planet = Planet.create_from_changeset(planet_assoc[\"planet\"])\n else:\n planet.update_from_changeset(planet_assoc[\"planet\"])\n\n assoc = PlanetAssociation(author=author, planet=planet)\n self.planet_assocs.append(assoc)\n app.logger.info(\"Added {} to {}\".format(planet, self))\n\n app.logger.info(\"Updated {} from changeset\".format(self))", "def _update_from_feature_set(self, feature_set):\n\n self.name = feature_set.name\n self.project = feature_set.project\n self.source = feature_set.source\n self.max_age = feature_set.max_age\n self.features = feature_set.features\n self.entities = feature_set.entities\n self.source = feature_set.source\n self.status = feature_set.status\n self.created_timestamp = feature_set.created_timestamp", "def bulk_add_to_set(self, set_ids, element_ids):\n if len(set_ids) != len(element_ids):\n raise ValueError\n setpairs = zip(set_ids, element_ids)\n setlist = self._aggregate_set_id_element_pairs(setpairs)\n with 
self.table.batch_write() as batch:\n for pair in setlist:\n set_id, element_ids = pair\n item = self._get_or_create_item('set', set_id)\n if 'value' not in item.keys() or not isinstance(\n item['value'], set):\n item['value'] = set()\n item['value'].update(element_ids)\n batch.put_item(item)", "def update_db(table, set, wherecond):\n query = \"UPDATE \" + table + \" SET \" + set + \" WHERE \" + wherecond\n print(query)\n cursor.execute(query)\n db.commit()\n print(cursor.rowcount, \"record updated in db: \" + table)", "def intersection_update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n self._ranges = self.intersection(rng_set)._ranges", "def update_config_set(self, req, id, body):\n config_set_data = body['config_set']\n try:\n updated_config_set = self.db_api.config_set_update(\n req.context, id, config_set_data)\n\n msg = _LI(\"Updating metadata for config_set %(id)s\") % {'id': id}\n LOG.info(msg)\n if 'config_set' not in updated_config_set:\n config_set_data = dict(config_set=updated_config_set)\n return config_set_data\n except exception.Invalid as e:\n msg = (_(\"Failed to update config_set metadata. \"\n \"Got error: %s\") % utils.exception_to_str(e))\n LOG.error(msg)\n return exc.HTTPBadRequest(msg)\n except exception.NotFound:\n msg = _LI(\"config_set %(id)s not found\") % {'id': id}\n LOG.info(msg)\n raise exc.HTTPNotFound(body='config_set not found',\n request=req,\n content_type='text/plain')\n except exception.ForbiddenPublicImage:\n msg = _LI(\"Update denied for config_set %(id)s\") % {'id': id}\n LOG.info(msg)\n raise exc.HTTPForbidden()\n except exception.Forbidden:\n # If it's private and doesn't belong to them, don't let on\n # that it exists\n msg = _LI(\"Access denied to config_set %(id)s but returning\"\n \" 'not found'\") % {'id': id}\n LOG.info(msg)\n raise exc.HTTPNotFound(body='config_set not found',\n request=req,\n content_type='text/plain')\n except exception.Conflict as e:\n LOG.info(utils.exception_to_str(e))\n raise exc.HTTPConflict(body='config_set operation conflicts',\n request=req,\n content_type='text/plain')\n except Exception:\n LOG.exception(_LE(\"Unable to update config_set %s\") % id)\n raise", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def update(self, es, **kwargs):\n pass", "def update_one_set_inventory(set_num):\n set_inv = reapi.pull_set_inventory(set_num)", "def update_sets(check_update=1):\n\n set_list = reapi.pull_set_catalog()\n secondary_sets.add_sets_to_database(set_list, update=check_update)", "def updateMappingSet(self,mappingSetId:str=None,mappingSet:dict=None)->dict:\n if mappingSetId is None:\n raise ValueError(\"Require a mappingSet ID\")\n if mappingSet is None:\n raise ValueError(\"Require a dictionary as mappingSet\")\n path = f\"/mappingSets/{mappingSetId}\"\n res = self.connector.putData(self.endpoint+path,data=mappingSet)\n return res", "def update(self, data, on='identity'):\n ds_left = (self._meta, self._data)\n update_meta = self._meta.copy()\n update_items = ['columns@{}'.format(name) for name\n in data.columns.tolist()]\n update_meta['sets']['update'] = {'items': update_items}\n ds_right = (update_meta, data)\n merged_meta, merged_data = _hmerge(\n ds_left, ds_right, on=on, from_set='update', verbose=False)\n self._meta, self._data = merged_meta, merged_data\n del self._meta['sets']['update']\n return None", "def update(self, **kwargs):\n return self._object.update(meta=kwargs)", "def test_set_add_updates_new_record(self):\n partition = uuid4()\n cluster = 1\n 
TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(text_set__add=set(('bar',)))\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_set, set((\"bar\",)))", "def UpdateSet(self, dataset):\r\n for data in dataset:\r\n self.UpdateOddsRatioVsNoNorm(data)", "def MutateAdGroupAds(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _UpdateDataSetValues( self ):\n pass", "def updateResonanceSetAnnotation(resonanceSet):\n \n for resonance in resonanceSet.resonances:\n if not resonance.isDeleted: \n getBoundResonances(resonance, recalculate=True, contribs=None)\n updateResonanceAnnotation(resonance)" ]
[ "0.63086176", "0.60864925", "0.5909108", "0.5838955", "0.56002444", "0.55611414", "0.55611414", "0.55611414", "0.55611414", "0.5538671", "0.55356807", "0.5447409", "0.54078513", "0.53757405", "0.5352285", "0.53373814", "0.5321228", "0.5267401", "0.5262136", "0.521705", "0.52132714", "0.5186813", "0.5178902", "0.5153018", "0.5151054", "0.5147324", "0.51086354", "0.5099134", "0.507304", "0.5061339" ]
0.65183246
0
Delete the given ad campaign.
def delete_adcampaign(self, campaign_id, batch=False): path = '%s' % campaign_id return self.make_request(path, 'DELETE', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rpc_campaign_delete(self, campaign_id):\n\t\tsession = db_manager.Session()\n\t\tsession.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id))\n\t\tsession.commit()\n\t\tsession.close()\n\t\treturn", "def delete_campaign(self, campaignId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), campaignId), params=kwargs)", "def delete(self, campaign_id):\n campaign = Campaign.query.get(campaign_id)\n if campaign is None:\n return {\"message\": \"Campaign could not be found.\"}, HTTPStatus.NOT_FOUND\n db.session.delete(campaign)\n db.sessioin.commit()\n return {}, HTTPStatus.NO_CONTENT", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "def testDeleteCampaign(self):\n if self.__class__.campaign2 is None:\n self.testSaveCampaign()\n self.assertEqual(self.__class__.service.DeleteCampaign(\n self.__class__.campaign2['id']), None)", "def test_delete_campaign_by_admin_passes(self):\n response = self.client.delete(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n headers={\"Authorization\": self.admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body[\"Success\"], \"Campaign deleted\")", "def deleteCampaignConfig(docName, url=reqmgr_url):\n outcome = True\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"application/json\",\n \"Content-Length\": 0} # this is required for DELETE calls\n conn = make_x509_conn(url)\n url = '/reqmgr2/data/campaignconfig/%s' % docName\n conn.request(\"DELETE\", url, headers=headers)\n resp = conn.getresponse()\n if resp.status >= 400:\n print(\"FAILED to delete campaign: %s. 
Response status: %s, response reason: %s\"\n % (docName, resp.status, resp.reason))\n outcome = False\n conn.close()\n return outcome", "def cmd_conversation_delete(client, args):\n delete_conversation = client.delete_conversation(args.conversation_id)\n generate_output({'delete_conversation': delete_conversation})", "def test_sms_campaign_view_delete(self):\n # delete campaign through campaign_change\n request = self.factory.post('/sms_campaign/del/1/', follow=True)\n request.user = self.user\n request.session = {}\n response = sms_campaign_del(request, 1)\n self.assertEqual(response['Location'], '/sms_campaign/')\n self.assertEqual(response.status_code, 302)\n\n request = self.factory.post('/sms_campaign/del/', {'select': '1'})\n request.user = self.user\n request.session = {}\n response = sms_campaign_del(request, 0)\n self.assertEqual(response['Location'], '/sms_campaign/')\n self.assertEqual(response.status_code, 302)", "def do_charge_purchase_delete(cs, args):\n cs.charge_purchases.delete(args.charge_purchase_id)", "def del_apt(cal, c_id, apt_id):\n\n cal.events().delete( # pylint: disable=maybe-no-member\n calendarId=c_id,\n eventId=apt_id\n ).execute()", "def delete(self,\n dns_forwarder_zone_id,\n ):\n return self._invoke('delete',\n {\n 'dns_forwarder_zone_id': dns_forwarder_zone_id,\n })", "def delete_podcast(_id):\r\n Podcast.query.filter_by(id=_id).delete()\r\n # filter podcast by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)", "def delete(self, agent_id):\n self._client.delete('scanners/1/agents/%(agent_id)s', path_params={'agent_id': agent_id})\n return True", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def test_delete_non_existent_campaign_fails(self):\n response = self.client.delete(\n f\"{self.endpoint_url}99/\", headers={\"Authorization\": self.admin_token}\n )\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)", "def delete(self, customerguid, jobguid=\"\", executionparams=None):", "def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)", "async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"delete from contacts where id=%s\"\"\"\n await dbcon.operation(q, (contact_id,))", "def test_delete_campaign_by_unauthenticated_user_fails(self):\n response = self.client.delete(f\"{self.endpoint_url}{self.test_campaign.id}/\")\n self.assertEqual(response.status_code, 401)", "def delete(id_=None):\n\n logger.debug('Catch DELETE request by URL 
/api/departments/%i.', id_)\n ds.delete(id_)\n return '', 204", "def delete_case(\n case_id: str,\n db: Session = Depends(get_db),\n) -> Any:\n return crud.case.remove(db, id=case_id)", "def delete(id):\n elementFromDB = Advertisements().get_one_element(id)\n if elementFromDB is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n try:\n elements = Advertisements().delete_element(id)\n result = jsonify(elements)\n result.statut_code = 200\n return result\n except Exception as identifier:\n return abort(500, identifier)", "def cmd_comment_delete(client, args):\n delete_comment = client.delete_comment(args.comment_id)\n generate_output({'delete_comment': delete_comment})", "def delete(self,\n ipfix_dfw_collector_profile_id,\n ):\n return self._invoke('delete',\n {\n 'ipfix_dfw_collector_profile_id': ipfix_dfw_collector_profile_id,\n })", "def rpc_campaign_alerts_unsubscribe(self, campaign_id):\n\t\tusername = self.basic_auth_user\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(db_models.AlertSubscription)\n\t\tquery = query.filter_by(campaign_id=campaign_id, user_id=username)\n\t\tsubscription = query.first()\n\t\tif subscription:\n\t\t\tsession.delete(subscription)\n\t\t\tsession.commit()\n\t\tsession.close()\n\t\treturn", "def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def delete_agent(self, agent):\r\n return self.delete(self.agent_path % (agent))", "def del_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n comment.delete()\n actor = comment.actor\n url = '../../' + str(comment.actor.pk)\n return redirect(url)" ]
[ "0.81015474", "0.7873376", "0.7808877", "0.7040319", "0.6679311", "0.664675", "0.6078404", "0.59033364", "0.5760971", "0.57252777", "0.5590775", "0.55322367", "0.55265343", "0.55118585", "0.5483344", "0.54706794", "0.54280573", "0.5372056", "0.5353199", "0.53330624", "0.5311885", "0.526796", "0.52617", "0.52512276", "0.5231458", "0.5221362", "0.5216121", "0.51983124", "0.5184832", "0.5179487" ]
0.8450083
0
Creates an ad creative in the given ad account.
def create_adcreative(self, account_id, name=None, object_story_id=None, object_story_spec=None, batch=False): path = 'act_%s/adcreatives' % account_id args = {} if name: args['name'] = name if object_story_id: args['object_story_id'] = object_story_id if object_story_spec: args['object_story_spec'] = json.dumps(object_story_spec) return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_advert():\r\n advertiser, category, zone = create_objects()\r\n ad = AdBase.objects.create(\r\n title='Ad Title',\r\n url='www.example.com',\r\n advertiser=advertiser,\r\n category=category,\r\n zone=zone,\r\n )\r\n return ad", "def create_adset(self, account_id, campaign_group_id, name,\n campaign_status, daily_budget=None, lifetime_budget=None,\n start_time=None, end_time=None,\n bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False):\n if daily_budget is None and lifetime_budget is None:\n raise AdsAPIError(\"Either a lifetime_budget or a daily_budget \\\n must be set when creating a campaign\")\n if lifetime_budget is not None and end_time is None:\n raise AdsAPIError(\"end_time is required when lifetime_budget \\\n is specified\")\n\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'campaign_group_id': campaign_group_id,\n 'name': name,\n 'campaign_status': campaign_status,\n }\n if daily_budget:\n args['daily_budget'] = daily_budget\n if lifetime_budget:\n args['lifetime_budget'] = lifetime_budget\n if start_time:\n args['start_time'] = start_time\n if end_time:\n args['end_time'] = end_time\n if bid_type:\n args['bid_type'] = bid_type\n if bid_info:\n args['bid_info'] = bid_info\n if promoted_object:\n args['promoted_object'] = json.dumps(promoted_object)\n if targeting:\n args['targeting'] = json.dumps(targeting)\n\n return self.make_request(path, 'POST', args, batch=batch)", "def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)", "def create_adimage(self, account_id, image_data, batch=False):\n path = 'act_%s/adimages' % account_id\n files = {image_data.name: image_data}\n return self.make_request(path, 'POST', None, files, batch=batch)", "def create_campaign(client, customer_id, budget_resource_name):\n campaign_service = client.get_service(\"CampaignService\")\n campaign_operation = client.get_type(\"CampaignOperation\")\n campaign = campaign_operation.create\n campaign.name = f\"Interplanetary Cruise App #{uuid4()}\"\n campaign.campaign_budget = budget_resource_name\n # Recommendation: Set the campaign to PAUSED when creating it to\n # prevent the ads from immediately serving. 
Set to ENABLED once you've\n # added targeting and the ads are ready to serve.\n campaign.status = client.enums.CampaignStatusEnum.PAUSED\n # All App campaigns have an advertising_channel_type of\n # MULTI_CHANNEL to reflect the fact that ads from these campaigns are\n # eligible to appear on multiple channels.\n campaign.advertising_channel_type = (\n client.enums.AdvertisingChannelTypeEnum.MULTI_CHANNEL\n )\n campaign.advertising_channel_sub_type = (\n client.enums.AdvertisingChannelSubTypeEnum.APP_CAMPAIGN\n )\n # Sets the target CPA to $1 / app install.\n #\n # campaign_bidding_strategy is a 'oneof' message so setting target_cpa\n # is mutually exclusive with other bidding strategies such as\n # manual_cpc, commission, maximize_conversions, etc.\n # See https://developers.google.com/google-ads/api/reference/rpc\n # under current version / resources / Campaign\n campaign.target_cpa.target_cpa_micros = 1000000\n # Sets the App Campaign Settings.\n campaign.app_campaign_setting.app_id = \"com.google.android.apps.adwords\"\n campaign.app_campaign_setting.app_store = (\n client.enums.AppCampaignAppStoreEnum.GOOGLE_APP_STORE\n )\n # Optimize this campaign for getting new users for your app.\n campaign.app_campaign_setting.bidding_strategy_goal_type = (\n client.enums.AppCampaignBiddingStrategyGoalTypeEnum.OPTIMIZE_INSTALLS_TARGET_INSTALL_COST\n )\n # Optional fields\n campaign.start_date = (datetime.now() + timedelta(1)).strftime(\"%Y%m%d\")\n campaign.end_date = (datetime.now() + timedelta(365)).strftime(\"%Y%m%d\")\n # Optional: If you select the\n # OPTIMIZE_IN_APP_CONVERSIONS_TARGET_INSTALL_COST goal type, then also\n # specify your in-app conversion types so the Google Ads API can focus\n # your campaign on people who are most likely to complete the\n # corresponding in-app actions.\n #\n # campaign.selective_optimization.conversion_actions.extend(\n # [\"INSERT_CONVERSION_ACTION_RESOURCE_NAME_HERE\"]\n # )\n\n # Submits the campaign operation and print the results.\n campaign_response = campaign_service.mutate_campaigns(\n customer_id=customer_id, operations=[campaign_operation]\n )\n resource_name = campaign_response.results[0].resource_name\n print(f'Created App campaign with resource name: \"{resource_name}\".')\n return resource_name", "def create_drip_campaign(self, name, list_id, description=None):\n new_drip_campaign = DripCampaign(\n user_id=self.user_id,\n name=name,\n list_id=list_id,\n description=description,\n active=False,\n )\n new_drip_campaign.save()\n return new_drip_campaign.id", "def create_campaign(account, row, name, acc_type):\n country = None\n if acc_type == Account.COUNTRY:\n country_name = row['LOCATION']\n country = Country.objects.filter(name__iexact=country_name).first()\n if not country:\n logging.getLogger('peacecorps.sync_accounting').warning(\n \"%s: Country does not exist: %s\",\n row['PROJ_NO'], row['LOCATION'])\n return\n\n account.save()\n summary = clean_description(row['SUMMARY'])\n campaign = Campaign.objects.create(\n name=name, account=account, campaigntype=acc_type,\n description=json.dumps({\"data\": [{\"type\": \"text\",\n \"data\": {\"text\": summary}}]}),\n country=country)\n if acc_type == Account.SECTOR:\n # Make sure we remember the sector this is marked as\n SectorMapping.objects.create(pk=row['SECTOR'], campaign=campaign)", "def create_creative_config(name, advertiser_id):\n\n snippet_file_path = os.path.join(os.path.dirname(__file__),\n 'creative_snippet.html')\n with open(snippet_file_path, 'r') as snippet_file:\n snippet = 
snippet_file.read()\n\n # https://developers.google.com/doubleclick-publishers/docs/reference/v201802/CreativeService.Creative\n config = {\n 'xsi_type': 'ThirdPartyCreative',\n 'name': name,\n 'advertiserId': advertiser_id,\n 'size': {\n 'width': '1',\n 'height': '1'\n },\n 'snippet': snippet,\n # https://github.com/prebid/Prebid.js/issues/418\n 'isSafeFrameCompatible': False,\n }\n\n return config", "def create_app_ad(client, customer_id, ad_group_resource_name):\n # Creates the ad group ad.\n ad_group_ad_service = client.get_service(\"AdGroupAdService\")\n ad_group_ad_operation = client.get_type(\"AdGroupAdOperation\")\n ad_group_ad = ad_group_ad_operation.create\n ad_group_ad.status = client.enums.AdGroupAdStatusEnum.ENABLED\n ad_group_ad.ad_group = ad_group_resource_name\n # ad_data is a 'oneof' message so setting app_ad\n # is mutually exclusive with ad data fields such as\n # text_ad, gmail_ad, etc.\n ad_group_ad.ad.app_ad.headlines.extend(\n [\n create_ad_text_asset(client, \"A cool puzzle game\"),\n create_ad_text_asset(client, \"Remove connected blocks\"),\n ]\n )\n ad_group_ad.ad.app_ad.descriptions.extend(\n [\n create_ad_text_asset(client, \"3 difficulty levels\"),\n create_ad_text_asset(client, \"4 colorful fun skins\"),\n ]\n )\n # Optional: You can set up to 20 image assets for your campaign.\n # ad_group_ad.ad.app_ad.images.extend(\n # [INSERT_AD_IMAGE_RESOURCE_NAME(s)_HERE])\n\n ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(\n customer_id=customer_id, operations=[ad_group_ad_operation]\n )\n ad_group_ad_resource_name = ad_group_ad_response.results[0].resource_name\n print(\n \"Ad Group App Ad created with resource name:\"\n f'\"{ad_group_ad_resource_name}\".'\n )", "def create_microsoft_ad(Name=None, ShortName=None, Password=None, Description=None, VpcSettings=None):\n pass", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def create_custom_audience_from_website(\n self, account_id, name, domain, description=None,\n retention_days=30, prefill=True, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'subtype': \"WEBSITE\"\n }\n rule = {'url': {\n 'i_contains': domain,\n }}\n if rule:\n args['rule'] = json.dumps(rule)\n if retention_days:\n args['retention_days'] = retention_days\n if prefill:\n args['prefill'] = prefill\n return self.make_request(path, 'POST', args, batch=batch)", "def create_asset(ocean, publisher):\n sample_ddo_path = get_resource_path(\"ddo\", \"ddo_sa_sample.json\")\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n asset = DDO(json_filename=sample_ddo_path)\n asset.metadata[\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n my_secret_store = \"http://myownsecretstore.com\"\n auth_service = ServiceDescriptor.authorization_service_descriptor(my_secret_store)\n return ocean.assets.create(asset.metadata, publisher, [auth_service])", "def main(client, customer_id):\n # Creates the budget for the campaign.\n budget_resource_name = create_budget(client, customer_id)\n\n # Creates the campaign.\n campaign_resource_name = create_campaign(\n client, customer_id, budget_resource_name\n )\n\n # Sets campaign targeting.\n set_campaign_targeting_criteria(client, customer_id, campaign_resource_name)\n\n # Creates an Ad Group.\n ad_group_resource_name = create_ad_group(\n client, customer_id, campaign_resource_name\n )\n\n # Creates an App Ad.\n create_app_ad(client, customer_id, 
ad_group_resource_name)", "def create_account():\n if not request.json or not 'name' in request.json:\n abort(400)\n account = {\n 'id': accounts[-1]['id'] + 1, #last id + 1\n 'name': request.json['name'],\n 'surname': request.json['surname'],\n 'product': request.json.get('product', \"\"),\n 'balance': request.json.get('balance', 0.00)\n }\n\n accounts.append(account)\n\n return json.dumps({'New Account': account}, ensure_ascii=False), 201, {'Content-Type': 'text/css; charset=utf-8'}", "def create_ad_group(client, customer_id, campaign_resource_name):\n ad_group_service = client.get_service(\"AdGroupService\")\n\n # Creates the ad group.\n # Note that the ad group type must not be set.\n # Since the advertising_channel_sub_type is APP_CAMPAIGN,\n # 1- you cannot override bid settings at the ad group level.\n # 2- you cannot add ad group criteria.\n ad_group_operation = client.get_type(\"AdGroupOperation\")\n ad_group = ad_group_operation.create\n ad_group.name = f\"Earth to Mars cruises {uuid4()}\"\n ad_group.status = client.enums.AdGroupStatusEnum.ENABLED\n ad_group.campaign = campaign_resource_name\n\n ad_group_response = ad_group_service.mutate_ad_groups(\n customer_id=customer_id, operations=[ad_group_operation]\n )\n\n ad_group_resource_name = ad_group_response.results[0].resource_name\n print(f'Ad Group created with resource name: \"{ad_group_resource_name}\".')\n return ad_group_resource_name", "def create_acct_packet(self, **args):\n return host.Host.create_acct_packet(self, secret=self.secret, **args)", "def GenerateAssetForCreateRequest(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec,\n )\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request", "def create_adcampaign_group(self, account_id, name, campaign_group_status,\n objective=None, batch=False):\n path = 'act_%s/adcampaign_groups' % account_id\n args = {\n 'name': name,\n 'campaign_group_status': campaign_group_status,\n }\n if objective is not None:\n args['objective'] = objective\n return self.make_request(path, 'POST', args, batch=batch)", "def GenerateAssetForCreateRequestAlpha(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n if args.IsSpecified('resource_read_access_mode'):\n resource_spec.readAccessMode = (\n resource_spec_field.ReadAccessModeValueValuesEnum(\n args.resource_read_access_mode\n )\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec)\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request", "def rpc_campaign_new(self, 
name):\n\t\tsession = db_manager.Session()\n\t\tcampaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)\n\t\tsession.add(campaign)\n\t\tsession.commit()\n\t\treturn campaign.id", "def get_adcreative(self, creative_id, fields, batch=False):\n path = '%s' % creative_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def create_creatives(creative_configs):\n advertiser_id = creative_configs[0]['advertiserId']\n dfp_client = get_client()\n creative_service = dfp_client.GetService('CreativeService',\n version='v201802')\n\n # All existing creatives\n existing_creatives = dfp.get_creatives.get_creatives_by_advertiser_id(advertiser_id)\n existing_creative_names = map(lambda c: c['name'], existing_creatives)\n\n # Return IDs of existing/created creatives\n creative_ids = []\n for creative_config in creative_configs:\n if creative_config['name'] in existing_creative_names:\n creative_ids.append(find(lambda c: c.name == creative_config['name'], existing_creatives)['id'])\n creative_configs.remove(creative_config)\n\n # Submit request to create the outstanding configs that don't already exist\n creatives = creative_service.createCreatives(creative_configs)\n\n for creative in creatives:\n creative_ids.append(creative['id'])\n logger.info(u'Created creative with ID \"{0}\" and name \"{1}\".'.format(\n creative['id'], creative['name']))\n return creative_ids", "def create_campaign(self, name, group):\n payload = self._build_params(name=name, group_uuid=group)\n return Campaign.deserialize(self._post('campaigns', None, payload))", "def create_account():\n user_id = get_jwt_identity()\n user = User.filter(id=user_id)[0]\n data = json.loads(request.data)\n\n if 'title' not in data:\n return jsonify_response({\"errors\": \"`title` field is required.\"}, 400)\n\n held_accounts = user.get_held_accounts(user.id)\n if held_accounts:\n user_accounts = \",\".join(f\"'{i}'\" for i in held_accounts)\n user_account_names_q = \\\n f\"g.V().hasLabel('{Account.LABEL}')\" + \\\n f\".has('id', within({user_accounts}))\" + \\\n f\".values('title')\"\n user_account_names = client.submit(user_account_names_q).all().result()\n\n if data[\"title\"] in user_account_names:\n return jsonify_response(\n {\"errors\": \"Users with the title already exist\"}, 400)\n\n account = Account.create(title=data[\"title\"])\n edge = UserHoldsAccount.create(user=user.id, account=account.id,\n relationType=\"secondary\")\n\n response = {\n \"title\": account.title\n }\n return jsonify_response(response, 201)", "def create_custom_audience_pixel(self, account_id, batch=False):\n path = \"act_%s/adspixels\" % account_id\n return self.make_request(path, 'POST', batch=batch)", "def create_budget(client, customer_id):\n # Retrieves the campaign budget service.\n campaign_budget_service = client.get_service(\"CampaignBudgetService\")\n # Retrieves a new campaign budget operation object.\n campaign_budget_operation = client.get_type(\"CampaignBudgetOperation\")\n # Creates a campaign budget.\n campaign_budget = campaign_budget_operation.create\n campaign_budget.name = f\"Interplanetary Cruise #{uuid4()}\"\n campaign_budget.amount_micros = 50000000\n campaign_budget.delivery_method = (\n client.enums.BudgetDeliveryMethodEnum.STANDARD\n )\n # An App campaign cannot use a shared campaign budget.\n # explicitly_shared must be set to false.\n campaign_budget.explicitly_shared = False\n\n # Submits the campaign budget operation to add the campaign budget.\n response = 
campaign_budget_service.mutate_campaign_budgets(\n customer_id=customer_id, operations=[campaign_budget_operation]\n )\n resource_name = response.results[0].resource_name\n print(f'Created campaign budget with resource_name: \"{resource_name}\"')\n return resource_name", "def create_account():\n account = w3.eth.account.create()\n return account", "def api_asset_add(char_code: str, name: str, capital: str, interest: str):\n capital, interest = float(capital), float(interest)\n asset = Asset(char_code=char_code, name=name, capital=capital, interest=interest)\n\n if app.bank.contains(asset):\n return f\"Asset '{name}' already exists\", 403\n\n app.bank.add(asset)\n return f\"Asset '{name}' was successfully added\", 200", "def random_category_ad(context, ad_zone, ad_category):\r\n to_return = {}\r\n\r\n # Retrieve a random ad for the category and zone\r\n ad = AdBase.objects.get_random_ad(ad_zone, ad_category)\r\n to_return['ad'] = ad\r\n\r\n # Record a impression for the ad\r\n if context.has_key('from_ip') and ad:\r\n from_ip = context.get('from_ip')\r\n try:\r\n impression = AdImpression(\r\n ad=ad, impression_date=datetime.now(), source_ip=from_ip)\r\n impression.save()\r\n except:\r\n pass\r\n return to_return" ]
[ "0.69750094", "0.633123", "0.605164", "0.59996223", "0.59628886", "0.5786098", "0.57572544", "0.57314056", "0.57057285", "0.55416316", "0.54866284", "0.54634756", "0.546215", "0.53990257", "0.53784597", "0.5348084", "0.5325721", "0.5310224", "0.52901614", "0.52826846", "0.5270877", "0.52704114", "0.5234946", "0.5225106", "0.5203457", "0.519596", "0.5190507", "0.51870155", "0.5172898", "0.5159346" ]
0.79304534
0
Create a custom audience for the given account.
def create_custom_audience(self, account_id, name, subtype=None, description=None, rule=None, opt_out_link=None, retention_days=30, batch=False): path = "act_%s/customaudiences" % account_id args = { 'name': name, } if subtype: args['subtype'] = subtype if description: args['description'] = description if rule: args['rule'] = json.dumps(rule) if opt_out_link: args['opt_out_link'] = opt_out_link if retention_days: args['retention_days'] = retention_days return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_custom_audience_from_website(\n self, account_id, name, domain, description=None,\n retention_days=30, prefill=True, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'subtype': \"WEBSITE\"\n }\n rule = {'url': {\n 'i_contains': domain,\n }}\n if rule:\n args['rule'] = json.dumps(rule)\n if retention_days:\n args['retention_days'] = retention_days\n if prefill:\n args['prefill'] = prefill\n return self.make_request(path, 'POST', args, batch=batch)", "def create_lookalike_audience(self, account_id, name, audience_id,\n lookalike_spec, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'origin_audience_id': audience_id,\n 'lookalike_spec': json.dumps(lookalike_spec),\n }\n return self.make_request(path, 'POST', args, batch)", "def create_account():\n account = w3.eth.account.create()\n return account", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def create(cls, body: CloudAccount):\n\t\tpass", "def get_custom_audiences(self, account_id, fields=None, batch=False):\n path = 'act_%s/customaudiences' % account_id\n args = { 'limit': self.DATA_LIMIT }\n if fields: args['fields'] = fields\n return self.make_request(path, 'GET', args, batch=batch)", "def newaccount(accountname, account, owner, active, memo, posting, create_claimed_account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n if owner is None or active is None or memo is None or posting is None:\n password = click.prompt(\"Keys were not given - Passphrase is used to create keys\\n New Account Passphrase\", confirmation_prompt=True, hide_input=True)\n if not password:\n print(\"You cannot chose an empty password\")\n return\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, password=password)\n else:\n tx = mph.create_account(accountname, creator=acc, password=password)\n else:\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting)\n else:\n tx = mph.create_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting) \n tx = json.dumps(tx, indent=4)\n print(tx)", "def create_custom_audience_pixel(self, account_id, batch=False):\n path = \"act_%s/adspixels\" % account_id\n return self.make_request(path, 'POST', batch=batch)", "def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)", "def create_account():\n user_id = get_jwt_identity()\n user = User.filter(id=user_id)[0]\n data = json.loads(request.data)\n\n if 'title' not in data:\n return jsonify_response({\"errors\": \"`title` field is required.\"}, 400)\n\n held_accounts = user.get_held_accounts(user.id)\n if held_accounts:\n user_accounts = \",\".join(f\"'{i}'\" for i in held_accounts)\n user_account_names_q = \\\n f\"g.V().hasLabel('{Account.LABEL}')\" + \\\n f\".has('id', within({user_accounts}))\" + \\\n f\".values('title')\"\n user_account_names = client.submit(user_account_names_q).all().result()\n\n if data[\"title\"] in user_account_names:\n return jsonify_response(\n {\"errors\": \"Users with the title already exist\"}, 400)\n\n account = 
Account.create(title=data[\"title\"])\n edge = UserHoldsAccount.create(user=user.id, account=account.id,\n relationType=\"secondary\")\n\n response = {\n \"title\": account.title\n }\n return jsonify_response(response, 201)", "def consent(self, account_id):\n from pureport_client.commands.accounts.consent import Command\n return Command(self.client, account_id)", "def put_account(self, account):\n \n pass", "def create(self, data):\n url = self.base_url + '/v2/account/create/'\n return self._call_vendasta(url, data)", "def test_create_account_campaign(self, create):\n \"\"\"Campaigns should be created\"\"\"\n row = {'PROJ_NAME1': 'Argentina Fund', 'PROJ_NO': '789-CFD',\n 'SUMMARY': 'Some Sum'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, name, acc_type = create.call_args[0]\n self.assertEqual(account.name, 'Argentina Fund')\n self.assertEqual(account.code, '789-CFD')\n self.assertEqual(account.category, Account.COUNTRY)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def create_account(self, short_name, author_name=None, author_url=None,\n replace_token=True):\n response = self._telegraph.method('createAccount', values={\n 'short_name': short_name,\n 'author_name': author_name,\n 'author_url': author_url\n })\n\n if replace_token:\n self._telegraph.access_token = response.get('access_token')\n\n return response", "def set_audience(self, claim=AUDIENCE):\n if api_settings.AUDIENCE is not None:\n self.payload[claim] = api_settings.AUDIENCE", "def setup_call_account(config):\n kwargs = {\n 'insid': get_callaccnt_name(config['shortName'], CALLACCNT_GENERAL),\n 'start_date': config['startDate'],\n 'rate_dict': {'type': 'float',\n 'ref': config['generalCallAccountRateIndex'],\n 'spread': config['generalCallAccountSpread']},\n 'counterparty': config['counterparty'],\n 'prf_name': get_portfolio_name_by_id(\"CALLACCNT\", config['shortName']),\n 'account_name': config['shortName'] + '_Margin',\n 'reinvest': True,\n 'funding_instype': 'Call Prime Brokerage Funding',\n }\n return _setup_account_general(**kwargs)", "def account(self, account_code):\r\n return acc.Account(self, account_code)", "def create_account(row, issue_map):\n acc_type = account_type(row)\n name = row['PROJ_NAME1']\n if Account.objects.filter(name=name).first():\n name = name + ' (' + row['PROJ_NO'] + ')'\n account = Account(name=name, code=row['PROJ_NO'], category=acc_type)\n if acc_type == Account.PROJECT:\n create_pcpp(account, row, issue_map)\n else:\n create_campaign(account, row, name, acc_type)", "def generate_account_sas(\n account_name, # type: str\n account_key, # type: str\n resource_types, # type: Union[ResourceTypes, str]\n permission, # type: Union[AccountSasPermissions, str]\n expiry, # type: Optional[Union[datetime, str]]\n **kwargs # type: Any\n ): # type: (...) 
-> str\n return generate_blob_account_sas(\n account_name=account_name,\n account_key=account_key,\n resource_types=resource_types,\n permission=permission,\n expiry=expiry,\n **kwargs\n )", "def create_accounts(account_list, user_type):\n account_client = AccountClient()\n for account in account_list:\n try:\n account_client.add_account(account, user_type, email=None)\n except exception.Duplicate:\n pass # Account already exists, no need to create it", "def __init__(__self__,\n resource_name: str,\n args: AccountAliasArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n 
trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def associate_member_account(memberAccountId=None):\n pass", "def _create_account(user_id: int):\r\n now = datetime.now()\r\n _created_at = now.strftime(\"%m/%d/%Y at %H:%M:%S\")\r\n Wealth.collection.insert_one({\r\n \"_id\": user_id,\r\n \"coins\": 100,\r\n \"cookie\": 0,\r\n \"choc\": 0,\r\n \"poop\": 0,\r\n \"beans\": 0,\r\n \"pizza\": 0,\r\n \"waffles\": 0,\r\n \"Fish\": 0,\r\n \"apple\": 0,\r\n \"afk\": \"No status set, run w/status to set a status\",\r\n \"Reputation\": 0,\r\n \"LastUsed\": \"Isnotset\",\r\n \"TargetMember\": 0,\r\n \"BadgeSlot1\": \"Doesn't Have Noob\",\r\n \"BadgeSlot2\": \"Doesn't Have Beginner\",\r\n \"BadgeSlot3\": \"Doesn't Have Leader\",\r\n \"AccountCreated\": _created_at,\r\n \"Premium\": \"No\",\r\n \"Developer\": \"No\",\r\n \"Bank\": 0,\r\n \"Tickets\": 0,\r\n \"LastWithdraw\": \"No date\",\r\n \"LastTransfer\": \"No date\",\r\n \"MarriedTo\": \"Nobody\",\r\n \"MarriedDate\": \"No date\",\r\n })", "def create_wim_account(self, wim, tenant, properties):\n wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']\n tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,\n SELECT=['uuid', 'name'])\n account = properties.setdefault('name', tenant['name'])\n\n wim_account = self.query_one('wim_accounts',\n WHERE={'wim_id': wim_id, 'name': account},\n error_if_none=False)\n\n transaction = []\n used_uuids = []\n\n if wim_account is None:\n # If a row for the wim account doesn't exist yet, we need to\n # create one, otherwise we can just re-use it.\n account_id = str(generate_uuid())\n used_uuids.append(account_id)\n row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)\n transaction.append({'wim_accounts': _preprocess_wim_account(row)})\n else:\n account_id = wim_account['uuid']\n properties.pop('config', None) # Config is too complex to compare\n diff = {k: v for k, v in properties.items() if v != wim_account[k]}\n if diff:\n tip = 'Edit the account first, and then attach it to a tenant'\n raise WimAccountOverwrite(wim_account, diff, tip)\n\n transaction.append({\n 'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],\n 'wim_id': wim_id,\n 'wim_account_id': account_id}})\n\n with self._associate(wim_id, tenant['uuid']):\n self.db.new_rows(transaction, used_uuids, confidential_data=True)\n\n return account_id", "def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account", "def create_account(\n account_name,\n account_email,\n account_role,\n access_to_billing,\n organization_unit_id,\n scp):\n\n client = session.client('organizations')\n\n try:\n create_account_response = client.create_account(Email=account_email, 
AccountName=account_name,\n RoleName=account_role,\n IamUserAccessToBilling=access_to_billing)\n except botocore.exceptions.ClientError as e:\n print(e)\n sys.exit(1)\n\n time.sleep(10)\n\n account_status = 'IN_PROGRESS'\n while account_status == 'IN_PROGRESS':\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n print(\"Create account status \"+str(create_account_status_response))\n account_status = create_account_status_response.get('CreateAccountStatus').get('State')\n if account_status == 'SUCCEEDED':\n accountid = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n elif account_status == 'FAILED':\n print(\"Account creation failed: \" + create_account_status_response.get('CreateAccountStatus').get('FailureReason'))\n sys.exit(1)\n root_id = client.list_roots().get('Roots')[0].get('Id')\n\n # Move account to the org\n if organization_unit_id is not None:\n try:\n describe_organization_response = client.describe_organizational_unit(\n OrganizationalUnitId=organization_unit_id)\n move_account_response = client.move_account(AccountId=accountid, SourceParentId=root_id,\n DestinationParentId=organization_unit_id)\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r} \"\n message = template.format(type(ex).__name__, ex.args)\n # create_organizational_unit(organization_unit_id)\n print(message)\n\n # Attach policy to account if exists\n if scp is not None:\n attach_policy_response = client.attach_policy(PolicyId=scp, TargetId=accountid)\n print(\"Attach policy response \"+str(attach_policy_response))\n\n return accountid", "def create_account(self, user):\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n \"CreateAccount\",\n account_name=user.gov_id,\n domain_id=\"afyamkononi\",\n public_key=user.public_key,\n )\n ]\n )\n IrohaCrypto.sign_transaction(tx, self.creator_account_details.private_key)\n return self.send_transaction_and_return_status(tx)", "def create_account():\n if not request.json or not 'name' in request.json:\n abort(400)\n account = {\n 'id': accounts[-1]['id'] + 1, #last id + 1\n 'name': request.json['name'],\n 'surname': request.json['surname'],\n 'product': request.json.get('product', \"\"),\n 'balance': request.json.get('balance', 0.00)\n }\n\n accounts.append(account)\n\n return json.dumps({'New Account': account}, ensure_ascii=False), 201, {'Content-Type': 'text/css; charset=utf-8'}" ]
[ "0.69903755", "0.6747098", "0.5886402", "0.57386774", "0.56866586", "0.5684474", "0.5681401", "0.56380796", "0.5489181", "0.54728997", "0.54438716", "0.53915864", "0.53431445", "0.53252304", "0.52722365", "0.5258361", "0.5257858", "0.524561", "0.51642907", "0.5159867", "0.5132499", "0.51191765", "0.51016927", "0.5089005", "0.5087687", "0.50357646", "0.5026181", "0.5014058", "0.5011955", "0.5006808" ]
0.8034021
0
Adds users to a Custom Audience, based on a list of unique user tracking ids. There is a limit imposed by Facebook that only 10000 users may be uploaded at a time. schema: Allowed values are "UID", "EMAIL_SHA256", "PHONE_SHA256", "MOBILE_ADVERTISER_ID". app_ids: List of app ids; this is required for schema type UID, as of API v2.2.
def add_users_to_custom_audience(self, custom_audience_id, tracking_ids, schema='MOBILE_ADVERTISER_ID', app_ids=None, batch=False): path = "%s/users" % custom_audience_id payload = {'schema': schema, 'data': tracking_ids} if app_ids: payload['app_ids'] = app_ids args = { 'payload': json.dumps(payload) } return self.make_request(path, 'POST', args, batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_all_friends(twitter, users):\n ###TODO-- Completed\n\n #calling get_friends here to receive friends ID's for all the values of screen_name,\n # limiting the values to receive to 5000\n for user in users:\n user['friends'] = get_friends(twitter, user['screen_name'])[:5000]\n #print(len(user['friends']))", "async def add_list(\n self, name: str, user_ids: Optional[List[int]] = None, **kwargs\n ) -> friends.AddListResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.addList\", params)\n model = friends.AddListResponse\n return model(**response).response", "async def add_users(self, users, collection):\n\n for user in users:\n if await collection.find_one({'id': user.id}) is None:\n await collection.insert_one({'id': user.id, 'name': user.name,\n 'data': {}, 'transactions': [], 'unverified': [],\n 'unapproved': []})", "def add_users(self, *users):\r\n pass", "def _SetUserAccounts(self, knowledge_base_object, users):\n for user in users:\n identifier = user.get('sid', user.get('uid', None))\n if not identifier:\n continue\n\n user_account_artifact = artifacts.UserAccountArtifact(\n identifier=identifier, user_directory=user.get('path', None),\n username=user.get('name', None))\n\n knowledge_base_object.AddUserAccount(user_account_artifact)", "def __insertBasicUsersQueries(self,_count,user_ids,credit,owner_id,group_id,ibs_query):\n for user_id in user_ids:\n ibs_query+=self.__insertBasicUserQuery(user_id,credit,owner_id,group_id)", "def forwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.all():\n if hasattr(referral, \"user\"):\n referral.users.add(referral.user)\n referral.save()", "def add_users_from_file(args):\n with open(args.users_from_file) as file:\n for line in file:\n name, email_address = line.split(',')\n add_user(name, email_address.strip().lower())", "def add(self, user_id, aspect_ids):\n for aid in aspect_ids: Aspect(self._connection, aid).addUser(user_id)", "def put(self, uid):\n json_data = request.get_json()\n event_dicts = json_data[\"data\"]\n ids = list()\n\n for event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\n\n return {\n \"ids\": ids\n }", "def user_follow_users(self, ids=None, **kwargs):\n return self._put(\n API.MY_FOLLOWING.value, type=\"user\", ids=\",\".join(ids or []), **kwargs\n )", "def create_custom_audience_pixel(self, account_id, batch=False):\n path = \"act_%s/adspixels\" % account_id\n return self.make_request(path, 'POST', batch=batch)", "def add_users(data: list[dict]):\n l = []\n already_exist = []\n for d in data:\n if \"username\" in d:\n if Users.username_exists(d[\"username\"]):\n already_exist.append(d[\"username\"])\n else:\n u = Users.new(**d)\n l.append(u)\n if u.is_admin:\n cache.delete_memoized(get_admin_emails)\n db.session.bulk_save_objects(l)\n db.session.commit()\n return already_exist", "def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])", "def add(self, asset_ids=None):\n if asset_ids is not None and isinstance(asset_ids, list):\n for h in asset_ids:\n self.asset_ids.append(h[:self.idlen_conf[\"asset_id\"]])", "def add_from_uuid_list(self):\n\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split('\\n')[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add('Invalid uuid lenght.')\n continue\n \n 
self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return", "def create_users (users_file_name = 'lookup.csv'):\n users_file = open (users_file_name, 'r')\n for line in users_file:\n # user_fields = line.split ()\n user_data_list = parse_user_info_list (line.split (','))\n print user_data_list\n create_user (*user_data_list)\n users_file.close ()\n print 'All users created successfully.'", "def add_users_to_workspace(self, workspace_id: str, user_ids: list, kind: WorkspaceSubscriberKind, *args, **kwargs):\n \n users_data = api.add_users_to_workspace(\n workspace_id,\n user_ids, \n kind,\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n\n return [en.User(creds=self.__creds, **data) for data in users_data]", "def user_ids(self):\r\n raise NotImplementedError", "def addUserId(self, user_id):\n self.__register_user_ids.add(user_id)", "def get_many_user_type(session, access_token, ids):\n data = {\n \"requests\":\n [\n {\n \"url\": \"/users/\"+id+\"/userType\",\n \"method\": \"GET\",\n \"id\": i\n }\n for i, id in enumerate(ids)\n ]\n }\n data = json.loads(json.dumps(data))\n endpoint = \"https://graph.microsoft.com/v1.0/$batch\"\n r = session.post(endpoint, json=data, headers={\"Authorization\": \"Bearer \" + access_token, \"Content-type\": \"application/json\"})\n responses = json.loads(r.text)['responses']\n out = {}\n for i, response in enumerate(responses):\n \n out[ids[i]] = {\n 'userType': response['body']['value']\n }\n \n return out", "def create_many(self, users: 'List[User]', privileges: 'Optional[List[str]]' = None) -> 'Optional[List[User]]':\n for user in users:\n if user.id is None:\n user.id = self.get_new_id()\n return self._create_many(schema=UserSchema(), entities=users, privileges=privileges)", "def bulk_subscribe(self, work_batch, user_ids, reason=WorkBatchSubscriptionReason.unknown):\n user_ids = set(user_ids)\n\n # 5 retries for race conditions where\n # concurrent subscription attempts cause integrity errors\n for i in range(4, -1, -1): # 4 3 2 1 0\n\n existing_subscriptions = set(WorkBatchSubscription.objects.filter(\n user_id__in=user_ids,\n work_batch=work_batch,\n ).values_list('user_id', flat=True))\n\n subscriptions = [\n WorkBatchSubscription(\n user_id=user_id,\n work_batch=work_batch,\n is_active=True,\n reason=reason,\n )\n for user_id in user_ids\n if user_id not in existing_subscriptions\n ]\n\n try:\n with transaction.atomic():\n self.bulk_create(subscriptions)\n return True\n except IntegrityError as e:\n if i == 0:\n raise e", "def add_users(caller, role, *users):\r\n _check_caller_authority(caller, role)\r\n role.add_users(*users)", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def list(self, user_ids: Optional[List[UserId]]) -> List[U]:\n ...", "def create_custom_audience_from_website(\n self, account_id, name, domain, description=None,\n retention_days=30, prefill=True, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'subtype': \"WEBSITE\"\n }\n rule = {'url': {\n 'i_contains': domain,\n }}\n if rule:\n args['rule'] = json.dumps(rule)\n if retention_days:\n args['retention_days'] = retention_days\n if prefill:\n args['prefill'] = prefill\n return 
self.make_request(path, 'POST', args, batch=batch)", "def add_users_to_db(self, user_pks, skip_saved=True, is_follower=False, am_following=False):\n\n skip_user_pks = set()\n\n #Add the saved user PKs from MongoDB to the Set\n if skip_saved:\n saved_user_pks = self._users_collection.find({}, {'pk': 1, '_id': 0})\n for saved_user_pk in saved_user_pks:\n skip_user_pks.add(saved_user_pk['pk'])\n\n\n for user_pk in user_pks:\n if user_pk in skip_user_pks:\n print(\"Skipping: \" + str(user_pk))\n continue\n\n #New user, get their information\n try:\n raw_user_result = self.getUsernameInfo(user_pk)\n raw_user = self.LastJson[\"user\"]\n\n #Error getting user from Instagram API - sleep then try again\n except requests.exceptions.RequestException as e:\n print(\"Requests exception: %s\" % (e))\n all_followers.append(follower)\n time.sleep(random.randint(180, 10 * 180))\n\n #No error - let's insert the user into Mongo\n else:\n user = InstagramUser(raw_user, \n is_follower=is_follower, \n am_following=am_following)\n user.add_update(\"inserted\")\n\n try:\n inserted_result = self._users_collection.insert_one(user.storage_dict())\n\n #User already exists in MongoDB - let's replace\n except pymongo.errors.DuplicateKeyError:\n self._users_collection.delete_one({\"pk\": user.pk})\n inserted_result = self._users_collection.insert_one(user.storage_dict())\n\n finally:\n if inserted_result.acknowledged:\n print(\"Upserted: %s\\t%s\\t%s\" % (user.full_name, user.username, \n inserted_result.inserted_id))\n else:\n print(\"ERROR UPSERTING: %s\", user_info)\n\n\n #Sleep for a bit before getting the next user\n sleep_delay = random.randint(0, 10) # 180))\n time.sleep(sleep_delay)", "def add_user(self):\n\n pin, code = self.get_auth_pin() \n print(\"Enter the PIN '{}' into the Add Application window and click Add Application\".format(pin))\n input(\"waiting press enter to continue...\")\n\n access_token, refresh_token = self.get_tokens(code)\n user_id = self.tokens.get_next_user_id()\n self.tokens.insert_user(user_id, access_token, refresh_token)\n tstat_ids = self.get_tstat_ids(access_token)\n for tstat_id in tstat_ids:\n logger.info(\"Adding Thermostat ID: {}\".format(tstat_id))\n self.tokens.insert_tstat(user_id, tstat_id)", "def auto_follow_followers_for_user(user_screen_name, count=5):\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers_for_user = set(t.followers.ids(screen_name=user_screen_name)[\"ids\"][:count]);\n do_not_follow = get_do_not_follow_list()\n \n for user_id in followers_for_user:\n try:\n if (user_id not in following and \n user_id not in do_not_follow):\n\n t.friendships.create(user_id=user_id, follow=False)\n print(\"followed %s\" % user_id)\n\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))" ]
[ "0.53511375", "0.53367573", "0.5245062", "0.51884025", "0.5148435", "0.5121158", "0.5026242", "0.50204164", "0.5012364", "0.5008828", "0.49615628", "0.49122536", "0.48883018", "0.48880377", "0.487912", "0.4874466", "0.4869419", "0.4867456", "0.48221523", "0.4818214", "0.480262", "0.47656164", "0.47510302", "0.47331065", "0.47320747", "0.4699257", "0.46989137", "0.46950817", "0.46752274", "0.46719277" ]
0.8153835
0
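Annotation (not part of the dataset row above): a minimal, hypothetical call of the add_users_to_custom_audience method documented in this row might look like the sketch below. The client instance api, the audience id and the tracking ids are placeholders and assumptions; only the method signature and the 10000-user limit come from the row itself.

# Sketch only: "api" stands for an instance of the client class that the
# method above is defined on; all ids below are made-up placeholders.
tracking_ids = ["38400000-8cf0-11bd-b23e-10b96e40000d"]  # at most 10000 ids per call
response = api.add_users_to_custom_audience(
    custom_audience_id="6001234567890",
    tracking_ids=tracking_ids,
    schema="MOBILE_ADVERTISER_ID",
)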
Create a custom audience pixel for the given account. This method is only needed once per ad account.
def create_custom_audience_pixel(self, account_id, batch=False): path = "act_%s/adspixels" % account_id return self.make_request(path, 'POST', batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_custom_audience(self, account_id, name, subtype=None,\n description=None, rule=None, opt_out_link=None,\n retention_days=30, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n }\n if subtype:\n args['subtype'] = subtype\n if description:\n args['description'] = description\n if rule:\n args['rule'] = json.dumps(rule)\n if opt_out_link:\n args['opt_out_link'] = opt_out_link\n if retention_days:\n args['retention_days'] = retention_days\n return self.make_request(path, 'POST', args, batch=batch)", "def create_custom_audience_from_website(\n self, account_id, name, domain, description=None,\n retention_days=30, prefill=True, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'subtype': \"WEBSITE\"\n }\n rule = {'url': {\n 'i_contains': domain,\n }}\n if rule:\n args['rule'] = json.dumps(rule)\n if retention_days:\n args['retention_days'] = retention_days\n if prefill:\n args['prefill'] = prefill\n return self.make_request(path, 'POST', args, batch=batch)", "def create_lookalike_audience(self, account_id, name, audience_id,\n lookalike_spec, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'origin_audience_id': audience_id,\n 'lookalike_spec': json.dumps(lookalike_spec),\n }\n return self.make_request(path, 'POST', args, batch)", "def create_offsite_pixel(self, account_id, name, tag, batch=False):\n path = 'act_%s/offsitepixels' % account_id\n args = {\n 'name': name,\n 'tag': tag,\n }\n return self.make_request(path, 'POST', args, batch=batch)", "def get_custom_audiences(self, account_id, fields=None, batch=False):\n path = 'act_%s/customaudiences' % account_id\n args = { 'limit': self.DATA_LIMIT }\n if fields: args['fields'] = fields\n return self.make_request(path, 'GET', args, batch=batch)", "def account(self, account_code):\r\n return acc.Account(self, account_code)", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def _setaccount_with_institution_57A(self, val):\n self.swift_obj.AccountWithInstitution_A = val\n self.swift_obj.AccountWithInstitution_A.swiftTag = '57A'", "def get_account(self, account):\n \n pass", "def configure_account(self, alias: Alias = sentinel, margin_rate: DecimalNumber = sentinel):\n pass", "def create_account():\n account = w3.eth.account.create()\n return account", "def create_wim_account(self, wim, tenant, properties):\n wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']\n tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,\n SELECT=['uuid', 'name'])\n account = properties.setdefault('name', tenant['name'])\n\n wim_account = self.query_one('wim_accounts',\n WHERE={'wim_id': wim_id, 'name': account},\n error_if_none=False)\n\n transaction = []\n used_uuids = []\n\n if wim_account is None:\n # If a row for the wim account doesn't exist yet, we need to\n # create one, otherwise we can just re-use it.\n account_id = str(generate_uuid())\n used_uuids.append(account_id)\n row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)\n transaction.append({'wim_accounts': _preprocess_wim_account(row)})\n else:\n account_id = wim_account['uuid']\n properties.pop('config', None) # Config is too complex to compare\n diff = 
{k: v for k, v in properties.items() if v != wim_account[k]}\n if diff:\n tip = 'Edit the account first, and then attach it to a tenant'\n raise WimAccountOverwrite(wim_account, diff, tip)\n\n transaction.append({\n 'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],\n 'wim_id': wim_id,\n 'wim_account_id': account_id}})\n\n with self._associate(wim_id, tenant['uuid']):\n self.db.new_rows(transaction, used_uuids, confidential_data=True)\n\n return account_id", "def api_asset_add(char_code: str, name: str, capital: str, interest: str):\n capital, interest = float(capital), float(interest)\n asset = Asset(char_code=char_code, name=name, capital=capital, interest=interest)\n\n if app.bank.contains(asset):\n return f\"Asset '{name}' already exists\", 403\n\n app.bank.add(asset)\n return f\"Asset '{name}' was successfully added\", 200", "def create_account(row, issue_map):\n acc_type = account_type(row)\n name = row['PROJ_NAME1']\n if Account.objects.filter(name=name).first():\n name = name + ' (' + row['PROJ_NO'] + ')'\n account = Account(name=name, code=row['PROJ_NO'], category=acc_type)\n if acc_type == Account.PROJECT:\n create_pcpp(account, row, issue_map)\n else:\n create_campaign(account, row, name, acc_type)", "def associate_member_account(memberAccountId=None):\n pass", "def put_account(self, account):\n \n pass", "def add_custom_asset(self, custom_asset: CustomAsset) -> str:\n self._raise_if_custom_asset_exists(custom_asset)\n with GlobalDBHandler().conn.write_ctx() as global_db_write_cursor:\n global_db_write_cursor.execute(\n 'INSERT INTO assets(identifier, name, type) VALUES (?, ?, ?)',\n (\n custom_asset.identifier,\n custom_asset.name,\n AssetType.CUSTOM_ASSET.serialize_for_db(),\n ),\n )\n global_db_write_cursor.execute(\n 'INSERT INTO custom_assets(identifier, type, notes) VALUES(?, ?, ?)',\n custom_asset.serialize_for_db(),\n )\n with self.db.user_write() as db_write_cursor:\n self.db.add_asset_identifiers(db_write_cursor, [custom_asset.identifier])\n return custom_asset.identifier", "def setup_call_account(config):\n kwargs = {\n 'insid': get_callaccnt_name(config['shortName'], CALLACCNT_GENERAL),\n 'start_date': config['startDate'],\n 'rate_dict': {'type': 'float',\n 'ref': config['generalCallAccountRateIndex'],\n 'spread': config['generalCallAccountSpread']},\n 'counterparty': config['counterparty'],\n 'prf_name': get_portfolio_name_by_id(\"CALLACCNT\", config['shortName']),\n 'account_name': config['shortName'] + '_Margin',\n 'reinvest': True,\n 'funding_instype': 'Call Prime Brokerage Funding',\n }\n return _setup_account_general(**kwargs)", "def add_assets(char_code, name, capital, interest):\n try:\n capital = float(capital)\n interest = float(interest)\n except:\n redirect(url_for(\"page_not_found\"))\n if name in app.bank:\n abort(403)\n app.bank[name] = Asset(name, char_code, capital, interest)\n return f\"Asset '{name}' was successfully added\", 200", "def GenerateAssetForCreateRequestAlpha(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n if args.IsSpecified('resource_read_access_mode'):\n resource_spec.readAccessMode = (\n resource_spec_field.ReadAccessModeValueValuesEnum(\n args.resource_read_access_mode\n )\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n 
labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec)\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n 
acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def set_audience(self, claim=AUDIENCE):\n if api_settings.AUDIENCE is not None:\n self.payload[claim] = api_settings.AUDIENCE", "def create(cls, body: CloudAccount):\n\t\tpass", "def get_ads_pixels(self, account_id, fields=None, batch=False):\n path = 'act_%s/adspixels' % account_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def on_account(self, account: AccountData):\n # self.on_event(EVENT_ACCOUNT, account)\n # self.on_event(EVENT_ACCOUNT + account.vt_accountid, account)\n pass", "def __init__(__self__,\n resource_name: str,\n args: AccountAliasArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _make_custom_config(name: str = \"dummy_agent\", skills_num: int = 1) -> dict:\n # noqa\n def _make_skill(id_: int) -> Skill:\n return AEATestWrapper.make_skill(\n config=SkillConfig(name=f\"sc{id_}\", author=\"fetchai\"),\n handlers={\"dummy_handler\": DummyHandler},\n )\n\n return {\n \"name\": name,\n \"components\": [_make_skill(i) for i in range(skills_num)],\n }", "def account(self, account: str):\n self._account = account", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def create_creative_config(name, advertiser_id):\n\n snippet_file_path = os.path.join(os.path.dirname(__file__),\n 'creative_snippet.html')\n with open(snippet_file_path, 'r') as snippet_file:\n snippet = snippet_file.read()\n\n # https://developers.google.com/doubleclick-publishers/docs/reference/v201802/CreativeService.Creative\n config = {\n 'xsi_type': 'ThirdPartyCreative',\n 'name': name,\n 'advertiserId': advertiser_id,\n 'size': {\n 'width': '1',\n 'height': '1'\n },\n 'snippet': snippet,\n # https://github.com/prebid/Prebid.js/issues/418\n 'isSafeFrameCompatible': False,\n }\n\n return config" ]
[ "0.6403576", "0.62565124", "0.5535345", "0.5103156", "0.5053856", "0.49941942", "0.49930894", "0.49631172", "0.49131462", "0.49072927", "0.48941672", "0.48804632", "0.48549256", "0.4849889", "0.48173803", "0.48096266", "0.47709522", "0.47580764", "0.47232842", "0.47097245", "0.4707876", "0.46699256", "0.4648222", "0.46321276", "0.4603356", "0.45983958", "0.45871785", "0.45711964", "0.45676747", "0.45638788" ]
0.80888146
0
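Annotation: a hypothetical one-off call of the create_custom_audience_pixel method from this row; the api instance and the account id are assumed placeholders, only the signature comes from the row.

# Sketch only: per the row's description, one pixel per ad account is enough.
pixel = api.create_custom_audience_pixel(account_id="1234567890")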
Create a custom audience from a website for the given account.
def create_custom_audience_from_website( self, account_id, name, domain, description=None, retention_days=30, prefill=True, batch=False): path = "act_%s/customaudiences" % account_id args = { 'name': name, 'subtype': "WEBSITE" } rule = {'url': { 'i_contains': domain, }} if rule: args['rule'] = json.dumps(rule) if retention_days: args['retention_days'] = retention_days if prefill: args['prefill'] = prefill return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_custom_audience(self, account_id, name, subtype=None,\n description=None, rule=None, opt_out_link=None,\n retention_days=30, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n }\n if subtype:\n args['subtype'] = subtype\n if description:\n args['description'] = description\n if rule:\n args['rule'] = json.dumps(rule)\n if opt_out_link:\n args['opt_out_link'] = opt_out_link\n if retention_days:\n args['retention_days'] = retention_days\n return self.make_request(path, 'POST', args, batch=batch)", "def create_lookalike_audience(self, account_id, name, audience_id,\n lookalike_spec, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'origin_audience_id': audience_id,\n 'lookalike_spec': json.dumps(lookalike_spec),\n }\n return self.make_request(path, 'POST', args, batch)", "def create_account():\n account = w3.eth.account.create()\n return account", "def create_account():\n user_id = get_jwt_identity()\n user = User.filter(id=user_id)[0]\n data = json.loads(request.data)\n\n if 'title' not in data:\n return jsonify_response({\"errors\": \"`title` field is required.\"}, 400)\n\n held_accounts = user.get_held_accounts(user.id)\n if held_accounts:\n user_accounts = \",\".join(f\"'{i}'\" for i in held_accounts)\n user_account_names_q = \\\n f\"g.V().hasLabel('{Account.LABEL}')\" + \\\n f\".has('id', within({user_accounts}))\" + \\\n f\".values('title')\"\n user_account_names = client.submit(user_account_names_q).all().result()\n\n if data[\"title\"] in user_account_names:\n return jsonify_response(\n {\"errors\": \"Users with the title already exist\"}, 400)\n\n account = Account.create(title=data[\"title\"])\n edge = UserHoldsAccount.create(user=user.id, account=account.id,\n relationType=\"secondary\")\n\n response = {\n \"title\": account.title\n }\n return jsonify_response(response, 201)", "def create_custom_audience_pixel(self, account_id, batch=False):\n path = \"act_%s/adspixels\" % account_id\n return self.make_request(path, 'POST', batch=batch)", "def audience(self):\n return \"HealthProfessional\"", "def create(cls, body: CloudAccount):\n\t\tpass", "def newaccount(accountname, account, owner, active, memo, posting, create_claimed_account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n if owner is None or active is None or memo is None or posting is None:\n password = click.prompt(\"Keys were not given - Passphrase is used to create keys\\n New Account Passphrase\", confirmation_prompt=True, hide_input=True)\n if not password:\n print(\"You cannot chose an empty password\")\n return\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, password=password)\n else:\n tx = mph.create_account(accountname, creator=acc, password=password)\n else:\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting)\n else:\n tx = mph.create_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting) \n tx = json.dumps(tx, indent=4)\n print(tx)", "def get_custom_audiences(self, account_id, fields=None, batch=False):\n path = 'act_%s/customaudiences' % account_id\n args = { 'limit': self.DATA_LIMIT }\n if fields: args['fields'] = fields\n return 
self.make_request(path, 'GET', args, batch=batch)", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def _create_account(user_id: int):\r\n now = datetime.now()\r\n _created_at = now.strftime(\"%m/%d/%Y at %H:%M:%S\")\r\n Wealth.collection.insert_one({\r\n \"_id\": user_id,\r\n \"coins\": 100,\r\n \"cookie\": 0,\r\n \"choc\": 0,\r\n \"poop\": 0,\r\n \"beans\": 0,\r\n \"pizza\": 0,\r\n \"waffles\": 0,\r\n \"Fish\": 0,\r\n \"apple\": 0,\r\n \"afk\": \"No status set, run w/status to set a status\",\r\n \"Reputation\": 0,\r\n \"LastUsed\": \"Isnotset\",\r\n \"TargetMember\": 0,\r\n \"BadgeSlot1\": \"Doesn't Have Noob\",\r\n \"BadgeSlot2\": \"Doesn't Have Beginner\",\r\n \"BadgeSlot3\": \"Doesn't Have Leader\",\r\n \"AccountCreated\": _created_at,\r\n \"Premium\": \"No\",\r\n \"Developer\": \"No\",\r\n \"Bank\": 0,\r\n \"Tickets\": 0,\r\n \"LastWithdraw\": \"No date\",\r\n \"LastTransfer\": \"No date\",\r\n \"MarriedTo\": \"Nobody\",\r\n \"MarriedDate\": \"No date\",\r\n })", "def create_advert():\r\n advertiser, category, zone = create_objects()\r\n ad = AdBase.objects.create(\r\n title='Ad Title',\r\n url='www.example.com',\r\n advertiser=advertiser,\r\n category=category,\r\n zone=zone,\r\n )\r\n return ad", "def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account", "def create(self, data):\n url = self.base_url + '/v2/account/create/'\n return self._call_vendasta(url, data)", "def create_account_user_link(\n account_id: str, email_address: str, transport: str = None\n):\n client = AnalyticsAdminServiceClient(transport=transport)\n user_link = client.create_user_link(\n CreateUserLinkRequest(\n parent=f\"accounts/{account_id}\",\n user_link=UserLink(\n email_address=email_address, direct_roles=[\"predefinedRoles/read\"]\n ),\n notify_new_user=True,\n )\n )\n\n print(\"Result:\")\n print(user_link)", "def create_accounts(account_list, user_type):\n account_client = AccountClient()\n for account in account_list:\n try:\n account_client.add_account(account, user_type, email=None)\n except exception.Duplicate:\n pass # Account already exists, no need to create it", "def put_account(self, account):\n \n pass", "def create_whitelist_account():\n print('Creating whitelist account')\n\n env = KinEnvironment('LOCAL', HORIZON_ENDPOINT, PASSPHRASE)\n root_client = KinClient(env)\n root_seed = derive_root_account_seed(PASSPHRASE)\n builder = Builder(env.name, root_client.horizon, 100, root_seed)\n\n builder.append_create_account_op(WHITELIST_ADDRESS, str(100e5))\n builder.sign()\n builder.submit()", "def create_wim_account(self, wim, tenant, properties):\n wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']\n tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,\n SELECT=['uuid', 'name'])\n account = properties.setdefault('name', tenant['name'])\n\n wim_account = self.query_one('wim_accounts',\n WHERE={'wim_id': wim_id, 'name': account},\n error_if_none=False)\n\n transaction = []\n used_uuids = []\n\n if wim_account is None:\n # If a row for the wim account doesn't exist yet, we need to\n # create one, otherwise we can just re-use it.\n account_id = str(generate_uuid())\n used_uuids.append(account_id)\n row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)\n 
transaction.append({'wim_accounts': _preprocess_wim_account(row)})\n else:\n account_id = wim_account['uuid']\n properties.pop('config', None) # Config is too complex to compare\n diff = {k: v for k, v in properties.items() if v != wim_account[k]}\n if diff:\n tip = 'Edit the account first, and then attach it to a tenant'\n raise WimAccountOverwrite(wim_account, diff, tip)\n\n transaction.append({\n 'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],\n 'wim_id': wim_id,\n 'wim_account_id': account_id}})\n\n with self._associate(wim_id, tenant['uuid']):\n self.db.new_rows(transaction, used_uuids, confidential_data=True)\n\n return account_id", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. 
If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def get_account(self, account):\n \n pass", "def test_create_account_campaign(self, create):\n \"\"\"Campaigns should be created\"\"\"\n row = {'PROJ_NAME1': 'Argentina Fund', 'PROJ_NO': '789-CFD',\n 'SUMMARY': 'Some Sum'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, name, acc_type = create.call_args[0]\n self.assertEqual(account.name, 'Argentina Fund')\n self.assertEqual(account.code, '789-CFD')\n self.assertEqual(account.category, Account.COUNTRY)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def _setaccount_with_institution_57A(self, val):\n self.swift_obj.AccountWithInstitution_A = val\n self.swift_obj.AccountWithInstitution_A.swiftTag = '57A'", "def create_account(self, short_name, author_name=None, author_url=None,\n replace_token=True):\n response = self._telegraph.method('createAccount', values={\n 'short_name': short_name,\n 'author_name': author_name,\n 'author_url': author_url\n })\n\n if replace_token:\n self._telegraph.access_token = response.get('access_token')\n\n return response", "def 
set_audience(self, claim=AUDIENCE):\n if api_settings.AUDIENCE is not None:\n self.payload[claim] = api_settings.AUDIENCE", "def create_account(row, issue_map):\n acc_type = account_type(row)\n name = row['PROJ_NAME1']\n if Account.objects.filter(name=name).first():\n name = name + ' (' + row['PROJ_NO'] + ')'\n account = Account(name=name, code=row['PROJ_NO'], category=acc_type)\n if acc_type == Account.PROJECT:\n create_pcpp(account, row, issue_map)\n else:\n create_campaign(account, row, name, acc_type)", "def addUser(self, accountId, username, accesstype, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method addUser\")", "def consent(self, account_id):\n from pureport_client.commands.accounts.consent import Command\n return Command(self.client, account_id)", "def test_consent_3(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"consent-example-CDA.json\"\n inst = consent.Consent.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Consent\" == inst.resource_type\n\n impl_consent_3(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Consent\" == data[\"resourceType\"]\n\n inst2 = consent.Consent(**data)\n impl_consent_3(inst2)", "def create_account(self, user):\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n \"CreateAccount\",\n account_name=user.gov_id,\n domain_id=\"afyamkononi\",\n public_key=user.public_key,\n )\n ]\n )\n IrohaCrypto.sign_transaction(tx, self.creator_account_details.private_key)\n return self.send_transaction_and_return_status(tx)" ]
[ "0.7066297", "0.5953314", "0.5626082", "0.5513869", "0.53991276", "0.5362004", "0.52921796", "0.52679944", "0.5255033", "0.5245276", "0.5200376", "0.51080674", "0.51068354", "0.51045805", "0.51035374", "0.5065382", "0.5017878", "0.49941573", "0.49939954", "0.49919534", "0.49875054", "0.4962661", "0.49614006", "0.49455214", "0.49434245", "0.49395603", "0.49275532", "0.48919347", "0.48662928", "0.485308" ]
0.80276316
0
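Annotation: a hypothetical call of the create_custom_audience_from_website method from this row; the api instance, account id and domain are assumed placeholders.

# Sketch only: the rule built by the method will match URLs containing this domain.
audience = api.create_custom_audience_from_website(
    account_id="1234567890",
    name="Visitors of example.com",
    domain="example.com",
    retention_days=30,
)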
Create a lookalike audience for the given target audience.
def create_lookalike_audience(self, account_id, name, audience_id, lookalike_spec, batch=False): path = "act_%s/customaudiences" % account_id args = { 'name': name, 'origin_audience_id': audience_id, 'lookalike_spec': json.dumps(lookalike_spec), } return self.make_request(path, 'POST', args, batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_custom_audience(self, account_id, name, subtype=None,\n description=None, rule=None, opt_out_link=None,\n retention_days=30, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n }\n if subtype:\n args['subtype'] = subtype\n if description:\n args['description'] = description\n if rule:\n args['rule'] = json.dumps(rule)\n if opt_out_link:\n args['opt_out_link'] = opt_out_link\n if retention_days:\n args['retention_days'] = retention_days\n return self.make_request(path, 'POST', args, batch=batch)", "def _augment_gain(audio, low=0.5, high=1.5):\n g = low + np.random.random_sample(1) * (high - low)\n return audio * g", "def audience(self):\n return \"HealthProfessional\"", "def match_target_amplitude(audio, target_volume):\r\n if audio.dBFS < target_volume:\r\n required_gain = target_volume - audio.dBFS\r\n return audio.apply_gain(required_gain)\r\n else:\r\n return audio", "def _augment_gain(audio, low=0.25, high=1.25):\n gain = low + torch.rand(1) * (high - low)\n return audio * gain", "def interference(target,\n others,\n relative_loudness=0,\n sample_rate=None):\n\n target, accompaniment = utilities.target_accompaniment(target,\n others,\n sample_rate)\n\n if relative_loudness is not None:\n\n accompaniment.loudness = target.loudness + relative_loudness\n\n interference_anchor = target + accompaniment\n interference_anchor.loudness = target.loudness\n\n return interference_anchor", "def create_custom_audience_from_website(\n self, account_id, name, domain, description=None,\n retention_days=30, prefill=True, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'subtype': \"WEBSITE\"\n }\n rule = {'url': {\n 'i_contains': domain,\n }}\n if rule:\n args['rule'] = json.dumps(rule)\n if retention_days:\n args['retention_days'] = retention_days\n if prefill:\n args['prefill'] = prefill\n return self.make_request(path, 'POST', args, batch=batch)", "def an(text):\n text = force_unicode(text)\n if not CONSONANT_SOUND.match(text) and VOWEL_SOUND.match(text):\n return 'an'\n return 'a'", "def test_alaw(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.wav\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=8, encoding=\"a-law\", duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 8\n assert info.encoding == \"ALAW\"", "def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res", "def target_sound_quality(target,\n distortion_factor_target=0.2,\n distortion_factor_noise=0.99,\n lowpass_cutoff_target=3500,\n lowpass_cutoff_noise=3500,\n num_points=2048,\n window_type='hann',\n sample_rate=None):\n\n if not isinstance(target, audio.Wave):\n target = audio.Wave(target, sample_rate)\n\n signals_to_sum = [\n\n distorted_target(target,\n distortion_factor_target,\n lowpass_cutoff_target,\n num_points,\n window_type,\n sample_rate),\n\n musical_noise(target,\n distortion_factor_noise,\n lowpass_cutoff_noise,\n num_points,\n window_type,\n sample_rate),\n ]\n\n for signal in signals_to_sum:\n signal.loudness = -23\n\n target_sound_quality_anchor = 
sum(signals_to_sum)[:target.num_frames]\n target_sound_quality_anchor.loudness = target.loudness\n\n return target_sound_quality_anchor", "def prepare_audio(a_name, target=False):\n samprate = 16000 # Sampling Rate\n length = 16 # Amount of blocks for 1 walkthrough\n overlap = 8 # Step between samples in amount of blocks\n fft = 1024 # Length of block (64ms)\n\n # Upload and preparing data sets\n # audio_path = \"raw_data_wav/\"\n # full_a_name = audio_path + a_name\n print('loading %s' % a_name)\n audio, _ = lr.load(a_name, sr=samprate)\n audio = filter_audio(audio) # Removing silence and spaces between words\n data = lr.stft(audio, n_fft=fft).swapaxes(0, 1) # Export spectrogram\n samples = []\n\n for i in range(0, len(data) - length, overlap):\n samples.append(np.abs(data[i:i + length])) # Create training sample\n\n results_shape = (len(samples), 1)\n results = np.ones(results_shape) if target else np.zeros(results_shape)\n\n return np.array(samples), results", "def audiences(self, audiences):\n if audiences is None:\n raise ValueError(\"Invalid value for `audiences`, must not be `None`\") # noqa: E501\n\n self._audiences = audiences", "def set_audience(self, claim=AUDIENCE):\n if api_settings.AUDIENCE is not None:\n self.payload[claim] = api_settings.AUDIENCE", "def overall_quality(target,\n others,\n distortion_factor_target=None,\n distortion_factor_noise=0.99,\n lowpass_cutoff_target=3500,\n lowpass_cutoff_noise=None,\n relative_loudness=0,\n num_points=2048,\n window_type='hann',\n sample_rate=None):\n\n target, accompaniment = utilities.target_accompaniment(target,\n others,\n sample_rate)\n\n signals_to_sum = [\n\n distorted_target(target,\n distortion_factor_target,\n lowpass_cutoff_target,\n num_points,\n window_type,\n sample_rate),\n\n musical_noise(target,\n distortion_factor_noise,\n lowpass_cutoff_noise,\n num_points,\n window_type,\n sample_rate),\n accompaniment\n ]\n\n for signal in signals_to_sum:\n signal.loudness = -23\n\n signals_to_sum[2].loudness = -23 + relative_loudness\n\n overall_quality_anchor = sum(signals_to_sum)[:target.num_frames]\n overall_quality_anchor.loudness = target.loudness\n\n return overall_quality_anchor", "def createTTSGoal(text, lang_id='', wait_before_speaking=rospy.Duration(0.0)):\n sound_goal = SoundGoal()\n sound_goal.text=text\n sound_goal.lang_id=lang_id\n sound_goal.wait_before_speaking=wait_before_speaking\n return sound_goal", "def perform_create(self, serializer):\r\n serializer.save(user_type=\"SPEAKER\")", "def to_adverb(self):\n\n text = self.text\n ending = text[-1]\n if ending == \"e\":\n text = text[0:-1]+\"ly\"\n else:\n text = text+\"ly\"\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=text)\n\n # return AdverbPhrase(**self.locals(skip=[\"text\", \"typ\", \"variants\"]),\n # text=text,\n # **self.variants)", "def create_alien(settings, screen, aliens):\n alien = Alien(settings, screen)\n aliens.add(alien)", "def ar(\n cls,\n freqoffset=0,\n freqscale=1,\n specifications_array_ref=None,\n ):\n import supriya.synthdefs\n calculation_rate = supriya.CalculationRate.AUDIO\n ugen = cls._new_expanded(\n calculation_rate=calculation_rate,\n freqoffset=freqoffset,\n freqscale=freqscale,\n specifications_array_ref=specifications_array_ref,\n )\n return ugen", "def audience_simple(country):\r\n if country == 'US':\r\n return 'USA'\r\n elif country == 'IN':\r\n return 'India'\r\n else:\r\n return 'Other'", "def create_wan(api_auth, parameters, contexts):\n try:\n WAN_Name = parameters[\"WANType\"] \n\n except KeyError as e:\n 
error_string = \"Error processing createWAN intent. {0}\".format(e)\n logging.error(error_string)\n return error_string\n\n res = api_auth.wan.create_wan(WAN_Name) #Calls SteelConnectAPI and creates the WAN\n\n if res.status_code == 200:\n speech = \"A WAN called {} was created\".format(WAN_Name)\n elif res.status_code == 400:\n speech = \"Invalid parameters: {}\".format(res.json()[\"error\"][\"message\"])\n elif res.status_code == 500:\n speech = \"Error: Could not create WAN {}\".format(WAN_Name)\n else:\n speech = \"Error: Could not connect to SteelConnect\"\n\n logging.debug(speech)\n\n return speech", "def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")", "async def karaoke(\n client,\n event,\n filter_band : P('float', 'Filter band' , min_value = 0.0, max_value = 48000.0) = 220.0,\n filter_width: P('float', 'Filter width' , min_value = 0.0, max_value = 48000.0) = 100.0,\n level : P('float', 'Effect level' , min_value = 0.0, max_value = 5.0) = 1.0,\n mono_level : P('float', 'Effect mono level', min_value = 0.0, max_value = 5.0) = 1.0,\n):\n player = get_player_or_abort(client, event)\n \n filter = Karaoke(filter_band = filter_band, filter_width = filter_width, level = level, mono_level = mono_level)\n player.add_filter(filter)\n await player.apply_filters()\n \n return create_filter_added_embed(filter)", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def generateAudio(audiotype: str, audiometadata: dict):\n try:\n audiotype = audiotype.lower()\n\n if audiotype == \"song\":\n file = Song(audiometadata)\n elif audiotype == \"podcast\":\n file = Podcast(audiometadata)\n elif audiotype == \"audiobook\":\n file = Audiobook(audiometadata)\n else:\n return None\n\n return file\n\n except MetadataValueError as error:\n raise MetadataValueError(error)\n\n except MetadataGenerationError as error:\n raise MetadataGenerationError(error)", "def create_audio_file():\n # Get the response from boto3\n raw_audio = generate_audio()\n # pull the Audiostream object from the response from boto3\n raw_audio = raw_audio[\"AudioStream\"]\n # create output location\n # process the whole block\n with closing(raw_audio) as audio:\n with open(\"output_audio.mp3\", \"wb\") as file:\n file.write(raw_audio.read())", "def create_asa(self):\n self.asa_id = blockchain_utils.create_algorand_standard_asset(client=self.client,\n creator_private_key=self.app_creator_pk,\n unit_name=self.asa_unit_name,\n asset_name=self.asa_asset_name,\n total=1,\n decimals=0,\n manager_address=self.app_creator_address,\n reserve_address=self.app_creator_address,\n freeze_address=self.app_creator_address,\n clawback_address=self.app_creator_address,\n default_frozen=True)", "def create_audiobook():\n\n f = open(\"static/files/book.txt\", \"r\", encoding=\"utf-8\")\n summary = f.read()\n print('total chars: ', len(summary))\n all_words = summary.split('.')\n aflr.api_key = \"b6b1434676d14bdfbf9f50ca2157ed5c\"\n VOICE=\"Matthew\"\n current, total_chars, chunk_num, TEXT = 0,0,0,[]\n while current < len(all_words) - 1:\n while total_chars <= 4999:\n 
TEXT.append(all_words[current])\n total_chars += len(all_words[current]) + 1\n current += 1\n if current == len(all_words):\n break\n \n if current < len(all_words):\n TEXT.pop()\n current -= 1\n total_chars = 0\n\n TEXT = \".\".join(TEXT)\n\n SPEED=80\n script = aflr.Script().create(\n scriptText=TEXT,\n projectName=\"may_the_4th\",\n moduleName=\"evil\",\n scriptName=f\"{chunk_num}_evil_{VOICE}\",\n )\n print(f\"Connect to the dev star: \\n {script} \\n\")\n\n scriptId = script[\"scriptId\"]\n\n response = aflr.Speech().create(\n scriptId=scriptId, voice=VOICE, speed=SPEED, #effect=EFFECT\n )\n # print(f\"Response from dev star: \\n {response} \\n\")\n # mastering current\n response = aflr.Mastering().create(\n scriptId=scriptId, #backgroundTrackId=BACKGROUNDTRACK\n )\n # print(f\"Using the force: \\n {response} \\n\")\n\n url = aflr.Mastering().retrieve(scriptId=scriptId)\n #print(f\"url to download the track: \\n {url} \\n\")\n\n # or download\n file = aflr.Mastering().download(\n scriptId=scriptId, destination=MINI_PATH\n )\n # print(f\"Listen to the results of the force: \\n {file} \\n\")\n\n print(\"finished\",chunk_num)\n\n TEXT = []\n chunk_num += 1\n\n play_audio()", "def to_adverb(self):\n\n if 'AdverbPhrase' in self.variants:\n return self.variants['AdverbPhrase']\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=\"with \" + self.text)" ]
[ "0.5625991", "0.5403802", "0.5344336", "0.52933955", "0.51975965", "0.5187194", "0.5138217", "0.4931905", "0.49309886", "0.4832704", "0.47631374", "0.46816403", "0.46627328", "0.46468496", "0.46085468", "0.45594066", "0.45591733", "0.4554906", "0.45450443", "0.45251665", "0.4520834", "0.45087266", "0.44788408", "0.4438393", "0.44260287", "0.4423747", "0.4422545", "0.4404253", "0.43928292", "0.43747196" ]
0.73080236
0
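Annotation: a hypothetical call of the create_lookalike_audience method from this row. The lookalike_spec keys shown are an assumption, not taken from the row; the method itself only json-encodes whatever dict it receives.

# Sketch only: "api", the ids and the spec contents are placeholders.
lookalike_spec = {"country": "US", "ratio": 0.01}
audience = api.create_lookalike_audience(
    account_id="1234567890",
    name="Lookalike of website visitors",
    audience_id="6001234567890",
    lookalike_spec=lookalike_spec,
)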
Creates an offsite pixel for the given account.
def create_offsite_pixel(self, account_id, name, tag, batch=False): path = 'act_%s/offsitepixels' % account_id args = { 'name': name, 'tag': tag, } return self.make_request(path, 'POST', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_custom_audience_pixel(self, account_id, batch=False):\n path = \"act_%s/adspixels\" % account_id\n return self.make_request(path, 'POST', batch=batch)", "def createNeoPixelObject():\r\n return neopixel.NeoPixel(machine.Pin(PIN), NUM_NEOPIXELS)", "def get_offsite_pixels(self, account_id, batch=False):\n path = 'act_%s/offsitepixels' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def get_offsite_pixel(self, offsite_pixel_id, batch=False):\n path = '%s' % offsite_pixel_id\n return self.make_request(path, 'GET', batch=batch)", "def place_pixel_image(self, id):\n\n\t\t# Log the page view and user id for tracking purposes.\n\t\tself.log_page_view(self.path, id)\n\t\t# Just a fake pixel image. In a real application it would make a call to a tracking service.\n\t\treturn \"<img src=data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==>\"", "def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)", "def get_ads_pixels(self, account_id, fields=None, batch=False):\n path = 'act_%s/adspixels' % account_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def get_remarketing_pixel(self, account_id, batch=False):\n logger.warn(\"This method is deprecated and is replaced with get_ads_pixels.\")\n path = 'act_%s/remarketingpixelcode' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def create_point(xa,ya,col):\n disque = canvas.create_oval(xa-(rayon),ya-(rayon),xa+(rayon),ya+(rayon),fill=\"white\",outline=col)\n return disque", "def draw_pixel(x, y, col):\n unicornhathd.set_pixel(x, 12 - y, col[0], col[1], col[2])", "def get_image(self):\n image = Image.new('1', (8, 16))\n draw = ImageDraw.Draw(image)\n for x in xrange(8):\n for y in xrange(16):\n draw.point((x,y),self.get_pixel(x, y))\n return image", "def draw_point(x, y):\n map_image = Image.open('map.png')\n map_image.putpixel((x, y), (0, 255, 0))\n map_image.save('map.png')\n map_image.show('map.png')", "def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def generate_xy(number_pixels, center=0.0 * apu.arcsec, pixel_size=1.0 * apu.arcsec):\n x = (np.arange(number_pixels) - number_pixels / 2 + 0.5) * pixel_size + center\n return x", "def _create_layer() -> Image:\n data = np.random.random((32, 16))\n return Image(data)", "def _draw_hex(screen):\n # Initialize one hex and draw it on the screen.\n my_tile = HexTile(350, 350, 50)\n pygame.draw.polygon(screen, COLORS[HexType.Land], my_tile.corners)", "def make_image(self, mode=\"L\") -> Image:\r\n return Image.fromarray(self.fb, mode=\"L\")", "def create_white_picture(pic_width, pic_height):\n white_picture = Image.new(\"1\", (pic_width, pic_height), (1))\n return white_picture", "def add_neopixel(self, number, neo_dict):\n if number > self.numPixels:\n self.numPixels = number + 1\n pixel_number = self.cardNum + '-' + str(number)\n pixel = OPPNeopixel(pixel_number, self)\n neo_dict[pixel_number] = pixel\n return pixel", "def naive_paste(pixSrc, pixPng, src_id, logo_id):\n pixSrc[src_id] = 
pixPng[logo_id]\n return pixSrc", "def pixel( self, point, color ):\n\t\tif self.limit_pixel( point ):\n\t\t\treturn\n\t\tself.lcd.setCursorAddr(self.x_offset+point[0], self.y_offset+point[1], self.x_offset+point[0], self.y_offset+point[1] )\n\t\tself.lcd.writeToRam()\n\t\tcolorBuf = bytes( [(color >> 8) & 0xFF, color & 0xFF] )\n\t\tself.lcd.writeDatBytes(colorBuf)", "def get_local_image(self, trace_dict):\n coords = None\n trace_id_map = []\n\n counter = 0\n for trace in self.traces:\n if coords is None:\n coords = np.array(trace.coords)\n else:\n coords = np.concatenate((coords, trace.coords), axis=1)\n trace_id_map += [counter for i in range(trace.coords.shape[1])]\n counter += 1\n\n #self.calc_cmy_asc_des()\n\n max_width = np.max(coords[0, :]) - np.min(coords[0, :])\n max_height = np.max(coords[1, :]) - np.min(coords[1, :])\n max_length = max(max_width, max_height)\n coords[0, :] = (coords[0, :] - np.min(coords[0, :])) * 25.0 // (max_length + 1e-12)\n coords[1, :] = (coords[1, :] - np.min(coords[1, :])) * 25.0 // (max_length + 1e-12)\n center_x = (np.max(coords[0, :]) - np.min(coords[0, :])) // 2\n center_y = (np.max(coords[1, :]) - np.min(coords[1, :])) // 2\n coords[0, :] = coords[0, :] - center_x + 16\n coords[1, :] = coords[1, :] - center_y + 16\n coords = coords.astype(np.int)\n prev_id = -1\n image = np.zeros((32, 32), dtype=np.uint8)\n for idx in range(coords.shape[1]):\n if prev_id == trace_id_map[idx]:\n rr, cc = line(prev_x, prev_y, coords[0, idx], coords[1, idx])\n image[cc, rr] = 255\n prev_id = trace_id_map[idx]\n prev_x = coords[0, idx]\n prev_y = coords[1, idx]\n\n return image", "def create_image(path, pxcount):\n img = Image.open(path, 'r').convert('L')\n pixels = img.load()\n for i in range(pxcount):\n x = randint(0, img.size[0]-1)\n y = randint(0, img.size[0]-1)\n if pixels[x, y] == 0:\n pixels[x, y] = 255\n else:\n pixels[x, y] = 0\n return img", "def create_account():\n account = w3.eth.account.create()\n return account", "def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 7:\n # Ignore out of bounds pixels.\n return\n\n self.set_led(y * 16 + ((x + 7) % 8), value)", "def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n self.set_led( y * 16 + x, value)\n else:\n self.set_led((y-8) * 16 + (x+8), value)", "def setPixel(self, px, py, color):\n if not self.inBounds(px,py):\n return\n idx = py*self.w + px\n self.data[idx] = color", "def set_pixel(image, pt, color):\n\timage[pt[0], pt[1]] = color" ]
[ "0.6237579", "0.55753154", "0.54230386", "0.53433794", "0.5102814", "0.5056824", "0.50096446", "0.49614307", "0.49268848", "0.479304", "0.47540647", "0.4753462", "0.46652997", "0.46337742", "0.4580019", "0.45632777", "0.45229763", "0.4512287", "0.45116362", "0.45088103", "0.45051742", "0.44848734", "0.44760683", "0.44321427", "0.44272697", "0.43905658", "0.43861473", "0.43595457", "0.43579155", "0.43533412" ]
0.7383441
0
Returns Facebook connection objects for a given account
def get_connection_objects(self, account_id, business_id=None, batch=False): path = 'act_{}/connectionobjects'.format(account_id) args = {} if business_id: args['business_id'] = business_id return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def social_connection(request):\n backend_map = {'facebook': {'name': 'facebook', 'connected': False,\n 'dc_url': reverse('social:disconnect', kwargs={'backend': 'facebook'})},\n 'google': {'name': 'google-oauth2', 'connected': False,\n 'dc_url': reverse('social:disconnect', kwargs={'backend': 'google-oauth2'})}\n }\n accounts = UserSocialAuth.objects.filter(user=request.user)\n\n for account in accounts:\n for k, v in backend_map.iteritems():\n if v['name'] == account.provider:\n backend_map[k]['connected'] = True\n\n return render_to_response('base/social_account.html',\n context_instance=RequestContext(request, {'accounts': backend_map}))", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def _get_linkedin_accounts(self, linkedin_access_token):\n response = requests.get(\n 'https://api.linkedin.com/v2/me?projection='\n + '(id,localizedLastName,localizedFirstName,'\n + 'profilePicture(displayImage~:playableStreams))',\n headers={\n 'Authorization': 'Bearer ' + linkedin_access_token,\n 'cache-control': 'no-cache',\n 'X-Restli-Protocol-Version': '2.0.0'\n }\n ).json()\n\n if ('id' in response and 'localizedLastName' in response\n and 'localizedFirstName' in response):\n linkedin_account_id = 'urn:li:person:' + response['id']\n\n try:\n image_url = response['profilePicture']['displayImage~']['elements'][0]['identifiers'][0]['identifier']\n linkedin_profile_image = base64.b64encode(requests.get(image_url).content)\n except Exception:\n linkedin_profile_image = ''\n\n # TODO - STD: add each companies page\n return [{\n 'name': response['localizedLastName'] + ' ' + response['localizedFirstName'],\n 'linkedin_account_id': linkedin_account_id,\n 'linkedin_access_token': linkedin_access_token,\n 'image': linkedin_profile_image\n }]\n\n return []", "def user_connections(self):\r\n return users.UserConnections(self)", "def list(self, request, *args, **kwargs):\n queryset = BankConnections.objects.filter(user=self.request.user)\n \n response = [{\n \"connected\": True if connection.isTokenValid else False,\n \"bank\": connection.bank_branch.bank.id\n } for connection in queryset]\n\n return Response(response)", "async def get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in await self.query(CONNECTION_URL)]", "def get_connections(network, user):\n if not user in network:\n return None\n if not 'connections' in network[user]:\n return []\n return network[user]['connections']", "def fetch_account_streamers(account:str):\n for config in accounts:\n if account in config['streamers']:\n return config['streamers']\n return", "def accounts(self):\n return SleipnirMapper(self, self.ACCOUNTS_API, ignore=[\"me\"])", "def 
get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in self.query(CONNECTION_URL)]", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def get_account(self, account):\n \n pass", "async def get_follow_counts(db, account: str):\n account_id = await _get_account_id(db, account)\n sql = \"\"\"SELECT following, followers\n FROM hive_accounts\n WHERE id = :account_id\"\"\"\n return dict(await db.query_row(sql, account_id=account_id))", "def GetAccount(host):\n return FetchUrlJson(host, 'accounts/self')", "def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information", "def accounts(self):\r\n return accounts.Accounts(self)", "def get_connections_accountname(self):\n account_info = self.get_account()\n return getattr(account_info, 'account_name', None)", "def get_accounts(self):\n return self.accounts.all()", "def fetch_account_catalogs(account:str):\n for config in accounts:\n if account in config['streamers']:\n return config['catalogs']\n return", "def accounts(web3):\n return web3.eth.accounts", "def connections(self, account_id):\n from pureport_client.commands.accounts.connections import Command\n return Command(self.client, account_id)", "def get_account():\n\n bus = session_bus()\n\n goa_manager = bus.get_object(GOA_NAME, GOA_PATH)\n\n goa_objects = goa_manager.GetManagedObjects(dbus_interface=OBJECT_MANAGER)\n\n accounts = [\n obj for obj in goa_objects\n if obj != GOA_MANAGER_PATH\n ]\n\n if len(accounts) > 1:\n sys.exit(\"More than one account found.\")\n\n (account_path,) = accounts\n\n return bus.get_object(GOA_NAME, account_path)", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def get_wim_accounts(self, **kwargs):\n kwargs.setdefault('postprocess', _postprocess_wim_account)\n kwargs.setdefault('WHERE', {\"sdn\": \"false\"})\n return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)", "def list_accounts(self):\n pass", "def fbconnect():\n\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n access_token = request.data\n access_token = access_token.decode(\"utf-8\")\n # Exchange client token for long-lived server-side token with GET \n # /oauth/access_toke?grant_type=fb_exchange_token&client_id={app-id}&\n # client_secret={app-secret}&fb_exchange_token={short-lived-token}\n app_id = json.loads(open('fb_client_secrets.json','r').read())['web']['app_id']\n app_secret = json.loads(open('fb_client_secrets.json','r').read())['web']['app_secret']\n url = \"https://graph.facebook.com/oauth/access_token?grant_type=\"+\\\n \"fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s\"\\\n % (app_id, app_secret, access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n result_string = result.decode(\"utf-8\")\n # Use token to get user info from API\n userinfo_url = \"https://graph.facebook.com/v3.2/me\"\n # Strip expire tag from access token\n token = result_string.split(',')[0].split(':')[1].replace('\"', '')\n url = \"https://graph.facebook.com/v3.2/\"+\\\n \"me?access_token=%s&fields=name,id,email\" % token\n h = httplib2.Http()\n result 
= h.request(url, 'GET')[1]\n result_string = result.decode(\"utf-8\")\n data = json.loads(result_string)\n login_session['provider'] = 'facebook'\n login_session['username'] = data[\"name\"]\n login_session['email'] = data[\"email\"]\n login_session['facebook_id'] = data[\"id\"]\n # Get user picture\n url = \"https://graph.facebook.com/v3.3/\"+\\\n \"me?access_token=%s&fields=picture\" % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data = json.loads(result)\n login_session['picture'] = data[\"picture\"][\"data\"][\"url\"]\n # See if user exists\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n output = ''\n output += '<div class=\"container text-center\">'+\\\n '<div class=\"row justify-content-md-center\">'+\\\n '<div class=\"col-md-8 border p-1 m-1\">'+\\\n '<pclass=\"m-1\">Welcome, '\n output += login_session['username']\n output += '!</p>'\n output += '<div class=\"d-flex justify-content-center m-1\">'+\\\n '<img class=\"rounded mx-auto d-block\" width=\"30%\" src=\"'\n output += login_session['picture']\n output += '\"></div></div></div></div>'\n return output", "def get_user_friends(acct, KEY, SECRET): # this isn't true - evaluate what needs to be returned tomorrow.\n\n new_gr_session = OAuth1Session(\n consumer_key=KEY,\n consumer_secret=SECRET,\n access_token=acct.access_token,\n access_token_secret=acct.access_token_secret\n )\n\n user_id = str(acct.user.gr_id)\n current_page = 1\n\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n\n # check for no friends first\n if len(friends) == 0:\n flash(\"No Goodreads friends found.\")\n print \"No friends!\"\n\n # friends requests return a list of 30 at a time\n # get total number of pages required.\n total_pages = int(math.ceil(total / float(30)))\n # creates new users and adds friendship relationships to db\n add_user_friendships(friends, acct)\n\n # check for more than 30 friends\n if total_pages > 1:\n\n current_page = 2\n while current_page <= total_pages:\n\n print \"******YOU HAVE MORE FRIENDS*******\"\n\n # wait 1 second between calls, per GR policy\n time.sleep(1.00)\n\n # create new query with updated current_page\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n add_user_friendships(friends, acct)\n current_page += 1\n\n return None", "def get_accounts(self):\n\n\t\treturn self.__accounts" ]
[ "0.62899584", "0.59851533", "0.57604873", "0.5618666", "0.5599102", "0.5426997", "0.5426842", "0.5399525", "0.53759795", "0.5356126", "0.5348449", "0.5337424", "0.5334071", "0.52884686", "0.5246142", "0.5237798", "0.5228069", "0.5206267", "0.5201796", "0.51963705", "0.5180999", "0.51741534", "0.5170348", "0.51570153", "0.51305497", "0.5110707", "0.5102732", "0.5099931", "0.5067723", "0.50668114" ]
0.6598974
0
Get broad targeting categories for the given account
def get_broad_targeting_categories(self, account_id, user_adclusters=None, excluded_user_adclusters=None, batch=False): path = 'act_{}/broadtargetingcategories'.format(account_id) args = {} if user_adclusters: args['user_adclusters'] = user_adclusters if excluded_user_adclusters: args['excluded_user_adclusters'] = excluded_user_adclusters return self.make_request(path, 'GET', args, batch=batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_categories(self, blogid=1):\n return self.execute('metaWeblog.getCategories', blogid, self.username, self.password)", "def get_categories(self):\n _url = urljoin(self.base_url, self.API_CATEGORIES)\n return requests.get(_url)", "def get_categories(teach_id):\n query = \"SELECT category_id FROM teacher_categories WHERE teacher_account_id = %s;\"\n args = (teach_id,)\n return database.connection.get_data(query, args)", "def get_trending_categories(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Trending/Categories/\"))", "def categories(self):\n cur = self.con.execute('select category from cc');\n return [d[0] for d in cur]", "def get_categories(self) -> list:\n headers_dict = {\n 'user-key': self.user_key.key\n }\n\n endpoint = f'{const.API_HOST}{const.API_SNAPSHOTS_TAXONOMY_BASEPATH}'\n\n response = req.api_send_request(method='GET', endpoint_url=endpoint, headers=headers_dict)\n\n if response.status_code == 200:\n return [entry['attributes']['name'] for entry in response.json()['data']]\n\n raise RuntimeError('API Request returned an unexpected HTTP status')", "def categories(self):\n pass", "def get_blog_categories(parser, token):\n\ttry:\n\t\ttag_name, arg = token.contents.split(None, 1)\n\texcept ValueError:\n\t\traise template.TemplateSyntaxError, \"%s tag requires arguments\" % token.contents.split()[0]\n\tm = re.search(r'as (\\w+)', arg)\n\tif not m:\n\t\traise template.TemplateSyntaxError, \"%s tag had invalid arguments\" % tag_name\n\tvar_name = m.groups()[0]\n\treturn BlogCategories(var_name)", "def getCategory():", "def GetAllCategoryOfCost():\n\n logs.logger.debug(\n \"Start to get back all categories of Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all categories of Cost objects from database.\")\n return [CostItems.category for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def SearchForCategories(self, searchterm, domain='US'):\n # Check if valid domain\n if domain not in dcodes:\n raise Exception('Invalid domain code')\n\n payload = {'key': self.accesskey,\n 'domain': dcodes.index(domain),\n 'type': 'category',\n 'term': searchterm}\n\n r = requests.get('https://api.keepa.com/search/?', params=payload)\n response = r.json()\n\n if response['categories'] == {}:\n log.info('Categories search results not yet available or no ' +\n 'search terms found.')\n else:\n return response['categories']", "def categories(self):\n return self.env.categories", "def get_category_for_account(account_name):\n value = _reverse.get(account_name, _default_category)\n\n if value == _default_category:\n print 'Returning: %s -> %s' % (account_name, _default_category)\n\n return value", "def get_categories(self,pipeline, object_name):\n result = self.get_object_categories(pipeline, object_name,\n {self.object_name.value: [] })\n return result", "def get_categories(self) -> tuple:\n return self.categories", "def get_categories(self):\r\n return self.ancestors.filter(category=True)", "def _get_categories(self, *args):\n raise NotImplementedError(self, \"_get_categories\")", "def get_categories():\n categories = []\n r = requests.get(\"http://api.meetup.com/2/categories\", params=params)\n try:\n json = r.json()\n if 'code' in json:\n if \"key\" in params:\n print(\" Client throttled, \" +\n \"use another key or try it again later\")\n 
else:\n print(\"MeetUp key required. Add it by calling add_key() \" +\n \"function\")\n sys.exit(0)\n categories.extend(json['results'])\n return categories\n except Exception:\n print(\" Reading error, trying again\")\n return get_categories()", "def search_categories(self):\n with Transaction().start(DBNAME, 1):\n categorieslist = self.Category.search(['parent', '=', 'Ingredients'])\n return tuple(i.name for i in categorieslist)", "def get_categories(self):\n if self.is_child:\n return self.parent.categories.browsable()\n else:\n return self.categories.browsable()", "def get_categories(self, project):\n serializer = CategorySerializer(\n project.categories.all().exclude(fields=None), many=True)\n return serializer.data", "def get_cats(bfo):\n primary_report_numbers = bfo.fields('037__')\n additional_report_numbers = bfo.fields('088__')\n report_numbers = primary_report_numbers\n report_numbers.extend(additional_report_numbers)\n\n cat = [num.get('c','') for num in report_numbers if num.get('9') == 'arXiv' or num.get('s')=='arXiv']\n\n return cat", "def get_target_data_categories(self) -> List[str]:\n return [target.data_category for target in self.targets]", "def request_categories(self, request_url):\n\n resp = requests.get(request_url)\n\n return resp.json()", "def get_descendant_categories(self, include_self: bool=True) -> Type[QuerySet]:\n return self.get_descendants(include_self=include_self)", "def categories(self):\n\t\treturn self._categories", "def categories(self):\n game_categories = self.game_categories.all()\n return [ gc.category for gc in game_categories ]", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat", "def get_categories():\n try:\n result = {\n \"success\": True,\n \"categories\": get_all_categories()\n }\n return jsonify(result)\n\n except Exception as exp:\n abort(exp.code)", "def get_categories():\n item_type = \"categories\"\n info_dict = spotify.categories()\n items = info_dict[item_type][\"items\"]\n categories = []\n for i in range(len(items)):\n category_name = items[i][\"name\"]\n category_id = items[i][\"id\"]\n categories.append({\"Category Name\": category_name,\n \"Category ID\": category_id\n })\n return categories" ]
[ "0.6130099", "0.59468395", "0.5759809", "0.5756579", "0.57555073", "0.5742155", "0.55958956", "0.5588522", "0.5574741", "0.55350673", "0.5518989", "0.5507046", "0.5492137", "0.5491411", "0.54852617", "0.54808754", "0.54740864", "0.544625", "0.5427394", "0.5423977", "0.5418358", "0.53976417", "0.53801656", "0.53581625", "0.53449136", "0.5344758", "0.53443307", "0.5342639", "0.5321779", "0.52972233" ]
0.7665715
0
Sorts a list of index rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings.
def sort_rings(index_rings, vertices): # sort index_rings into corresponding "polygons" areas = list() for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = dict() _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = list() for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = list() for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_rings(ring_list, om_pickle_file):\n basic_output_on.dprint(\"\\nSorting closed rings...\",'nr')\n bdry_ring = max(ring_list, key=lambda rg: rg.maxR)\n outside_point = bdry_ring.center + 2*bdry_ring.maxR # is outside all rings\n\n sorted_closed_ring_indices = ['core']\n sorted_closed_ring_indices += \\\n sorted([rl_ind for rl_ind, r in enumerate(ring_list) if r.isClosed()],\n key=lambda idx: ring_list[idx].maxR)\n\n closed_pairs = [ClosedPair(ring_list,\n outside_point,\n sorted_closed_ring_indices[k-1],\n sorted_closed_ring_indices[k])\n for k in range(1, len(sorted_closed_ring_indices))]\n\n # Find the lines to the boundary and the path given\n if not use_alternative_sorting_method:\n center = ring_list[0].center\n d = 1.5 * bdry_ring.maxR\n pts = [center - d + d*1j, center - d - d*1j,\n center + d - d*1j, center + d + d*1j]\n rectangle_containing_bdry = \\\n Path(*[Line(pts[i], pts[(i+1) % 4]) for i in range(4)])\n for r in ring_list:\n if not r.isClosed():\n r.findLines2Bdry(rectangle_containing_bdry)\n\n # figure out which open (incomplete) rings live between which closed rings\n basic_output_on.dprint(\n \"Done, closed rings sorted.\\nNow determining which open rings \"\n \"lie between which closed pairs of rings...\", 'nr'\n )\n start_time = current_time()\n unlocated_open_ring_indices = \\\n set(i for i, r in enumerate(ring_list) if not r.isClosed())\n\n for cp in closed_pairs:\n cp.contents = [r_idx for r_idx in unlocated_open_ring_indices\n if cp.contains(r_idx)]\n unlocated_open_ring_indices -= set(cp.contents)\n\n # there should not be any unlocated open ring indices\n # in case there are, this is likely caused by intersections\n if unlocated_open_ring_indices:\n debug_unlocated_rings_and_raise_error(\n unlocated_open_ring_indices, ring_list, closed_pairs)\n\n basic_output_on.dprint(\n \"\\rFinished locating open rings. 
Total time elapsed: %s\"\n \"\" % format_time(current_time()-start_time))\n\n# ###DEBUG ONLY TEST slideshow (of which rings are put in which closed ring pairs)\n# basic_output_on.dprint(\"creating slideshow of which rings are located between which closed ring pairs...\",'nr')\n# from os import path as os_path\n# from options4rings import output_directory\n# from andysSVGpathTools import svgSlideShow\n# save_dir = os_path.join(output_directory,'debug','slideshow_closed_pair_inclusions')\n# pathcolortuplelist = []\n# paths = [ring.path for ring in ring_list]\n# for cp in closed_pairs:\n# colors = ['yellow']*len(paths)\n# if cp.inner_index !='core':\n# colors[cp.inner_index] = 'red'\n# colors[cp.outer_index] = 'green'\n# for i in cp.contents:\n# colors[i] = 'blue'\n# pathcolortuplelist.append((paths,colors))\n# svgSlideShow(pathcolortuplelist,save_directory=save_dir,clear_directory=True,suppressOutput=not basic_output_on.b)\n# ###End of DEBUG ONLY TEST slideshow (of which rings are put in which closed ring pairs)\n\n # sort the open rings inside each pair of closed rings\n start_time = current_time()\n \n ordering_matrices_pickle_extant = False\n if look_for_user_sort_input:\n try:\n ordering_matrices = pickle.load(open(om_pickle_file, \"rb\"))\n ordering_matrices_pickle_extant = True\n except:\n from warnings import warn\n warn(\"No ordering matrices pickle file found.\");sleep(1)\n\n class RingIndexCmp:\n def __init__(self, outer_closed_path):\n self.boundary = outer_closed_path\n\n if opt.use_alternative_sorting_method:\n def __call__(self, idx1, idx2):\n return ring1_isoutside_ring2_cmp_alt(\n ring_list, idx1, idx2, boundary=self.boundary\n )\n else:\n def __call__(self, idx1, idx2):\n return ring1_isoutside_ring2_cmp(\n ring_list[idx1], ring_list[idx2], outside_point,\n self.boundary\n )\n\n basic_output_on.dprint(\"Sorting open rings inside each cp...\")\n start_time_cp_sorting = current_time()\n et = 0\n cp_oms = []\n flag_count = 0\n num_seg_pairs2check = sum([sum([len(ring_list[i].path)*(len(ring_list[j].path)-1)/2 for (i,j) in combinations(cp.contents,2)]) for cp in closed_pairs])\n num_seg_pairs_checked = 0\n cyclic_dependencies = []\n for k, cp in enumerate(closed_pairs):\n if not len(cp.contents):\n if not ordering_matrices_pickle_extant:\n cp_oms.append([])\n continue\n if ordering_matrices_pickle_extant:\n om = ordering_matrices[k]\n #THIS BLOCK IS REPLACED BELOW (DELETE BLOCK)...\n# for i in len(om):\n# for j in len(om):\n# if isnan(om[i,j]):\n# om[i,j] = ask_user_to_sort(i,j,ring_list,make_svg=True,ask_later=False)\n# om[j,i] = -om[i,j] #...THIS BLOCK IS REPLACED BELOW (DELETE BLOCK)\n tmp_time = current_time()\n for i,j in transpose(where(isnan(om))):\n if i<j:\n om[i,j] = ask_user_to_sort(cp.contents[i], \n cp.contents[j],\n ring_list,make_svg=True, \n ask_later=False)\n om[j,i] = -om[i,j]\n start_time_cp_sorting -= current_time() - tmp_time \n else:\n om = createOrderingMatrix(cp.contents, RingIndexCmp(cp.outer.path))\n cp_oms.append(om)\n try:\n assert not any(flattenList(isnan(om)))\n except AssertionError:\n flag_count += 1\n pass\n num_seg_pairs_checked += sum(\n len(ring_list[i].path) * (len(ring_list[j].path) - 1) / 2\n for i, j in combinations(cp.contents, 2)\n )\n\n try: # lazy fix for test cases where num_seg_pairs2check==0\n percent_complete = num_seg_pairs_checked/num_seg_pairs2check\n except ZeroDivisionError:\n percent_complete = k/len(closed_pairs)\n pass\n\n if not flag_count:\n psorting, cp_cyclic_dependencies = \\\n topo_sorted(cp.contents, 
RingIndexCmp(cp.outer.path), ordering_matrix=om)\n if cp_cyclic_dependencies:\n cyclic_dependencies.append(cp_cyclic_dependencies)\n\n cp.contents = [cp.contents[index] for index in flattenList(psorting)]\n cp.contents_psorting = psorting\n et_tmp = current_time() - start_time_cp_sorting\n \n if et_tmp > et + 3:\n et = et_tmp\n etr = (1-percent_complete)*et/percent_complete\n basic_output_on.dprint(\"%s percent complete. Time Elapsed = %s | ETR = %s\"%(int(percent_complete*100),format_time(et),format_time(etr)))\n\n if cyclic_dependencies:\n deps_string = '\\n'.join(map(str, cyclic_dependencies))\n message = f\"The following cyclic dependencies were found:\\n{deps_string}\"\n message += \"\\nPlease see the following debug files for visuals:\\n\"\n for i, cp_cyclic_dependencies in enumerate(cyclic_dependencies):\n for k, v in cp_cyclic_dependencies.items():\n paths, path_colors = [], ''\n paths.append(ring_list[k].path)\n paths.extend([ring_list[vk].path for vk in v])\n path_colors += 'r' + 'b' * len(v)\n fp = os.path.join(opt.output_directory_debug,\n f'cyclic_dependency_{i}_{k}.svg')\n disvg(paths, path_colors, filename=fp)\n message += f'{fp}\\n'\n raise ValueError(message)\n\n #Output problem cases for manual sorting\n from options4rings import output_directory\n from os import path as os_path\n from andysmod import output2file\n manual_sort_csvfile = os_path.join(output_directory,\"interactive_sorting\",ring_list[0].svgname,\"manual_comparisons.csv\")\n str_out = ''\n if flag_count:\n pickle.dump(cp_oms, open(om_pickle_file, \"wb\"))\n output2file(str_out,filename=manual_sort_csvfile,mode='w')\n for k,om in enumerate(cp_oms):\n cp = closed_pairs[k]\n problem_pairs = [(cp.contents[i],cp.contents[j]) for i,j in transpose(where(isnan(om))) if i<j]\n problem_pairs = sorted(problem_pairs,key=itemgetter(0))\n for (idx_i,idx_j) in problem_pairs:\n str_out+='%s,%s,\\n'%(idx_i,idx_j)\n output2file(str_out,filename=manual_sort_csvfile,mode='a')\n\n raise Exception(\"There are %s rings pairs that need to be manually sorted. Please set 'look_for_user_sort_input=True' and run this svg again. Note: When you run again, there will be an interactive interface to help you sort, but it may be easier to manually enter the needed comparisons in\\n%s\"%(flag_count,manual_sort_csvfile))\n basic_output_on.dprint(\"Done with inner ring sorting (in %s). 
Finished with %s error flags.\"%(format_time(current_time()-start_time),flag_count))\n\n # Note: sort_lvl info records the number of other rings in the same \n # sort level, so in the future I can output psort_index values as 3.0, 3.1, etc\n ring_sorting = [cp.contents+[cp.outer_index] for cp in closed_pairs]\n ring_sorting = flattenList(ring_sorting)\n sort_lvl_info = []\n# for cp in closed_pairs:\n# for sort_lvl in cp.contents_psorting:\n# sort_lvl_info += [len(sort_lvl)]*len(sort_lvl)\n# sort_lvl_info += [1] # for outer ring in cp\n return ring_sorting, sort_lvl_info", "def sort_indices(self, list_of_indexes):\n return sorted(list_of_indexes, key=lambda index: tuple(int(i) for i in index.split(\".\")))", "def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n remaining_nodes.append(nd)\n return sorted_part, remaining_nodes", "def indexSortInterfaces(self, index):\n\n self.cell_1 = self.cell_1[index]\n self.cell_2 = self.cell_2[index]\n self.rep_1 = self.rep_1[index]\n self.rep_2 = self.rep_2[index]\n\n self.eps_11 = self.eps_11[index]\n self.eps_22 = self.eps_22[index]\n self.eps_12 = self.eps_12[index]\n self.eps_mas = self.eps_mas[index]\n\n self.atoms = self.atoms[index]\n self.ang = self.ang[index]\n self.e_int_c = self.e_int_c[index]\n self.w_sep_c = self.w_sep_c[index]\n self.w_seps_c = self.w_seps_c[index]\n\n self.e_int_d = self.e_int_d[index]\n self.w_sep_d = self.w_sep_d[index]\n self.w_seps_d = self.w_seps_d[index]\n \n self.order = self.order[index]", "def sortAssemsByRing(self):\n sortKey = lambda a: a.spatialLocator.getRingPos()\n self._children = sorted(self._children, key=sortKey)", "def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))", "def sort_list_of_lists(L, index, rvrs=True):\n return sorted(L, key=operator.itemgetter(index), reverse=rvrs)", "def __SortLists(self): \n\n \n AS=argsort(self.__NumList)\n\n self.__IndList=[self.__IndList[i] for i in AS]#list(self.__IndList[AS])\n self.__ObjList=[self.__ObjList[i] for i in AS]#list(self.__IndList[AS])\n self.__NumList=[self.__NumList[i] for i in AS]", "def sortStingListByNumber(inlist, n):\n indices = []\n outlist = []\n nums = []\n number = re.compile(ur\"\\d+\")\n # print \"inlist: \", inlist\n for e in inlist:\n cur_nums = re.findall(number, e)\n cur_nums_list = []\n for s in cur_nums:\n cur_nums_list.append(int(s))\n num = int(re.findall(number, e)[n])\n if indices:\n if indices[0] > num:\n indices = [num] + indices\n outlist = [e] + outlist\n nums = [cur_nums_list] + nums\n elif indices[-1] < num:\n indices.append(num)\n outlist = outlist + [e]\n nums = nums + [cur_nums_list]\n for i in range(1, len(indices)):\n if indices[i-1] < num and indices[i] > num:\n indices = indices[:i] + [num] + indices[i:]\n outlist = outlist[:i] + [e] + outlist[i:]\n nums = nums[:i] + [cur_nums_list] + nums[i:]\n else:\n indices.append(num)\n outlist.append(e)\n nums.append(cur_nums_list)\n\n return outlist, nums", "def sort_multi_lists(labels):\n unilabels = uniform_list_length(labels)\n intlist = [[i] * 3 for i in range(len(unilabels))]\n # sort_func = itemgetter(*range(len(unilabels[0])))\n sort_func = lambda item: (item[0][0], item[0][1], item[0][2])\n sort_idx = [ii[0] for (i, ii) in sorted(zip(unilabels, intlist), key=sort_func)]\n sort_labels = [unilabels[i] for i in sort_idx]\n return undo_uniform_list_length(sort_labels), sort_idx", "def merge_sort(list):\n\n\tif len(list) 
<= 1:\n\t\treturn list\n\n\tleft_half, right_half = split(list)\n\tleft = merge_sort(left_half)\n\tright = merge_sort(right_half)\n\n\treturn merge(left, right)", "def unsort(sorted_list, oidx):\r\n assert len(sorted_list) == len(oidx), \"Number of list elements must match with original indices.\"\r\n _, unsorted = [list(t) for t in zip(*sorted(zip(oidx, sorted_list)))]\r\n return unsorted", "def sort_cutpoly_by_angle(self, polydata, cells_list, num_cells, numPoints):\n # 1: get numpy array of points from vtk polydata object\n points = polydata.GetPoints()\n pts = np.zeros((numPoints, 3), dtype=float)\n\n index = 0\n for cell in cells_list:\n for id in cell:\n pts[index] = np.asarray(points.GetPoint(id))\n index += 1\n\n # 2: convert them to 2d points and obtain the R rotation matrix\n pts_2d = project_onto_xy_plane(pts)\n\n # 3: compute center (average of all points)\n center_pt = np.mean(pts_2d, axis=0)\n\n # 4: find top points by pointdata label\n\n # 4: compute angles from center to average cell pts:nt vertical_dir = vert_pt_2d - center_pt # get vertical direction vector (from center pt to tp1)\n signed_angles = np.zeros((numPoints,), dtype=float)\n\n for i in range(numPoints):\n current_vec = pts_2d[i] - center_pt\n signed_angles[i] = compute_angle_between(vertical_dir, current_vec)\n self.pers_var = 1\n if self.pers_var: # ctrl-w for exit window key\n plt.figure()\n plt.scatter(center_pt[0], center_pt[1], color='r', s=2)\n plt.scatter(pts_2d[:,0], pts_2d[:,1], color='b', s=0.5)\n plt.scatter(vert_pt_2d[0], vert_pt_2d[1], color='g', s=10)\n plt.scatter(pts_2d[i][0], pts_2d[i][1], color='k', s=7)\n plt.xlabel('angle = ' + str(signed_angles[i]))\n figManager = plt.get_current_fig_manager()\n figManager.window.showMaximized()\n plt.show()\n\n # 5: sort angles (no matter ascending or descending)\n sorted_idxs = np.argsort(signed_angles)\n\n # 6: sorted points\n sorted_pts = pts[sorted_idxs]\n\n return sorted_pts, sorted_idxs", "def contour_sort(l):\n length = len(l)\n if length <= 1:\n return l\n else:\n pivot = l.pop(int(length / 2))\n less, more = [], []\n for x in l:\n if cv2.contourArea(x) >= cv2.contourArea(pivot):\n less.append(x)\n else:\n more.append(x)\n return contour_sort(less) + [pivot] + contour_sort(more)", "def sort(points):\n if len(points) == 0:\n return []\n \n starting_vertex = min(points)\n reference_point = starting_vertex + Point2D(0, 1)\n \n return sorted(points, key=partial(\n get_angle_and_distance, point_2=starting_vertex, point_3=reference_point\n ))", "def indexsort(d, reverse=False):\n \n return [ i for (i,j) in sorted(enumerate(d), \\\n key=operator.itemgetter(1), reverse = reverse)]", "def _sort_index(self):\n\n allAltPos = np.array(sorted(list(set(list(self.data['altitude'])))))[::-1]\n allAziPos = np.array(sorted(list(set(list(self.data['azimuth'])))))\n\n indON = [[None for azi in allAziPos] for alt in allAltPos]; indOFF = [[None for azi in allAziPos] for alt in allAltPos]\n\n for i, traceItem in enumerate(self.data):\n alt = traceItem['altitude'];azi = traceItem['azimuth'];sign = traceItem['sign']\n for j, altPos in enumerate(allAltPos):\n for k, aziPos in enumerate(allAziPos):\n if alt==altPos and azi==aziPos:\n if sign==1:\n if indON[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign: 1!')\n else: indON[j][k]=i\n\n if sign==-1:\n if indOFF[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign:-1!')\n else: indOFF[j][k]=i\n\n indON = 
np.array([np.array(x) for x in indON]); indOFF = np.array([np.array(x) for x in indOFF])\n\n return indON,indOFF,allAltPos,allAziPos", "def unsort(sorted_list, oidx):\n assert len(sorted_list) == len(oidx), \"Number of list elements must match with original indices.\"\n _, unsorted = [list(t) for t in zip(*sorted(zip(oidx, sorted_list)))]\n return unsorted", "def mergesort(lst, inversions):\n\t# inversions contains inverted list elements, once for each inversion\n\tif len(lst) == 1:\n\t\treturn lst\n\tcut_idx = (len(lst) + 1) / 2\n\tleft = lst[:cut_idx]\n\tright = lst[cut_idx:]\n\tleft = mergesort(left, inversions)\n\tright = mergesort(right, inversions)\n\treturn merge(left, right, inversions)", "def selection_sort(l):\n # Raise value\n if not isinstance(l, list):\n raise TypeError(\"Not a list\")\n \n # Initialize variables to count\n r = c = w = 0\n\n for i in range(len(l)):\n # Assign the smallest to the first item of the unsorted segment\n index_temp_min_value = i \n # Loop iterates over the unsorted items\n for j in range(i + 1, len(l)):\n c += 1 \n r += 2 \n if l[j] < l[index_temp_min_value]:\n index_temp_min_value = j \n \n c += 1\n if index_temp_min_value != i:\n # swap values of the lowest unsorted ele with the first unsorted ele \n l[i], l[index_temp_min_value] = l[index_temp_min_value], l[i]\n w += 2\n r += 2\n\n return c, r, w", "def f_way_sort(buffer_size: int, input_paths: list, output_path: str):\n pass", "def merge_sort(list):\r\n \r\n if len(list) <= 1:\r\n return list\r\n \r\n left_half, right_half = split(list)\r\n left = merge_sort(left_half)\r\n right = merge_sort(right_half)\r\n \r\n return merge(left, right)", "def merge_sort(list):\n # Base Condition\n if len(list) <= 1:\n return list\n\n left_half, right_half = split(list)\n left = merge_sort(left_half)\n right = merge_sort(right_half)\n\n return merge(left,right)", "def ring1_isoutside_ring2_cmp_alt(ringlist, ring1_index, ring2_index,\n N_lines2use=opt.alt_sort_N,\n increase_N_if_zero=True, boundary=None):#####TOL\n ring1 = ringlist[ring1_index]\n ring2 = ringlist[ring2_index]\n if ring1.path == ring2.path:\n return 0\n\n dbrlist = ringlist if opt.debug_lines_used_to_sort_full else None\n debug12, debug21 = '', ''\n if opt.debug_lines_used_to_sort:\n rec_num = 0 if increase_N_if_zero else 1\n debug12 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring1_index}-{ring2_index}_it{rec_num}.svg')\n debug21 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring2_index}-{ring1_index}_it{rec_num}.svg')\n\n countHits12 = ring1_isbelow_ring2_numHits(\n ring1, ring2, N_lines2use, debug_name=debug12, ring_list=dbrlist)\n countHits21 = ring1_isbelow_ring2_numHits(\n ring2, ring1, N_lines2use, debug_name=debug21, ring_list=dbrlist)\n if countHits12 == 0 or countHits21 == 0:\n if countHits12 > 0:\n return -1\n elif countHits21 > 0:\n return 1\n elif increase_N_if_zero:\n N_upped = N_lines2use * max(len(ring1.path), len(ring2.path))\n improved_res = ring1_isoutside_ring2_cmp_alt(\n ringlist, ring1_index, ring2_index, N_lines2use=N_upped,\n increase_N_if_zero=False, boundary=boundary)\n if improved_res != 0:\n return improved_res\n elif ring1.isClosed() or ring2.isClosed():\n if opt.manually_fix_sorting:\n return ask_user_to_sort(\n ring1_index, ring2_index, ringlist, make_svg=True)\n else:\n raise Exception(\n \"Problem sorting rings... 
set \"\n \"'manually_fix_sorting=True' in options4rings.py \"\n \"to fix manually.\"\n )\n else:\n return 0\n else:\n return 0\n\n # neither of the counts were zero\n ratio21over12 = countHits21/countHits12\n try:\n upper_bound = 1.0/percentage_for_disagreement\n except ZeroDivisionError:\n from numpy import Inf\n upper_bound = Inf\n\n if percentage_for_disagreement < ratio21over12< upper_bound:\n\n debug12, debug21 = '', ''\n if opt.debug_lines_used_to_sort:\n debug12 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring1_index}-{ring2_index}_it2.svg')\n debug21 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring2_index}-{ring1_index}_it2.svg')\n\n # still not sure, so use more lines\n N_upped = N_lines2use * max(len(ring1.path), len(ring2.path))\n countHits12 = ring1_isbelow_ring2_numHits(\n ring1, ring2, N_upped, debug_name=debug12, ring_list=dbrlist)\n countHits21 = ring1_isbelow_ring2_numHits(\n ring2, ring1, N_upped, debug_name=debug21, ring_list=dbrlist)\n ratio21over12 = countHits21/countHits12\n\n if percentage_for_disagreement < ratio21over12 < upper_bound:\n # still not sure, ask user, if allowed\n if opt.manually_fix_sorting:\n return ask_user_to_sort(\n ring1_index, ring2_index, ringlist, make_svg=True)\n else:\n raise Exception(\n \"Problem sorting rings... set \"\n \"'manually_fix_sorting=True' in options4rings.py to \"\n \"fix manually.\"\n )\n if countHits12 > countHits21:\n return -1\n elif countHits12 < countHits21:\n return 1\n else:\n return 0", "def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case)\r\n # TODO: Split items list into approximately equal halves\r\n # TODO: Sort each half by recursively calling merge sort\r\n # TODO: Merge sorted halves into one list in sorted order\r", "def sort_by_assignments(peaklist, order=None, commented_at_end=False):\n anchors = peaklist.anchors\n anchored = tuple(i for anchor in anchors for i in anchor)\n unanchored = set(range(peaklist.dims)) - set(anchored)\n default_order = anchored + tuple(sorted(unanchored))\n order = order if order is not None else default_order\n peaklist.sort(key=lambda peak: tuple(peak[i] for i in order))\n if commented_at_end:\n peaklist.sort(key=lambda peak: peak.commented)\n return peaklist", "def sort_all(batch, lens):\r\n unsorted_all = [lens] + [range(len(lens))] + list(batch)\r\n sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]\r\n return sorted_all[2:], sorted_all[1]", "def natsort_icase(lst):\n lst.sort(key=natsort_key_icase)", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def sort(a_list, base):\n\n passes = int(log(max(a_list), base) + 1)\n\n items = a_list[:]\n for digit_index in xrange(passes):\n buckets = [[] for _ in xrange(base)] # Buckets for sorted sublists.\n for item in items:\n digit = _get_digit(item, base, digit_index)\n buckets[digit].append(item)\n\n items = []\n for sublists in buckets:\n items.extend(sublists)\n\n return items" ]
[ "0.66547817", "0.6604381", "0.5821195", "0.56872296", "0.56680274", "0.5609566", "0.5548704", "0.5486942", "0.5448702", "0.5442542", "0.540447", "0.5400091", "0.5396913", "0.5388861", "0.538794", "0.53742737", "0.53636485", "0.5361872", "0.53531635", "0.5348274", "0.53459334", "0.53447187", "0.5341755", "0.5329896", "0.53242344", "0.5311097", "0.5307508", "0.52961105", "0.52949995", "0.5290078" ]
0.72343975
0
Extraction of overlapping reads with a non-default minimal overlap.
def test_overlapping_alignments_2(): generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix) gqd.gene_wise_quantification._min_overlap = 5 sam = pysam.Samfile(gqd.sam_bam_prefix + ".bam") # 1 overlapping base in the 5' end of the reads => not enough assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments( sam, Gff3EntryMoc("chrom", 1, 10))) == [] # 4 overlapping base in the 5' end of the reads => not enough assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments( sam, Gff3EntryMoc("chrom", 1, 13))) == [] # 5 overlapping base in the 5' end of the reads => okay assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments( sam, Gff3EntryMoc("chrom", 1, 14))) == [ "myread:01", "myread:02", "myread:03", "myread:04", "myread:05"] # 1 overlapping base in the 3' end of the reads => not enough assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments( sam, Gff3EntryMoc("chrom", 19, 23))) == [] # 4 overlapping base in the 3' end of the reads => not enough assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments( sam, Gff3EntryMoc("chrom", 16, 23))) == [] # 5 overlapping base in the 3' end of the reads => not enough assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments( sam, Gff3EntryMoc("chrom", 15, 23))) == [ "myread:01", "myread:02", "myread:03", "myread:04", "myread:05"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_reads(filtered_reads, positions):\n\n\tcombined_reads = []\n\ttrue_reads = []\n\n\tfor r in filtered_reads:\n\t\t# Find associated position\n\t\tr_position = float(r.get_position())\n\t\tdesired_start = -1\n\n\t\tfor p in positions:\n\t\t\tlow_position = p - 25\n\t\t\thigh_position = p + 25\n\t\t\tif r_position > low_position and r_position < high_position:\n\t\t\t\tdesired_start = p\n\t\t\t\tbreak\n\n\t\tif desired_start is not -1:\n\t\t\t# Find another read that overlaps\n\t\t\tif r_position < desired_start:\n\t\t\t\toffset = desired_start - r_position\n\t\t\t\tfor r2 in filtered_reads:\n\t\t\t\t\tr2_position = float(r2.get_position())\n\t\t\t\t\tif r2_position > desired_start and r2_position <= desired_start + offset and r2_position != r_position:\n\t\t\t\t\t\tfuse_read = r2\n\t\t\t\t\t\tbreak\n\t\t\telif r_position == desired_start:\n\t\t\t\tfuse_read = None\n\t\t\telse:\n\t\t\t\toffset = r_position - desired_start\n\t\t\t\tfor r2 in filtered_reads:\n\t\t\t\t\tr2_position = float(r2.get_position())\n\t\t\t\t\tr2_end = r2_position + 49\n\t\t\t\t\tif r2_end + 49 > desired_start and r2_end >= r_position - 1 and r2_position != r_position:\n\t\t\t\t\t\tfuse_read = r2 \n\t\t\t\t\t\tbreak\n\n\t\t\tif fuse_read is None:\n\t\t\t\tif r_position == desired_start:\n\t\t\t\t\ttrue_reads.append(r)\n\t\t\telse:\n\t\t\t\tr.fuse_read(fuse_read, desired_start)\n\t\t\t\tcombined_reads.append(r)\n\t\t\t\t\n\n\tdef f(x): return len(x.get_read()) == 50\n\tcombined_reads = filter(f, combined_reads)\n\n\t# for c in combined_reads:\n\t# \tc.print_read()\n\t# \tprint '\\n'\n\t\n\treturn (combined_reads, true_reads)", "def read_overlap(self, amount=None, increment=None):\n if amount is None:\n amount = self._length\n if increment is None:\n increment = amount\n\n # Check available read size\n if amount == 0 or amount > self._length:\n return self._data[0:0].copy()\n\n idxs = self.get_indexes(self._start, amount, self.maxsize)\n self.move_start(increment)\n return self._data[idxs].copy()", "def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + 
int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def get_overlapping_indices(self):\n return self._get_atomic_overlaps()", "def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list", "def get_overlaps(self, offset, length):\n # In case entity's offset points to a space just before the entity.\n if ''.join([chunk.word for chunk in self])[offset] == ' ':\n offset += 1\n index = 0\n result = []\n for chunk in self:\n if offset < index + len(chunk.word) and index < offset + length:\n result.append(chunk)\n index += len(chunk.word)\n return result", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 
0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def determine_crossmapped_reads(self, read_alignment_path):\n references_by_species = self._get_references_by_species()\n crossmapped_reads = set()\n done_replicon_comparison = []\n with pysam.AlignmentFile(read_alignment_path) as bam:\n for org, replicon_ids in references_by_species.items():\n for replicon_id in replicon_ids:\n self._read_ids = set()\n # First, collect the ids of the aligned reads of\n # this replicon\n for alignment in bam.fetch(reference=replicon_id):\n self._read_ids.add(alignment.qname)\n # Then compare them to the alignments of each\n # replicon of the other organism(s)\n for (\n comp_org,\n comp_replicon_ids,\n ) in references_by_species.items():\n # Only compare replicons of different species\n if org == comp_org:\n continue\n for comp_replicon_id in comp_replicon_ids:\n comparison = sorted([replicon_id, comp_replicon_id])\n # Check if comparison of the two replicons\n # has been done already\n if comparison in done_replicon_comparison:\n continue\n done_replicon_comparison.append(comparison)\n # Compare all read ids of the comparison\n # replicon to the query replicon read ids\n for alignment in bam.fetch(\n reference=comp_replicon_id\n ):\n if alignment.qname in self._read_ids:\n crossmapped_reads.add(alignment.qname)\n no_of_crossmapped_reads = len(crossmapped_reads)\n return crossmapped_reads", "def _sequence_overlap(seq1: Sequence, seq2: Sequence):\n diff = SequenceMatcher(None, seq1, seq2)\n # Drop last matching block, since this is always a dummy entry, with length=1.\n return diff.get_matching_blocks()[:-1]", "def test_fasta_get_overlapping_repeats(self):\r\n\r\n overlapping_repeats = mfau.get_overlapping_repeats(\"ACAACAACAACA\", 3)\r\n\r\n self.assertGreater(len(overlapping_repeats), 0)", "def get_all_offgrid_pin(self, pin, insufficient_list):\n #print(\"INSUFFICIENT LIST\",insufficient_list)\n # Find the coordinate with the most overlap\n any_overlap = set()\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the max x or y overlap\n max_overlap = max(overlap_rect)\n if max_overlap>0:\n any_overlap.update([coord])\n \n return any_overlap", "def get_best_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most overlap\n best_coord = None\n best_overlap = -math.inf\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the min x or y overlap\n min_overlap = min(overlap_rect)\n if min_overlap>best_overlap:\n best_overlap=min_overlap\n best_coord=coord\n \n return set([best_coord])", "def read_ao_overlap(self, path_rwfdump, fn_rwf):\n os.system(path_rwfdump + f\" {fn_rwf} ao_overlap.dat 514R\")\n\n with open('ao_overlap.dat', 
\"r\") as f:\n log = f.read()\n\n tmp = re.findall('[-]?\\d+\\.\\d+D[+-]\\d\\d', log)\n tmp = [float(x.replace('D', 'e')) for x in tmp]\n \n tmp_ovr = np.zeros((self.nbasis * 2, self.nbasis * 2))\n \n cnt = 0\n for ibasis in range(self.nbasis * 2):\n for jbasis in range(ibasis + 1):\n tmp_ovr[ibasis, jbasis] = tmp[cnt]\n cnt += 1\n\n tmp_ovr += np.transpose(tmp_ovr) - np.diag(np.diag(tmp_ovr))\n\n # Slicing the components between t and t+dt\n return tmp_ovr[:self.nbasis, self.nbasis:]", "def merge_reads(s1, s2, q1, q2, amplen):\n # If the amplicon is of length L and the reads are lengths l1, l2 then:\n # - read 1 from 0 to L-l2-1 inclusive doesn't overlap\n # - read 1 from L-l2 to l1-1 inclusive overlaps with read 2\n # - read 2 from 0 to l1+l2-L-1 inclusive overlaps with read 1\n # - read 2 from l1+l2-L to its end doesn't overlap\n\n # A picture for clarity:\n # s1 coords: 0 l1-1\n # | |\n # ----------------------------------------\n # ------------------------------\n # | | |\n # s1 coords: L-l2 | L-1\n # s2 coords: 0 l1+l2-L-1\n\n # Reverse complement read 2 and reverse its quality scores.\n s2 = reverse_complement(s2)\n q2 = q2[::-1]\n\n # This is where we'll put the merged sequence and quality score.\n s = np.zeros(amplen, dtype=np.int8)\n q = np.zeros(amplen, dtype=np.int8)\n\n # If the reads overlap correctly, then s1[offset+i] == s2[i], assuming s2 is\n # the reverse complement of the reverse read.\n offset = amplen - len(s2)\n\n # Fill in the parts of the merged sequence where the reads don't overlap.\n s[:offset] = s1[:offset]\n q[:offset] = q1[:offset]\n s[len(s1):] = s2[len(s1)+len(s2)-amplen:]\n q[len(s1):] = q2[len(s1)+len(s2)-amplen:]\n\n # Create a set of views into the overlapping region. We can directly compare\n # vs1[i] to vs2[i] and use that to fill in vs[i] with all indexing taken\n # care of.\n vs1 = s1[offset:]\n vq1 = q1[offset:]\n vs2 = s2[:len(vs1)]\n vq2 = q2[:len(vs1)]\n vs = s[offset:len(s1)]\n vq = q[offset:len(s1)]\n\n # Quality score of matching bases is the larger of the two quality\n # scores (this is a somewhat conservative low estimate). Quality\n # score of mismatched bases is the difference of the two quality\n # scores. 
If the mismatched bases have equal quality scores, the\n # base is written as an N with the minimum possible quality.\n\n # Positions where the reads agree.\n ieq = vs1 == vs2\n vs[ieq] = vs1[ieq]\n vq[ieq] = np.maximum(vq1[ieq], vq2[ieq])\n\n # Positions where the reads disagree.\n ineq = vs1 != vs2\n mismatches = ineq.sum()\n\n # Positions where the reads disagree and read 1 has the higher quality.\n ir1 = np.logical_and(ineq, vq1 > vq2)\n vs[ir1] = vs1[ir1]\n vq[ir1] = MIN_QUAL + vq1[ir1] - vq2[ir1]\n\n # Positions where the reads disagree and read 2 has the higher quality.\n ir2 = np.logical_and(ineq, vq2 > vq1)\n vs[ir2] = vs2[ir2]\n vq[ir2] = MIN_QUAL + vq2[ir2] - vq1[ir2]\n\n # Positions where the reads disagree and they have equal qualities.\n irn = np.logical_and(ineq, vq1 == vq2)\n vs[irn] = bN\n vq[irn] = MIN_QUAL\n\n return s, q, mismatches", "def pick_Maximal_overlap(reads, k):\n reada, readb = None, None\n best_olen = 0\n for a, b in permutations(reads, 2):\n olen = Overlap(a, b, min_length=k)\n if olen > best_olen:\n reada, readb = a, b\n best_olen = olen\n return reada, readb, best_olen", "def extract_upstream(indicies, genome, amount, overlap, min_length=8):\n\n records = []\n prev_end = -1\n index = 0\n for feature in filter(lambda f: f.type == \"CDS\", genome.features):\n if index in indicies:\n end = int(feature.location.start)\n start = max(end - amount, 0)\n if not overlap:\n start = max(start, prev_end)\n\n if (end - start) > min_length:\n upstream = genome[start:end]\n upstream.id = \"{0}|{1}\".format(genome.id, feature.qualifiers[\"locus_tag\"][0])\n records.append(upstream)\n\n index += 1\n prev_end = int(feature.location.end)\n\n return records", "def test_overlap_set_basic_d(test_input_scheme, overlapped_records_generate):\n callers = ['MuTect2', 'MuSE', 'SomaticSniper']\n maf_lines = [\n [\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t10\\t2\\t8\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tINS\\t-\\tCC\\t-\\tCC\\t-\\t-\\t10\\t2\\t8\\t8\\t8\\t0\\t\\t\\n',\n ],\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tSNP\\tA\\tT\\tA\\tT\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n'\n ]\n\n record = overlapped_records_generate(\n test_input_scheme,\n maf_lines,\n callers)\n\n assert not record.is_singleton()\n\n assert ['MuSE', 'MuTect2', 'SomaticSniper'] == record.callers\n\n assert ('INS', 'SNP') == record.variant_types\n\n assert '1:1:C' in record.locus_allele_map\n assert '1:1:CC' in record.locus_allele_map\n assert '1:1:T' in record.locus_allele_map\n assert len(record.locus_allele_map) == 3\n assert len(record.locus_allele_map['1:1:C']) == 2\n assert len(record.locus_allele_map['1:1:T']) == 1\n assert len(record.locus_allele_map['1:1:CC']) == 1\n\n assert ('MuTect2', 'SNP') in record.caller_type_map \\\n and ('MuSE', 'SNP') in record.caller_type_map \\\n and ('SomaticSniper', 'SNP') in record.caller_type_map \\\n and ('MuTect2', 'INS') in record.caller_type_map\n\n assert len(record.caller_type_map) == 4\n\n assert record.all_single_record() is False", "def slice_reads(reads, max_coverage):\n\t\n\tSEED = 448\n\trandom.seed(SEED)\n\tshuffled_indices = list(range(len(reads)))\n\trandom.shuffle(shuffled_indices)\n\n\tposition_list = reads.get_positions()\n\tlogger.info('Found %d SNP positions', len(position_list))\n\n\t# dictionary to map SNP position to its index\n\tposition_to_index = { position: index for index, position in enumerate(position_list) }\n\n\t# List of slices, start with one empty slice ...\n\tslices = 
[IndexSet()]\n\t# ... and the corresponding coverages along each slice\n\tslice_coverages = [CoverageMonitor(len(position_list))]\n\tskipped_reads = 0\n\taccessible_positions = set()\n\tfor index in shuffled_indices:\n\t\tread = reads[index]\n\t\t# Skip reads that cover only one SNP\n\t\tif len(read) < 2:\n\t\t\tskipped_reads += 1\n\t\t\tcontinue\n\t\tfor position, base, allele in read:\n\t\t\taccessible_positions.add(position)\n\t\tfirst_position, first_base, first_allele = read[0]\n\t\tlast_position, last_base, last_allele = read[len(read)-1]\n\t\tbegin = position_to_index[first_position]\n\t\tend = position_to_index[last_position] + 1\n\t\tslice_id = 0\n\t\twhile True:\n\t\t\t# Does current read fit into this slice?\n\t\t\tif slice_coverages[slice_id].max_coverage_in_range(begin, end) < max_coverage:\n\t\t\t\tslice_coverages[slice_id].add_read(begin, end)\n\t\t\t\tslices[slice_id].add(index)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tslice_id += 1\n\t\t\t\t# do we have to create a new slice?\n\t\t\t\tif slice_id == len(slices):\n\t\t\t\t\tslices.append(IndexSet())\n\t\t\t\t\tslice_coverages.append(CoverageMonitor(len(position_list)))\n\tlogger.info('Skipped %d reads that only cover one SNP', skipped_reads)\n\n\tunphasable_snps = len(position_list) - len(accessible_positions)\n\tif position_list:\n\t\tlogger.info('%d out of %d variant positions (%.1d%%) do not have a read '\n\t\t\t'connecting them to another variant and are thus unphasable',\n\t\t\tunphasable_snps, len(position_list),\n\t\t\t100. * unphasable_snps / len(position_list))\n\n\t# Print stats\n\tfor slice_id, index_set in enumerate(slices):\n\t\tlogger.info('Slice %d contains %d reads', slice_id, len(index_set))\n\n\treturn reads.subset(slices[0])", "def getmissedoutregions(peakfile,treatment_bamfile, min_size, min_coverage_gain_over_average,window_size):\n total_read_count=getmappedreadcount(treatment_bamfile)\n chrsizedict=getchrsizes(treatment_bamfile)\n total_chr_size=sum([chrsizedict[chrnm] for chrnm in chrsizedict])\n avg_reads=1.0*total_read_count/total_chr_size\n indexed_bamfile=ChipSeqIndex.ChipSeqIndex(treatment_bamfile)\n tempmissedoutregionslist=[]\n prev_chrinterval=['chr1',0,0]\n lncnt=0\n for lntxt in open(peakfile):\n lncnt+=1\n ln=lntxt.rstrip('\\n').split('\\t')\n chrinterval=[ln[0],int(ln[1]),int(ln[2])]\n cadidategaplist=[]\n if prev_chrinterval[0]==chrinterval[0]:\n if (chrinterval[1]-prev_chrinterval[2]-1)>min_size:\n cadidategaplist.append([chrinterval[0],prev_chrinterval[2]+1,chrinterval[1]-1])\n else:\n if (chrsizedict[prev_chrinterval[0]]-prev_chrinterval[2])>min_size:\n cadidategaplist.append([prev_chrinterval[0],prev_chrinterval[2]+1,chrsizedict[prev_chrinterval[0]]])\n if (chrinterval[1]-1)>min_size:\n cadidategaplist.append([chrinterval[0],1,chrinterval[1]-1])\n for cadidatelargegap in cadidategaplist:\n for i in range((cadidatelargegap[2]-cadidatelargegap[1])/window_size+1):\n cadidategap=[cadidatelargegap[0],cadidatelargegap[1]+i*window_size,min(cadidatelargegap[1]+(i+1)*window_size,cadidatelargegap[2])]\n chrrange='%s:%d-%d'%tuple(cadidategap)\n readcounts=indexed_bamfile.countreadsinrange(chrrange)\n if readcounts*1.0/(chrinterval[1]-prev_chrinterval[2])> min_coverage_gain_over_average*avg_reads:\n tempmissedoutregionslist.append(cadidategap)\n prev_chrinterval=chrinterval[0:]\n\n #join adjacent regions\n missedoutregionslist=[]\n if len(tempmissedoutregionslist)!=0:\n prev_region=tempmissedoutregionslist[0]\n for region in tempmissedoutregionslist[1:]:\n if region[0]==prev_region[0] and 
(region[1]-prev_region[2])<=1:\n prev_region[2]=region[2]\n else:\n missedoutregionslist.append(prev_region)\n prev_region=region[0:]\n missedoutregionslist.append(prev_region)\n \n return missedoutregionslist", "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, 
ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def sample_partial_overlap(example, *, minimum_overlap, maximum_overlap):\n rng = get_rng_example(example, 'offset')\n overlap = rng.uniform(minimum_overlap, maximum_overlap)\n num_samples = example[keys.NUM_SAMPLES][keys.ORIGINAL_SOURCE]\n assert len(num_samples) == 2, (len(num_samples), num_samples)\n overlap_samples = sum(num_samples) * overlap / (1 + overlap)\n offset = [0, int(max(num_samples[0] - overlap_samples, 0))]\n assign_offset(example, offset)\n return example", "def fullIntersection(self, data):\n # try:\n return modules.fullIntersection(data, self.caseInsensitive)\n # except TypeError:\n # print(\"ERROR: Combine replicates before calling fullIntersection\")\n # return None", "def test_overlap_set_basic_b(test_input_scheme, overlapped_records_generate):\n callers = ['MuTect2', 'MuSE', 'SomaticSniper']\n maf_lines = [\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t10\\t2\\t8\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n',\n ''\n ]\n\n record = overlapped_records_generate(\n test_input_scheme,\n maf_lines,\n callers)\n\n assert not record.is_singleton()\n\n assert ['MuSE', 'MuTect2'] == record.callers\n\n assert ('SNP',) == record.variant_types\n\n assert '1:1:C' in record.locus_allele_map\n assert len(record.locus_allele_map) == 1\n assert len(record.locus_allele_map['1:1:C']) == 2\n\n assert ('MuTect2', 'SNP') in record.caller_type_map and ('MuSE', 'SNP') in record.caller_type_map\n assert len(record.caller_type_map) == 2\n\n assert record.all_single_record() is True", "def overlap(annotations1, annotations2):\n return [val for val in annotations1 if val in annotations2]", "def merge_ranges():", "def _prune_non_overlapping_boxes(self, boxes1, boxes2, min_overlap=0.0):\n with tf.name_scope('prune_non_overlapping_boxes'):\n ioa = self._ioa(boxes2, boxes1) # [M, N] tensor\n ioa = tf.reduce_max(ioa, axis=0) # [N] tensor\n keep_bool = tf.greater_equal(ioa, tf.constant(min_overlap))\n keep_inds = tf.squeeze(tf.where(keep_bool), axis=1)\n boxes = tf.gather(boxes1, keep_inds)\n return boxes, keep_inds", "def overlaps(geometry, sr=None):\r\n return _filter(geometry, sr, 'esriSpatialRelOverlaps')", "def parse_visit_overlaps(visits, buffer=15.):\n import copy\n from shapely.geometry import Polygon\n \n N = len(visits)\n\n exposure_groups = []\n used = np.arange(len(visits)) < 0\n \n for i in range(N):\n f_i = visits[i]['product'].split('-')[-1]\n if used[i]:\n continue\n \n im_i = pyfits.open(glob.glob(visits[i]['product']+'_dr?_sci.fits')[0])\n wcs_i = pywcs.WCS(im_i[0])\n fp_i = Polygon(wcs_i.calc_footprint()).buffer(buffer/3600.)\n \n exposure_groups.append(copy.deepcopy(visits[i]))\n \n for j in range(i+1, N):\n f_j = visits[j]['product'].split('-')[-1]\n if (f_j != f_i) | (used[j]):\n continue\n \n im_j = pyfits.open(glob.glob(visits[j]['product']+'_dr?_sci.fits')[0])\n wcs_j = pywcs.WCS(im_j[0])\n fp_j = Polygon(wcs_j.calc_footprint()).buffer(buffer/3600.)\n \n olap = fp_i.intersection(fp_j)\n if olap.area > 0:\n used[j] = True\n fp_i = fp_i.union(fp_j)\n exposure_groups[-1]['footprint'] = fp_i\n exposure_groups[-1]['files'].extend(visits[j]['files'])\n \n for i in range(len(exposure_groups)):\n flt_i = pyfits.open(exposure_groups[i]['files'][0])\n product = flt_i[0].header['TARGNAME'].lower() \n if product == 'any':\n product = 
'par-'+radec_to_targname(header=flt_i['SCI',1].header)\n \n f_i = exposure_groups[i]['product'].split('-')[-1]\n product += '-'+f_i\n exposure_groups[i]['product'] = product\n \n return exposure_groups", "def get_overlap_mask(self):\n self.overlap_mask = np.bitwise_and(\n self.structure_mask.astype(np.bool), self.unknown_mask.astype(np.bool)\n )", "def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))" ]
[ "0.6202679", "0.5979547", "0.59313875", "0.58274823", "0.56923795", "0.5663266", "0.5656279", "0.56381226", "0.5562358", "0.5528029", "0.5521263", "0.5499392", "0.5463479", "0.5456421", "0.5451385", "0.5450649", "0.5383085", "0.53803396", "0.53411436", "0.5327845", "0.5281667", "0.5281024", "0.5270028", "0.5258955", "0.525781", "0.5254851", "0.52518755", "0.52326876", "0.5232334", "0.5220733" ]
0.61179787
1
test extract keywords with mult cols
def test_extract_multiple_columns_basic(self): keywords = ["PatientID", "SOPInstanceUID"] self.dicom.extract_keywords(keywords) # ensure columns were added columns = self.dicom.metadata.column_names if u'PatientID' not in columns: raise Exception("PatientID not added to columns") if u'SOPInstanceUID' not in columns: raise Exception("SOPInstanceUID not added to columns") # compare expected and actual result expected_result = self._get_expected_column_data_from_xml(keywords) take_result = self.dicom.metadata.take(self.count, columns=keywords) numpy.testing.assert_equal(take_result, expected_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_guided():\n top_n = 5\n seed_keywords = [\"time\", \"night\", \"day\", \"moment\"]\n keywords = model.extract_keywords(doc_one,\n min_df=1,\n top_n=top_n,\n seed_keywords=seed_keywords)\n\n assert isinstance(keywords, list)\n assert isinstance(keywords[0], tuple)\n assert isinstance(keywords[0][0], str)\n assert isinstance(keywords[0][1], float)\n assert len(keywords) == top_n", "def test_keyword_extractor(self):\n data = [{\"Header\": \"This is a Header\", \"Paragraph\": \"This is a Paragraph\", \"slide\": 10}]\n keywords = keyword_extractor(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(keywords, data)", "def separate_keywords_corpus(df: pd.DataFrame\n ) -> Tuple[List[str], ...]:\n keywords = list(df['keyword'].unique())\n sentences = list(df['sentence'])\n return keywords, sentences", "def extract_keywords(df):\n df[\"key_words\"] = \"\"\n\n for index, row in df.iterrows():\n plot = row[\"Plot\"]\n\n rake = Rake()\n\n rake.extract_keywords_from_text(plot)\n\n key_words_dict_scores = rake.get_word_degrees()\n\n row[\"key_words\"] = list(key_words_dict_scores.keys())\n\n df.drop(columns=[\"Plot\"], inplace=True)\n df.set_index(\"Title\", inplace=True)\n\n return df", "def test_extract_multiple_invalid_columns(self):\n keywords = [\"invalid\", \"another_invalid_col\"]\n self.dicom.extract_keywords(keywords)\n\n # test that columns were added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"invalid column not added to columns\")\n if u'another_invalid_col' not in columns:\n raise Exception(\"another_invalid_col not added to columns\")\n\n # compare actual with expected result\n invalid_columns = self.dicom.metadata.take(self.count, columns=keywords)\n expected_result = [[None, None] for x in range(0, self.count)]\n self.assertEqual(invalid_columns, expected_result)", "def test_extract_keywords_single_doc(keyphrase_length, mmr, maxsum, vectorizer):\n top_n = 5\n keywords = model._extract_keywords_single_doc(doc_one,\n top_n=top_n,\n keyphrase_ngram_range=keyphrase_length,\n use_mmr=mmr,\n use_maxsum=maxsum,\n diversity=0.5,\n vectorizer=vectorizer)\n assert isinstance(keywords, list)\n assert isinstance(keywords[0][0], str)\n assert isinstance(keywords[0][1], float)\n assert len(keywords) == top_n\n for keyword in keywords:\n assert len(keyword[0].split(\" \")) <= keyphrase_length[1]", "def test_extract_keywords_multiple_docs(keyphrase_length):\n top_n = 5\n keywords_list = model._extract_keywords_multiple_docs([doc_one, doc_two],\n top_n=top_n,\n keyphrase_ngram_range=keyphrase_length)\n assert isinstance(keywords_list, list)\n assert isinstance(keywords_list[0], list)\n assert len(keywords_list) == 2\n\n for keywords in keywords_list:\n assert len(keywords) == top_n\n\n for keyword in keywords:\n assert len(keyword[0].split(\" \")) <= keyphrase_length[1]", "def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content", "def parsewokkeys(keywords):\n resultstring = '<td class=\"resultTitle\">Material</td><td class=\"resultTitle\">Publications</td>'\n\n for key in keywords:\n resultstring 
+= '<td class=\"resultTitle\">' + key + '</td>'\n\n return resultstring", "def getmetakeywords(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(\"[, ]+\", allcontent[i])\n if words[0] == \"Meta\":\n for j in range(3, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))", "def test_extract_one_column_basic(self):\n self.dicom.extract_keywords([\"PatientID\"])\n\n # ensure column was added\n columns = self.dicom.metadata.column_names\n if u'PatientID' not in columns:\n raise Exception(\"PatientID not added to columns\")\n\n # compare expected results with extract_keywords result\n expected_result = self._get_expected_column_data_from_xml([\"PatientID\"])\n take_result = self.dicom.metadata.take(self.count, columns=['PatientID'])\n numpy.testing.assert_equal(take_result, expected_result)", "def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results", "def text_feature_extract(df):\n return df", "def column_selection(type1, cat):\n col_selection = []\n for col in cat.colnames:\n if col == \"_RAJ2000\":\n continue\n if col == \"_DEJ2000\":\n continue\n desc = cat[col].info.description\n f = any([(ban in desc) for ban in BANNED_KEYWORDS])\n if f is False:\n col_selection.append(col)\n return col_selection", "def test_filtered_instrument_keywords():\n kw = []\n for ins in JWST_INSTRUMENTS:\n kw.append(mm.instrument_keywords(ins, caom=False)['keyword'].tolist())\n\n assert kw[0] != kw[1] != kw[2] != kw[3] != kw[4]", "def get_keywords_for_movie(url):\n pass", "def get_keywords():\n \n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n \n #if imbdid exists use it to look up the API\n if (row['imdbId']):\n \n #get list of keywords and created delimted string\n movie = ia.get_movie(str(row['imdbId']), info='keywords')\n try:\n keywords = \",\".join(movie['keywords'])\n except:\n keywords = None\n \n #update the movies table in the db\n database_helper.update_data(\"movies\", update_params = {\"keywords\" : keywords}, 
select_params = {\"movieId\" : row[\"movieId\"]})\n pbar.update(1)", "def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords", "def test_combine_multiple(self):\n inv_search = 'author:\"gattringer, c*\" keyword:chiral keyword:symmetry -title:chiral'\n spi_search = \"find a c gattringer and k chiral symmetry and not title chiral\"\n self._compare_searches(inv_search, spi_search)", "def _fe_keyword_match(self, sample):\n result = OrderedDict()\n\n for item in self._keywords:\n result[item + \"_kw\"] = 1 if item in sample['fqdn'] else 0\n\n return result", "def completer_func_cols(text, state):\n return [x for x in lc_keys if x.startswith(text)][state]", "def test_keywords(self):\n\n test_cases = (\n makeTestCase('adele 21',\n AlbumResultMatcher(title=Equals('21'), artist=Equals('adele')),\n ArtistResultMatcher(title=Equals('adele'))),\n makeTestCase('kanye power',\n TrackResultMatcher(title=Equals('power', artist=Equals('kanye west'))),\n ArtistResultMatcher(title=Equals('kanye west')),\n AlbumResultMatcher(title=Equals('my beautiful dark twisted fantasy'))),\n makeTestCase('ratat party with children',\n TrackResultMatcher(title=Equals('party with children', artist=Equals('ratatat'))),\n ArtistResultMatcher(title=Equals('ratatat'))),\n makeTestCase('flobot fight with tools handlebars',\n TrackResultMatcher(title=Equals('handlebars')),\n ArtistResultMatcher(title=Equals('flobots')),\n AlbumResultMatcher(title=Equals('fight with tools')))\n )\n\n self._run_tests(tests, {})", "def test_error():\n with pytest.raises(AttributeError):\n doc = []\n model._extract_keywords_single_doc(doc)", "def match_keywords_descriptions(input_company):\n company = input_company[\"name\"]\n key_list = input_company[\"key_list\"]\n\n if key_list == []:\n # if there's no key words, then just pull apart the description.\n # kill any words that are generic amongst all descriptions. 
we want to\n # get something close to being a key word without having key words\n stop = stopwords.words(\"english\")\n too_generic = [\"developer\", \"provider\", \"operator\", \"owner\", \n \"manufacturer\", \"manufactures\", \"company\"]\n\n key_list = [word for word in input_company[\"desc\"].lower().strip()\\\n .split(\" \") if (word not in stop) and \\\n (word not in too_generic)]\n\n syns_list = []\n\n for word in key_list:\n syns = get_synonyms(word)\n\n if syns:\n for s in syns:\n syns_list.append(s)\n\n keyword_matches = company_data[(company_data.key_list.map(lambda x: \\\n [word in x for word in key_list] != [False for word in key_list])) \\\n & (company_data.name != company)]\n\n # add columns for keyword and synonym metrics\n keyword_matches[\"match_fraction\"] = \\\n keyword_matches.key_list.map(lambda x: \\\n sum([word in x for word in key_list])/float(len(key_list)))\n\n keyword_matches[\"syn_match_frac\"] = \\\n keyword_matches.key_list.map(lambda x: \\\n sum([word in x for word in syns_list])/float(len(syns_list)))\n\n return keyword_matches", "def test_caom_instrument_keywords():\n kw = []\n for ins in JWST_INSTRUMENTS:\n kw.append(mm.instrument_keywords(ins, caom=True)['keyword'].tolist())\n\n assert kw[0] == kw[1] == kw[2] == kw[3] == kw[4]", "def extractGTs( Preds, df, theme_kw ) :\n ukt = uniqueKwdTheme( theme_kw ) # keys : kwd, value : unique corresponding theme\n\n sirens_gt = Preds[ Preds[\"is_GT\"]==1 ][\"SIREN\"].tolist()\n df_selected_cols = [\"SIREN\", \"oad_comment\", \"extracted_kw\", \"inTheme\"]\n cols = df_selected_cols + [\"isPG\"] if \"isPG\" in df.columns else df_selected_cols\n dfgt = df[ df.SIREN.isin( sirens_gt ) ][ cols ]\n dfgt[\"matched_keywords\"] = dfgt[\"extracted_kw\"].apply( lambda x : sorted(list(x.keys())) )\n\n #dfgt[ \"inTheme_maximal\" ] = dfgt[ \"inTheme\" ].apply( Counter ).apply( lambda d : [ k for k in d if d[k]>1 ] )\n #dfgt[ \"inTheme_minimal\" ] = dfgt[ \"keywords\" ].apply( lambda x : [ ukt[k] for k in x if k in list(ukt) ] )\n #dfgt[ \"themeP\"] = dfgt[ \"inTheme_maximal\" ] + dfgt[ \"inTheme_minimal\" ]\n #dfgt[ \"themeP\"] = dfgt[ \"themeP\"].apply( lambda x : list( set(x) ) )\n\n dfgt[ \"sector\" ] = dfgt[ [\"inTheme\", \"matched_keywords\"] ].apply( giveTheme, axis=1 )\n\n dfgt.reset_index(inplace=True)\n dfgt = dfgt.drop(columns=[\"extracted_kw\", \"index\", \"inTheme\"])\n #dfgt = dfgt.drop(columns=[\"extracted_kw\", \"index\", \"inTheme\", \"inTheme_maximal\", \"inTheme_minimal\"])\n\n return dfgt", "def test_single_doc(keyphrase_length, vectorizer):\n top_n = 5\n\n keywords = model.extract_keywords(doc_one,\n keyphrase_ngram_range=keyphrase_length,\n min_df=1,\n top_n=top_n,\n vectorizer=vectorizer)\n\n assert isinstance(keywords, list)\n assert isinstance(keywords[0], tuple)\n assert isinstance(keywords[0][0], str)\n assert isinstance(keywords[0][1], float)\n assert len(keywords) == top_n\n for keyword in keywords:\n assert len(keyword[0].split(\" \")) <= keyphrase_length[1]", "def test_findcorrectkeyword(self):\n mic = mi.MicrophoneToText()\n\n mic.keywordsshort[\"street\"] = [\"adresse lautet amselweg\", 'useless']\n mic.keywordsshort['location'] = [\"der ort lautet berlin\", 'useless']\n mic.keywordsshort['capital'] = [\"der Kaufpreis lautet vierhunderttausend\", 'useless']\n mic.keywordsshort['income'] = [\"das Eigenkapital lautet 200000\", 'useless']\n mic.keywordsshort['price'] = [\"der kaufpreis beträgt fünfundzwanzigtausend\", 'useless']\n\n mic.find_correct_keyword()\n\n 
self.assertEqual(mic.resultkeywords['street'], ['amselweg'])\n self.assertEqual(mic.resultkeywords['location'], ['berlin'])\n self.assertEqual(mic.resultkeywords['capital'], [400000])\n self.assertEqual(mic.resultkeywords['income'], [200000])\n self.assertEqual(mic.resultkeywords['price'], [25000])", "def test_queryKeywordFlag(self):\n self._keywordFilteringTest(\"keyword\")", "def extract_keywords(raw_text,id):\n\n print(\"Extracting keywords for \"+id)\n\n stemmer = nltk.PorterStemmer()\n\n # Construct text\n\n # Tokens\n tokens = nltk.word_tokenize(raw_text)\n # filter undesirable words and format\n words = [w.replace('\\'','') for w in tokens if len(w)>=3]\n text = nltk.Text(words)\n\n tagged_text = nltk.pos_tag(text)\n #nouns = [tg[0] for tg in tagged_text if tg[1]=='NN' or tg[1]=='NNP' ]\n #print(nouns)\n\n # multi-term\n multiterms = set()\n stem_dico = {}\n for i in range(len(tagged_text)) :\n # max length 4 for multi-terms ==> 3\n for l in range(1,4) :\n if i+l < len(tagged_text) :\n tags = [tagged_text[k] for k in range(i,i+l)]\n if potential_multi_term(tags) :\n multistemlist = [str.lower(stemmer.stem(tagged_text[k][0])) for k in range(i,i+l)]\n #multistem.sort(key=str.lower)\n\t\t #python 3 : remove .encode('ascii','ignore')\n multistem = functools.reduce(lambda s1,s2 : s1+' '+s2,multistemlist)\n rawtext = functools.reduce(lambda s1,s2 : s1+' '+s2,[str.lower(tagged_text[k][0]) for k in range(i,i+l)])\n multiterms.add(multistem)\n if multistem in stem_dico :\n stem_dico[multistem].add(rawtext)\n else :\n stem_dico[multistem] = set([rawtext])\n\n return [list(multiterms),stem_dico]" ]
[ "0.6677044", "0.6215139", "0.6187469", "0.6122244", "0.60693324", "0.5876271", "0.58554405", "0.5853709", "0.5804128", "0.5798819", "0.57620394", "0.56455034", "0.56223565", "0.5598598", "0.55157304", "0.54846966", "0.5472483", "0.546747", "0.54632664", "0.54573935", "0.544548", "0.54256314", "0.53962445", "0.5395939", "0.5373473", "0.53471893", "0.5324514", "0.53108996", "0.53090155", "0.52919436" ]
0.6477459
1
test extract keyword with invalid column
def test_extract_invalid_column(self): self.dicom.extract_keywords(["invalid"]) # ensure column was added columns = self.dicom.metadata.column_names if u'invalid' not in columns: raise Exception("Invalid column not added") # compare expected and actual result invalid_column = self.dicom.metadata.take(self.count, columns=[u'invalid']) expected_result = [[None] for x in range(0, self.count)] self.assertEqual(invalid_column, expected_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_extract_multiple_invalid_columns(self):\n keywords = [\"invalid\", \"another_invalid_col\"]\n self.dicom.extract_keywords(keywords)\n\n # test that columns were added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"invalid column not added to columns\")\n if u'another_invalid_col' not in columns:\n raise Exception(\"another_invalid_col not added to columns\")\n\n # compare actual with expected result\n invalid_columns = self.dicom.metadata.take(self.count, columns=keywords)\n expected_result = [[None, None] for x in range(0, self.count)]\n self.assertEqual(invalid_columns, expected_result)", "def test_error():\n with pytest.raises(AttributeError):\n doc = []\n model._extract_keywords_single_doc(doc)", "def test_queryUnkeywordFlag(self):\n self._keywordFilteringTest(\"unkeyword\")", "def test_extract_column_zero_and_one_correct_content():\n data = \"\"\"[email protected],Records manager\[email protected],Network engineer\[email protected],Electrical engineer\[email protected],Science writer\"\"\"\n result = extract.extract_data_given_column(data, 0)\n assert \"[email protected]\" in result\n result = extract.extract_data_given_column(data, 1)\n assert \"Records manager\" in result", "def test_in_word(self):\n with self.assertRaises(ValueError):\n term, rmd = util.parse_date(\"notasearch1902foradatepartial\")", "def valid_key(key: str) -> str:\n keywords = {\"link\": [\"URL\"], \"label\": [\"Name\"], \"id\": [\"ID\"]}\n\n for k, value in keywords.items():\n if key in value:\n return k\n raise InvalidCSVFormat(f\"Unknown Column Name : {key}\")", "def test_columns_non_string_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=1)", "def test_syntax_error(self):\n\n self.assertRaises(SyntaxError, self.table.where, 'foo bar')", "def regex_catch(self, column):\n ## considering currency_list and metric_list are globally defined.\n pattern_dict = {\"Column_name\": column, \"email-id\": False, \"website\": False, \"Percentage\": False,\n \"CurrencyCol\": {\"value\": False, \"currency\": None}, \"MetricCol\": {\"value\": False, \"metric\": None},\n \"SepSymbols\": {\"value\": False, \"Symbol\": None}}\n column_val = self.data_frame[column].astype(str).str.strip()\n df1 = self.data_frame[column_val.apply(validate_email)]\n if df1.shape[0] >= (0.8 * self.data_frame.shape[0]):\n pattern_dict[\"email-id\"] = True\n elif column_val.str.contains(\"%$\", na=True).all():\n pattern_dict[\"Percentage\"] = True\n elif column_val.str.contains(\"^https:|^http:|^www.\", na=True).all():\n pattern_dict[\"website\"] = True\n elif column_val.str.contains(\"[0-9]+[.]{0,1}[0-9]*\\s*[Aa-zZ]{1,2}$\").all():\n metric = list(map(lambda x: re.sub(\"[0-9]+[.]{0,1}[0-9]*\\s*\", \"\", x), column_val))\n if len(set(metric)) == 1:\n if metric[0] in Metric_list:\n pattern_dict[\"MetricCol\"][\"value\"] = True\n pattern_dict[\"MetricCol\"][\"metric\"] = metric[0]\n elif column_val.str.contains(\"([0-9]+[.]{0,1}[0-9]*\\s*\\W$)|(^\\W[0-9]+[.]{0,1}[0-9]*)\").all():\n currency = list(map(lambda x: re.sub(\"[0-9.\\s]+\", \"\", x), column_val))\n if len(set(currency)) == 1:\n if currency[0] in currency_list:\n pattern_dict[\"CurrencyCol\"][\"value\"] = True\n pattern_dict[\"CurrencyCol\"][\"currency\"] = currency[0]\n elif column_val.str.contains(\"\\S+\\s*[\\W_]+\\s*\\S+\").all():\n seperators = list(map(lambda x: re.sub(\"\\s*[a-zA-Z0-9]+$\", \"\", x),\n list(map(lambda x: re.sub('^[a-zA-Z0-9]+\\s*', '', x), column_val))))\n if len(set(seperators)) == 
1:\n if seperators[0] == \"\":\n seperators[0] = ' '\n pattern_dict[\"SepSymbols\"][\"value\"] = True\n pattern_dict[\"SepSymbols\"][\"Symbol\"] = seperators[0]\n return pattern_dict", "def test_no_timestamp(self):\n self.assertRaises(PyntsError, extract_columns, self.data[['a', 'b']], ['a'], ['timestamp'])", "def test_wrong_keyword(self):\n # TODO: Exception\n with self.assertRaises(Exception):\n self.test_table.change_header(Path=1, Sectionname=2, Value=3)", "def test_daal_pca_bad_column_name(self):\n with self.assertRaisesRegexp(Exception, \"column ERR was not found\"):\n self.context.daaltk.models.dimreduction.pca.train(self.frame,\n [\"ERR\", \"X2\", \"X3\", \"X4\", \"X5\",\n \"X6\", \"X7\", \"X8\", \"X9\", \"X10\"],\n k=10)", "def test_regex_bad_case_sensitivity(self):\n with self.assertRaises(despydb.UnknownCaseSensitiveError):\n self.dbh.get_regex_clause(\"'ABC'\", 'a.*', 'F')", "def apply_keyword(df, keyword, field, is_high):\n add, other = ('high', 'low') if is_high else ('low', 'high')\n df.loc[\n (df[field].str.contains(keyword, flags=re.IGNORECASE) &\n (df.annotation_quality != other)),\n 'annotation_quality'\n ] = add\n # give mismatches a tbd label\n mismatch = df.loc[\n (df[field].str.contains(keyword, flags=re.IGNORECASE) &\n (df.annotation_quality == other)),\n 'annotation_quality'\n ] = 'tbd'", "def test_empty_condition(self):\n\n self.assertRaises(SyntaxError, self.table.where, '')", "def test_extract_multiple_columns_basic(self):\n keywords = [\"PatientID\", \"SOPInstanceUID\"]\n self.dicom.extract_keywords(keywords)\n\n # ensure columns were added\n columns = self.dicom.metadata.column_names\n if u'PatientID' not in columns:\n raise Exception(\"PatientID not added to columns\")\n if u'SOPInstanceUID' not in columns:\n raise Exception(\"SOPInstanceUID not added to columns\")\n\n # compare expected and actual result\n expected_result = self._get_expected_column_data_from_xml(keywords)\n take_result = self.dicom.metadata.take(self.count, columns=keywords)\n numpy.testing.assert_equal(take_result, expected_result)", "def test_extract_one_column_basic(self):\n self.dicom.extract_keywords([\"PatientID\"])\n\n # ensure column was added\n columns = self.dicom.metadata.column_names\n if u'PatientID' not in columns:\n raise Exception(\"PatientID not added to columns\")\n\n # compare expected results with extract_keywords result\n expected_result = self._get_expected_column_data_from_xml([\"PatientID\"])\n take_result = self.dicom.metadata.take(self.count, columns=['PatientID'])\n numpy.testing.assert_equal(take_result, expected_result)", "def test_queryKeywordFlag(self):\n self._keywordFilteringTest(\"keyword\")", "def test_no_column(self):\n\n self.assertRaises(ValueError, self.table.where, 'True')", "def test_invalid_columns():\n train = ((\"Lorem ipsum dolor sit amet\", 3),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train, named_columns=['some_random_col'])", "def _nonkey():\n def not_key(s):\n return not (lexer.singularize(s.lower()) in pattern_key)\n def p(tok):\n return tok.type == 'WORD' and not_key(tok.value)\n return next_word().if_test(p)", "def test_using_nonexistant_column_names_in_query_args_raises_error(self):\r\n with self.assertRaises(AttributeError):\r\n TestModel.objects(TestModel.nonsense == 5)", "def test_clean_row_punctuation(self):\n\t\tobj_ut = sentiment.clean_row(\n\t\t\t'100\\tan \"apple...:\" is it yellow-green, or red/orange?')\n\t\tself.assertEqual(obj_ut[1], \"an apple is it yellowgreen or 
redorange\")", "def test_invalid_variable_name(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float name = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float target = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float version = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float array name =\\n\\t-0.1, 0.2\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float array target =\\n\\t-0.1, 0.2\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float array version =\\n\\t-0.1, 0.2\")", "def test_contains_raises_key_error_when_partial_word_searched(full_trie):\n with pytest.raises(KeyError):\n full_trie.contains(\"he\")", "def text_feature_extract(df):\n return df", "def _skipbykeyword(self, keywordexpr):\n if not keywordexpr:\n return\n chain = self.listchain()\n for key in filter(None, keywordexpr.split()):\n eor = key[:1] == '-'\n if eor:\n key = key[1:]\n if not (eor ^ self._matchonekeyword(key, chain)):\n return True", "def test_unknown_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'u')", "def test_clean_row_lowercase(self):\n\t\tobj_ut = sentiment.clean_row(\n\t\t\t'100\\tAn APPLE so GOODforme')\n\t\tself.assertEqual(obj_ut[1], \"an apple so goodforme\")", "def test_dataset_sql_with_non_string():\n svl_string = \"\"\"\n DATASETS\n bigfoot \"bigfoot.csv\"\n bigfoot_with_location SQL 3\n PIE bigfoot_with_location AXIS has_location\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)" ]
[ "0.67696226", "0.62111384", "0.60892504", "0.5949599", "0.5847804", "0.5776143", "0.5772083", "0.5753665", "0.5750476", "0.5643494", "0.5636122", "0.5633555", "0.5622981", "0.5614792", "0.55934906", "0.5545634", "0.5531861", "0.552274", "0.5515511", "0.54943573", "0.54789686", "0.5455828", "0.5423585", "0.54214686", "0.5414794", "0.54142773", "0.54016733", "0.5358965", "0.5354084", "0.53438914" ]
0.7023457
0
test extract keyword mult invalid cols
def test_extract_multiple_invalid_columns(self): keywords = ["invalid", "another_invalid_col"] self.dicom.extract_keywords(keywords) # test that columns were added columns = self.dicom.metadata.column_names if u'invalid' not in columns: raise Exception("invalid column not added to columns") if u'another_invalid_col' not in columns: raise Exception("another_invalid_col not added to columns") # compare actual with expected result invalid_columns = self.dicom.metadata.take(self.count, columns=keywords) expected_result = [[None, None] for x in range(0, self.count)] self.assertEqual(invalid_columns, expected_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_extract_invalid_column(self):\n self.dicom.extract_keywords([\"invalid\"])\n\n # ensure column was added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"Invalid column not added\")\n\n # compare expected and actual result\n invalid_column = self.dicom.metadata.take(self.count, columns=[u'invalid'])\n expected_result = [[None] for x in range(0, self.count)]\n self.assertEqual(invalid_column, expected_result)", "def test_extract_multiple_columns_basic(self):\n keywords = [\"PatientID\", \"SOPInstanceUID\"]\n self.dicom.extract_keywords(keywords)\n\n # ensure columns were added\n columns = self.dicom.metadata.column_names\n if u'PatientID' not in columns:\n raise Exception(\"PatientID not added to columns\")\n if u'SOPInstanceUID' not in columns:\n raise Exception(\"SOPInstanceUID not added to columns\")\n\n # compare expected and actual result\n expected_result = self._get_expected_column_data_from_xml(keywords)\n take_result = self.dicom.metadata.take(self.count, columns=keywords)\n numpy.testing.assert_equal(take_result, expected_result)", "def test_extract_column_zero_and_one_correct_content():\n data = \"\"\"[email protected],Records manager\[email protected],Network engineer\[email protected],Electrical engineer\[email protected],Science writer\"\"\"\n result = extract.extract_data_given_column(data, 0)\n assert \"[email protected]\" in result\n result = extract.extract_data_given_column(data, 1)\n assert \"Records manager\" in result", "def test_extract_one_column_basic(self):\n self.dicom.extract_keywords([\"PatientID\"])\n\n # ensure column was added\n columns = self.dicom.metadata.column_names\n if u'PatientID' not in columns:\n raise Exception(\"PatientID not added to columns\")\n\n # compare expected results with extract_keywords result\n expected_result = self._get_expected_column_data_from_xml([\"PatientID\"])\n take_result = self.dicom.metadata.take(self.count, columns=['PatientID'])\n numpy.testing.assert_equal(take_result, expected_result)", "def test_error():\n with pytest.raises(AttributeError):\n doc = []\n model._extract_keywords_single_doc(doc)", "def test_columns_non_string_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=1)", "def test_gather_columns(self):\n expected_gathered_columns = [\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: date_0 | extra_date_rule | \"(\" + date + \")\"\n datetime.2: datetime_0 | extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_0 | datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: date_0 | extra_date_rule | \"(\" + date + \")\"\n datetime.2: datetime_0 | extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_0 | datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: bool_0 | TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | str_2 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: extra_date_rule | \"(\" + date + \")\"\n datetime.2: extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: 
datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: extra_date_rule | \"(\" + date + \")\"\n datetime.2: extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: date_0 | extra_date_rule | \"(\" + date + \")\"\n datetime.2: datetime_0 | extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_0 | datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: extra_date_rule | \"(\" + date + \")\"\n datetime.2: extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n ]\n for selectable, expected_gathered in zip(\n self.selectables, expected_gathered_columns\n ):\n columns = make_column_collection_for_selectable(selectable)\n gathered_columns = f\"\"\"\n {gather_columns(\"unusable_col\", columns, \"unusable\")}\n {gather_columns(\"date.1\", columns, \"date\", additional_rules=[\"extra_date_rule\"])}\n {gather_columns(\"datetime.2\", columns, \"datetime\", additional_rules=[\"extra_datetime_rule\"])}\n {gather_columns(\"datetime_end.1\", columns, \"datetime\", additional_rules=[\"datetime_end_conv\", \"datetime_aggr\"])}\n {gather_columns(\"boolean.1\", columns, \"bool\", additional_rules=[\"TRUE\", \"FALSE\", \"extra_bool_rule\"])}\n {gather_columns(\"string.1\", columns, \"str\", additional_rules=[\"ESCAPED_STRING\", \"extra_string_rule\"])}\n {gather_columns(\"num.1\", columns, \"num\", additional_rules=[\"NUMBER\", \"extra_num_rule\"])}\n \"\"\"\n self.assertEqual(\n str_dedent(gathered_columns), str_dedent(expected_gathered)\n )", "def valid_col_tester(self, state):\n vert_state = self.cols(state)\n for line in vert_state:\n line_index = vert_state.index(line)\n vert_word = self.check_word(vert_state[line_index])\n if not(vert_word):\n return False\n return True", "def test_invalid_columns():\n train = ((\"Lorem ipsum dolor sit amet\", 3),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train, named_columns=['some_random_col'])", "def test_no_timestamp(self):\n self.assertRaises(PyntsError, extract_columns, self.data[['a', 'b']], ['a'], ['timestamp'])", "def test_daal_pca_bad_column_name(self):\n with self.assertRaisesRegexp(Exception, \"column ERR was not found\"):\n self.context.daaltk.models.dimreduction.pca.train(self.frame,\n [\"ERR\", \"X2\", \"X3\", \"X4\", \"X5\",\n \"X6\", \"X7\", \"X8\", \"X9\", 
\"X10\"],\n k=10)", "def test_extract_column_zero_and_one_correct_length():\n data = \"\"\"[email protected],Records manager\[email protected],Network engineer\[email protected],Electrical engineer\[email protected],Science writer\"\"\"\n result = extract.extract_data_given_column(data, 0)\n assert len(result) == 4\n result = extract.extract_data_given_column(data, 1)\n assert len(result) == 4", "def test_daal_pca_bad_column_type(self):\n with self.assertRaisesRegexp(Exception, \"columns must be a list of strings\"):\n self.context.daaltk.models.dimreduction.pca.train(self.frame, 10, k=10)", "def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True", "def text_feature_extract(df):\n return df", "def test_guided():\n top_n = 5\n seed_keywords = [\"time\", \"night\", \"day\", \"moment\"]\n keywords = model.extract_keywords(doc_one,\n min_df=1,\n top_n=top_n,\n seed_keywords=seed_keywords)\n\n assert isinstance(keywords, list)\n assert isinstance(keywords[0], tuple)\n assert isinstance(keywords[0][0], str)\n assert isinstance(keywords[0][1], float)\n assert len(keywords) == top_n", "def regex_catch(self, column):\n ## considering currency_list and metric_list are globally defined.\n pattern_dict = {\"Column_name\": column, \"email-id\": False, \"website\": False, \"Percentage\": False,\n \"CurrencyCol\": {\"value\": False, \"currency\": None}, \"MetricCol\": {\"value\": False, \"metric\": None},\n \"SepSymbols\": {\"value\": False, \"Symbol\": None}}\n column_val = self.data_frame[column].astype(str).str.strip()\n df1 = self.data_frame[column_val.apply(validate_email)]\n if df1.shape[0] >= (0.8 * self.data_frame.shape[0]):\n pattern_dict[\"email-id\"] = True\n elif column_val.str.contains(\"%$\", na=True).all():\n pattern_dict[\"Percentage\"] = True\n elif column_val.str.contains(\"^https:|^http:|^www.\", na=True).all():\n pattern_dict[\"website\"] = True\n elif column_val.str.contains(\"[0-9]+[.]{0,1}[0-9]*\\s*[Aa-zZ]{1,2}$\").all():\n metric = list(map(lambda x: re.sub(\"[0-9]+[.]{0,1}[0-9]*\\s*\", \"\", x), column_val))\n if len(set(metric)) == 1:\n if metric[0] in Metric_list:\n pattern_dict[\"MetricCol\"][\"value\"] = True\n pattern_dict[\"MetricCol\"][\"metric\"] = metric[0]\n elif column_val.str.contains(\"([0-9]+[.]{0,1}[0-9]*\\s*\\W$)|(^\\W[0-9]+[.]{0,1}[0-9]*)\").all():\n currency = list(map(lambda x: re.sub(\"[0-9.\\s]+\", \"\", x), column_val))\n if len(set(currency)) == 1:\n if currency[0] in currency_list:\n pattern_dict[\"CurrencyCol\"][\"value\"] = True\n pattern_dict[\"CurrencyCol\"][\"currency\"] = currency[0]\n elif column_val.str.contains(\"\\S+\\s*[\\W_]+\\s*\\S+\").all():\n seperators = list(map(lambda x: re.sub(\"\\s*[a-zA-Z0-9]+$\", \"\", x),\n list(map(lambda x: re.sub('^[a-zA-Z0-9]+\\s*', '', x), column_val))))\n if len(set(seperators)) == 1:\n if seperators[0] == \"\":\n seperators[0] = ' '\n pattern_dict[\"SepSymbols\"][\"value\"] = True\n pattern_dict[\"SepSymbols\"][\"Symbol\"] = seperators[0]\n return pattern_dict", "def testBadGetColumnSolution(self):\n actionlist = [\"ISBN\",9,8,10,\"5\",\"\",\"1\"]\n for action in actionlist:\n val = getColumnSelection(action)\n self.assertFalse(val)", "def test_queryUnkeywordFlag(self):\n self._keywordFilteringTest(\"unkeyword\")", "def extract_description_features(df, desc_col_name, remove_var=False):\n df['isAcctNo'] = 
df[desc_col_name].str.contains('$ACCT_NO', regex=False).astype('int')\n df['isForeignCurr'] = df[desc_col_name].str.contains('$CURRENCY', regex=False).astype('int')\n # df['isForeignCountry'] = df[desc_col_name].str.contains('$FOREIGN_COUNTRY', regex=False).astype('int')\n\n if remove_var:\n regex = '\\$ACCT_NO|\\$CURRENCY|\\$FOREIGN_COUNTRY'\n df[desc_col_name] = df[desc_col_name].str.replace(regex, '', regex=True)\n return df", "def test_extract_metadata_column(self):\r\n obs = extract_metadata_column(self.otu_table.SampleIds,\r\n self.metadata, category='CAT2')\r\n exp = ['A', 'B', 'A', 'B', 'A', 'B']\r\n self.assertEqual(obs, exp)", "def get_cols_dummy():", "def valid_key(key: str) -> str:\n keywords = {\"link\": [\"URL\"], \"label\": [\"Name\"], \"id\": [\"ID\"]}\n\n for k, value in keywords.items():\n if key in value:\n return k\n raise InvalidCSVFormat(f\"Unknown Column Name : {key}\")", "def test_wrong_keyword(self):\n # TODO: Exception\n with self.assertRaises(Exception):\n self.test_table.change_header(Path=1, Sectionname=2, Value=3)", "def test_row_from_columns_no_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def test_nested_col(self):\n\n self.assertRaises(TypeError, self.table.where, 'c_nested')", "def test_slice_name_age(self):\n self.insert()\n data = self.tbl['name', 'age']\n assert self.check(self.idata[:, [0, 1, 2]], data)", "def test_keyword_extractor(self):\n data = [{\"Header\": \"This is a Header\", \"Paragraph\": \"This is a Paragraph\", \"slide\": 10}]\n keywords = keyword_extractor(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(keywords, data)", "def test_columns_str_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=None)\n\n x.columns = \"a\"\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=df)", "def test_extract_fields_should_extract_fields(self):\n s = \"KEY1::VALUE1\\tKEY2::VALUE2 KEY3::VALUE3\"\n fields = self.collector._extract_fields(s)\n self.assertEqual(fields.get('KEY1'), 'VALUE1')\n self.assertFalse('KEY2' in fields)" ]
[ "0.7062179", "0.6464477", "0.61323917", "0.60444987", "0.58714956", "0.5868322", "0.57832515", "0.5718176", "0.56897604", "0.5673974", "0.5634569", "0.5604178", "0.5597052", "0.55365217", "0.549694", "0.5495881", "0.548916", "0.5480683", "0.5460633", "0.54129183", "0.5369866", "0.5365994", "0.5349037", "0.53356403", "0.5319077", "0.5301559", "0.5300541", "0.52960026", "0.5293214", "0.5273555" ]
0.7282601
0
Write a content into several files.
def write(self, content): for f in self.files: f.write(content) f.flush() # Want this content is displayed immediately on file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeMultipleFiles(self, filePaths, ss): \n \n for i,filePath in enumerate(filePaths): \n self.writeSingleFileLines(filePath, [ss[i]])", "def write(self, txt):\n for fp in self.files:\n fp.write(txt)", "def save(self, content_dir):\n print_verbose(\n \"INFO : Writing random HTML documents to files...\",\n self.args.verbose,\n )\n for i in range(self.n):\n dir_path = content_dir + \"/\" + \"staticpage\" + str(i)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n index_file = os.path.join(dir_path, \"index.html\") \n with open(index_file, \"w\") as file:\n file.write(self.doc_strings[i].decode(\"utf-8\"))", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def _write_files(files, prefix=None, clobber=False):\n [_write_file(infile, prefix, clobber) for infile in files]", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def write_file(self, directory, name, content):\n\n try:\n f = open(os.path.join(directory, name), 'w')\n f.write(content)\n f.close()\n except:\n print \"Content not written to file: %s\" % name", "def write_to_master_file(\n self, all_files=[], filename=sys.argv[2], separator=sys.argv[3]\n ) -> None:\n if filename == \"\":\n raise EnvironmentError(\"No filename provided!\")\n\n first_file = all_files[0]\n\n with open(filename, \"w+\") as master:\n with open(first_file, \"r+\") as initial_write:\n for line in initial_write:\n master.write(line)\n\n if len(all_files) > 1:\n for i in range(1, len(all_files)):\n master.write(separator)\n with open(all_files[i], \"r+\") as file_to_append:\n for line in file_to_append:\n master.write(line)", "def fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()", "def write(self, content):\n ...", "def write_to_file(content, filename):\n if not os.path.isfile(filename): # Checking if file already exists, don't append data if it does.\n for j in range(len(content)): # For each dialog in dialogues array.\n with open(filename, 'a') as file: # Open a text file in append mode and write data into it.\n for k in range(len(content[j][0])):\n file.write('{0} {1}\\n'.format(str(content[j][0][k]).lower().split(\"(\")[0],\n str(content[j][1][k])).lower())", "def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n 
writer, data_file, label_file)", "def write_files(self, basedir):\n outdir = basedir / self.type\n outdir.mkdir(parents=True, exist_ok=True)\n\n for point, row in zip(self.points, self.array):\n filepath = outdir / point\n with filepath.open('w') as f:\n idx = 0\n for ikey in self.pardict.keys():\n f.write(\"{} {}\\n\".format(ikey, row[idx]))\n idx += 1\n logging.debug('wrote %s', filepath)", "def save_content(content, dir_path, file_name):\r\n if not os.path.exists(dir_path):\r\n os.mkdir(dir_path)\r\n with open(f'{dir_path}\\\\{file_name}', 'w') as output_file:\r\n output_file.write(content)", "def write_urls_to_file(urls, file_name):\n with open(file_name, 'w') as file_handler:\n for url in urls:\n content = read_url(url)\n pretty_content = pretty_print_content(content)\n file_handler.write(pretty_content)", "def write(afile, seqs): \n for s in seqs :\n writeseq(afile, s)", "def _write_content(i, content):\n fpath = io_mgr.get_parties_json(i)\n with open(fpath, 'w') as fstream:\n fstream.write(json.dumps(content, indent=4))", "def write_file(self, contents):\n fd = open(os.path.join(os.path.dirname(__file__),\n 'data', 'test.html'), 'w')\n fd.write(contents)\n fd.close()", "def writeSeqSitesToFiles(path, filenames, seqCharSitesDicList):\n ## filenames is a list of file anmes, get the number of total files\n nFiles = len(filenames)\n if not os.path.exists(path):\n os.makedirs(path)\n for i in range(0, nFiles):\n filepath = os.path.join(path, filenames[i])\n align_file = open(filepath, \"w\")\n sequenceSet = []\n sequenceSet.append(''.join(seqCharSitesDicList[i][\"t1\"]))\n sequenceSet.append(''.join(seqCharSitesDicList[i][\"t2\"]))\n records =[]\n for (index,seq) in enumerate(sequenceSet):\n records.append(SeqRecord(Seq(seq, IUPAC.unambiguous_dna), id= (\"t\"+str(index+1)), description=''))\n SeqIO.write(records, open(os.path.join(path, filenames[i]), \"w\"), \"fasta\") \n align_file.close()", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def save_list_to_file(content: list, dst_path: str, append=False) -> None:\n with io.open(file=dst_path, mode=\"a\" if append else \"w\", encoding='utf-8') as destination_file:\n for element in content:\n destination_file.write(element + \"\\n\")", "def write_file_content(path, file_name, content):\n if not os.path.exists(path):\n os.makedirs(path)\n f = io.open(path + file_name, \"w\", encoding = 'utf-8')\n f.write(content)\n f.close()", "def write(self):\n with open(self.filename, 'w') as outfile:\n [outfile.write(element) for element in self.preamble]\n [outfile.write(element) for element in self.body]", "def _write_files(output_root, contents, generated_suffix_map=None):\r\n _ensure_dir(output_root)\r\n to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())\r\n\r\n if generated_suffix_map:\r\n for output_file in contents.keys():\r\n for suffix, generated_suffix in generated_suffix_map.items():\r\n if output_file.endswith(suffix):\r\n to_delete.discard(output_file.replace(suffix, generated_suffix))\r\n\r\n for extra_file in to_delete:\r\n (output_root / extra_file).remove_p()\r\n\r\n for filename, file_content in contents.iteritems():\r\n output_file = output_root / filename\r\n\r\n not_file = not output_file.isfile()\r\n\r\n # not_file is included to short-circuit this check, because\r\n # read_md5 depends on the file already existing\r\n write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest() # pylint: disable=E1121\r\n 
if write_file:\r\n LOG.debug(\"Writing %s\", output_file)\r\n output_file.write_bytes(file_content)\r\n else:\r\n LOG.debug(\"%s unchanged, skipping\", output_file)", "def write_to_file(content: Union[article_content, None],\n save_path: str,\n keyword: str,\n num_file: int\n ) -> None:\n if content is None: \n return\n \n # Establish the filename for the text file, which will have the form\n # \"[keyword]_[num].txt\"\n file_name = f\"{keyword.replace(' ', '_')}_{num_file:02d}\"\n with open(f\"{save_path}/{file_name}.txt\", 'w') as new_article_txt: \n \n # Write the head at the top of the text file\n try: \n new_article_txt.write(f\"{content.head}\\n\\n\")\n except UnicodeEncodeError: \n new_article_txt.write(\n f\"{content.head.encode(FORMAT, 'replace')}\\n\\n\"\n )\n \n # Write the article body under the heading, separating each sentence\n # with a single newline\n for line in content.body:\n try:\n new_article_txt.write(f\"{line}\\n\")\n except UnicodeEncodeError:\n new_article_txt.write(f\"{line.encode(FORMAT, 'replace')}\\n\")", "def fwrite(filename,content):\n\tf = open (filename,\"w\")\n\tcontent = [\" \".join(elem) for elem in content]\t#Join the words with a space in between\n\tcontent = \"\\n\".join(content).replace(\"&^&%+\",\" \")\t#Join the lines with a new line character\n\tf.write(content)\t#Write the content to the file\n\tprint \"Saving \"+filename+\"...Done\"\n\tf.close()", "def writingtxt(pathf,ticker,nb,typ):\n\n assert len(pathf) == nb, 'the number of issues does not match' # an assert to check that the number of paths parsed is equal nb\n \n \n filename = [typ+ticker.lower()+str(2020-i)+'.txt' for i in reversed(range(0,nb))] # loop to create list of filenames\n \n for p,n in zip(pathf,filename): # looping through files and filesname\n\n raw_html = open(p,encoding = 'utf-8') # opening the html given the path\n \n soup = bsoup(raw_html, 'lxml')# decoding the html\n\n ltables = soup.find_all('table') # locating the tables\n \n for z in ltables: # removing the table\n z.decompose()\n \n bla = open(n,\"w+\", encoding = 'utf-8') #opening a new .txt file with filename\n bla.write(soup.text) # writing the text of the report in the .txt file\n \n\n return None", "def write_file(filename, content):\n codecs.open(filename, \"w\", encoding='utf-8').writelines(content)", "def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))" ]
[ "0.6787142", "0.6576334", "0.6377544", "0.6341663", "0.624191", "0.6238459", "0.62333816", "0.62244797", "0.6208775", "0.61102337", "0.6087412", "0.6084252", "0.6036357", "0.60239494", "0.6023563", "0.6013627", "0.5981921", "0.5963397", "0.59628314", "0.5941813", "0.5930109", "0.5929623", "0.59137887", "0.59129995", "0.59116906", "0.59040153", "0.58795273", "0.58671635", "0.58522034", "0.58363664" ]
0.7432771
0
If "l" is exist in sys.argv or test_status is Failed then keeping the log file. If test_status is Passed and missing "l" from sys.argv then deleting log file.
def save_log(self, test_status: str = Status.FAILED): self.__log.close() sys.stdout = self.__original_stdout if test_status == Status.PASSED and Logger.__KEEP_LOG_FLAG not in sys.argv: if os.path.isfile(self.__log_file_path): os.remove(self.__log_file_path) print(Colors.OKBLUE + "\nLog file has been removed\n" + Colors.ENDC) return if os.path.isfile(self.__log_file_path): print(Colors.OKBLUE + "\nLog file has been kept at: {}\n".format(self.__log_file_path) + Colors.ENDC)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tearDown(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def test_999_remove_testfiles(self):\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __dir_game_testfile = os.path.join(__dir_game_saves, __test_filename)\n __test_filename_append1 = __test_filename + \"__1\"\n __dir_game_testfile_append1 = os.path.join(__dir_game_saves, __test_filename_append1)\n __test_filename_append2 = __test_filename + \"__2\"\n __dir_game_testfile_append2 = os.path.join(__dir_game_saves, __test_filename_append2)\n __test_filename_append3 = __test_filename + \"__3\"\n __dir_game_testfile_append3 = os.path.join(__dir_game_saves, __test_filename_append3)\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n os.remove(__dir_game_logfile)\n self.assertFalse(os.path.isfile(__dir_game_logfile))\n __list_files = os.listdir(__dir_game_log)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_log)\n os.remove(__dir_game_testfile)\n self.assertFalse(os.path.isfile(__dir_game_testfile))\n os.remove(__dir_game_testfile_append1)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append1))\n os.remove(__dir_game_testfile_append2)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append2))\n os.remove(__dir_game_testfile_append3)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append3))\n __list_files = os.listdir(__dir_game_saves)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_saves)", "def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False", "def test_despasito_log_file():\n\n fname = \"despasito_{}.log\".format(random.randint(1, 10))\n despasito.initiate_logger(log_file=fname, verbose=10)\n logger.info(\"test\")\n\n if os.path.isfile(fname):\n flag = True\n despasito.initiate_logger(log_file=False)\n try:\n os.remove(fname)\n except Exception:\n print(\"Error removing log file\")\n else:\n flag = False\n\n assert flag", "def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)", "def setUp(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def exitLogCleanup(*args):\n for logFile in args:\n os.unlink(logFile)\n return None", "def pytest_sessionfinish(session, exitstatus):\n\n # dat files are created when using attachements\n print(\"\\n-------------------------\\nClean dpytest_*.dat files\")\n fileList = glob.glob('./dpytest_*.dat')\n for filePath in fileList:\n try:\n os.remove(filePath)\n except Exception:\n print(\"Error while deleting file : \", filePath)", "def test_fail(self):\n os.chdir(self.directory)\n if (os.path.isfile('optimization_test')):\n shutil.rmtree('optimization_test',ignore_errors=True)", "def 
test_DDSim_runIt_failure_LogFile_ignore(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.InputFile = \"pairs.hepmc\"\n self.ddsim.ignoreapperrors = True\n ## side effect for Script, userlibs, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, False, False, False] ) ):\n res = self.ddsim.runIt()\n assertDiracSucceeds( res, self )", "def test_passing_env(self):\n\n log_file = \"test.log\"\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n if os.path.exists(whole_log_file):\n os.remove(whole_log_file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_file\n\n logger = pgo_logger.get_logger()\n\n assert logger is not None\n\n logger.info(\"test\")\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True", "def test_DDSim_runIt_failure_LogFile(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.InputFile = \"pairs.hepmc\"\n self.ddsim.ignoreapperrors = False\n ## side effect for Script, userlibs, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, False, False, False] ) ):\n res = self.ddsim.runIt()\n self.assertIn( \"did not produce the expected log\", res['Message'] )", "def cleanup_test(self):\n test_variables = _get_test_variables()\n test_status = test_variables['${TEST STATUS}']\n if test_status == 'FAIL':\n # Test failed: keep directory for later inspection\n return\n\n # Test passed: remove the execution directory but preserve all\n # important log files, if any (valgrind, gcov, ...)\n\n if len(self._preserve_files) == 0:\n shutil.rmtree(self._execdir, True)\n return\n\n # Move all the files to preserve to a temporary directory\n\n backup_dir = self._execdir + '.preserve'\n os.makedirs(backup_dir)\n for file in self._preserve_files:\n shutil.move(file, backup_dir)\n\n # Delete the execution directory and rename the temporary directory\n\n shutil.rmtree(self._execdir, True)\n os.rename(backup_dir, self._execdir)", "def test_04_logs(self):\n\n file_name = 'train-test.log'\n request_json = {'file':'train-test.log'}\n r = requests.get('http://localhost:{}/logs/{}'.format(port,file_name))\n\n with open(file_name, 'wb') as f:\n f.write(r.content)\n \n self.assertTrue(os.path.exists(file_name))\n\n if os.path.exists(file_name):\n os.remove(file_name)", "def main():\n dir_path = '/home/ubuntu/test_files' # path for the log files that needs to be pruned\n stat_file_name = 'file_status_info' # temp file will be created to store the stat of each files to calculate when to delete\n \n # Get the list of all the files where we want to perfrom the delete operations\n file_list = get_list_of_files_in_dir(dir_path)\n\n # Get the current system date\n current_date = get_current_date()\n\n # Iterate through all the log, error, info files in the specified directory path and check for the criteria of file older than 5 days and delete.\n for fil in file_list:\n get_file_stat(dir_path, stat_file_name, fil)\n filename, file_date = get_file_last_modification_date(stat_file_name)\n\n print(\"*********** %s file stat is written **************\" % fil)\n days = abs(current_date - file_date).days\n \n # Check if the file modification date if older than 5 days.\n if days > 5:\n remove_files(os.path.join(dir_path, fil))\n else:\n print(\"No eligible file(s) found to be deleted\")", "def test_creation_logfile(self):\n log_file = os.path.join(DATA_DIR, 'sample_log.txt')\n manager = 
execution.LogManager('MainThread', log_file)\n LOGGER.debug('Log me!')\n manager.close()\n self.assertEqual(count_lines(log_file), 1)\n os.remove(log_file)", "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def tearDown(self):\n testing_dir = os.path.split(os.path.realpath(__file__))[0]\n for f in glob.glob(os.path.join(testing_dir, \"*\")):\n if f.split(\".\")[-1] in [\"o\", \"out\", \"pyc\", \"log\"]:\n subprocess.call(['rm', f])", "def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()", "def test_dry_run():\n config = get_config(\"delete.conf\")\n path = get_config_path(config)\n test_file = make_test_file(path)\n\n console.pushbroom(config, dry_run=True)\n assert test_file.exists()\n\n console.pushbroom(config)\n assert not test_file.exists()\n\n path.rmdir()", "def SetUnexpectedFailure(test_result):\n test_result['status'] = 'FAIL'\n test_result['expected'] = False\n logging.error('Processing failed for test %s', test_result['testPath'])", "def NOtearDown(self):\n\n for f in self.testoutput:\n if os.path.exists(f):\n os.remove(f)", "def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))", "def tearDown(self) -> None:\n\n logging.info(f\"{'=' * 20}Test completed!{'=' * 20}\")\n logging.info(\"Failed to execute the following parameter combinations: \")\n if self.error_params:\n for each in self.error_params:\n logging.info(each)", "def tearDown(self):\n if not self.test_manager.leave_output:\n shutil.rmtree(self.directory)", "def pytest_sessionfinish(session, exitstatus):\n if exitstatus == 0:\n shutil.rmtree(TMP_PATH)\n else:\n print(\"non-zero exit status, leaving for reference folder: {}\".format(TMP_PATH))", "def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n log.write(\"\\t=======================================================\")\n log.write(\"\\n\\tTest case ID: %s\" % (type(t).__name__))\n log.write(\"\\n\\tTest case Description: %s\" % (type(t).__doc__))\n log.write(\"\\n\\t=======================================================\\n\")\n log.write(t.log_to_file)\n log.write(\"\\n\\t=======================================================\")\n log.write(\"\\n\\t%s test result: %s\" % (type(t).__name__, t.result_grade))\n log.write(\"\\n\\tTotal test time: %s seconds\" % testtime)\n log.write(\"\\n\\t=======================================================\")", "def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n log.write(\"\\t=======================================================\")\n log.write(f\"\\n\\tTest case ID: {type(t).__name__}\")\n log.write(f\"\\n\\tTest case Description: {type(t).__doc__}\")\n log.write(\"\\n\\t=======================================================\\n\")\n log.write(t.log_to_file)\n 
log.write(\"\\n\\t=======================================================\")\n log.write(f\"\\n\\t{type(t).__name__} test result: {t.result_grade}\")\n log.write(f\"\\n\\tTotal test time: {testtime} seconds\")\n log.write(\"\\n\\t=======================================================\")", "def clean_exit(exitcode, logfile, dry=False):\n if not dry and logfile is not None:\n logfile.close()\n sys.exit(exitcode)", "def stopTestRun(self, test):\n\n self.runTime = time.time()-self.runTime\n self.logger.debug(\"\\nCompleted test suite\\n\")\n self.logger.info(\"\\n<run=%d errors=%d fail=%d in %.2fs>\\n\" % (self.testsRun,len(self.errors),len(self.failures), self.runTime))\n if self.wasSuccessful(): self.logger.info(\"All tests passed successfully\\n\")\n else:\n self.logger.info(\"Some tests failed or had errors!\\n\")\n sys.exit(1)" ]
[ "0.6096151", "0.5994757", "0.5989817", "0.59152883", "0.57627594", "0.57280034", "0.5577114", "0.5575816", "0.55489534", "0.55463153", "0.5500023", "0.5493735", "0.547919", "0.54714", "0.5455382", "0.5437092", "0.5426079", "0.5415492", "0.5404435", "0.5372536", "0.53498775", "0.53365374", "0.5319812", "0.53082144", "0.52945924", "0.5284392", "0.5277014", "0.52733344", "0.5272865", "0.5269764" ]
0.72162974
0
Create the log_files folder if it does not exist.
def __init_log_folder(): try: os.makedirs(Logger.__log_dir) except OSError as e: if e.errno != errno.EEXIST: raise e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])", "def _make_log_dir(self, path):\n\n try:\n os.makedirs('/'.join([self._logpath, path]))\n except OSError, e:\n # Return True if dir already exists\n if e.args[0] is 17:\n return\n\n # Some other error; raise exception\n raise e\n\n return", "def create_dirs():\n os.makedirs(ORIGINAL_LOG_DIR, exist_ok=True)", "def setup_log_dir():\n log_dir = get_log_dir()\n if log_dir.endswith('latest'):\n shutil.rmtree(log_dir, ignore_errors=True)\n mkdirs(log_dir)\n return log_dir", "def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()", "def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)", "def _initialize_log_file(config):\n for settings in config[\"handlers\"].values():\n if _is_file_handler(settings):\n log_path = Path(settings[\"filename\"])\n log_path.parent.mkdir(parents=True, exist_ok=True)\n log_path.touch(exist_ok=True)", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def _ensure_dirs(dirpath):\n if not os.path.isdir(dirpath):\n if os.path.exists(dirpath):\n err = \"log path ({}) exists but is not a directory\"\n raise ConfigError(err.format(dirpath))\n os.makedirs(dirpath, 0o777)", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def CreateLoggingDirectories(\n dataset_root: Path, model_name: str, analysis: str, run_id: str = None\n) -> Path:\n run_id = run_id or time.strftime(\"%y:%m:%dT%H:%M:%S\")\n log_dir = dataset_root / \"logs\" / model_name / analysis / run_id\n if log_dir.is_dir():\n raise OSError(\n f\"Logs directory already exists. 
Refusing to overwrite: {log_dir}\"\n )\n logging.info(\"Writing logs to %s\", log_dir)\n log_dir.mkdir(parents=True)\n (log_dir / \"epochs\").mkdir()\n (log_dir / \"checkpoints\").mkdir()\n (log_dir / \"graph_loader\").mkdir()\n return log_dir", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def archive_logs(self):\n source = GAConfig[\"log_file_location\"]\n destination = source + \"Archive/\"\n\n if not os.path.exists(source):\n os.makedirs(source)\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n if len(os.listdir(source)) > 1:\n specific_folder = destination + str(\n len(os.listdir(destination))) + '/'\n os.makedirs(specific_folder)\n for f in os.listdir(source):\n if((\".log\" in f) or (\".zip\" in f)):\n shutil.move(source + f, specific_folder)", "def _create_logdir(self, job_id):\n job_logdir = os.path.join(self.log_dir, self.alias, job_id)\n if not os.path.isdir(job_logdir):\n self.logger.debug(\"creating log directory '%s'\" % job_logdir)\n os.makedirs(job_logdir)\n\n return job_logdir", "def mkdir(path, logMsg=True):\n try:\n if(logMsg): \n log(f'Creating dir: {path}')\n os.mkdir(path)\n except OSError as ex:\n if ex.errno != 17:\n raise ex", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def setup_directories():\n run('mkdir -p %(path)s' % env)\n run('mkdir -p %(env_path)s' % env)\n run('mkdir -p %(log_path)s;' % env)\n sudo('chgrp -R www-data %(log_path)s; chmod -R g+w %(log_path)s;' % env)\n \n with settings(warn_only=True):\n run('ln -s %(log_path)s %(path)s/logs' % env)", "def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)", "def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)", "def _mkdir_if_not_exist(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n logger.warning(\n 'be happy if some process has already created {}'.format(\n path))\n else:\n raise OSError('Failed to mkdir {}'.format(path))", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n 
os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def make_dirs():\n global paths_made\n\n #Have we done this already? Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True", "def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)" ]
[ "0.823581", "0.78673404", "0.7663976", "0.7578191", "0.74438775", "0.7357689", "0.726618", "0.6999556", "0.69408005", "0.69014996", "0.6886535", "0.6809073", "0.67766184", "0.6761393", "0.6745055", "0.67383313", "0.6719447", "0.6610581", "0.66070086", "0.6602989", "0.6599631", "0.65901464", "0.6570739", "0.6557671", "0.65487766", "0.6509098", "0.6480493", "0.64689255", "0.6446554", "0.6446277" ]
0.80325365
1
rips should be a union of angle bins as a Dionysus simplicial complex. n is the number of data points in each bin. numbins is the number of bins of data points, EXCLUDING unions. Returns times, the zigzag birth and death times (list of lists, length n).
def build_zigzag_times(rips,n,numbins): times = [[] for x in range(0,rips.__len__())] i=0 for x in rips: dim = x.dimension() t = []; for k in range(0,dim+1): t.append(x[k]) xmin = math.floor(min(t)/n) xmax = math.floor(max(t)/n) if xmax == 0: bd = [0,1] elif xmin == numbins-1: bd = [2*xmin-1,2*xmin] elif xmax == xmin: bd = [2*xmin-1,2*xmin+1] elif xmax > xmin: bd = [2*xmax-1,2*xmax-1] else: print("Something has gone horribly wrong!") times[i] = bd i = i+1 return times
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def phase_bins(nbins, phase, x):\n\n min_phase, max_phase = 0, 1\n phase_bins = np.linspace(min_phase, max_phase, nbins + 1)\n x_binned, phase_binned = [], []\n x_means, Ns, per_bin_variances = [np.empty(nbins) for i in range(3)]\n for j in range(nbins):\n m = (phase_bins[j] < phase) * (phase < phase_bins[j + 1])\n Ns[j] = len(x[m])\n x_means[j] = np.mean(x[m])\n x_binned.append(x[m])\n phase_binned.append(phase[m])\n per_bin_variances[j] = sj2(x[m], x_means[j], Ns[j])\n\n return x_means, phase_bins, \\\n Ns, per_bin_variances, \\\n x_binned, phase_binned", "def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1", "def z_to_agebins_rescale(zstart, nbins_sfh=7, amin=7.1295):\n\n agelims = np.zeros(nbins_sfh+1)\n agelims[0] = cosmo.lookback_time(zstart).to(u.yr).value # shift the start of the agebin\n tuniv = cosmo.lookback_time(15).to(u.yr).value # cap at z~15, for the onset of star formation\n tbinmax = tuniv - (tuniv-agelims[0]) * 0.10\n agelims[-2] = tbinmax\n agelims[-1] = tuniv\n\n if zstart <= 3.0:\n agelims[1] = agelims[0] + 3e7 # 1st bin is 30 Myr wide\n agelims[2] = agelims[1] + 1e8 # 2nd bin is 100 Myr wide\n i_age = 3\n nbins = len(agelims)-3\n else:\n agelims[1] = agelims[0] + 10**amin\n i_age = 2\n nbins = len(agelims)-2\n\n if agelims[0] == 0:\n with np.errstate(invalid='ignore', divide='ignore'):\n agelims = np.log10(agelims[:i_age]).tolist()[:-1] + np.squeeze(np.linspace(np.log10(agelims[i_age-1]),np.log10(tbinmax),nbins)).tolist() + [np.log10(tuniv)]\n agelims[0] = 0\n\n else:\n agelims = np.log10(agelims[:i_age]).tolist()[:-1] + np.squeeze(np.linspace(np.log10(agelims[i_age-1]),np.log10(tbinmax),nbins)).tolist() + [np.log10(tuniv)]\n\n agebins = np.array([agelims[:-1], agelims[1:]]).T\n return 10**agebins", "def SFSchmidt10(jd,mag,errmag,nbin=0.1,bmin=5,bmax=2000):\n\n dtarray, dmagarray, sigmaarray = SFarray(jd,mag,errmag)\n ndt=np.where((dtarray<=365))\n dtarray=dtarray[ndt]\n dmagarray=dmagarray[ndt]\n sigmaarray=sigmaarray[ndt]\n\n bins=bincalc(nbin,bmin,bmax)\n #print(len(bins))\n\n\n sf_list=[]\n tau_list=[]\n numobj_list=[]\n\n for i in range(0,len(bins)-1):\n n=np.where((dtarray>=bins[i]) & (dtarray<bins[i+1]))\n nobjbin=len(n[0])\n if nobjbin>=6:\n 
dmag1=np.abs(dmagarray[n])\n derr1=np.sqrt(sigmaarray[n])\n sf=(np.sqrt(np.pi/2.0)*dmag1-derr1)\n sff=np.mean(sf)\n sf_list.append(sff)\n numobj_list.append(nobjbin)\n #central tau for the bin\n tau_list.append((bins[i]+bins[i+1])*0.5)\n\n\n SF=np.array(sf_list)\n nob=np.array(numobj_list)\n tau=np.array(tau_list)\n nn=np.where(nob>6)\n tau=tau[nn]\n SF=SF[nn]\n\n\n return (tau/365.,SF)", "def obj_s2n_z(s2n_dict, z_bins, flux_bins, otype, outfile=None, ax=None):\n logs = get_logger()\n nz = z_bins.size\n nfx = flux_bins.size\n s2n_sum = np.zeros((nz-1,nfx-1))\n s2n_N = np.zeros((nz-1,nfx-1)).astype(int)\n # Loop on exposures+wedges (can do just once if these are identical for each)\n for jj, wave in enumerate(s2n_dict['waves']):\n # Turn wave into z\n zELG = wave / 3728. - 1.\n z_i = np.digitize(zELG, z_bins) - 1\n m_i = np.digitize(s2n_dict['OII'][jj]*1e17, flux_bins) - 1\n mmm = []\n for ll in range(nfx-1): # Only need to do once\n mmm.append(m_i == ll)\n #\n for kk in range(nz-1):\n all_s2n = s2n_dict['s2n'][jj][:,z_i==kk]\n for ll in range(nfx-1):\n if np.any(mmm[ll]):\n s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])\n s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]\n\n sty_otype = get_sty_otype()\n\n # Plot\n if ax is None:\n fig = plt.figure(figsize=(6, 6.0))\n ax= plt.gca()\n # Title\n fig.suptitle('{:s}: Redshift Summary'.format(sty_otype[otype]['lbl']),\n fontsize='large')\n\n # Plot em up\n z_cen = (z_bins + np.roll(z_bins,-1))/2.\n lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]\n mxy = 1e-9\n for ss in range(nfx-1):\n if np.sum(s2n_N[:,ss]) == 0:\n continue\n lbl = 'OII(1e-17) = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])\n ax.plot(z_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],\n label=lbl, color=sty_otype[otype]['color'])\n mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))\n\n ax.set_xlabel('Redshift')\n ax.set_xlim(z_bins[0], z_bins[-1])\n ax.set_ylabel('Mean S/N per Ang in dz bins')\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_ylim(0.1, mxy*1.1)\n\n legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,\n handletextpad=0.3, fontsize='medium', numpoints=1)\n\n # Finish\n plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)\n plt.subplots_adjust(top=0.92)\n if outfile is not None:\n plt.savefig(outfile, dpi=600)\n print(\"Wrote: {:s}\".format(outfile))", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n 
assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def n_particles_bins(DG, bins=[0, 0.5, 3, 10, 100]):\n radii = fid.rss(DG.gas['Coordinates'][()])\n hist, bin_edges = np.histogram(radii, bins)\n\n return hist, bin_edges", "def uniform_binning(ts, bins):\n symb = np.asarray(bins * (ts - ts.min()) / (ts.max() - ts.min() + 1e-12), dtype=int)\n return symb", "def Rouwenhorst(rho, sig_z, num):\n import numpy as np\n p = (1+rho)/2.0\n q = (1+rho)/2.0\n psi = ((num-1)**0.5)*sig_z\n \n z = np.linspace(-psi, psi, num)\n \n T = np.array([[p, 1-p], [1-q, q]])\n \n if num == 2:\n return [z, T]\n elif num > 2:\n for i in range(3, num+1):\n\n # print(T) \n # print(np.zeros(i-1))\n # print(np.zeros(i))\n #print(p*np.vstack((np.c_[T, np.zeros(i-1)], np.zeros(i))))\n #print((1-p)*np.vstack((np.c_[np.zeros(i-1), T], np.zeros(i))))\n #print((1-q)*np.vstack((np.zeros(i), np.c_[T, np.zeros(i-1)])))\n #print(q*np.vstack((np.zeros(i), np.c_[np.zeros(i-1), T])))\n \n \n T = p*np.vstack((np.c_[T, np.zeros(i-1)], np.zeros(i))) + (1-p)*np.vstack((np.c_[np.zeros(i-1), T], np.zeros(i))) + (1-q)*np.vstack((np.zeros(i), np.c_[T, np.zeros(i-1)])) + q*np.vstack((np.zeros(i), np.c_[np.zeros(i-1), T]))\n \n for i in range(num):\n #print(T[i,:])\n T[i,:] = T[i,:] / np.sum(T[i,:])\n T[i,:] = T[i,:] / np.sum(T[i,:])#I need this part to normalize rigorously\n #print(\"{0:.20f}\".format(sum(T[i,:])))\n \n return [z, T]\n else:\n print(\"Error: the number of discretization must be larger than 1.\")\n return None", "def bin_definition(n_bins_gammaness, n_bins_theta2):\n max_gam = 0.9\n max_th2 = 0.05 * u.deg * u.deg\n min_th2 = 0.005 * u.deg * u.deg\n\n gammaness_bins = np.linspace(0, max_gam, n_bins_gammaness)\n theta2_bins = np.linspace(min_th2, max_th2, n_bins_theta2)\n\n return gammaness_bins, theta2_bins", "def gen_muons(n_muons: int):\r\n thetas = np.random.uniform(-90,90, size=n_muons)\r\n heights = gen_heights(n_muons)\r\n pathlengths_flat = gen_pathLengthFlat(thetas, heights).reshape(n_muons,1)\r\n pathlengths_round = gen_pathLength(thetas, heights, R_earth_SB).reshape(n_muons,1)\r\n\r\n heights = heights.reshape(n_muons,1)\r\n energies = gen_energies(n_muons).reshape(n_muons,1)\r\n thetas = thetas.reshape(n_muons,1)\r\n\r\n muons = np.concatenate((np.abs(pathlengths_flat * 1000), np.abs(pathlengths_round * 1000), energies * 1000, thetas), axis=1)\r\n return muons", "def mi_bin_ccd_time(x, y, z, bins):\n n_times, n_trials = x.shape\n mi = np.zeros((n_times), dtype=np.float32)\n for t in range(n_times):\n mi[t] = mi_bin_ccd(x[t, :], y, z, bins)\n return mi", "def time_surrogate_for_mi(self, bins=16, sample_range=100, tau_max=1,\n lag_mode='all'):\n\n if bins < 255:\n dtype = 'uint8'\n else:\n dtype = 'int16'\n\n perm = numpy.random.permutation(\n range(tau_max, self.total_time - tau_max))[:sample_range]\n\n # get the bin quantile steps\n bin_edge = numpy.ceil(sample_range/float(bins))\n\n symbolic_array = numpy.empty((2*tau_max + 1, self.N, sample_range),\n dtype=dtype)\n\n for t in range(2*tau_max + 1):\n tau = t - tau_max\n\n array = self.dataarray[:, perm + tau]\n\n # get the lower edges of the bins for every time series\n edges = numpy.sort(array, axis=1)[:, ::bin_edge]\n bins = edges.shape[1]\n\n # This gives the symbolic time series\n symbolic_array[t] = \\\n (array.reshape(self.N, sample_range, 1)\n >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1\n\n return self._calculate_mi(symbolic_array, 
corr_range=sample_range,\n bins=bins, tau_max=tau_max,\n lag_mode=lag_mode)", "def bininator(magbins, dlt, mags, err):\n meds = np.zeros_like(magbins)\n for i in range(len(magbins)):\n ind = (mags > magbins[i] - dlt) & (mags <= magbins[i] + dlt)\n ind = ind & (err < 10.)\n meds[i] = np.median(err[ind])\n return meds", "def autobin_stats(x,y,n_bins=8,stat='average',n_points=None):\n \n if not ascend(x):\n ix=argsort(x)\n x=take(x,ix)\n y=take(y,ix)\n n=len(x)\n if n_points==None: \n #This throws out some points\n n_points=n/n_bins\n else: \n n_bins=n/n_points\n #if there are more that 2 points in the last bin, add another bin\n if n%n_points>2: n_bins=n_bins+1\n \n if n_points<=1:\n print('Only 1 or less points per bin, output will be sorted input vector with rms==y')\n return x,y\n xb,yb=[],[]\n \n #print 'stat', stat\n if stat=='average' or stat=='mean': func=mean\n elif stat=='median': func=median\n elif stat=='rms' or stat=='std' : func=std\n elif stat=='std_robust' or stat=='rms_robust': func=std_robust\n elif stat=='mean_robust': func=mean_robust\n elif stat=='median_robust': func=median_robust\n elif stat=='p2p': func=p2p # --DC\n elif stat=='min': func=min # --DC\n elif stat=='max': func=max # --DC\n \n for i in range(n_bins):\n xb.append(mean(x[i*n_points:(i+1)*n_points]))\n if func==std and n_points==2:\n print('n_points==2; too few points to determine rms')\n print('Returning abs(y1-y2)/2. in each bin as rms')\n yb.append(abs(y[i*n_points]-y[i*n_points+1])/2.)\n else:\n yb.append(func(y[i*n_points:(i+1)*n_points]))\n if i>2 and xb[-1]==xb[-2]: \n yb[-2]=(yb[-2]+yb[-1])/2.\n xb=xb[:-1]\n yb=yb[:-1]\n return array(xb),array(yb)", "def G_bin2d(self, mgs, mnus, g_corr, bins, resample=1, ig_nums=0):\n # half of the bin number\n ny, nx = int((len(bins[0]) - 1)/2), int((len(bins[1]) - 1)/2)\n chi_sq = 0\n mu = [0, 0]\n cov = [[abs(2*g_corr), g_corr],\n [g_corr, abs(2*g_corr)]]\n data_len = len(mgs[0])\n for i in range(resample):\n\n g_distri = numpy.random.multivariate_normal(mu,cov,data_len)\n\n mg1 = mgs[0] - mnus[0]*g_distri[:,0]\n mg2 = mgs[1] - mnus[1]*g_distri[:,1]\n num_arr = numpy.histogram2d(mg1, mg2, bins)[0]\n # | arr_1 | arr_2 |\n # | arr_3 | arr_4 |\n # chi square = 0.2*SUM[(arr_2 + arr_3 - arr_1 - arr_4)**2/(arr_1 + arr_2 + arr_3 + arr_4)]\n arr_1 = num_arr[0:ny, 0:nx][:,range(ny-1,-1,-1)]\n arr_2 = num_arr[0:ny, nx:2*nx]\n arr_3 = num_arr[ny:2*ny, 0:nx][range(ny-1,-1,-1)][:,range(nx-1,-1,-1)]\n arr_4 = num_arr[ny:2*ny, nx:2*nx][range(ny-1,-1,-1)]\n chi_sq += 0.5 * numpy.sum(((arr_2 + arr_3 - arr_1 - arr_4) ** 2) / (arr_1 + arr_2 + arr_3 + arr_4))\n\n return chi_sq/resample", "def shuffled_surrogate_for_mi(self, fourier=False, bins=16, tau_max=0,\n lag_mode='all'):\n if bins < 255:\n dtype = 'uint8'\n else:\n dtype = 'int16'\n\n # Normalize anomaly time series to zero mean and unit variance for all\n # lags, array contains normalizations for all lags\n corr_range = self.total_time - 2*tau_max\n\n # Shuffle a copy of dataarray seperatly for each node\n array = numpy.copy(self.dataarray)\n if fourier:\n array = self.correlatedNoiseSurrogates(array)\n else:\n for i in range(self.N):\n numpy.random.shuffle(array[i])\n\n # get the bin quantile steps\n bin_edge = numpy.ceil(corr_range/float(bins))\n\n symbolic_array = numpy.empty((1, self.N, corr_range), dtype=dtype)\n\n array = array[:, :corr_range]\n\n # get the lower edges of the bins for every time series\n edges = numpy.sort(array, axis=1)[:, ::bin_edge]\n bins = edges.shape[1]\n\n # This gives the symbolic time series\n 
symbolic_array[0] = \\\n (array.reshape(self.N, corr_range, 1)\n >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1\n\n res = self._calculate_mi(symbolic_array, corr_range=corr_range,\n bins=bins, tau_max=0, lag_mode='all')\n\n if lag_mode == 'all':\n corrmat = numpy.repeat(res, 2*tau_max + 1, axis=0)\n elif lag_mode == 'sum':\n corrmat = numpy.array([res[0], res[0]]) * (tau_max+1.)\n elif lag_mode == 'max':\n corrmat = numpy.array(\n [res[0], numpy.random.randint(-tau_max, tau_max+1,\n (self.N, self.N))])\n\n return corrmat", "def get_srr_bins(p_data):\n \n n_data = len(p_data)\n \n n_bins = np.sqrt(n_data)\n \n return int(n_bins)", "def create_bins(start, end, n_bins):\n bins = np.linspace(start, end, n_bins)\n return bins", "def make_obs_phase_plot(data_file, period, ref_mjd=58369.30, nbins=40, save=False,\n show=False, log=False, min_freq=200, max_freq=2500):\n\n burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)\n\n bursts = []\n for k in burst_dict.keys():\n bursts = bursts + burst_dict[k]\n\n obs_duration = []\n for k in obs_duration_dict.keys():\n obs_duration = obs_duration + obs_duration_dict[k]\n\n obs_startmjds = []\n for k in obs_startmjds_dict.keys():\n obs_startmjds = obs_startmjds + obs_startmjds_dict[k]\n\n assert len(obs_startmjds) == len(obs_duration)\n\n bursts = np.array(bursts)\n obs_duration = np.array(obs_duration)\n obs_startmjds = np.array(obs_startmjds)\n\n obs_start_phases = get_phase(obs_startmjds, period, ref_mjd=ref_mjd)\n hist, bin_edges_obs = np.histogram(obs_start_phases, bins=nbins)\n\n obs_start_phases_dict = {}\n duration_per_phase_dict = {}\n burst_per_phase_dict = {}\n duration_per_phase_tot = np.empty(nbins)\n for k in obs_startmjds_dict.keys():\n obs_start_phases_dict[k] = get_phase(np.array(obs_startmjds_dict[k]),\n period, ref_mjd=ref_mjd)\n durations = np.array(obs_duration_dict[k])\n start_phases = obs_start_phases_dict[k]\n\n d_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n dur = durations[(start_phases < bin_edges_obs[i]) &\n (start_phases > bin_edges_obs[i-1])].sum()\n d_hist.append(dur)\n duration_per_phase_tot[i-1] += dur\n duration_per_phase_dict[k] = np.array(d_hist)\n\n obs_duration = np.array(obs_duration)\n duration_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n duration_hist.append(\n obs_duration[(obs_start_phases < bin_edges_obs[i]) &\n (obs_start_phases > bin_edges_obs[i-1])].sum())\n\n duration_hist = np.array(duration_hist)\n bin_mids = (bin_edges_obs[:-1] + bin_edges_obs[1:])/2\n phase_lst = []\n for i,k in enumerate(burst_dict.keys()):\n print(\"phase list\", k, len(burst_dict[k]))\n phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,\n ref_mjd=ref_mjd)))\n burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],\n bins=nbins, range=(0,1))\n\n phase_tot = [p for l in phase_lst for p in l]\n phase_tot.sort()\n burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))\n\n # PRINTING AVERAGE RATE PER INSTRUMENT\n for i,k in enumerate(burst_dict.keys()):\n tobs = np.sum(obs_duration_dict[k])\n nbursts = len(burst_dict[k])\n rate = nbursts / tobs\n print(\"Average rate {}: {:.3f} / h\".format(k, rate))\n\n # off = np.where(burst_per_phase_dict[k] == 0)[0]\n # on = np.where(burst_per_phase_dict[k] > 0)[0]\n # print(\"Hours Apertif observed TOTAL: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k])))\n # print(\"Hours Apertif observed during on phase: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k][on])))\n # 
print(\"Hours Apertif observed during off phase: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k][off])))\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n\n burst_hist_colors = []\n obs_hist_colors = {}\n if 'uGMRT650' in obs_duration_dict.keys():\n fcen_dict['uGMRT650'] = 1000\n for i,k in enumerate(obs_duration_dict.keys()):\n freq = np.log10(fcen_dict[k])\n col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n color = cm(col)\n print(k, mpl.colors.to_hex(color))\n if k in burst_dict.keys():\n burst_hist_colors.append(color)\n obs_hist_colors[k] = color\n rate_colors = {\n 'high': cm((np.log10(max_freq)-np.log10(1800))/(np.log10(max_freq)-np.log10(min_freq))),\n 'middle': cm((np.log10(max_freq)-np.log10(500))/(np.log10(max_freq)-np.log10(min_freq))),\n 'low': cm((np.log10(max_freq)-np.log10(300))/(np.log10(max_freq)-np.log10(min_freq)))\n }\n if 'uGMRT650' in obs_duration_dict.keys():\n fcen_dict['uGMRT650'] = 650\n\n # PLOTTING\n fig, ax = plt.subplots(2, 1, sharex=True, figsize=(9,7),\n gridspec_kw={'height_ratios': [1,1]})\n ax1 = ax[0]\n yhist,xhist,_ = ax1.hist(phase_lst, bins=bin_edges_obs, stacked=True,\n density=False, label=burst_dict.keys(),\n edgecolor='black', linewidth=0.5, color=burst_hist_colors)\n\n ax1.set_ylabel('N. Bursts')\n ax1.set_xlim(0,1)\n print(\"YLIM\", 0, int(yhist[-1].max()*1.1))\n ax1.set_ylim(0, max(int(yhist[-1].max()*1.1), 4))\n ax1.legend(loc=2)\n ax1.text(-0.07, 0.95, \"a\", transform=ax1.transAxes, weight='bold')\n\n ax2 = ax[1]\n cum_ds = np.zeros(nbins)\n for i, k in enumerate(duration_per_phase_dict):\n d = duration_per_phase_dict[k]\n ax2.bar(bin_edges_obs[:-1], d, width=bin_edges_obs[1]-bin_edges_obs[0],\n align='edge', bottom=cum_ds, alpha=1,\n label=\"{} {:d} MHz\".format(k, int(fcen_dict[k])),\n edgecolor='black', linewidth=0.2, color=obs_hist_colors[k])\n cum_ds += d\n ax2.set_xlabel('Phase')\n ax2.set_ylabel('Obs. 
Duration (h)')\n ax2.legend(loc=2)\n ax2.text(-0.07, 0.95, \"b\", transform=ax2.transAxes, weight='bold')\n plt.tight_layout()\n\n if save:\n print('Plot saved: ./burst_obs_phase_hist.png')\n plt.savefig('./burst_obs_phase_hist.png', pad_inches=0,\n bbox_inches='tight', dpi=200)\n plt.savefig('./burst_obs_phase_hist.pdf', pad_inches=0,\n bbox_inches='tight', dpi=200)\n if show:\n plt.show()\n\n # SAVING COUNTS, OBS_DURATION AND PHASE BIN\n if log:\n print(\"Writing log\")\n dir_out = '/home/ines/Documents/projects/R3/periodicity/burst_phases/'\n with open(dir_out+'counts_per_phase_p{:.2f}.txt'.format(period), 'w') as f:\n f.write(\"# phase_bin counts chime_counts arts_counts lofar_counts obs_duration chime_duration arts_duration lofar_duration\\n\")\n for i in range(nbins):\n f.write(\"{:.3f} {} {} {} {} {:.3f} {:.3f} {:.3f} {:.3f}\\n\".format(\n bin_mids[i], burst_tot[i],\n burst_per_phase_dict[\"CHIME/FRB\"][i],\n burst_per_phase_dict[\"Apertif\"][i],\n burst_per_phase_dict[\"LOFAR\"][i],\n duration_per_phase_tot[i],\n duration_per_phase_dict[\"CHIME/FRB\"][i],\n duration_per_phase_dict[\"Apertif\"][i],\n duration_per_phase_dict[\"LOFAR\"][i]))\n for i,k in enumerate(burst_dict.keys()):\n if k == \"CHIME/FRB\":\n inst = k.replace(\"/FRB\", \"\")\n else:\n inst = k\n np.save(dir_out + 'phase_{}_p{:.2f}_f{:.1f}'.format(inst, period,\n fcen_dict[k]), [burst_dict[k], phase_lst[i]])", "def bin_by_npixels(self, npix):\n\n disp = self.dispersion\n dbins = disp[1:] - disp[:-1]\n bin_boundary = disp[:-1] + 0.5 * dbins\n\n lbins = bin_boundary[:-1]\n rbins = bin_boundary[1:]\n mbins = disp[1:-1]\n dbins = rbins - lbins\n flux = self.flux[1:-1]\n flux_err = self.flux_err[1:-1]\n num_bins = len(mbins)\n\n num_new_bins = int((num_bins - (num_bins % npix)) / npix)\n\n new_wave = np.zeros(num_new_bins)\n new_flux = np.zeros(num_new_bins)\n new_flux_err = np.zeros(num_new_bins)\n\n for idx in range(num_new_bins):\n\n _new_flux = 0\n _new_flux_err = 0\n _new_dbin = 0\n\n for jdx in range(npix):\n _new_flux += flux[idx * npix + jdx] * dbins[idx * npix + jdx]\n _new_dbin += dbins[idx * npix + jdx]\n _new_flux_err += (flux_err[idx * npix + jdx] * dbins[\n idx * npix + jdx]) ** 2\n\n rbin = rbins[npix * idx + npix - 1]\n lbin = lbins[npix * idx]\n _new_wave = (rbin - lbin) * 0.5 + lbin\n\n new_wave[idx] = _new_wave\n new_flux[idx] = _new_flux / _new_dbin\n new_flux_err[idx] = np.sqrt(_new_flux_err) / _new_dbin\n\n return SpecOneD(dispersion=new_wave, flux=new_flux,\n flux_err=new_flux_err, unit='f_lam')", "def radprojsim_bin(B0, B1, n_samples=1000000, n_iter=1, dz=None,\n gaussian_sigma=None):\n global r_edges \n global n_bins\n\n result = np.zeros(n_bins, dtype=np.float64)\n\n for iter_idx in range(n_iter):\n\n # Simulate the radial displacements using inverse CDF sampling.\n # These are selected with uniform probability from a thin spherical\n # shell\n r = np.cbrt(B0**3 + (B1**3 - B0**3) * np.random.random(size=n_samples))\n\n # Simulate angular displacements by sampling on the surface of \n # the unit sphere\n a = np.random.normal(size=(n_samples, 3))\n a = (a.T / np.sqrt((a**2).sum(axis=1))).T \n\n # Combine radial and angular parts \n a = (a.T * r).T \n\n # If desired, simulate a finite range of observation in z\n if not dz is None:\n hz = dz * 0.5\n\n # Uniform probability of detection in *z*\n if gaussian_sigma is None:\n a[:,0] = a[:,0] + np.random.uniform(-hz, hz, size=n_samples)\n a = a[np.abs(a[:,0])<=hz, :]\n\n # Gaussian probability of detection in *z*\n else:\n start = 
np.random.normal(scale=gaussian_sigma, size=n_samples)\n outside = np.abs(start) > hz\n while outside.any():\n start[outside] = np.random.normal(\n scale=gaussian_sigma, size=outside.sum())\n outside = np.abs(start) > hz\n a[:,0] = a[:,0] + start\n\n # Take the XY displacements \n r = np.sqrt((a[:,1:]**2).sum(axis=1))\n H = np.histogram(r, bins=r_edges)[0].astype(np.float64)\n result += H \n\n result /= (n_iter * n_samples)\n return result", "def num_55():\n import itertools as IT\n axis=0\n cs = [-1, 0, 1]\n bins = [-1, 0, 1, 2]\n n = len(cs)\n a = np.array([i for i in IT.combinations_with_replacement(cs, n)])\n r = np.vstack([np.histogram(a[i], bins)[0] for i in range(len(a))])\n r_t = np.vstack([np.histogram(a.T[i], bins)[0] for i in range(len(a.T))])\n frmt = \"\"\"\n {}\n :classes: {}\n :values (a):\n {}\n :frequency for 'a' by row, axis=0\n {}\n :values (a_t)\n {}\n :frequency for 'r_t', by col, axis=1\n : Note... a.T = a_t\n : transform, a from axis 0 to axis 1 orientation\n {}\n \"\"\"\n p = \" . \"\n args = [num_55.__doc__, cs, \n indent(str(a), prefix=p),\n indent(str(r), prefix=p),\n a.T, r_t]\n print(dedent(frmt).format(*args))\n return a, r, r.T", "def get_l_n_u_inegral(ppc, lower_bound, upper_bound, Nhrs=2):\n \"\"\"either lower and upper bound must be positive\"\"\"\n gens_hrs = ppc['gen'][:, 0]\n gens_hrs = np.sort(gens_hrs)\n \n n_buses = set_n_buses(ppc, Nhrs)\n n_gens = len(gens_hrs) // 2 \n l = np.zeros(n_buses)\n u = np.zeros(n_buses)\n for i in range(len(l)):\n if (i+1) in gens_hrs:\n l[i] = lower_bound\n u[i] = upper_bound\n else:\n l[i] = -np.inf\n u[i] = np.inf\n return l, u", "def generate_binned_values( lower_lim, upper_lim, chr_length, snps_per_chr, indels_per_chr, resolution ):\n\t\n\tsnp_data = []\n\tindel_data = []\n\twhile True:\n\t\tif upper_lim >= chr_length:\n\t\t\tbreak\n\t\telse:\n\t\t\tsnp_tmp = []\n\t\t\tindel_tmp = []\n\t\t\tfor SNP in snps_per_chr:\n\t\t\t\tif SNP <= upper_lim and SNP > lower_lim:\n\t\t\t\t\tsnp_tmp.append( 'X' )\n\t\t\tfor indel in indels_per_chr:\n\t\t\t\tif indel <= upper_lim and indel > lower_lim:\n\t\t\t\t\tindel_tmp.append( 'X' )\n\t\t\tsnp_data.append( len( snp_tmp ) )\n\t\t\tindel_data.append( len( indel_tmp ) )\n\t\tupper_lim += resolution\n\t\tlower_lim += resolution\n\treturn max( snp_data ), max( indel_data ), snp_data, indel_data", "def get_times_list(binout):\r\n return sorted([float(\"{0:15.6f}\".format(t)) for t in\r\n binout.recordarray[\"totim\"]])", "def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = 
(x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges", "def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count", "def count_r_bins(self, rmax, Nr, zmin=None, rmin=0., zmax=None, plotfig=False):\n rArr = np.mgrid[rmin:rmax:Nr*1j]\n if zmin != None:\n if zmax == None: zmax = zmin + 10.\n ind = (self.z >= zmin)*(self.z <= zmax)\n xin = self.x[ind]; yin = self.y[ind]; zin = self.z[ind]\n else:\n xin = self.x.copy();yin = self.y.copy();zin = self.z.copy()\n R = np.sqrt(xin**2+yin**2)\n self.RR = R\n self.rbins = np.zeros(rArr.size-1)\n for ir in xrange(Nr-1):\n r0 = rArr[ir]; r1 = rArr[ir+1]\n print r0, r1\n N = np.where((R>=r0)*(R<r1))[0].size\n self.rbins[ir] = N#/np.pi/(r1**2-r0**2)\n self.rArr = rArr[:-1]\n if plotfig:\n plt.plot(self.rArr, self.rbins, 'o', ms=3)\n plt.show()\n self.area = np.pi*((rArr[1:])**2-(rArr[:-1])**2)\n self.rbins_norm = self.rbins / self.area\n return", "def get_overlap_values(self, cbins, rbins, thbins):\n\n\t\tdr = (cbins - 0.5) / rbins\n\t\tdth = (pi / 2) / thbins\n\t\tthbins_reduced = int(ceil(thbins / 2))\n\n\t\tdef overlap_value(x, y, r, th):\n\t\t\t\"\"\"\n\t\t\tFind the overlap area between a cartesian and a polar bin.\n\t\t\t\"\"\"\n\n\t\t\tthmin = max(th - dth/2, atan2(y - 0.5, x + 0.5))\n\t\t\tthmax = min(th + dth/2, atan2(y + 0.5, x - 0.5))\n\n\t\t\trin = lambda theta: maximum(r - dr/2, maximum((x - 0.5) / npcos(theta), (y - 0.5) / npsin(theta)))\n\t\t\trout = lambda theta: minimum(r + dr/2, minimum((x + 0.5) / npcos(theta), (y + 0.5) / npsin(theta)))\n\n\t\t\tintegrand = lambda theta: maximum(rout(theta)**2 - rin(theta)**2, 0)\n\n\t\t\treturn 0.5 * quad(integrand, thmin, thmax)[0]\n\n\t\texpected = int(pi*rbins**2)\n\t\trs = empty(expected, dtype=int)\n\t\tths = empty(expected, dtype=int)\n\t\txs = empty(expected, dtype=int)\n\t\tys = empty(expected, dtype=int)\n\t\tvals = empty(expected, dtype=float)\n\t\tfound = 0\n\n\t\tfor thi in arange(thbins_reduced):\n\t\t\tth = (thi + 0.5) * dth\n\t\t\tfor ri in arange(rbins):\n\t\t\t\tr = (ri + 0.5) * dr\n\t\t\t\tfor x in arange(round((r - dr/2) * cos(th + dth/2)), min(cbins, round((r + dr/2) * cos(th - dth/2)) + 1)):\n\t\t\t\t\tfor y in arange(round((r - dr/2) * sin(th - dth/2)), min(cbins, round((r + dr/2) * sin(th + dth/2)) 
+ 1)):\n\t\t\t\t\t\tif ((x - 0.5)**2 + (y - 0.5)**2 < (r + dr/2)**2) and \\\n\t\t\t\t\t\t ((x + 0.5)**2 + (y + 0.5)**2 > (r - dr/2)**2) and \\\n\t\t\t\t\t\t (atan2(y + 0.5, x - 0.5) > th - dth/2) and \\\n\t\t\t\t\t\t (atan2(y - 0.5, x + 0.5) < th + dth/2):\n\t\t\t\t\t\t area = overlap_value(x, y, r, th)\n\t\t\t\t\t\t if area > 0:\n\t\t\t\t\t\t \trs[found] = ri\n\t\t\t\t\t\t \tths[found] = thi\n\t\t\t\t\t\t \txs[found] = x\n\t\t\t\t\t\t \tys[found] = y\n\t\t\t\t\t\t \tvals[found] = area\n\t\t\t\t\t\t \tfound+=1\n\n\t\treturn rs[:found], ths[:found], xs[:found], ys[:found], vals[:found]" ]
[ "0.51185775", "0.505989", "0.50545937", "0.504589", "0.49802086", "0.4979069", "0.49762407", "0.49633297", "0.49416855", "0.49260277", "0.49035257", "0.48899776", "0.48686045", "0.4860368", "0.4850373", "0.48298892", "0.48205316", "0.48146975", "0.48073372", "0.47839314", "0.47372994", "0.47366685", "0.4729683", "0.47216752", "0.47174042", "0.47137654", "0.47016948", "0.46979016", "0.46877655", "0.46858013" ]
0.69338244
0
Import a population seed from a pickled AEGIS object.
def get_seed(self, seed_path):
        try:
            infile = open(seed_path, "rb")
            obj = pickle.load(infile)
            if not isinstance(obj, Population):
                s = "Seed import failed: {} does not hold a Population object.".format(seed_path)
                self.abort(TypeError, s)
            infile.close()
            return obj
        except IOError:
            s = "Seed import failed: no file or directory under {}".format(seed_path)
            self.abort(IOError, s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_data(seed: object = 42) -> object:\n\n # Read input data\n df = pd.read_csv(\"x_train_gr_smpl.csv\").astype(int)\n\n # label data-frame rows based on sample data\n for x in range(10):\n index = ~pd.read_csv(\"y_train_smpl_%s.csv\" % x, squeeze=True).astype(bool) # reversed flags (~)\n df.loc[index, 'label'] = str(x)\n\n input_data_ordered = df.iloc[:, 0:2304].to_numpy()\n output_data_ordered = df.iloc[:, 2304].to_numpy()\n\n # Randomise instance order (forcing the same result each time)\n np.random.seed(seed)\n permutation = np.random.permutation(df.shape[0])\n\n # Create base input and output arrays\n input_data = input_data_ordered[permutation]\n output_data = output_data_ordered[permutation]\n\n return input_data, output_data, df, input_data_ordered, output_data_ordered", "def load_individual_species():\n\n print (\"individual species\")\n\n SpeciesIndividual.query.delete()\n\n with open(\"seed_data/species_seed.psv\") as species:\n for row in species:\n species_name, group_id = row.strip().split(\"|\")\n\n species = SpeciesIndividual(species_name=species_name,\n species_group_id=group_id)\n\n db.session.add(species)\n\n db.session.commit()", "def get_seed_all(self, seed_path):\n self.logprint(\"Reading seed population from ./{}\".format(seed_path))\n if seed_path.endswith(\".pop\"):\n self.logprint(\"Import succeeded.\")\n return [self.get_seed(seed_path)]\n elif os.path.isdir(seed_path):\n pop_files = [f for f in os.listdir(seed_path) if f.endswith(\".pop\")]\n if len(pop_files) != self.conf[\"n_runs\"]:\n s = \"Number of seed files does not equal to number of runs.\\nTried getting seeds from: ./{}.\".format(seed_path)\n self.abort(ImportError, s)\n pop_files.sort()\n seeds = [self.get_seed(os.path.join(seed_path,l)) for l in pop_files]\n self.logprint(\"Import succeeded.\")\n return seeds\n # Otherwise abort\n s = \"Seed path must point to a *.pop file or a directory containing *.pop files.\"\n self.abort(ImportError, s)", "def test_seed_initial_population():\n # Test if a initial population can be read in from CSV\n i_population = owp.seed_initial_population('initial_populations.csv')\n # Test if a new population can be generated with i_population as the base\n pop_size = 30\n population = sga.generate_population(pop_size, i_population)\n assert type(i_population) is list\n assert len(population) == pop_size", "def load_population(self):\r\n checkpoint = load_pickle('spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name)\r\n logging.info('Resuming CMA-ES evolution using checkpoint ' + self.checkpoint_name)\r\n key = tuple(self.populations.keys())[0]\r\n for key, pop in checkpoint['populations'].items():\r\n self.populations[key].strategy_m = checkpoint['mu'][key]\r\n self.populations[key].strategy_C = checkpoint['C'][key]\r\n self.populations[key].cc = checkpoint['cc'][key]\r\n self.populations[key].cs = checkpoint['cs'][key]\r\n self.populations[key].mu_cov = checkpoint['mu_cov'][key]\r\n self.populations[key].c_cov = checkpoint['c_cov'][key]\r\n self.populations[key].ds = checkpoint['ds'][key]\r\n self.populations[key].evo_path = checkpoint['evo_path'][key]\r\n self.populations[key].ps = checkpoint['ps'][key]\r\n self.populations[key].B = checkpoint['B'][key]\r\n self.populations[key].Bt = checkpoint['Bt'][key]\r\n self.populations[key].D = checkpoint['D'][key]\r\n self.populations[key].sigma = checkpoint['sigma'][key]\r\n self.populations[key].num_evals = checkpoint['num_evals'][key]\r\n self.populations[key].population = 
self.populations[key].sample()\r\n self.init_generation = checkpoint['generation']\r\n self.evolution_history = checkpoint['evolution_hist']", "def load_pickle(path: Path):\n # Before investing significant time processing, ensure server is up\n with get_session() as sess:\n _b = sess.execute(sa.select(sch.Batch).limit(1))\n\n data = pickle.load(open(path, 'rb'))\n required_cols = ['from_name', 'from_email', 'raw_from_string',\n 'to_name', 'to_email', 'raw_to_string',\n 'cc_name', 'cc_email', 'raw_cc_string',\n 'subject',\n 'date', 'raw_date_string',\n 'message_id',\n 'in_reply_to', 'refs',\n 'body_text', 'flagged_abuse',\n 'filename',\n # time_stamp is when it was imported... not important for us.\n #'time_stamp',\n ]\n # Will raise error if any columns not found\n data = data[required_cols]\n\n entities = collections.defaultdict(lambda: {})\n flush_count = [0]\n with get_session() as sess:\n clean_for_ingest(sess)\n\n resource = f'ocean-{os.path.basename(path)}'\n sch.Batch.cls_reset_resource(resource, session=sess)\n\n batch = sch.Batch(resource=resource)\n sess.add(batch)\n\n # The `db_get_*()` functions return the id for the chosen Entity, as an\n # optimization\n def db_get_message(id):\n rid = entities['message'].get(id)\n if rid is None:\n rid = sch.Entity(name=f'Message {id}',\n type=sch.EntityTypeEnum.message, attrs={},\n batch=batch)\n sess.add(rid)\n flush_count[0] += 1\n entities['message'][id] = rid\n return rid\n def db_get_user(name, email):\n id = f'{name} <{email}>'\n rid = entities['user'].get(id)\n if rid is None:\n rid = sch.Entity(name=id, type=sch.EntityTypeEnum.person,\n batch=batch,\n attrs={\n 'name': name,\n 'email': email,\n })\n sess.add(rid)\n flush_count[0] += 1\n entities['user'][id] = rid\n return rid\n\n for m_idx, m in tqdm.tqdm(data.iterrows(), desc='importing messages',\n total=len(data)):\n\n # No date --> useless\n if m['raw_date_string'] is None:\n continue\n\n def user_resolve(prefix):\n if m[f'{prefix}_name'] is None:\n return None\n name = m[f'{prefix}_name']\n email = m[f'{prefix}_email']\n return db_get_user(name, email)\n frm = user_resolve('from')\n to = user_resolve('to')\n cc = user_resolve('cc')\n\n try:\n message_time = date_field_resolve(m['date'], m['raw_date_string'])\n except:\n raise ValueError(f\"Bad date: {m['message_id']} {m['date']} {m['raw_date_string']}\")\n\n def fixnull(v):\n \"Some ocean data has \\x00 bytes... 
remove those\"\n if not isinstance(v, str):\n return v\n return v.replace('\\x00', '<NULL>')\n message = db_get_message(m['message_id'])\n message.attrs.update({\n 'origin_filename': fixnull(m['filename']),\n 'subject': fixnull(m['subject']),\n 'body_text': fixnull(m['body_text']),\n 'flagged_abuse': m['flagged_abuse'],\n 'time': message_time.timestamp(), # float for JSON\n })\n\n if frm is not None:\n message.obs_as_dst.append(sch.Observation(src=frm, batch=batch,\n type=sch.ObservationTypeEnum.message_from,\n time=message_time))\n if to is not None:\n message.obs_as_src.append(sch.Observation(dst=to, batch=batch,\n type=sch.ObservationTypeEnum.message_to,\n time=message_time))\n if cc is not None:\n message.obs_as_src.append(sch.Observation(dst=cc, batch=batch,\n type=sch.ObservationTypeEnum.message_cc,\n time=message_time))\n for r in m['refs']:\n message.obs_as_src.append(sch.Observation(\n dst=db_get_message(r['ref']), batch=batch,\n type=sch.ObservationTypeEnum.message_ref,\n time=message_time))\n\n if flush_count[0] > 10000:\n sess.flush()\n flush_count[0] = 0\n\n print(f'Finished with batch {batch.id}; committing')", "def load_individual(path):\n with open(path, 'rb') as input:\n ind = pickle.load(input)\n input.close()\n return ind", "def load_species_groups():\n\n print(\"Species groups\")\n\n SpeciesGroup.query.delete()\n\n with open(\"seed_data/species_group_seed.psv\") as species:\n for row in species:\n species_group_id, species_group_name = row.strip().split(\"|\")\n\n group = SpeciesGroup(species_group_id = species_group_id,\n species_group = species_group_name)\n\n db.session.add(group)\n\n db.session.commit()", "def load_sundaes():\n\n print('load_sundaes')\n\n User.query.delete()\n\n for row in open(\"seed_data/sundaes.csv\"):\n row = row.rstrip()\n email, postal_code = row.split(',')\n\n\n usr = User(email=email,\n postal_code=postal_code)\n\n db.session.add(usr)\n\n db.session.commit()", "def from_pickle(self, path):\n with open(path, 'rb') as hex_pickle:\n tmp_hexes = pickle.load(hex_pickle)\n for key,hex in tmp_hexes.items():\n\n new_hex = Hex(self,key)\n new_hex.suitability = hex.properties['suitability']\n\n new_hex.set_quality()\n self.hexes[key] = new_hex\n\n for hex in self.hexes.values():\n hex.is_occupied = 0\n hex.fono = 0\n hex.set_fon()", "def load_pop(path):\n with open(path, 'rb') as input:\n pop = pickle.load(input)\n input.close()\n return pop", "def loadPSet(self):\n self.logger.info(\"Working dir: %s\", os.getcwd())\n # Pickle original pset configuration\n procScript = \"edm_pset_pickler.py\"\n cmd = \"%s --input %s --output_pkl %s\" % (\n procScript,\n os.path.join(self.stepSpace.location, self.psetFile),\n os.path.join(self.stepSpace.location, self.configPickle))\n self.scramRun(cmd)\n\n try:\n with open(os.path.join(self.stepSpace.location, self.configPickle), 'rb') as f:\n self.process = Unpickler(f).load()\n except ImportError as ex:\n msg = \"Unable to import pset from %s:\\n\" % self.psetFile\n msg += str(ex)\n self.logger.error(msg)\n raise ex\n\n return", "def manual_import_genesis(self, path):\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.genesis_import(fbx_path, dtu)", "def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()", "def import_population(self, resume_from):\n location = GAConfig[\"log_file_location\"]\n 
file_name = location + \"evo\" + str(resume_from) + \".log\"\n\n with open(file_name, 'r') as in_file:\n for line in in_file:\n if len(line) < len(self.phones):\n self.population[-1].set_fitness(float(line))\n continue\n\n genes = line.split('\\t')\n new_chromosome = Chromosome(GAConfig[\"num_categories\"])\n for i in range(len(genes) - 1):\n for gene in genes[i]:\n new_chromosome.insert_into_category(i, gene)\n self.population.append(new_chromosome)\n\n self.display_population(resume_from)", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def load_inst(self):\n self.sanity_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n self.pub_auth_all = pickle.load(open(fname_pub_auth_all, 'rb'))\n self.pub_auth_top = pickle.load(open(fname_pub_auth_top, 'rb'))\n self.pub_inst_all = pickle.load(open(fname_pub_inst_all, 'rb'))\n self.pub_inst_top = pickle.load(open(fname_pub_inst_top, 'rb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n self.history = pickle.load(open(fname_pub_history, 'rb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n self.staff = pickle.load(open(fname_pub_staff, 'rb'))", "def from_pickle(input_path):\n with open(input_path, 'rb') as f:\n unpickler = pickle.Unpickler(f)\n return unpickler.load()", "def test_import_pmodel(self, make_objects):\n # test that the various values are correctly loaded\n pmodel = make_objects\n assert pmodel.epsilons[0] == 0.0\n assert pmodel.epsilons[496] == 1.0\n\n assert pmodel.use_pairs[10][0] == 0\n assert pmodel.use_pairs[10][1] == 26\n assert pmodel.use_pairs[844][0] == 42\n assert pmodel.use_pairs[844][1] == 50", "def load(self, gen=None):\n try:\n path = f\"population{'_backup' if self.use_backup else ''}/\" \\\n f\"storage/\" \\\n f\"{self.folder_name}/\" \\\n f\"{self}/\" \\\n f\"generations/\"\n if gen is None:\n # Load in all previous populations\n populations = glob(f\"{path}gen_*\")\n if not populations: raise FileNotFoundError\n \n # Find newest population and save generation number under 'gen'\n populations = [p.replace('\\\\', '/') for p in populations]\n regex = r\"(?<=\" + re.escape(f'{path}gen_') + \")[0-9]*\"\n gen = max([int(re.findall(regex, p)[0]) for p in populations])\n \n # Load in the population under the specified generation\n pop = load_pickle(f'{path}/gen_{gen:05d}')\n self.best_fitness = pop.best_fitness\n self.best_genome = pop.best_genome\n self.best_genome_hist = pop.best_genome_hist\n self.config = pop.config\n self.generation = pop.generation\n self.population = pop.population\n self.reporters = pop.reporters\n self.reproduction = pop.reproduction\n self.species = pop.species\n self.species_hist = pop.species_hist\n self.log(f\"\\nPopulation '{self}' loaded successfully! 
Current generation: {self.generation}\")\n return True\n except FileNotFoundError:\n return False", "def load_user():\n\n for i, row in enumerate(open(\"seed_data/role.user\")):\n row = row.rstrip()\n name, description = row.split(\"|\")\n role = RoleModel(name=name, description=description)\n db.session.add(role)\n\n for i, row in enumerate(open(\"seed_data/user.user\")):\n row = row.rstrip()\n name, phone, email, password, confirmed_at, role_id = row.split(\"|\")\n user = UserModel(name=name,\n phone=phone,\n email=email,\n password=password,\n confirmed_at=confirmed_at,\n role_id=role_id)\n db.session.add(user)\n\n # for i, row in enumerate(open(\"seed_data/order.user\")):\n # row = row.rstrip()\n # active, user_id, product_location_id = row.split(\"|\")\n # order = OrderrModel(\n # active=active, \n # user_id=user_id, \n # product_location_id=product_location_id)\n # db.session.add(order)\n\n db.session.commit()", "def seed(self, seed=None):\n raise NotImplementedError()", "def seed(self, seed=None):\n raise NotImplementedError()", "def from_pickle_file(filename):\n with open(filename, \"rb\") as infile:\n obj = pickle.load(infile)\n assert isinstance(obj, ExperimentList)\n return obj", "def _import_insee_zipcode(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(\n data_dir, 'insee_codes_postaux.csv'))\n zipcode_obj = self.pool.get('insee.zipcode')\n with open(filepath, 'rb') as zipcode_file:\n reader = csv.DictReader(zipcode_file, delimiter=';')\n for row in reader:\n values = {\n 'commune': row['COMMUNE'],\n 'codepos': row['CODEPOS'],\n 'dep': row['DEP'],\n 'insee': row['INSEE'],\n }\n zipcode_obj.create(cr, uid, values, context=context)", "def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the 
user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. 
the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def load_users():\n\n print('load_users')\n\n for row in open(\"seed_data/users.csv\"):\n row = row.rstrip()\n\n email, \\\n postal_code, \\\n fname, \\\n lname, \\\n username, \\\n password, \\\n phone, \\\n role = row.split(',')\n\n\n usr = User(email=email,\n postal_code=postal_code,\n fname=fname,\n lname=lname,\n username=username,\n password=password,\n phone=phone,\n role=role)\n\n db.session.add(usr)\n\n db.session.commit()", "def population_archiver(random, population, archive, args):\r\n new_archive = []\r\n for ind in population:\r\n new_archive.append(ind)\r\n return new_archive", "def load(self):\n cwd = 
os.getcwd()\n path = os.path.join(*[cwd, 'data', 'weighted_clusters',\n f\"weighted_clusters_WIJK{self.input}.dat\"])\n sys.path.append(path)\n\n with open(path, \"rb\") as f:\n unpickler = pickle.Unpickler(f)\n house_batt = unpickler.load()\n\n self.houses, self.batteries = house_batt[0], house_batt[1]" ]
[ "0.61895996", "0.5868044", "0.5637332", "0.5533471", "0.53152096", "0.5220497", "0.5161766", "0.51506436", "0.51140994", "0.5089465", "0.5054022", "0.5033288", "0.5019555", "0.49898914", "0.49854407", "0.49566922", "0.4949991", "0.4930906", "0.49032718", "0.48958465", "0.48874527", "0.48708618", "0.48708618", "0.48575443", "0.48379365", "0.48132706", "0.47976762", "0.4788956", "0.47802812", "0.47626436" ]
0.6940851
0
Import N populations (N = number of runs) from the given seed path.
def get_seed_all(self, seed_path):
        self.logprint("Reading seed population from ./{}".format(seed_path))
        if seed_path.endswith(".pop"):
            self.logprint("Import succeeded.")
            return [self.get_seed(seed_path)]
        elif os.path.isdir(seed_path):
            pop_files = [f for f in os.listdir(seed_path) if f.endswith(".pop")]
            if len(pop_files) != self.conf["n_runs"]:
                s = "Number of seed files does not equal to number of runs.\nTried getting seeds from: ./{}.".format(seed_path)
                self.abort(ImportError, s)
            pop_files.sort()
            seeds = [self.get_seed(os.path.join(seed_path, l)) for l in pop_files]
            self.logprint("Import succeeded.")
            return seeds
        # Otherwise abort
        s = "Seed path must point to a *.pop file or a directory containing *.pop files."
        self.abort(ImportError, s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_seed(self, seed_path):\n try:\n infile = open(seed_path, \"rb\")\n obj = pickle.load(infile)\n if not isinstance(obj, Population):\n s = \"Seed import failed: {} does not hold a Population\\\n object.\".format(seed_path)\n self.abort(TypeError, s)\n infile.close()\n return obj\n except IOError:\n s = \"Seed import failed: no file or directory under {}\".format(seed_path)\n self.abort(IOError, s)", "def seed(path):\n return os.path.join(os.path.split(os.path.realpath(__file__))[0], path)", "def load_individual_species():\n\n print (\"individual species\")\n\n SpeciesIndividual.query.delete()\n\n with open(\"seed_data/species_seed.psv\") as species:\n for row in species:\n species_name, group_id = row.strip().split(\"|\")\n\n species = SpeciesIndividual(species_name=species_name,\n species_group_id=group_id)\n\n db.session.add(species)\n\n db.session.commit()", "def load_seed(self) -> np.ndarray:\n return np.loadtxt(CONFIG_DIR / self.name_seed).view(complex).reshape(-1, 1)", "def test_seed_initial_population():\n # Test if a initial population can be read in from CSV\n i_population = owp.seed_initial_population('initial_populations.csv')\n # Test if a new population can be generated with i_population as the base\n pop_size = 30\n population = sga.generate_population(pop_size, i_population)\n assert type(i_population) is list\n assert len(population) == pop_size", "def import_data(seed: object = 42) -> object:\n\n # Read input data\n df = pd.read_csv(\"x_train_gr_smpl.csv\").astype(int)\n\n # label data-frame rows based on sample data\n for x in range(10):\n index = ~pd.read_csv(\"y_train_smpl_%s.csv\" % x, squeeze=True).astype(bool) # reversed flags (~)\n df.loc[index, 'label'] = str(x)\n\n input_data_ordered = df.iloc[:, 0:2304].to_numpy()\n output_data_ordered = df.iloc[:, 2304].to_numpy()\n\n # Randomise instance order (forcing the same result each time)\n np.random.seed(seed)\n permutation = np.random.permutation(df.shape[0])\n\n # Create base input and output arrays\n input_data = input_data_ordered[permutation]\n output_data = output_data_ordered[permutation]\n\n return input_data, output_data, df, input_data_ordered, output_data_ordered", "def trainIncr(path, numIter):\n\ttrainer = loadTrainer(path)\n\tfor i in range(numIter):\n\t\tprint(\"\\n**** next iteration \" + str(i))\n\t\tHiLoPricingEnv.count = 0\n\t\tresult = trainer.train()\n\t\tprint(pretty_print(result))\n\t\tprint(\"env reset count \" + str(HiLoPricingEnv.count))\n\treturn trainer", "def import_population(self, resume_from):\n location = GAConfig[\"log_file_location\"]\n file_name = location + \"evo\" + str(resume_from) + \".log\"\n\n with open(file_name, 'r') as in_file:\n for line in in_file:\n if len(line) < len(self.phones):\n self.population[-1].set_fitness(float(line))\n continue\n\n genes = line.split('\\t')\n new_chromosome = Chromosome(GAConfig[\"num_categories\"])\n for i in range(len(genes) - 1):\n for gene in genes[i]:\n new_chromosome.insert_into_category(i, gene)\n self.population.append(new_chromosome)\n\n self.display_population(resume_from)", "def create_populations(self, nb:int=1):\n self.populations.append(self.config.create(self.pop_size))", "def _generate_raw_environments(self, num, seed):", "def load_species_groups():\n\n print(\"Species groups\")\n\n SpeciesGroup.query.delete()\n\n with open(\"seed_data/species_group_seed.psv\") as species:\n for row in species:\n species_group_id, species_group_name = row.strip().split(\"|\")\n\n group = SpeciesGroup(species_group_id = 
species_group_id,\n species_group = species_group_name)\n\n db.session.add(group)\n\n db.session.commit()", "def run(chunk_size: int, path: str, name: str, start=None, end=None):\n # _input = str(input(\"enter the file name from which the data will be fetched: \")) -> path\n # _to = str(input(\"enter the db table name to which the fetched data will be injected: \")) -> name\n if name in ('kospi', 'kosdaq'):\n result = seed_kospi_or_kosdaq(name=path, stock_type=name, chunk_size=chunk_size, start=start, end=end)\n else:\n result = seed_other_dataset(name=path, chunk_size=chunk_size, start=start, end=end)\n\n return result", "def test_multiple_import(self):\n # Currently, there are no PowerPlant or Project objects in the database\n self.assertEqual(PowerPlant.objects.count(), 0)\n self.assertEqual(Project.objects.count(), 0)\n # There are no InfrastructureType objects in the database\n self.assertEqual(InfrastructureType.objects.count(), 0)\n # There are no Fuel or FuelCategory objects in the database\n self.assertEqual(Fuel.objects.count(), 0)\n self.assertEqual(FuelCategory.objects.count(), 0)\n # There are no Countries or Regions in the database\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)\n # There are no OwnerStakes in the database\n self.assertEqual(PlantOwnerStake.objects.count(), 0)\n # There are no Initiatives in the database\n self.assertEqual(Initiative.objects.count(), 0)\n # There are no ProjectFunding objects in the database\n self.assertEqual(ProjectFunding.objects.count(), 0)\n\n # Run the import\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # The file has 3 'Plant' rows and 3 'Project' rows, but the project_liaoning\n # also gets a parent PowerPlant created for it\n self.assertEqual(PowerPlant.objects.count(), 4)\n self.assertEqual(Project.objects.count(), 3)\n # There is now 1 InfrastructureType object in the database\n self.assertEqual(InfrastructureType.objects.count(), 1)\n # There are now 4 Fuel and 3 FuelCategory objects in the database\n self.assertEqual(Fuel.objects.count(), 4)\n self.assertEqual(FuelCategory.objects.count(), 3)\n # There are now 4 Countries and 3 Regions in the database\n self.assertEqual(Country.objects.count(), 4)\n self.assertEqual(Region.objects.count(), 3)\n # There are now 4 OwnerStakes in the database\n self.assertEqual(PlantOwnerStake.objects.count(), 4)\n # There is now Initiative in the database\n self.assertEqual(Initiative.objects.count(), 1)\n # There are now 3 ProjectFunding objects in the database\n self.assertEqual(ProjectFunding.objects.count(), 3)\n\n # Run the import again\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # There are still 4 PowerPlant and 3 Project objects in the database\n self.assertEqual(PowerPlant.objects.count(), 4)\n self.assertEqual(Project.objects.count(), 3)\n # There is still 1 InfrastructureType object in the database\n self.assertEqual(InfrastructureType.objects.count(), 1)\n # There are still 4 Fuel and 3 FuelCategory objects in the database\n self.assertEqual(Fuel.objects.count(), 4)\n self.assertEqual(FuelCategory.objects.count(), 3)\n # There are still 4 Countries and 3 Regions in the database\n self.assertEqual(Country.objects.count(), 4)\n self.assertEqual(Region.objects.count(), 3)\n # There are still 4 OwnerStakes in the database\n self.assertEqual(PlantOwnerStake.objects.count(), 4)\n # There is still Initiative in the database\n self.assertEqual(Initiative.objects.count(), 1)\n # 
There are still 3 ProjectFunding objects in the database\n self.assertEqual(ProjectFunding.objects.count(), 3)", "def generate_paths( self, initial_path=None, n_paths=1, max_attempts=10000, outfile=\"tse_ensemble.json\"):\n all_paths = []\n self.load_path(initial_path)\n orig_path = copy.deepcopy(self.init_path)\n if ( n_paths >= max_attempts ):\n raise ValueError( \"The number of paths requested exceeds the maximum number of attempts. Increase the maximum number of attempts\" )\n counter = 0\n min_slice,max_slice = self.find_timeslices_in_transition_region()\n\n n_paths_found = 0\n overall_num_paths = 0\n while( overall_num_paths < n_paths and counter < max_attempts ):\n self.log(\"Total number of paths found: {}\".format(overall_num_paths))\n counter += 1\n self.init_path = copy.deepcopy(orig_path)\n timeslice = np.random.randint(low=min_slice,high=max_slice)\n if ( self.shooting_move(timeslice) ):\n all_paths.append(self.init_path)\n n_paths_found += 1\n\n overall_num_paths = n_paths_found\n\n self.save_tse_ensemble( all_paths, fname=outfile )", "def import_experiments_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)", "def generate_population(population_size, nn_architecture):\n population = []\n for _ in range(population_size):\n population.append(nn.create_nn_from_arch(nn_architecture))\n\n return population", "def taxi_rides_sample(path, n, storage_options=None):\n return next(taxi_rides_iterator(path, n, storage_options))", "def mean_experiment_load_for_user_subset(num_users, seed=None):\n loads = tempfeeder_exp()\n if seed is None:\n seed = np.random.randint(1, 2**16)\n user_ids = np.random.RandomState(seed).permutation(loads.user_ids)[:num_users]\n return [l / len(user_ids) for l in total_load_in_experiment_periods(loads, user_ids)]", "def load(self, path, nr_of_saves, test_it=-1):\n with self.graph.as_default():\n print(\"Loading networks...\")\n checkpoint_dir = os.path.join(os.environ['APPROXIMATOR_HOME'], path, \"network-\"+str(test_it))\n self.saver = tf.train.Saver(max_to_keep=nr_of_saves+1)\n try:\n self.saver.restore(self.sess, checkpoint_dir)\n print(\"Loaded: {}\".format(checkpoint_dir))\n except Exception:\n if test_it <= 0:\n # Initialize the variables\n self.sess.run(tf.global_variables_initializer())\n print(\"Failed! 
Initializing the network variables...\")\n else:\n raise", "def generate_population(size, w, h, N):\r\n population = []\r\n for _ in range(size):\r\n entity = gen_mines(w, h, randint(0, w*h))\r\n entity = (entity[:], count_numbers(gen_board(w, h, entity), N))\r\n population.append(entity)\r\n \r\n return population", "def load_users():\n\n for i, row in enumerate(open('seed_data/users.csv')):\n data = row.rstrip().split(\",\")\n user_id, email, password = data\n\n user = User(user_id=user_id, email=email,\n password=password)\n\n db.session.add(user)\n\n # For testing, just to see it was happening\n # if i % 100 == 0:\n # print i\n\n db.session.commit()", "def manual_import_genesis(self, path):\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.genesis_import(fbx_path, dtu)", "def load_ith_from_pgn(filename, i):\n file_tmp = open(filename) # we reload pgn\n\n game = chess.pgn.read_game(file_tmp)\n for _ in range(i): # skip to i-th game in pgn\n game = chess.pgn.read_game(file_tmp)\n\n return game", "def __init__(self,\n seed: int = 42,\n num_games: int = 100,\n num_rounds: int = 20,\n game_data_path: str = 'games.json'):\n self.seed = seed\n self.num_games = num_games\n self.num_rounds = num_rounds\n\n _root_path = os.path.dirname(os.path.realpath(__file__))\n self.game_data_path = os.path.join(_root_path, game_data_path)", "def populate(self, pop_size):\n for _ in range(pop_size):\n sample = next(self._exp_source)\n self._add(sample)", "def get_num_examples(path_in):\n i = 0\n with open(path_in, 'r', encoding='utf8') as f:\n for _ in f:\n i += 1\n return i", "def generate_N_doping(path, N_graphitic, N_pyridinic, N_pyrrolic, filename1):\n global bond_list\n bond_list = bond_list_1 + bond_list_3\n atom_list = read_in_graphene(path)\n rings = find_rings(atom_list)\n bond_list = bond_list_1 + bond_list_3\n map_3, map_2, map_2n = filter_carbon_atoms(atom_list, rings)\n graphitic = N_graphitic \n pyridinic = N_pyridinic\n pyrrolic = N_pyrrolic\n attempt = len(atom_list) / 10\n choices = [1, 2, 3]\n while (((N_graphitic > 0) or (N_pyridinic > 0) or (N_pyrrolic > 0)) and (attempt > 0)):\n print(\"Left to add: \", \"N_graphitic \", N_graphitic, \"N_pyridinic \", N_pyridinic, \"N_pyrrolic \", N_pyrrolic)\n if (N_graphitic == 0):\n try:\n choices.remove(1)\n except:\n pass\n if (N_pyridinic == 0):\n try:\n choices.remove(2)\n except:\n pass\n if (N_pyrrolic == 0):\n try:\n choices.remove(3)\n except:\n pass\n choice = random.choice(choices)\n if (choice == 1):\n while ((N_graphitic > 0) and (len(map_3) > 0)):\n random_atom = random.choice(map_3)\n N_graphitic -= 1\n N = Atom(random_atom.atom_number, \"N3\", \"N3A\", str(graphitic - N_graphitic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 3) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") and ((identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CY\"))):\n for ring in rings:\n if (random_atom in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n try:\n 
atom_list.remove(random_atom)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n elif (choice == 2):\n while ((N_pyridinic > 0) and (len(map_2) > 0)): \n random_atom = random.choice(map_2)\n N_pyridinic -= 1\n N = Atom(random_atom.atom_number, \"N2\", \"N2A\", str(pyridinic - N_pyridinic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 2) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") ):\n found = False\n for ring in rings:\n if (random_atom in ring):\n found = True\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n if (found == False):\n try:\n map_3.remove(random_atom)\n except:\n pass\n try:\n map_2.remove(random_atom)\n except:\n pass\n try:\n map_2n.remove(random_atom)\n except:\n pass\n atom_list.remove(random_atom)\n atom_list.append(N)\n else:\n attempt -= 1\n else: \n attempt -= 1\n elif (choice == 3):\n while ((N_pyrrolic > 0) and (len(map_2n) > 0)):\n random_atom_1 = random.choice(map_2n)\n for neighbour in identify_bonds(random_atom_1, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n random_atom_2 = neighbour[0]\n break\n for ring in rings:\n if (random_atom_1 in ring):\n center_6 = {}\n center_6['x'] = 0\n center_6['y'] = 0\n center_6['z'] = 0\n center_4 = {}\n center_4['x'] = 0\n center_4['y'] = 0\n center_4['z'] = 0\n for atom in ring:\n center_6['x'] += atom.x\n center_6['y'] += atom.y\n center_6['z'] += atom.z\n if ((atom != random_atom_1) and (atom != random_atom_2)):\n center_4['x'] += atom.x\n center_4['y'] += atom.y\n center_4['z'] += atom.z\n center_6['x'] /= 6\n center_6['y'] /= 6\n center_6['z'] /= 6\n center_4['x'] /= 4\n center_4['y'] /= 4\n center_4['z'] /= 4\n N_pyrrolic -= 1\n p = 0.6\n limit = 0.3\n if ((-limit < center_4['x'] - center_6['x'] < limit) and (-limit < center_4['y'] - center_6['y'] < limit)): \n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = 
Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n for ring in rings:\n if (random_atom_1 in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n for mol in identify_bonds(atom, atom_list):\n try:\n map_2n.remove(mol[0])\n except:\n pass\n try:\n atom_list.remove(random_atom_1)\n atom_list.remove(random_atom_2)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n attempt -= 1\n writepdb(atom_list, filename1)\n print(\"done.\")\n return 'done.'", "def initialize_population(self, params: dict):\n if params.save_example_batch:\n create_folder_if_not_exists(self.run_folder + \"/messages\")\n\n if params.single_pool:\n create_folder_if_not_exists(self.run_folder + \"/agents\")\n if params.evolution:\n create_folder_if_not_exists(self.run_folder + \"/agents_genotype\")\n else:\n create_folder_if_not_exists(self.run_folder + \"/senders\")\n create_folder_if_not_exists(self.run_folder + \"/receivers\")\n if params.evolution:\n create_folder_if_not_exists(self.run_folder + \"/senders_genotype\")\n create_folder_if_not_exists(self.run_folder + \"/receivers_genotype\")\n\n for i in range(params.population_size):\n sender_genotype = None\n receiver_genotype = None\n if params.evolution:\n sender_genotype = generate_genotype(num_nodes=params.init_nodes)\n receiver_genotype = generate_genotype(num_nodes=params.init_nodes)\n\n if params.single_pool:\n self.agents.append(\n SingleAgent(\n self.run_folder, params, genotype=sender_genotype, agent_id=i\n )\n )\n else:\n self.senders.append(\n SenderAgent(\n self.run_folder, params, genotype=sender_genotype, agent_id=i\n )\n )\n self.receivers.append(\n ReceiverAgent(\n self.run_folder, params, genotype=receiver_genotype, agent_id=i\n )\n )", "def choose_random(N):\n db = pymongo.MongoClient('localhost',27020).chembldb\n # Get all CHEMBL IDs\n db.molecules.ensure_index('chembl_id')\n chembl_ids = [m['chembl_id'] for m in db.molecules.find().sort('chembl_id')]\n print len(chembl_ids)\n random.seed(201405291515)\n rands = 
random.sample(chembl_ids, N)\n return(rands)", "def test_large_import(self):\n self.create_sample_data_set_dir(\"node59p1.dat\", TELEM_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,750,400)" ]
[ "0.5271225", "0.5231763", "0.50995815", "0.50318307", "0.501359", "0.5006482", "0.4980782", "0.49768418", "0.49583787", "0.49352026", "0.49234396", "0.49177733", "0.49165896", "0.49038863", "0.48806596", "0.48763213", "0.48065647", "0.478646", "0.47783965", "0.4767288", "0.47535717", "0.47372743", "0.473644", "0.4728996", "0.47157067", "0.47149596", "0.47106797", "0.4696333", "0.46803096", "0.46744347" ]
0.6520888
0
Print an error message to the Simulation log, then abort.
def abort(self, errtype, message):
        self.log += "\n{0}: {1}\n".format(errtype.__name__, message)
        self.endtime = timenow(False)
        self.log += "\nSimulation terminated {}.".format(timenow())
        self.logsave()
        raise errtype(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abort(self, msg):\n\n print\n print \"*** ERROR in module [ \" + self.name + \" ]: DEBUG INFO: \" + str(self.parent.debugInfo)\n print\n print \"*** ERROR in module [ \" + self.name + \" ]: \" + msg\n\n if (\"logfile\" in dir(self)):\n print\n print \"Logfile for failed module: \" + self.logfile\n\n # write error to logfile\n try:\n getoutput( \"echo \\\"*** Error in module [ \" + self.name + \" ]: DEBUG INFO: \" + str(self.parent.debugInfo).replace(\"\\n\",\"\") + \"\\\" >> \" + self.logfile )\n getoutput( \"echo \\\"*** Error in module [ \" + self.name + \" ]: \" + str(msg).replace(\"\\n\",\"\") + \"\\\" >> \" + self.logfile )\n except:\n pass\n sys.exit(1)", "def error(msg):\n print(msg, file=sys.stderr)\n sys.exit()", "def abort(msg=''):\n if msg:\n print >> sys.stderr, msg\n sys.exit(1)", "def error(message):\n print message\n sys.exit(2)", "def abort(message):\n\n sys.stderr.write(message + '\\n')\n sys.exit(1)", "def finalize_error():\n print('')\n exit(-1)", "def quit_with_error(msg):\n import traceback\n stack = traceback.extract_stack()\n frame = stack[-3]\n print(msg)\n if (frame[3] is None):\n suffix = ''\n else:\n suffix = \": \"+frame[3]\n print('Line',repr(frame[1]),'of',frame[0] + suffix)\n print('Quitting with Error')\n raise SystemExit()", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def printError(s):\r\n sys.stderr.write(\"ERROR: %s\\n\" % s)\r\n sys.exit(-1)", "def error(msg: str) -> None:\n print('ERROR: {msg}'.format(msg=msg))\n sys.exit()", "def abort(self):\n print(\"abort\")", "def error(message):\n if DEBUG:\n with print_lock:\n print((Colours.FAIL + 'ERROR: ' + Colours.END_COLOUR + message).strip())", "def error(error_no):\n print('--] Encountered unrecoverable ERROR [%s] ... leaving' % error_no)\n write_termination_message(error_no)\n sys.exit(0)", "def __abort_script(message):\n print(message)\n sys.exit()", "def die(msg):\n errorPrint(msg)\n sys.exit(1)", "def error(msg):\n print 'ERROR: %s' % msg\n sys.exit(1)", "def stop_err(msg):\n sys.stderr.write('%s\\n' % msg)\n sys.exit(-1)", "def print_std_error(self):\n print(self.std_error)\n sys.exit()", "def error(message):\n print(message, file=sys.stderr)", "def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)", "def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)", "def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)", "def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)", "def Die(msg):\n print(msg, file=sys.stderr)\n sys.exit(1)", "def print_error_and_exit(error_message):\n\n print(\"Error: \" + error_message)\n sys.exit()", "def exit_with_error_message (msg):\n print (\"[ERROR] %s\\n\" % msg)\n raise SystemExit", "def bail( msg ):\n # Terminate, with helpful error message:\n print(\"ERROR: \" + msg + \"... exiting.\", file=sys.stderr)\n exit(1)", "def error_exit(text):\n logging.error(text)\n exit(1)", "def ErrorExit(msg):\r\n print >>sys.stderr, msg\r\n sys.exit(1)", "def _error_and_die(errorMessage):\n\n print(errorMessage, file=sys.stderr)\n sys.exit(1)" ]
[ "0.74888575", "0.71220666", "0.71215695", "0.70035875", "0.69939435", "0.6936464", "0.67833465", "0.67662495", "0.67484874", "0.6704806", "0.67036784", "0.66994905", "0.6696277", "0.6694784", "0.6692499", "0.6643295", "0.6635498", "0.66208345", "0.6586786", "0.6577234", "0.6571371", "0.6570542", "0.6570542", "0.6541136", "0.65408254", "0.65385133", "0.65378475", "0.65378374", "0.6535017", "0.65085423" ]
0.7880541
0
Execute simulation runs in series, with no external parallelisation.
def execute_series(self):
        for n in xrange(self.conf["n_runs"]):
            self.runs[n].execute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, steps):\n self.sim.run(steps)", "def run(self):\n for worker in self.simulation_workers:\n worker.start()", "def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def run():\n step = 0\n while traci.simulation.getMinExpectedNumber() > 0:\n traci.simulationStep()\n step+=1\n traci.close()\n sys.stdout.flush()", "def run_sim(mass, start, stop, sampling_rate):\n axion = Axion(mass=mass)\n return axion.do_fast_axion_sim(start,\n stop,\n sampling_rate)", "def run_simulation(self):\n\n # Create agents for simulation\n self.spawn_agents(self.num_agents)\n\n if self.force_personalities != None:\n self.force_personalities(self)\n\n if self.visualizer == True:\n V.Visualizer.createVisualizer(types=self.visualizerOptions, showAtEnd=True)\n\n TM.TimeManager.createManager()\n for x in range (self.time_to_run):\n for agent in self.agents:\n agent.take_turn()\n while self.agents_to_settle:\n self.agents_to_settle.pop().settle_reposts()\n if self.data_collector != None:\n self.data_collector.collector_turn(x, agent)\n if self.visualizer == True:\n self.generate_visualizations(x)\n TM.TimeManager.sharedManager.increaseTime()\n if self.data_collector != None:\n self.data_collector.collector_round(x)\n self.generate_statistics(x)\n\n if self.visualizer == True:\n V.Visualizer.sharedVisualizer.updateEverything()\n\n if self.data_collector != None:\n self.data_collector.finalize()", "def performSimulation(self):\n \n if self.parameters['verbose']:\n print(\"=====================\\nStarting simulation with parameters\\n\",self.parameters)\n print(\"=====================\\nInitial Graph\\n\")\n self.showState()\n print(\"=====================\")\n\n while self.parameters['steps'] > 0:\n if self.parameters['verbose']: print(\"Performing step\")\n self.performStep()\n if self.parameters['verbose']: self.showState()\n\n if self.parameters['verbose']:\n print(\"=====================\\nFinished Simulation\\n\\nResult graph:\")\n self.showState()\n #self.showGraph(self.parameters['file_name'])\n #self.showState()\n #self.showStats()", "def run_single(self):\n self.run_sim_time(1)", "def run_simulator(self):\n\n self.update_settings()\n\n # Pass in the progress bar and the master so that the simulator can\n # update the progress bar and then refresh the screen when the progress\n # checkpoints are hit\n\n self.sim_results = self.sim.run(self.progress_bar, self.master)\n self.graph_results()", "def run(self, cores=-1):\n \n self.simRunner.do_simulation(cores=cores)\n \n return True", "def run(self):\n if self.mode == 'remote':\n raise NotImplementedError('No auto run for remote jobs.')\n finished = False\n while not finished:\n if self.current_epoch == self.nepochs:\n logger.info('Reached {} epochs. 
Finishing.'.format(self.current_epoch))\n finished = True\n elif self.app.available_gpus >= self.nmin:\n n_spawns = self.app.available_gpus\n if self.current_epoch == 1:\n self.app.initialize_folders()\n gen_folders = '{}/*'.format(self.app.generator_folder)\n spawn_folders = self.app.move_generators_to_input(gen_folders)\n else:\n # First move possibly finished productions to their data folder\n try:\n self.app.move_trajs_to_folder(spawn_folders)\n except:\n pass\n self.app.update_metadata()\n self.fit_model()\n self.spawns = self.respawn_from_MSM(search_type='counts', n_spawns=n_spawns)\n spawn_folders = self.app.prepare_spawns(self.spawns, self.current_epoch)\n # Plot where chosen spawns are in the tICA landscape\n f, ax = plot_tica_landscape(self.ttrajs)\n plot_spawns(self.spawns, self.ttrajs, ax=ax)\n fig_fname = '{today}_e{epoch}_spawns.pdf'.format(today=datetime.date.today().isoformat(), epoch=self.current_epoch)\n f.savefig(fig_fname)\n\n self.app.run_GPUs_bash(\n folders=spawn_folders\n )\n self.current_epoch += 1\n else:\n logger.info('{} available GPUs. Minimum per epoch is {}'.format(self.app.available_gpus, self.nmin))\n logger.info('Going to sleep for {} seconds'.format(self.sleeptime))\n sleep(self.sleeptime)", "def runall():\n sclogic.runall()", "def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)", "def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir 
\"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n #\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)", "def run(self):\n # Create queue of experiment configurations\n queue = collections.deque(self.settings.EXPERIMENT_QUEUE)\n # Calculate number of experiments and number of processes\n self.n_exp = len(queue) * self.settings.N_REPLICATIONS\n self.n_proc = self.settings.N_PROCESSES \\\n if self.settings.PARALLEL_EXECUTION \\\n else 1\n logger.info('Starting simulations: %d experiments, %d process(es)'\n % (self.n_exp, self.n_proc))\n\n if self.settings.PARALLEL_EXECUTION:\n # This job queue is used only to keep track of which jobs have\n # finished and which are still running. 
Currently this information\n # is used only to handle keyboard interrupts correctly\n job_queue = collections.deque()\n # Schedule experiments from the queue\n while queue:\n experiment = queue.popleft()\n for _ in range(self.settings.N_REPLICATIONS):\n job_queue.append(self.pool.apply_async(run_scenario,\n args=(self.settings, experiment,\n self.seq.assign(), self.n_exp),\n callback=self.experiment_callback))\n self.pool.close()\n # This solution is probably not optimal, but at least makes\n # KeyboardInterrupt work fine, which is crucial if launching the\n # simulation remotely via screen.\n # What happens here is that we keep waiting for possible\n # KeyboardInterrupts till the last process terminates successfully.\n # We may have to wait up to 5 seconds after the last process\n # terminates before exiting, which is really negligible\n try:\n while job_queue:\n job = job_queue.popleft()\n while not job.ready():\n time.sleep(5)\n except KeyboardInterrupt:\n self.pool.terminate()\n self.pool.join()\n\n else: # Single-process execution\n while queue:\n experiment = queue.popleft()\n for _ in range(self.settings.N_REPLICATIONS):\n self.experiment_callback(run_scenario(self.settings,\n experiment, self.seq.assign(),\n self.n_exp))\n if self._stop:\n self.stop()\n\n logger.info('END | Planned: %d, Completed: %d, Succeeded: %d, Failed: %d',\n self.n_exp, self.n_fail + self.n_success, self.n_success, self.n_fail)", "def run(self):\n last = self.system.last_timestep\n start = last.timestep + 1 if last else 0\n del last\n end = self.system.cg_steps\n \n logging.info(\"running timesteps {} to {}\".format(start, end))\n \n for _ in range(start, end):\n self.system.begin_timestep()\n self.atomistic_step()\n self.cg_step()\n self.system.end_timestep()\n \n logging.info(\"completed all {} timesteps\".format(end-start))", "def run(self):\n self.axs[0][0].clear()\n simulate(params=self.params,plt=plt,callback=self.callback,home=self.home,work=self.work, positions=self.initial_positions, stopping_t=150)", "def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success 
Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()", "def runSim(self):\n if self.verbose:\n print(\"Running Simulation, This may take a while\")\n self.makeXData(float(self.pretime))\n pool = Pool(processes=len(self.powers))\n jobs = []\n self.gem_pair = []\n self.electron = []\n self.hole = []\n self.filled = []\n self.signal = []\n self.gsignal = []\n self.ehsignal = []\n self.gloss = []\n self.tloss = []\n self.qk = []\n for power, pulse in zip(self.powers, self.pulses):\n inputs = [power, pulse, self.steps, self.trap, self.tolerance,\n self.EHdecay, self.Etrap, self.FHloss, self.Gdecay,\n self.G2decay, self.G3decay, self.GHdecay, self.Gescape,\n self.Gform, self.G3loss, self.Keq, self.trackQ,\n self.verbose]\n jobs.append(pool.apply_async(powerRun, inputs))\n for job in jobs:\n gem_pair, electron, hole, filled, signal, gsignal, ehsignal, gloss, tloss, qk = job.get()\n self.signal.append(signal * self.scalar / self.step)\n self.gsignal.append(gsignal * self.scalar / self.step)\n self.ehsignal.append(ehsignal * self.scalar / self.step)\n self.gloss.append(gloss * self.scalar / self.step)\n self.tloss.append(tloss * self.scalar / self.step)\n self.gem_pair.append(gem_pair)\n self.electron.append(electron)\n self.hole.append(hole)\n self.filled.append(filled)\n self.qk.append(qk)\n pool.close()", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def run(self):\n self._display_sims(self._compute_sims())", "def run_simulation(self, number_runs = 1):\n for i in range(0, number_runs):\n self.ques = [self.start for i in range(0, self.numQueues)]\n run = self.__single_sim_results()\n run_results = pd.DataFrame({'simulation':i,\n 'num_items': len(run),\n 'wait_count': len(run[run['wait_time']>datetime.timedelta(seconds=0)]),\n 'avg_wait_time': run.wait_time.mean(),\n 'close_time': max(run['appt_end_time'])}, index=[i])\n self.results = pd.concat([self.results, run_results], ignore_index=True)\n self.results['last_appt_to_close_minutes'] = (self.results['close_time']-self.end).dt.total_seconds().div(60)\n return", "def execute(self, debug=False):\n\n if debug:\n # Set some default times for execution (debugging)\n start_time = datetime(year=2016, month=10, day=19, hour=12, minute=28, tzinfo=UTC)\n duration = timedelta(seconds=5)\n end_time = start_time + duration\n\n relative_interval = RelativeTimeInterval(0, 0)\n time_interval = TimeInterval(start_time, end_time)\n # workflow_id = \"lda_localisation_model_predict\"\n else:\n duration = 0 # not needed\n relative_interval = self.hyperstream.config.online_engine.interval\n time_interval = relative_interval.absolute(utcnow())\n\n for _ in range(self.hyperstream.config.online_engine.iterations):\n if not debug:\n # if this takes more than x minutes, kill myself\n signal.alarm(self.hyperstream.config.online_engine.alarm)\n\n logging.info(\"Online engine starting up.\")\n\n # self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval]))\n self.hyperstream.workflow_manager.set_all_requested_intervals(TimeIntervals([time_interval]))\n self.hyperstream.workflow_manager.execute_all()\n\n logging.info(\"Online engine shutting down.\")\n logging.info(\"\")\n\n sleep(self.hyperstream.config.online_engine.sleep)\n\n if debug:\n time_interval += duration\n else:\n time_interval = TimeInterval(time_interval.end, utcnow() + 
timedelta(seconds=relative_interval.end))", "def run(self):\n self.log.overall('Starting run')\n run_start = time()\n for epoch in xrange(self.n_epochs):\n self.agent.reset()\n self.n_epoch = epoch\n self._run_epoch()\n self.log.overall('End of run ({:.2f} s)'.format(time() - run_start))", "def startSimulation(self):\n self.saveParameters()\n self.simulation.main()", "def run(self, phys, forces, step, ts, *args):\r\n # TMC 1-13-08: Check if args is actually necessary\r\n #self.recache(phys)\r\n\r\n self.runOutput(phys, forces, step, ts, *args)\r\n self.runPlots(phys, forces, step, ts)", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def run(self):\n\n self.create_trials() # create them *before* running!\n self.start_experiment()\n\n for trail in self.trials:\n trial.run()\n\n self.close()", "def simulate(self, start='0', stop='86400', step='60', solver='rungekutta', args=[]):\n\t\tstart = str(parse_var_val(start, 's'))\n\t\tstop = str(parse_var_val(stop, 's'))\n\t\tstep = str(parse_var_val(step, 's'))\n\t\tsim_args = [\n\t\t\t'-override',\n\t\t\t'startTime='+start+',stopTime='+stop+',stepSize='+step,\n\t\t\t'-s', solver,\n\t\t\t'-f', self.init_out_fn,\n\t\t\t'-r', self.res_fn,\n\t\t\t]\n\t\tsp.call(['./'+self.model] + sim_args + args)" ]
[ "0.7059126", "0.6820462", "0.6784799", "0.6616469", "0.6533175", "0.6498618", "0.647814", "0.6432363", "0.638237", "0.6335111", "0.6320436", "0.63103276", "0.6306981", "0.62909955", "0.62752616", "0.62674004", "0.625852", "0.623389", "0.6202473", "0.6198507", "0.6148332", "0.6126168", "0.6123305", "0.6094955", "0.60683876", "0.6063092", "0.60562927", "0.60543305", "0.6037519", "0.60236603" ]
0.8158664
0
Ensure that a default exception/code is used if invalid code is provided.
def test_raise_using_invalid_code(self): with self.assertRaises(CloudantFeedException) as cm: raise CloudantFeedException('foo') self.assertEqual(cm.exception.status_code, 100)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def raise_if_exception(code):\n if isinstance(code, IndexError or Exception):\n raise code\n else:\n return code", "def from_code(code):\n builtin = _awscrt.get_corresponding_builtin_exception(code)\n if builtin:\n return builtin()\n\n name = _awscrt.get_error_name(code)\n msg = _awscrt.get_error_message(code)\n return AwsCrtError(code=code, name=name, message=msg)", "def _check_return(self, name, ret_code):\n if ret_code == 0:\n pass\n else:\n raise RuntimeError('An error occured setting %s: %d' % (name, ret_code))", "def raise_500():\n raise ValueError('Foo!')", "def raise_on_error(error_code):\n if error_code == 0: # SUCCESS\n return\n elif error_code == 1: # ERR_BAD_MORPH_OP\n raise ValueError('invalid morhology operation code')\n elif error_code == 2: # ERR_BAD_TYPE\n raise ValueError('invalid type')\n elif error_code == 3: # ERR_BAD_CUDA_DEVICE\n raise ValueError('invalid device number')\n elif error_code == 4: # ERR_NO_AVAILABLE_CUDA_DEVICE\n raise RuntimeError('no CUDA device available')\n elif error_code == 5: # ERR_BAD_APPROX_TYPE\n raise RuntimeError('invalid approximation type')\n elif error_code == -1: # ERR_UNCAUGHT_EXCEPTION\n raise RuntimeError('an unaught C++ exception occured')\n else:\n raise ValueError('invalid error code: {}'.format(error_code))", "def test_raise_without_code(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException()\n self.assertEqual(cm.exception.status_code, 100)", "def test_invalid_model_code(self) -> None:\n model_code = 90\n res = self.app.get('/model-parameters', query_string={\"number\": model_code})\n self.assertEqual(400, res.status_code)", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n) -> None:\r\n ...", "def set_error(self, code: Optional[int] = None, text: Optional[str] = None) -> None:\n if code is not None:\n self.error_code = code\n if text is not None:\n self.error_text = text", "def test_raise_with_proper_code_and_args(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException(101)\n self.assertEqual(cm.exception.status_code, 101)", "def __check_status_code(cls, status_code):\n if status_code >= 400:\n raise IOError(\"error status_code: %d\" % status_code)", "def error(message, code=None):\n print_error(message)\n sys.exit(code or 1)", "def error_from_code(code):\n if code in _by_codes:\n return _by_codes[code]\n else:\n return XTTError(code)", "def error_code(self, error_code):\n # type: (int) -> None\n\n if error_code is not None:\n if not isinstance(error_code, int):\n raise TypeError(\"Invalid type for `error_code`, type has to be `int`\")\n\n self._error_code = error_code", "def error(self, code, message = ''):\n self.response.set_status(404)\n raise Exception(message)", "def set_error_from_exc(self, exc: Exception, code: Optional[int] = ERR_UNKNOWN) -> None:\n self.set_error(code=code, text=str(exc))", "def _validate_code(self, key, code):\n \n if code is None:\n code = self.name\n \n if not isinstance(code, (str, unicode)):\n raise TypeError(\"Sequence.code should be an instance of str or \"\n \"unicode, not %s\" % type(code))\n \n code = Project._condition_code(code)\n \n return code", "def error_code(self) -> CustomErrorCode:\n enforce(self.is_set(\"error_code\"), \"'error_code' content is not set.\")\n return cast(CustomErrorCode, self.get(\"error_code\"))", "async def test_404_on_unknown(request_format): # type: ignore[no-untyped-def]\n response: HTTPResponse = await request_format(\n 
formatter=\"UNKNOWN\",\n code=[SIMPLE_VALID_PYTHON_CODE],\n options={},\n raise_error=False,\n )\n assert response.code == 404", "def test_bad_values_for_validate_locale_code(bad_value):\n with pytest.raises(ValidationError):\n bcvalidators.validate_locale_code(bad_value)", "def test_bad_request_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_bad_request_code.__iter__()\n length = self.test_bad_request_code.__len__()\n\n while value < self.MAX_BAD_REQUEST_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_BAD_REQUEST_CODE_VALUE:\n value += 1\n\n length -= 1", "def raise_on_error(code, data):\n # get detailed server response message\n if code != 200:\n try:\n server_info = data.read()\n except Exception:\n server_info = None\n else:\n server_info = server_info.decode('ASCII', errors='ignore')\n if server_info:\n server_info = \"\\n\".join(\n line for line in server_info.splitlines() if line)\n # No data.\n if code == 204:\n raise FDSNNoDataException(\"No data available for request.\",\n server_info)\n elif code == 400:\n msg = (\"Bad request. If you think your request was valid \"\n \"please contact the developers.\")\n raise FDSNBadRequestException(msg, server_info)\n elif code == 401:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", server_info)\n elif code == 403:\n raise FDSNForbiddenException(\"Authentication failed.\",\n server_info)\n elif code == 413:\n raise FDSNRequestTooLargeException(\"Request would result in too much \"\n \"data. Denied by the datacenter. \"\n \"Split the request in smaller \"\n \"parts\", server_info)\n # Request URI too large.\n elif code == 414:\n msg = (\"The request URI is too large. Please contact the ObsPy \"\n \"developers.\", server_info)\n raise NotImplementedError(msg)\n elif code == 429:\n msg = (\"Sent too many requests in a given amount of time ('rate \"\n \"limiting'). 
Wait before making a new request.\", server_info)\n raise FDSNTooManyRequestsException(msg, server_info)\n elif code == 500:\n raise FDSNInternalServerException(\"Service responds: Internal server \"\n \"error\", server_info)\n elif code == 503:\n raise FDSNServiceUnavailableException(\"Service temporarily \"\n \"unavailable\",\n server_info)\n elif code is None:\n if \"timeout\" in str(data).lower() or \"timed out\" in str(data).lower():\n raise FDSNTimeoutException(\"Timed Out\")\n else:\n raise FDSNException(\"Unknown Error (%s): %s\" % (\n (str(data.__class__.__name__), str(data))))\n # Catch any non 200 codes.\n elif code != 200:\n raise FDSNException(\"Unknown HTTP code: %i\" % code, server_info)", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n ) -> None:\r\n ...", "def is_valid(self, value=None, raise_exception=True, name=None, **kwargs):\n valid = self._is_valid(value, **kwargs)\n if not valid:\n foo = InvalidCode(value, type(self).__name__)\n if raise_exception:\n raise foo\n else:\n logging.warning(foo.msg)\n return valid", "def _set_returncode(self, code):\n if code >= self._return_code:\n self._return_code = code", "def _assert_raise_error(\n self,\n probabilities: List[float],\n random_nums: List[int],\n error: ValidationError,\n code: int\n ) -> None:\n with self.assertRaises(error) as context:\n self._setup_random_gen(probabilities, random_nums)\n self.assertEqual(context.exception.code, code)", "def raise_error(msg: str, code: int = 400) -> None:\n response = make_response(jsonify(message=msg), code)\n abort(response)", "def test_py2_application_exception_message_bytes_english():\n try:\n raise ValueError(BYTES_ENGLISH)\n except ValueError:\n app = application()\n notice_error(application=app)", "def test_query_no_def_invalid(self):\n with self.assertRaises(ValueError) as context:\n query_yes_no(question=\"Is anyone wiser than Socrates?\", default=\"xxx\")", "def _missing_(cls, value):\n if not (isinstance(value, int) and 0 <= value <= 255):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n if 144 <= value <= 252:\n # [Internet Assigned Numbers Authority]\n extend_enum(cls, 'Unassigned [%d]' % value, value)\n return cls(value)\n return super()._missing_(value)" ]
[ "0.66130775", "0.6153171", "0.61259", "0.5967256", "0.59447294", "0.58569986", "0.582162", "0.57786494", "0.5746705", "0.5723983", "0.5721174", "0.5689755", "0.5681505", "0.5680904", "0.5679687", "0.5622346", "0.5620689", "0.55826855", "0.55438507", "0.54782844", "0.5449786", "0.54475856", "0.5409993", "0.53878766", "0.5382482", "0.5381287", "0.5358089", "0.535026", "0.5345259", "0.5335092" ]
0.63326275
1
Test constructing an infinite feed when no feed option is set.
def test_constructor_no_feed_option(self): feed = InfiniteFeed(self.db, chunk_size=1, timeout=100) self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes'])) self.assertIsInstance(feed._r_session, Session) self.assertFalse(feed._raw_data) self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100}) self.assertEqual(feed._chunk_size, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_constructor_with_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_infinite_feed(self):\n self.populate_db_with_documents()\n feed = InfiniteFeed(self.db, timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n changes = list()\n for change in feed:\n self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))\n changes.append(change)\n if len(changes) in (100, 200):\n sleep(1) # 1 second > .1 second (timeout)\n self.populate_db_with_documents(off_set=len(changes))\n elif len(changes) == 300:\n feed.stop()\n expected = set(['julia{0:03d}'.format(i) for i in range(300)])\n self.assertSetEqual(set([x['id'] for x in changes]), expected)\n self.assertIsNone(feed.last_seq)\n # Compare infinite/continuous with normal\n normal = Feed(self.db)\n self.assertSetEqual(\n set([x['id'] for x in changes]), set([n['id'] for n in normal]))\n\n # Ensuring that the feed._start method was called 3 times, verifies that\n # the continuous feed was started/restarted 3 separate times.\n self.assertEqual(feed._start.called_count, 3)", "def test_constructor_with_invalid_feed_option(self):\n feed = InfiniteFeed(self.db, feed='longpoll')\n with self.assertRaises(CloudantArgumentError) as cm:\n invalid_feed = [x for x in feed]\n self.assertEqual(\n str(cm.exception),\n 'Invalid infinite feed option: longpoll. Must be set to continuous.'\n )", "def feed() -> None:\n ...", "def test_feed_generator(self):\n moksha.feed_cache = FakeCache()\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()", "def wrap_feed(feed, max_iter=-1, **devtype):\n return FeedMover(FeedLimiter(feed, max_iter), **devtype)", "def data_feeder_2():\n return random.sample(range(100), 10)", "def test_constructor_db_updates(self):\n feed = InfiniteFeed(self.client, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.client.server_url, '_db_updates']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_infinite_db_updates_feed(self):\n feed = InfiniteFeed(self.client, since='now', timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n new_dbs = list()\n try:\n new_dbs.append(self.client.create_database(self.dbname()))\n for change in feed:\n self.assertTrue(all(x in change for x in ('seq', 'type')))\n new_dbs.append(self.client.create_database(self.dbname()))\n if feed._start.called_count >= 3 and len(new_dbs) >= 3:\n feed.stop()\n if len(new_dbs) >= 15:\n # We stop regardless after 15 databases have been created\n feed.stop()\n finally:\n [db.delete() for db in new_dbs]\n # The test is considered a success if feed._start was called 2+ times.\n # If failure occurs it does not necessarily mean that the InfiniteFeed\n # is not functioning as expected, it might also mean that we reached the\n # db limit threshold of 15 
before a timeout and restart of the\n # InfiniteFeed could happen.\n self.assertTrue(feed._start.called_count > 1)", "def infinite_loop():\n return True", "def __init__(self, inp: VarFeeder, features: Iterable[tf.Tensor], n_pages: int, mode: ModelMode, n_epoch=None,\n batch_size=127, runs_in_burst=1, verbose=True, predict_window=60, train_window=500,\n train_completeness_threshold=1, predict_completeness_threshold=1, back_offset=0,\n train_skip_first=0, rand_seed=None):\n self.n_pages = n_pages\n self.inp = inp\n self.batch_size = batch_size\n self.rand_seed = rand_seed\n self.back_offset = back_offset\n if verbose:\n print(\"Mode:%s, data days:%d, Data start:%s, data end:%s, features end:%s \" % (\n mode, inp.data_days, inp.data_start, inp.data_end, inp.features_end))\n\n if mode == ModelMode.TRAIN:\n # reserve predict_window at the end for validation\n assert inp.data_days - predict_window > predict_window + train_window, \\\n \"Predict+train window length (+predict window for validation) is larger than total number of days in dataset\"\n self.start_offset = train_skip_first\n elif mode == ModelMode.TRAIN_SKIP_PREDICT:\n assert inp.data_days >= predict_window + train_window, \"Predict+train window length is larger than total number of days in dataset\"\n self.start_offset = train_skip_first\n elif mode == ModelMode.EVAL or mode == ModelMode.PREDICT:\n self.start_offset = inp.data_days - train_window - back_offset\n if verbose:\n train_start = inp.data_start + pd.Timedelta(self.start_offset, 'D')\n eval_start = train_start + pd.Timedelta(train_window, 'D')\n end = eval_start + pd.Timedelta(predict_window - 1, 'D')\n print(\"Train start %s, predict start %s, end %s\" % (train_start, eval_start, end))\n assert self.start_offset >= 0\n\n self.train_window = train_window\n self.predict_window = predict_window\n self.attn_window = train_window - predict_window + 1\n self.max_train_empty = int(round(train_window * (1 - train_completeness_threshold)))\n self.max_predict_empty = int(round(predict_window * (1 - predict_completeness_threshold)))\n self.mode = mode\n self.verbose = verbose\n\n # Reserve more processing threads for eval/predict because of larger batches\n num_threads = 3 if mode == ModelMode.TRAIN else 6\n\n # Choose right cutter function for current ModelMode\n cutter = {ModelMode.TRAIN: self.cut_train,\n ModelMode.TRAIN_SKIP_PREDICT: self.cut_train_skip_predict,\n ModelMode.EVAL: self.cut_eval,\n ModelMode.PREDICT: self.cut_eval}\n # Create dataset, transform features and assemble batches\n root_ds = tf.data.Dataset.from_tensor_slices(tuple(features)).repeat(n_epoch)\n batch = (root_ds\n .map(cutter[mode])\n .filter(self.reject_filter)\n .map(self.make_features, num_parallel_calls=num_threads)\n .batch(batch_size)\n .prefetch(runs_in_burst * 2)\n )\n\n self.iterator = batch.make_initializable_iterator()\n it_tensors = self.iterator.get_next()\n\n # Assign all tensors to class variables\n self.true_x, self.time_x, self.norm_x, self.lagged_x, self.true_y, self.time_y, self.norm_y, self.norm_mean, \\\n self.norm_std, self.ucdoc_features, self.page_ix = it_tensors\n\n self.encoder_features_depth = self.time_x.shape[2].value", "def feed(self) -> None:", "def test_feed_subclassing(self):\n moksha.feed_cache = FakeCache()\n class MyFeed(Feed):\n url = 'http://lewk.org/rss'\n feed = MyFeed()\n assert feed.url == 'http://lewk.org/rss'\n assert feed.num_entries() > 0\n for entry in feed.iterentries():\n pass\n for entry in feed.get_entries():\n pass", "def test_non_finite_filter_1D(fitter, 
weights):\n\n x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n if weights is not None:\n weights[[1, 4]] = np.nan\n\n with pytest.warns(\n AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\",\n ):\n fit(m_init, x, y, filter_non_finite=True, weights=weights)", "def feed(self):\n # or intelligence discard\n pass", "def test_invalid_source_couchdb(self):\n with self.assertRaises(CloudantFeedException) as cm:\n invalid_feed = [x for x in InfiniteFeed(self.client)]\n self.assertEqual(str(cm.exception),\n 'Infinite _db_updates feed not supported for CouchDB.')", "def feed(ser_if):\n while query_feed(ser_if): # true if empty\n feed_continue = raw_input(\"Card reservoir is empty, continue? [Y/n] > \") or \"Y\"\n if (feed_continue == \"n\") or (feed_continue == \"N\"):\n print(\"Goodbye then. Exiting.\")\n quit()\n ser_if.write('f')\n return check_response(ser_if)", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def feed(self, handle, consumer, do_features=...): # -> bool:\n ...", "def test_feed_creation(self):\n items = []\n feed = Feed(items)\n assert isinstance(feed, Feed)\n assert items == feed.items", "def create_feed_net(dataset, correct, t_len, dims, n_classes):\n with nengo.Network(label=\"feed\") as feed:\n feed.d_f = DataFeed(dataset, correct, t_len, dims, n_classes)\n feed.q_in = nengo.Node(feed.d_f.feed, size_out=dims)\n feed.set_ans = nengo.Node(feed.d_f.set_answer, size_in=n_classes)\n feed.get_ans = nengo.Node(feed.d_f.get_answer, size_out=n_classes)\n\n return feed", "def stream():\n while True:\n yield random_point()", "def test_non_finite_filter_2D(fitter, weights):\n\n x, y = np.mgrid[0:10, 0:10]\n\n m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)\n with NumpyRNGContext(_RANDOM_SEED):\n z = m_true(x, y) + np.random.rand(*x.shape)\n z[0, 0] = np.nan\n z[3, 3] = np.inf\n z[7, 5] = -np.inf\n\n if weights is not None:\n weights[1, 1] = np.nan\n weights[4, 3] = np.inf\n\n m_init = models.Gaussian2D()\n fit = fitter()\n\n with pytest.warns(\n AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\",\n ):\n fit(m_init, x, y, z, filter_non_finite=True, weights=weights)", "def generate_feeds():\n os.makedirs(Config.FEED_ROOT_PATH, exist_ok=True)\n use_batching = Config.DAILY_DIGEST is not None\n\n while True:\n _generate_feeds_once(use_batching=use_batching)\n interval = _interval_between_generating_feeds(Config.REFRESH_INTERVAL_SECONDS, Config.DAILY_DIGEST)\n logging.info('Sleeping %ss before attempting to generate feeds again.', interval)\n time.sleep(interval)", "def test_feeding_less(self):\n available_food = 5\n new_weight = \\\n self.herb.weight + available_food * self.herb.params['beta']\n expected = 0\n nt.assert_equal(self.herb.feeding(available_food), expected)\n nt.assert_equals(self.herb.weight, new_weight)", "def choose_to_stop_early(self):\n # return self.cumulated_num_tests > 10 # Limit to make 10 predictions\n # return np.random.rand() < self.early_stop_proba\n batch_size = 30 # See ingestion program: D_train.init(batch_size=30, repeat=True)\n num_examples = self.metadata_.size()\n num_epochs = self.cumulated_num_steps * batch_size / num_examples\n return num_epochs > self.num_epochs_we_want_to_train # Train for certain number of epochs then stop", "def is_finite(self):\n return 
False", "def isinf(x):\n return False", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "def test_withCountIntervalZero(self):\n clock = task.Clock()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n if len(accumulator) > 4:\n loop.stop()\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n deferred = loop.start(0, now=False)\n\n clock.advance(0)\n self.successResultOf(deferred)\n\n self.assertEqual([1, 1, 1, 1, 1], accumulator)" ]
[ "0.7384605", "0.6958748", "0.68842244", "0.63966936", "0.59803945", "0.55666584", "0.55181646", "0.54984444", "0.5465487", "0.5432391", "0.5425945", "0.5402392", "0.53030765", "0.52970344", "0.5292091", "0.5266229", "0.5241125", "0.5219706", "0.5172429", "0.51707923", "0.5149116", "0.51395273", "0.51379263", "0.5121935", "0.5086929", "0.5085175", "0.507852", "0.5076453", "0.50736624", "0.50687313" ]
0.7717671
0
Test constructing an infinite feed when the continuous feed option is set.
def test_constructor_with_feed_option(self): feed = InfiniteFeed(self.db, chunk_size=1, timeout=100, feed='continuous') self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes'])) self.assertIsInstance(feed._r_session, Session) self.assertFalse(feed._raw_data) self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100}) self.assertEqual(feed._chunk_size, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_constructor_no_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100)\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_infinite_feed(self):\n self.populate_db_with_documents()\n feed = InfiniteFeed(self.db, timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n changes = list()\n for change in feed:\n self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))\n changes.append(change)\n if len(changes) in (100, 200):\n sleep(1) # 1 second > .1 second (timeout)\n self.populate_db_with_documents(off_set=len(changes))\n elif len(changes) == 300:\n feed.stop()\n expected = set(['julia{0:03d}'.format(i) for i in range(300)])\n self.assertSetEqual(set([x['id'] for x in changes]), expected)\n self.assertIsNone(feed.last_seq)\n # Compare infinite/continuous with normal\n normal = Feed(self.db)\n self.assertSetEqual(\n set([x['id'] for x in changes]), set([n['id'] for n in normal]))\n\n # Ensuring that the feed._start method was called 3 times, verifies that\n # the continuous feed was started/restarted 3 separate times.\n self.assertEqual(feed._start.called_count, 3)", "def test_constructor_with_invalid_feed_option(self):\n feed = InfiniteFeed(self.db, feed='longpoll')\n with self.assertRaises(CloudantArgumentError) as cm:\n invalid_feed = [x for x in feed]\n self.assertEqual(\n str(cm.exception),\n 'Invalid infinite feed option: longpoll. Must be set to continuous.'\n )", "def test_constructor_db_updates(self):\n feed = InfiniteFeed(self.client, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.client.server_url, '_db_updates']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def infinite_loop():\n return True", "def test_infinite_db_updates_feed(self):\n feed = InfiniteFeed(self.client, since='now', timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n new_dbs = list()\n try:\n new_dbs.append(self.client.create_database(self.dbname()))\n for change in feed:\n self.assertTrue(all(x in change for x in ('seq', 'type')))\n new_dbs.append(self.client.create_database(self.dbname()))\n if feed._start.called_count >= 3 and len(new_dbs) >= 3:\n feed.stop()\n if len(new_dbs) >= 15:\n # We stop regardless after 15 databases have been created\n feed.stop()\n finally:\n [db.delete() for db in new_dbs]\n # The test is considered a success if feed._start was called 2+ times.\n # If failure occurs it does not necessarily mean that the InfiniteFeed\n # is not functioning as expected, it might also mean that we reached the\n # db limit threshold of 15 before a timeout and restart of the\n # InfiniteFeed could happen.\n self.assertTrue(feed._start.called_count > 1)", "def is_finite(self):\n return False", "def test_finite(self):\n \n Number_of_tests = 1000\n low = -1000\n high = 1000\n for i in range(Number_of_tests):\n x = np.random.rand(100) * (high - low) + low\n y = aux_functions.softmax_base(x)\n\n # This 
should be True if all are finite\n all_finite = np.isfinite(y).all()\n self.assertTrue(all_finite)", "def loop_forever(self):\n while True:\n if self.get_parameter_value(\"publishing_mode\") == \"continuous\":\n self.publishMeasure()", "def isinf(x):\n return False", "def evaluate_continuous(self, init=None):\n res = init\n try:\n while True:\n res = self.evaluate(res)\n except StopIteration:\n return res", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def choose_to_stop_early(self):\n # return self.cumulated_num_tests > 10 # Limit to make 10 predictions\n # return np.random.rand() < self.early_stop_proba\n batch_size = 30 # See ingestion program: D_train.init(batch_size=30, repeat=True)\n num_examples = self.metadata_.size()\n num_epochs = self.cumulated_num_steps * batch_size / num_examples\n return num_epochs > self.num_epochs_we_want_to_train # Train for certain number of epochs then stop", "def test_non_finite_filter_1D(fitter, weights):\n\n x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n if weights is not None:\n weights[[1, 4]] = np.nan\n\n with pytest.warns(\n AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\",\n ):\n fit(m_init, x, y, filter_non_finite=True, weights=weights)", "def isfinite(data):\n return _make.isfinite(data)", "def feed() -> None:\n ...", "def is_finite(self) -> bool:\n normal = self.to_normal_form()\n di_graph = nx.DiGraph()\n for production in normal.productions:\n body = production.body\n if len(body) == 2:\n di_graph.add_edge(production.head, body[0])\n di_graph.add_edge(production.head, body[1])\n try:\n nx.find_cycle(di_graph, orientation=\"original\")\n except nx.exception.NetworkXNoCycle:\n return True\n return False", "def test_non_finite_filter_2D(fitter, weights):\n\n x, y = np.mgrid[0:10, 0:10]\n\n m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)\n with NumpyRNGContext(_RANDOM_SEED):\n z = m_true(x, y) + np.random.rand(*x.shape)\n z[0, 0] = np.nan\n z[3, 3] = np.inf\n z[7, 5] = -np.inf\n\n if weights is not None:\n weights[1, 1] = np.nan\n weights[4, 3] = np.inf\n\n m_init = models.Gaussian2D()\n fit = fitter()\n\n with pytest.warns(\n AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\",\n ):\n fit(m_init, x, y, z, filter_non_finite=True, weights=weights)", "def isinfinite(self) -> bool:\n return self._ranges.first.value.start == -Inf or self._ranges.last.value.end == Inf", "def test_continuous():\n # assert the distribution of the samples is close to the distribution of the data\n # using kstest:\n # - uniform (assert p-value > 0.05)\n # - gaussian (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def test_feed_generator(self):\n moksha.feed_cache = FakeCache()\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()", "def test_withCountIntervalZero(self):\n clock = task.Clock()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n if len(accumulator) > 4:\n loop.stop()\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n deferred = loop.start(0, now=False)\n\n clock.advance(0)\n self.successResultOf(deferred)\n\n self.assertEqual([1, 
1, 1, 1, 1], accumulator)", "def __init__(self, inp: VarFeeder, features: Iterable[tf.Tensor], n_pages: int, mode: ModelMode, n_epoch=None,\n batch_size=127, runs_in_burst=1, verbose=True, predict_window=60, train_window=500,\n train_completeness_threshold=1, predict_completeness_threshold=1, back_offset=0,\n train_skip_first=0, rand_seed=None):\n self.n_pages = n_pages\n self.inp = inp\n self.batch_size = batch_size\n self.rand_seed = rand_seed\n self.back_offset = back_offset\n if verbose:\n print(\"Mode:%s, data days:%d, Data start:%s, data end:%s, features end:%s \" % (\n mode, inp.data_days, inp.data_start, inp.data_end, inp.features_end))\n\n if mode == ModelMode.TRAIN:\n # reserve predict_window at the end for validation\n assert inp.data_days - predict_window > predict_window + train_window, \\\n \"Predict+train window length (+predict window for validation) is larger than total number of days in dataset\"\n self.start_offset = train_skip_first\n elif mode == ModelMode.TRAIN_SKIP_PREDICT:\n assert inp.data_days >= predict_window + train_window, \"Predict+train window length is larger than total number of days in dataset\"\n self.start_offset = train_skip_first\n elif mode == ModelMode.EVAL or mode == ModelMode.PREDICT:\n self.start_offset = inp.data_days - train_window - back_offset\n if verbose:\n train_start = inp.data_start + pd.Timedelta(self.start_offset, 'D')\n eval_start = train_start + pd.Timedelta(train_window, 'D')\n end = eval_start + pd.Timedelta(predict_window - 1, 'D')\n print(\"Train start %s, predict start %s, end %s\" % (train_start, eval_start, end))\n assert self.start_offset >= 0\n\n self.train_window = train_window\n self.predict_window = predict_window\n self.attn_window = train_window - predict_window + 1\n self.max_train_empty = int(round(train_window * (1 - train_completeness_threshold)))\n self.max_predict_empty = int(round(predict_window * (1 - predict_completeness_threshold)))\n self.mode = mode\n self.verbose = verbose\n\n # Reserve more processing threads for eval/predict because of larger batches\n num_threads = 3 if mode == ModelMode.TRAIN else 6\n\n # Choose right cutter function for current ModelMode\n cutter = {ModelMode.TRAIN: self.cut_train,\n ModelMode.TRAIN_SKIP_PREDICT: self.cut_train_skip_predict,\n ModelMode.EVAL: self.cut_eval,\n ModelMode.PREDICT: self.cut_eval}\n # Create dataset, transform features and assemble batches\n root_ds = tf.data.Dataset.from_tensor_slices(tuple(features)).repeat(n_epoch)\n batch = (root_ds\n .map(cutter[mode])\n .filter(self.reject_filter)\n .map(self.make_features, num_parallel_calls=num_threads)\n .batch(batch_size)\n .prefetch(runs_in_burst * 2)\n )\n\n self.iterator = batch.make_initializable_iterator()\n it_tensors = self.iterator.get_next()\n\n # Assign all tensors to class variables\n self.true_x, self.time_x, self.norm_x, self.lagged_x, self.true_y, self.time_y, self.norm_y, self.norm_mean, \\\n self.norm_std, self.ucdoc_features, self.page_ix = it_tensors\n\n self.encoder_features_depth = self.time_x.shape[2].value", "def test_invalid_source_couchdb(self):\n with self.assertRaises(CloudantFeedException) as cm:\n invalid_feed = [x for x in InfiniteFeed(self.client)]\n self.assertEqual(str(cm.exception),\n 'Infinite _db_updates feed not supported for CouchDB.')", "def test_setting_continuous_processing(processor):\n processor.continuous_processing = False\n assert not processor._state.test('continuous_processing')\n processor.continuous_processing = True\n assert 
processor._state.test('continuous_processing')", "def test_withCountIntervalZeroDelayThenNonZeroInterval(self):\n clock = task.Clock()\n deferred = defer.Deferred()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n if len(accumulator) == 2:\n return deferred\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n loop.start(0, now=False)\n\n # Even if a lot of time pass, loop will block at the third call.\n clock.advance(10)\n self.assertEqual([1, 1], accumulator)\n\n # When a new interval is set, once the waiting call got a result the\n # loop continues with the new interval.\n loop.interval = 2\n deferred.callback(None)\n\n # It will count skipped steps since the last loop call.\n clock.advance(7)\n self.assertEqual([1, 1, 3], accumulator)\n\n clock.advance(2)\n self.assertEqual([1, 1, 3, 1], accumulator)\n\n clock.advance(4)\n self.assertEqual([1, 1, 3, 1, 2], accumulator)", "def isfinite ( x ) : \n y = float ( x ) \n return ( not math.isinf ( y ) ) and ( not math.isnan ( y ) )", "def isinf(data):\n return _make.isinf(data)", "def test_non_finite_error(fitter, weights):\n\n x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n # Raise warning, notice fit fails due to nans\n with pytest.raises(\n NonFiniteValueError, match=r\"Objective function has encountered.*\"\n ):\n fit(m_init, x, y, weights=weights)", "def isInfinite(value):\n if value == float('inf') or value == float('-inf'):\n return True\n return False" ]
[ "0.7359666", "0.72050756", "0.6745793", "0.5607414", "0.55818623", "0.5508452", "0.5485362", "0.54570514", "0.54559946", "0.53594476", "0.5344274", "0.53138936", "0.5304105", "0.5261766", "0.52536184", "0.5185192", "0.51846963", "0.5160076", "0.5123175", "0.51123613", "0.5069149", "0.50471556", "0.50462496", "0.5043527", "0.5028654", "0.5017404", "0.4985839", "0.49768892", "0.49707317", "0.49556038" ]
0.74537617
0
Test constructing an infinite feed when a feed option is set to an invalid value raises an exception.
def test_constructor_with_invalid_feed_option(self): feed = InfiniteFeed(self.db, feed='longpoll') with self.assertRaises(CloudantArgumentError) as cm: invalid_feed = [x for x in feed] self.assertEqual( str(cm.exception), 'Invalid infinite feed option: longpoll. Must be set to continuous.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def test_constructor_no_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100)\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_constructor_with_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_invalid_source_couchdb(self):\n with self.assertRaises(CloudantFeedException) as cm:\n invalid_feed = [x for x in InfiniteFeed(self.client)]\n self.assertEqual(str(cm.exception),\n 'Infinite _db_updates feed not supported for CouchDB.')", "def test_infinite_feed(self):\n self.populate_db_with_documents()\n feed = InfiniteFeed(self.db, timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n changes = list()\n for change in feed:\n self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))\n changes.append(change)\n if len(changes) in (100, 200):\n sleep(1) # 1 second > .1 second (timeout)\n self.populate_db_with_documents(off_set=len(changes))\n elif len(changes) == 300:\n feed.stop()\n expected = set(['julia{0:03d}'.format(i) for i in range(300)])\n self.assertSetEqual(set([x['id'] for x in changes]), expected)\n self.assertIsNone(feed.last_seq)\n # Compare infinite/continuous with normal\n normal = Feed(self.db)\n self.assertSetEqual(\n set([x['id'] for x in changes]), set([n['id'] for n in normal]))\n\n # Ensuring that the feed._start method was called 3 times, verifies that\n # the continuous feed was started/restarted 3 separate times.\n self.assertEqual(feed._start.called_count, 3)", "def test_epsf_build_invalid_fitter(self):\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=EPSFFitter, maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)", "def check_throw():\n while True:\n try:\n yield\n except ValueError:\n pass", "def test_exct_on_infinity_loop(self):\n with self.assertRaises(ExecutionException):\n pyint = Interpreter()\n pyint.run(code=BF_INFINITY_LOOP)", "def test_non_finite_error(fitter, weights):\n\n x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n # Raise warning, notice fit fails due to nans\n with pytest.raises(\n NonFiniteValueError, match=r\"Objective function has encountered.*\"\n ):\n fit(m_init, x, y, weights=weights)", "def testSetWithNegativeInt(self):\n def setSat():\n self.node.sat = -20\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.node.sat\n )", "def test_missing_values():\n data = 
CategoryBasedSpiderData(\"some-progress-file-dir\", \"some-spider-name\", \"http://some-recipes.com\")\n\n with pytest.raises(ValueError):\n CategoryBasedSpider(Mock(), Mock(), None)\n\n with pytest.raises(ValueError):\n CategoryBasedSpider(Mock(), None, data)\n\n with pytest.raises(ValueError):\n data.start_url = None\n CategoryBasedSpider(Mock(), Mock(), data)", "def testCatchInfinityInDatasetMapFunction(self):\n check_numerics_callback.enable_check_numerics()\n\n def generate_nan(x):\n \"\"\"Intentionally generates NaNs by taking log of negative number.\"\"\"\n casted_x = math_ops.cast(x, dtypes.float32)\n return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x\n\n dataset = dataset_ops.Dataset.range(10).map(generate_nan)\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(iterator.get_next()))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float32\", message))\n self.assertIn(\"shape: (2, 2)\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Log/x:0\", message))\n self.assertIn(\"generate_nan\", message)", "def test_init_err_limit(self):\n with self.assertRaises(InitializationException):\n pyint = Interpreter(limit=INVALID_LIMIT)", "def test_invalid(self):\n x = np.array([-5, -3, -2, -2, 100])\n with self.assertRaises(ValueError):\n npinterval.interval(x, 1.01)\n with self.assertRaises(ValueError):\n npinterval.interval(x, 0)", "def testSetWithNegativeFloat(self):\n def setSat():\n self.node.sat = -20.1\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.node.sat\n )", "def test_setup_raises(fold, fh, fold_strategy, load_pos_and_neg_data):\n\n from pycaret.time_series import setup\n\n with pytest.raises(ValueError) as errmsg:\n _ = setup(\n data=load_pos_and_neg_data,\n fold=fold,\n fh=fh,\n fold_strategy=fold_strategy,\n )\n\n exceptionmsg = errmsg.value.args[0]\n\n assert exceptionmsg == \"Not Enough Data Points, set a lower number of folds or fh\"", "def test_float_range_3():\n try:\n float_range('foobar')\n assert False # Should be unreachable\n except Exception:\n pass", "def test_invalid_strategy(self):\r\n with pytest.raises(ValueError, match=\"Unknown strategy\"):\r\n finite_diff_coeffs(1, 1, 1)", "def test_invalid_furl(self, mocked_furl):\n mocked_furl.side_effect = ValueError(\"error\")\n with self.assertRaises(URLParameterError):\n create_url(host=\"www.example.com\", scheme_no_ssl=\"http\")", "def test_bad_iterations(self):\r\n with pytest.raises(ValueError, match=\"Number of iterations must be a positive int\"):\r\n clique.search(clique=[0, 1, 2, 3], graph=nx.complete_graph(5), iterations=-1)", "def test_notrunerror(self, MetricClass):\n m = MetricClass()\n with pytest.raises(NotRunError):\n RandomTrader(seed=42).evaluate(m)", "def testSatSetNegative(self):\n def setSat():\n self.cc.sat = -376.23\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.cc.sat\n )", "def test_value_error(self):\n self._error_test(ValueError)", "def test_inf_treatment(self):\n values_with_infs = np.array([1, 2, 3, -np.inf, +np.inf, +np.inf])\n\n with self.subTest(\n \"Test if the warning for 
number of inf values is raised in hist_w_unc\"\n ):\n with LogCapture(\"puma\") as log:\n _ = hist_w_unc(values_with_infs, bins=np.linspace(0, 3, 3))\n log.check(\n (\n \"puma\",\n \"WARNING\",\n \"Histogram values contain 3 +-inf values!\",\n )\n )\n with self.subTest(\n \"Test if error is raised if inf values are in input but no range is defined\"\n ), self.assertRaises(ValueError):\n hist_w_unc(values_with_infs, bins=10)", "def test_float_range_2():\n try:\n float_range('2.0')\n assert False # Should be unreachable\n except Exception:\n pass", "def _fail(url, reason):\n LOG.debug(\"Failed to parse feed '{}': {}\".format(url, reason))\n raise FeedParseError(reason)", "def test_error_if_negative_more_than_population(self):\n model = PoincareModel(self.data, negative=5)\n with self.assertRaises(ValueError):\n model.train(epochs=1)", "def test_invalid_instantiation(invalid_instance):\n with pytest.raises(ValueError):\n invalid_instance()", "def test_rng_invalid_value(self):\n with pytest.raises(ValueError) as exc:\n check_random_state(\"oh_no_oh_no\")\n\n assert \"'oh_no_oh_no' cannot be used to seed\" in str(exc.value)", "def test_init_TypeError_when_window_size_is_not_int():\n window_size = 'not_valid_type'\n err_msg = re.escape(\n f\"Argument `window_size` must be an int. Got {type(window_size)}.\"\n )\n with pytest.raises(TypeError, match = err_msg):\n ForecasterAutoregMultiSeriesCustom(\n regressor = LinearRegression(),\n fun_predictors = create_predictors,\n window_size = window_size\n )" ]
[ "0.72401696", "0.70973885", "0.6830153", "0.6814605", "0.6310616", "0.6292677", "0.6060589", "0.6053885", "0.6037701", "0.5884711", "0.5862154", "0.58607936", "0.5860144", "0.5858658", "0.5857198", "0.578971", "0.5787647", "0.5776569", "0.5743222", "0.5715131", "0.5706377", "0.56917304", "0.56885743", "0.56875116", "0.56575453", "0.56530166", "0.56489015", "0.56318724", "0.561436", "0.56115407" ]
0.8217915
0
Ensure that a CouchDB client cannot be used with an infinite feed.
def test_invalid_source_couchdb(self): with self.assertRaises(CloudantFeedException) as cm: invalid_feed = [x for x in InfiniteFeed(self.client)] self.assertEqual(str(cm.exception), 'Infinite _db_updates feed not supported for CouchDB.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_constructor_with_invalid_feed_option(self):\n feed = InfiniteFeed(self.db, feed='longpoll')\n with self.assertRaises(CloudantArgumentError) as cm:\n invalid_feed = [x for x in feed]\n self.assertEqual(\n str(cm.exception),\n 'Invalid infinite feed option: longpoll. Must be set to continuous.'\n )", "def test_constructor_no_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100)\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_infinite_feed(self):\n self.populate_db_with_documents()\n feed = InfiniteFeed(self.db, timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n changes = list()\n for change in feed:\n self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))\n changes.append(change)\n if len(changes) in (100, 200):\n sleep(1) # 1 second > .1 second (timeout)\n self.populate_db_with_documents(off_set=len(changes))\n elif len(changes) == 300:\n feed.stop()\n expected = set(['julia{0:03d}'.format(i) for i in range(300)])\n self.assertSetEqual(set([x['id'] for x in changes]), expected)\n self.assertIsNone(feed.last_seq)\n # Compare infinite/continuous with normal\n normal = Feed(self.db)\n self.assertSetEqual(\n set([x['id'] for x in changes]), set([n['id'] for n in normal]))\n\n # Ensuring that the feed._start method was called 3 times, verifies that\n # the continuous feed was started/restarted 3 separate times.\n self.assertEqual(feed._start.called_count, 3)", "def test_infinite_db_updates_feed(self):\n feed = InfiniteFeed(self.client, since='now', timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n new_dbs = list()\n try:\n new_dbs.append(self.client.create_database(self.dbname()))\n for change in feed:\n self.assertTrue(all(x in change for x in ('seq', 'type')))\n new_dbs.append(self.client.create_database(self.dbname()))\n if feed._start.called_count >= 3 and len(new_dbs) >= 3:\n feed.stop()\n if len(new_dbs) >= 15:\n # We stop regardless after 15 databases have been created\n feed.stop()\n finally:\n [db.delete() for db in new_dbs]\n # The test is considered a success if feed._start was called 2+ times.\n # If failure occurs it does not necessarily mean that the InfiniteFeed\n # is not functioning as expected, it might also mean that we reached the\n # db limit threshold of 15 before a timeout and restart of the\n # InfiniteFeed could happen.\n self.assertTrue(feed._start.called_count > 1)", "def test_constructor_db_updates(self):\n feed = InfiniteFeed(self.client, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.client.server_url, '_db_updates']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_get_stream_too_many_requests(req):\n req.get(ENTREZ_URL, text=u'Whoa, slow down', status_code=429, headers={\"Retry-After\": \"2\"})\n params = dict(id='FAKE')\n with pytest.raises(TooManyRequests):\n core.get_stream(ENTREZ_URL, params)", "def test_limit_items(self):\n 
AnnouncementFactory(\n title=\"Not going to be there\",\n expires_at=timezone.now() - datetime.timedelta(days=1),\n )\n for i in range(5):\n AnnouncementFactory()\n\n response = self.get(\"announcements:feed\")\n\n assert \"Not going to be there\" not in response.content.decode()", "def __ensure_fetching_rate_limit(self) -> None:\n current = datetime.now()\n difference = current - self.fetched_last\n time_to_wait = FETCH_MINIMUM_WAIT_SECONDS - difference.total_seconds()\n if time_to_wait > 0:\n time.sleep(time_to_wait)\n\n self.fetched_last = datetime.now()", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def check_limit(redis_client):\n if redis_client.llen('query_counter') >= API_RATE_LIMIT:\n left_val = redis_client.lpop('query_counter')\n parsed_left_val = float(left_val.decode('utf-8'))\n current_api_window = (datetime.utcnow() - timedelta(minutes=API_WINDOW_PERIOD)).timestamp()\n if parsed_left_val > current_api_window:\n redis_client.lpush('query_counter', left_val)\n return False\n return True", "def test_graph_list_bad(self):\n fuseki = GraphStore()\n with self.assertRaises(ConnectionError):\n fuseki._graph_list()", "def test_constructor_with_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def _not_exhausted(last_fetched):\n return len(last_fetched) == 100", "def test_feeds_do_not_exist(self):\n rv = self.client.get('/user/who.xml')\n eq_(rv.status_code, 404)\n\n rv = self.client.get('/project/fake.xml')\n eq_(rv.status_code, 404)\n\n rv = self.client.get('/team/not-real.xml')\n eq_(rv.status_code, 404)", "def check(self):\n self.__check_request_limit()", "def bad_client(): # pylint: disable=missing-param-doc,missing-type-doc\n api = CometApi()\n app = api.create_app()\n with app.app_context():\n yield app.test_client()", "def test_graph_retrieve_bad(self):\n fuseki = GraphStore()\n with self.assertRaises(ConnectionError):\n fuseki._graph_retrieve(\"default\")", "def test_live_request_to_draft_index_fails(self):\n self.setup_inline_index(live=False)\n\n response = self.client.get(self.inline_index.url)\n\n self.assertEqual(response.status_code, 404)", "def test_bad_iterations(self):\r\n with pytest.raises(ValueError, match=\"Number of iterations must be a positive int\"):\r\n clique.search(clique=[0, 1, 2, 3], graph=nx.complete_graph(5), iterations=-1)", "def unable_to_crawl(self) -> bool:\n return pulumi.get(self, \"unable_to_crawl\")", "def _cb_ignore_read_exception(self, exception, client):\n return False", "def _cb_ignore_read_exception(self, exception, client):\n return False", "def cdb_check():\n logfile = 'janusess'\n logger = logging.getLogger(logfile)\n\n check_time = 0.5\n\n log = 'Checking CouchDB every {0} sec until operational.'.format(check_time)\n logger.debug(log)\n\n count = 1\n while True:\n\n # Issue CouchDB GET request and process result\n http_resp = requests.get('http://127.0.0.1:5984/')\n\n # Successful GET request\n if http_resp.status_code == 200:\n log = 'CouchDB is operational.'\n 
logger.info(log)\n MPQ_ACT.put_nowait([\n datetime.now().isoformat(' '),\n 'INFO',\n log\n ])\n MPQ_STAT.put_nowait([\n 'base',\n [\n 'couchdb',\n STAT_LVL['op']\n ]\n ])\n break\n\n # All GET errors\n else:\n log = 'CouchDB is not operational, failed with http ' +\\\n 'response {0}. Making another attempt.'.format(http_resp.status_code)\n logger.warning(log)\n MPQ_ACT.put_nowait([\n datetime.now().isoformat(' '),\n 'WARNING',\n log\n ])\n MPQ_STAT.put_nowait([\n 'base',\n [\n 'couchdb',\n STAT_LVL['cfg_err']\n ]\n ])\n\n count += count\n time.sleep(check_time)", "def _check_empty_feed(self, items, rest_of_world):\n if not items or (len(items) == 1 and items[0].get('shelf')):\n # Empty feed.\n if rest_of_world:\n return -1\n return 0\n return 1", "def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400", "def test_empty_db(client):\n\n get = client.get('/')\n assert b'natural_wonders_api_docs' in get.data", "def infinite_loop():\n return True", "def limit_handled(cursor):\n # TODO: possibly need this function to limit request frequency\n while True:\n try:\n yield cursor.next()\n except tweepy.RateLimitError:\n time.sleep(60)", "def test_query_train_jobs_with_exceeded_limit(self, client):\n params = dict(offset=0, limit=1000)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'", "def _prevent_dos(cls):\n if cls._last_query is not None:\n if time.time() - cls._last_query < cls._dos_timeout:\n raise RuntimeError(\"Too many scheduler requests within a short time!\")\n cls._last_query = time.time()" ]
[ "0.6145271", "0.6022078", "0.58353573", "0.57538754", "0.5435746", "0.536181", "0.53121054", "0.52930915", "0.51392436", "0.51142395", "0.5085797", "0.5076746", "0.50685304", "0.5026746", "0.49882182", "0.4977487", "0.49086696", "0.4906861", "0.48924825", "0.48916247", "0.48899135", "0.48586243", "0.48347947", "0.48160127", "0.48095092", "0.47944078", "0.47933042", "0.4789151", "0.47807384", "0.4774032" ]
0.7346228
0