Dataset columns:
  query            string, 9 to 9.05k characters
  document         string, 10 to 222k characters
  metadata         dict
  negatives        sequence, length 30
  negative_scores  sequence, length 30
  document_score   string, 4 to 10 characters
  document_rank    string, 2 distinct values
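A minimal sketch (not part of the dataset) of loading and inspecting rows with the column layout above, assuming the dump comes from a Hugging Face datasets repository; the dataset path is a placeholder, not the real identifier.

from datasets import load_dataset

ds = load_dataset("your-org/your-code-search-dataset", split="train")  # placeholder path

row = ds[0]
print(row["query"])             # natural-language description of the code
print(row["document"])          # the positive code snippet for the query
print(len(row["negatives"]))    # 30 negatives per row, per the schema above
print(row["document_score"], row["document_rank"])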
Get a single database
def get_database(self, database, instance=None):
    return self._get(_database.Database, database)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)", "def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]", "def get_database(self):\n return self.database", "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_database(self, database=None):\n\t\tdatabase = database if database !=None else self.database\n\t\t\n\t\tif self._database is None:\n\t\t\tconn = self.get_connection()\n\t\t\tdb = conn[database]\n\t\t\tself._database = db\n\t\t\n\t\treturn self._database", "def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]", "def get_db(db_label):\n defaults = get_defaults()\n db_name = defaults[db_label]\n m = re.match('(\\w+)://.*?/([\\w.]+)', db_name)\n if m is None:\n logger.error(\"Poorly formed db name: %s\" % db_name)\n return\n sqltype = m.groups()[0]\n return DatabaseManager(db_name, sqltype=sqltype, label=db_label)", "def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_database() -> StandardDatabase:\n client = get_arango_client()\n return client.db(DB_NAME, username=ARANGO_USER, password=ARANGO_PASSWORD)", "def get_database(self):\n if self._database is None:\n conn = self.get_connection()\n db = conn[self.database]\n self._database = db\n\n return self._database", "def database():\n return conf().database", "def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(current_app.config['DB_NAME'])\n return db", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(str(current_app.config['DATABASE']))\n return g.db", "def get_db():\n\tpath = get_path_db()\n\tif path is None:\n\t\tprint(\"\\n=> Info - Cannot fetch database yet because it has not been configured.\\n\")\n\telse:\n\t\tdb = SqliteExtDatabase(path)\n\t\treturn db", "def getDb(self):\n return self.db", "def get_database(database: Optional[str] = None,\n instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:\n __args__ = dict()\n __args__['database'] = database\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = 
pulumi.runtime.invoke('google-native:sqladmin/v1beta4:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value\n\n return AwaitableGetDatabaseResult(\n charset=pulumi.get(__ret__, 'charset'),\n collation=pulumi.get(__ret__, 'collation'),\n etag=pulumi.get(__ret__, 'etag'),\n instance=pulumi.get(__ret__, 'instance'),\n kind=pulumi.get(__ret__, 'kind'),\n name=pulumi.get(__ret__, 'name'),\n project=pulumi.get(__ret__, 'project'),\n self_link=pulumi.get(__ret__, 'self_link'),\n sqlserver_database_details=pulumi.get(__ret__, 'sqlserver_database_details'))", "def get_db(db=None):\n if db is None:\n db = ideagenstest\n return get_mongodb(db['url'],\n db['port'],\n db['dbName'],\n db['user'],\n db['pswd'])", "def get_db(self):\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect(DATABASE)\n return db", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db", "def get_db():\n if \"db\" not in g:\n host = current_app.config[\"HOST\"]\n dbname = current_app.config[\"DATABASE\"]\n #params = \"host='{}' dbname='{}' user=root\".format(host, dbname)\n params = \"dbname='{}' user=root\".format(dbname)\n g.db = psycopg2.connect(params)\n # 'g.db' corresponsds to a DB conn\n return g.db", "def database(dburl=None, **params):\n if not dburl and not params:\n dburl = os.environ['DATABASE_URL']\n if dburl:\n params = dburl2dict(dburl)\n dbn = params.pop('dbn')\n if dbn in _databases:\n return _databases[dbn](**params)\n else:\n raise UnknownDB, dbn", "def get_db(database):\n db = getattr(g, '_database', None)\n if db is None:\n intents_db = IntentsDatabaseEngine()\n expressions_db = ExpressionsDatabaseEngine()\n database_dict = {'intents': intents_db,\n 'expressions': expressions_db}\n g._database = db = database_dict\n return db[database]", "def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db" ]
[ "0.8189091", "0.7899076", "0.7844062", "0.7762001", "0.7726166", "0.772395", "0.7723282", "0.7685286", "0.765148", "0.762424", "0.7616151", "0.76047456", "0.7597356", "0.7583495", "0.7570153", "0.75534976", "0.74211323", "0.74180853", "0.7410544", "0.7392949", "0.7318623", "0.7315056", "0.73138237", "0.731299", "0.7289476", "0.7260866", "0.7227758", "0.72201204", "0.7200716", "0.71671736" ]
0.7978113
1
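The metadata field declares a triplet objective over ("query", "document", "negatives"). A minimal, dependency-free sketch (not part of the dataset) of expanding one such row into (anchor, positive, negative) training triplets; the row dict shape is assumed from the columns shown above.

def row_to_triplets(row):
    # Follow the row's own metadata: each triplet spec names the anchor column,
    # the positive column, and the column holding the list of negatives.
    triplets = []
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor, positive = row[anchor_col], row[positive_col]
        for negative in row[negatives_col]:
            triplets.append((anchor, positive, negative))
    return triplets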
Get a single flavor
def get_flavor(self, flavor):
    return self._get(_flavor.Flavor, flavor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def get_flavor(self, flavor_id):\n return self._flavor_manager.get(flavor_id)", "def get_flavor(self, flavor_id):\n url = '%s/flavors/%s' % (self.catalog['compute'], flavor_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavor']\n else:\n LOG.error('Get flavor failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def flavor(self):\n return self._flavor", "def get_flavor_by_uuid(cls, flavor_uuid):\n return cls.dbdriver.get_flavor_by_uuid(flavor_uuid)", "def get_flavor(self, request, tenant_id, flavor_id):\n response_data = get_flavor(flavor_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])", "def get_flavor(flavor_id, include_deleted=False):\n\n try:\n flavor_id = int(flavor_id)\n if include_deleted:\n return Flavor.objects.get(id=flavor_id)\n else:\n return Flavor.objects.get(id=flavor_id, deleted=include_deleted)\n except (ValueError, TypeError):\n raise faults.BadRequest(\"Invalid flavor ID '%s'\" % flavor_id)\n except Flavor.DoesNotExist:\n raise faults.ItemNotFound('Flavor not found.')", "def flavor(self, name=None):\n raise NotImplementedError", "def test_get_flavor(self):\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertEqual(self.flavor_ref, flavor.id)", "def _existing_flavor(self):\n return instance_types.get_all_types().keys()[0]", "def test_aws_service_api_flavor_get(self):\n pass", "def find_flavor(self, name_or_id, ignore_missing=True):\n return self._find(\n _flavor.Flavor, name_or_id, ignore_missing=ignore_missing\n )", "def find_flavor(self, name_or_id, ignore_missing=False):\n return self._find(_flavor.Flavor, name_or_id,\n ignore_missing=ignore_missing)", "def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()", "def flavor_id(self):\n return self._flavor_id", "def flavor_id(self):\n return self._flavor_id", "def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor", "def get_flavor_id(self, flavor_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \\\n \"/flavors/detail\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while getting flavors.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get flavor ID Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n\n for flavors in output['flavors']:\n if flavors['name'].lower() == flavor_name.lower():\n LOG_OBJ.debug(\"Flavor Name: %s, ID: %s\" % (flavor_name,\n flavors['id']))\n return flavors['id']\n\n LOG_OBJ.error(\"Flavor:%s is NOT found\" % flavor_name)", "def get(self, request, flavor_id):\n conn = get_sdk_connection(request)\n flavor = conn.load_balancer.find_flavor(flavor_id)\n return _get_sdk_object_dict(flavor)", "def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get_list(self.cloudman.compute.flavors(),\n kind=\"flavor\")\n if 
\"name\" in kwargs:\n result = self.flavor(name=kwargs['name'])\n\n else:\n result = self.get_list(self.cloudman.compute.flavors(**kwargs),\n kind=\"flavor\")\n\n return result", "def _get_flavor_ref(self, flavor):\n flavor_obj = None\n if isinstance(flavor, CloudDatabaseFlavor):\n flavor_obj = flavor\n elif isinstance(flavor, int):\n # They passed an ID or a size\n try:\n flavor_obj = self.get_flavor(flavor)\n except exc.NotFound:\n # Must be either a size or bad ID, which will\n # be handled below\n pass\n if flavor_obj is None:\n # Try flavor name\n flavors = self.list_flavors()\n try:\n flavor_obj = [flav for flav in flavors\n if flav.name == flavor][0]\n except IndexError:\n # No such name; try matching RAM\n try:\n flavor_obj = [flav for flav in flavors\n if flav.ram == flavor][0]\n except IndexError:\n raise exc.FlavorNotFound(\"Could not determine flavor from \"\n \"'%s'.\" % flavor)\n # OK, we have a Flavor object. Get the href\n href = [link[\"href\"] for link in flavor_obj.links\n if link[\"rel\"] == \"self\"][0]\n return href", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def show_flavors():\n return get_flavors()", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def find_flavor_id(flavor_name: str):\n for flavor in get_flavors()[\"flavors\"]:\n if flavor_name == flavor[\"name\"]:\n return flavor[\"id\"]\n\n raise AttributeError(f\"No flavor '{flavor_name}' found\")", "def flavor(self, flavor):\n self._flavor = flavor", "def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)", "def flavors(self, **query):\n return self._list(_flavor.Flavor, **query)" ]
[ "0.82900196", "0.8277593", "0.8069578", "0.789696", "0.7692458", "0.7663924", "0.75685847", "0.7417431", "0.7335429", "0.72927356", "0.71877533", "0.71117985", "0.69786406", "0.6909632", "0.6736771", "0.67233574", "0.67233574", "0.66672426", "0.66635484", "0.6631124", "0.66134924", "0.6592379", "0.6573417", "0.6547451", "0.63565516", "0.63455194", "0.63276637", "0.63066673", "0.6249983", "0.6176625" ]
0.86241716
1
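The negative_scores and document_score columns make it possible to spot likely false negatives: in the first row above, the strongest negative (0.8189) scores higher than the positive document (0.7978). A sketch (not part of the dataset) that drops such negatives before training; it assumes the scores are comparable retrieval similarities and, per the schema, casts the string-typed values to float.

def filter_likely_false_negatives(row, margin=0.0):
    # Keep only negatives that score below the positive document; a positive
    # margin also discards negatives that come too close to it.
    positive_score = float(row["document_score"])  # stored as a string per the schema
    return [
        negative
        for negative, score in zip(row["negatives"], row["negative_scores"])
        if float(score) < positive_score - margin
    ]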
Return a generator of flavors
def flavors(self, **query):
    return self._list(_flavor.Flavor, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flavors(self, **kwargs):\n raise NotImplementedError", "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get_list(self.cloudman.compute.flavors(),\n kind=\"flavor\")\n if \"name\" in kwargs:\n result = self.flavor(name=kwargs['name'])\n\n else:\n result = self.get_list(self.cloudman.compute.flavors(**kwargs),\n kind=\"flavor\")\n\n return result", "def show_flavors():\n return get_flavors()", "def list_flavors(cls):\n return cls.dbdriver.list_flavors()", "def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))", "def list_flavors(self, limit=None, marker=None):\n return self._flavor_manager.list(limit=limit, marker=marker)", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def get_changed_flavors(changed_files, flavors):\n changed_flavors = []\n for f in changed_files:\n pattern = r\"^(mlflow|tests)/(.+?)(_autolog(ging)?)?(\\.py|/)\"\n # ~~~~~\n # # This group captures a flavor name\n match = re.search(pattern, f)\n\n if (match is not None) and (match.group(2) in flavors):\n changed_flavors.append(match.group(2))\n\n return changed_flavors", "def flavors(request): # pylint: disable=unused-argument\n # We call our method\n response = BACKEND.flavors()\n return JsonResponse(response)", "def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")", "def FlavorHashes(versions, flavor):\n if isinstance(flavor, tuple):\n return [HashSelect(versions, i) for i in flavor[1:]]\n else:\n return [HashSelect(versions, flavor)]", "def _generate_benchmark_variants(benchmark_spec):\n variants = []\n # Cold start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (cold start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args',\n []) + _COLD_START_SHELL_ARGS})\n # Warm start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (warm start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args', [])})\n return variants", "def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()", "def _variants_gen(self, test):\n return self._get_variants_gen(test).gen(test)", "def flavor(self, name=None):\n raise NotImplementedError", "def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor", "def gen_flavor_parameters(self, servers):\n\n # get all the flavors\n flavors = self.novaclient.flavors.list()\n server_flavors = set([x.flavor[\"id\"] for x in servers])\n self.set_of_flavors = set(filter(lambda flavor: flavor.id in server_flavors, flavors))\n flavor_idx = \"\"\n for idx, flavor in enumerate(self.set_of_flavors):\n 
data = {\"type\": \"string\",\n \"description\": \"Flavor to use for servers\",\n \"default\": flavor.name}\n self.compute_data[\"parameters\"][\"flavor%s\" % flavor_idx] = data\n if len(self.set_of_flavors) >= 1:\n flavor_idx = str(1+idx)", "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def get_initial_spectra(self, t, E, flavors=Flavor):\n pass", "def test_aws_service_api_flavors_get(self):\n pass", "def display_flavor(self):\n print(\"\\nWe currently count with the next flavors:\")\n for flavor in self.flavors:\n print(f\"{flavor}\")", "def getTransferDataFlavors(self) -> List[java.awt.datatransfer.DataFlavor]:\n ...", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def create_flavor(cls, values):\n return cls.dbdriver.create_flavor(values)", "async def flavors():\n berry = []\n apple = []\n honey = []\n mango = []\n earthy = []\n mint = []\n blueberry = []\n ammonia = []\n coffee = []\n vanilla = []\n rose = []\n pine = []\n citrus = []\n sweet = []\n pineapple = []\n skunk = []\n orange = []\n strawberry = []\n lemon = []\n grape = []\n lime = []\n pepper = []\n lavender = []\n\n for i in list(range(len(strain))):\n if 'Coffee' in strain['flavor'][i]:\n coffee.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Pepper' in strain['flavor'][i]:\n pepper.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Lavender' in strain['flavor'][i]:\n lavender.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Mango' in strain['flavor'][i]:\n mango.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Earthy' in strain['flavor'][i]:\n earthy.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Citrus' in strain['flavor'][i]:\n citrus.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Lemon' in strain['flavor'][i]:\n lemon.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Strawberry' in strain['flavor'][i]:\n strawberry.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Pine' in strain['flavor'][i]:\n pine.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Vanilla' in strain['flavor'][i]:\n vanilla.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Honey' in strain['flavor'][i]:\n honey.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Pineapple' in strain['flavor'][i]:\n pineapple.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Blueberry' in strain['flavor'][i]:\n blueberry.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Orange' in strain['flavor'][i]:\n orange.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Skunk' in strain['flavor'][i]:\n skunk.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Grape' in strain['flavor'][i]:\n grape.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Berry' in strain['flavor'][i]:\n 
berry.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Lime' in strain['flavor'][i]:\n lime.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Rose' in strain['flavor'][i]:\n rose.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Sweet' in strain['flavor'][i]:\n sweet.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Apple' in strain['flavor'][i]:\n apple.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Mint' in strain['flavor'][i]:\n mint.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Ammonia' in strain['flavor'][i]:\n ammonia.append(strain['name'][i])\n\n json_berry = json.dumps(berry)\n json_apple = json.dumps(apple)\n json_honey = json.dumps(honey)\n json_mango = json.dumps(mango)\n json_earthy = json.dumps(earthy)\n json_mint = json.dumps(mint)\n json_bluberry = json.dumps(blueberry)\n json_ammonia = json.dumps(ammonia)\n json_coffee = json.dumps(coffee)\n json_vanilla = json.dumps(vanilla)\n json_rose = json.dumps(rose)\n json_pine = json.dumps(pine)\n json_citrus = json.dumps(citrus)\n json_sweet = json.dumps(sweet)\n json_pineapple = json.dumps(pineapple)\n json_skunk = json.dumps(skunk)\n json_orange = json.dumps(orange)\n json_strawberry = json.dumps(strawberry)\n json_lemon = json.dumps(lemon)\n json_grape = json.dumps(grape)\n json_lime = json.dumps(lime)\n json_pepper = json.dumps(pepper)\n json_lavender = json.dumps(lavender)\n\n return 'Berry', json_berry, 'Apple', json_apple, 'Honey', json_honey,\\\n 'Mango', json_mango, 'Earthy', json_earthy, 'Mint', json_mint,\\\n 'Blueberry', json_bluberry, 'Ammonia', json_ammonia, 'Coffee', json_coffee,\\\n 'Vanilla', json_vanilla, 'Rose', json_rose, 'Pine', json_pine,\\\n 'Citrus', json_citrus, 'Sweet', json_sweet, 'Pineapple', json_pineapple,\\\n 'Skunk', json_skunk, 'Orange', json_orange, 'Strawberry', json_strawberry,\\\n 'Lemon', json_lemon, 'Grape', json_grape, 'Lime', json_lime,\\\n 'Pepper', json_pepper, 'Lavender', json_lavender", "def get_flavors_black_list(self):\n return self._sanitize(CONF.powervc.flavor_black_list)", "def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)", "def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)" ]
[ "0.74028367", "0.6779597", "0.6729681", "0.64055777", "0.6195069", "0.59106874", "0.59053063", "0.58540213", "0.5842893", "0.5784032", "0.57393897", "0.5738477", "0.5722725", "0.5588289", "0.5528176", "0.54953", "0.5488104", "0.54655933", "0.54139316", "0.5388655", "0.53481954", "0.53343064", "0.5302553", "0.5294203", "0.5262708", "0.5231103", "0.5220609", "0.5194343", "0.5077146", "0.5077146" ]
0.679078
1
Find a single instance
def find_instance(self, name_or_id, ignore_missing=True):
    return self._find(_instance.Instance, name_or_id, ignore_missing=ignore_missing)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None", "def find(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def find(self, **kwargs):\n rl = self.findall(**kwargs)\n num = len(rl)\n\n if num == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(msg)\n elif num > 1:\n raise exceptions.NoUniqueMatch\n else:\n return self.get(rl[0].id)", "def find(self, **kwargs):\n matches = self.findall(**kwargs)\n num_matches = len(matches)\n if num_matches == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(404, msg)\n elif num_matches > 1:\n raise exceptions.NoUniqueMatch\n else:\n return matches[0]", "def find_one(self, criteria):\n return self.connection.find_one(criteria)", "def first(self, **kwargs):\n return self.find(**kwargs).first()", "def find_exact(self, **kwargs):\n results = list(self.find(**kwargs))\n if len(results) == 1:\n return results[0]\n return None", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def find_one(cls, attr):\n result = cls.db().find_one(attr, True)\n\n if result is not None:\n return cls(result)\n\n return None", "def find(cls, id=None):\n return cls.query.filter_by(id=id).one_or_none()", "def find_instance_by_id ( ec2_conn, instance_id ) :\n instance_results = ec2_conn.get_only_instances( instance_ids = [ instance_id ] )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def find(cls, uuid):\n entries = cls.objects.filter(uuid=uuid)\n if not entries:\n return None\n else:\n return entries.first()", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None", "def findWhere(cls, args):\n return cls.search(args)[0][0]", "def find_first(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def get(self, **search_terms):\n instances = self.filter(**search_terms)\n\n if not instances:\n raise NotFoundError(\"Nothing has been found.\")\n\n if len(instances) > 1:\n raise NotUniqueError(\"Serveral instance have been found.\")\n\n return instances[0]", "def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]", "def get_instance(self, instance):\n return self._get(_instance.Instance, instance)", "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def find_one(self, user_id):\n pass", "def 
find_object(self, obj_type, obj_name):\n try:\n # Simply look it up by type and name.\n obj = self.model_map['object'][obj_type][obj_name][1]\n except KeyError:\n # No dice. This object doesn't exist in the model.\n obj = None\n\n return obj", "def find(self, *args, **kwds):\n return self.collection.find(*args, **kwds)", "def find_by_id(cls, iid: int):\n return cls.query.filter_by(id=iid).first()", "def find_one_bywhereclause(cls, whereclause):\n return cls.dbm().modelclass_find_one_bywhereclause(cls, whereclause)", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def get_fixture_instance(self, id=None, name=None, verify_exists=True):\n query = {}\n if id:\n query['_id'] = id\n if name:\n query['name_lower'] = name\n if not query:\n raise AXIllegalArgumentException(\"No query filters supplied\")\n fix_doc = self.instances.find_one(query)\n if not fix_doc:\n if verify_exists:\n raise AXApiResourceNotFound(\"No instances found matching: {}\".format(id or name))\n return None\n return FixtureInstance.deserialize_mongodoc(fix_doc)", "def get(cls, id_: int):\n query = DBSESSION.query(cls)\n instance = query.get(id_)\n if not instance:\n raise ObjectNotFound(f\"Register of {cls.str_representation} not found for id = {id_}.\")\n return instance", "def find(self):\n raise NotImplementedError", "async def find_one(self, **query):\n\n return await self._expand(await self.db.get_one(**query))" ]
[ "0.7728593", "0.71363354", "0.7030264", "0.6881119", "0.68568933", "0.68549156", "0.6848733", "0.6823757", "0.6800267", "0.6779677", "0.67150754", "0.6676291", "0.6646991", "0.6642961", "0.65998936", "0.6545709", "0.6520098", "0.6490596", "0.64855844", "0.6475394", "0.6446852", "0.6445242", "0.6426985", "0.6425823", "0.64223117", "0.64114696", "0.6409071", "0.6397761", "0.6392555", "0.6322973" ]
0.7567719
1
Get a single instance
def get_instance(self, instance):
    return self._get(_instance.Instance, instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetInstance():\n pass", "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def get_instance(cls, *args, **kwargs):\n if cls._instance is not None:\n return cls._instance\n return cls(*args, **kwargs)", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def getInstance(cls):\n cls.locker.acquire()\n try:\n if not cls.instance:\n cls.instance = cls()\n return cls.instance\n finally:\n cls.locker.release()", "def _get_instance(self, id):\n if id not in self._instances:\n self._instances[id] = self._load_constructor(id)\n\n return self._instances[id]", "def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]", "def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance", "def get(cls):\n return cls.instance", "def _get_instance(self):", "def _get_instance(self):", "def instance(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = cls()\n\n return cls._instance", "def instance(cls):\n if not hasattr(cls, '_instance'):\n cls._instance = cls()\n return cls._instance", "def get_instance(self, name):\n return self.store.instance.id", "def instance(self):\n return self.__instance", "def instance(self):\n return self._instance", "def get_instance(self, name):\n return self.website.instance.id", "def _get_instance(cls, configuration, auth_type):\n if configuration in cls._INSTANCES:\n return cls._INSTANCES[configuration]\n return cls._create_instance(configuration, auth_type)", "def get_instance(self, name):\n klass = self.get_class(name)\n return klass()", "def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]", "def get(self, **search_terms):\n instances = self.filter(**search_terms)\n\n if not instances:\n raise NotFoundError(\"Nothing has been found.\")\n\n if len(instances) > 1:\n raise NotUniqueError(\"Serveral instance have been found.\")\n\n return instances[0]", "def getInstance():\n if Car.inst is None: Car.inst = Car()\n return Car.inst", "def get_instance(self, instance):\n\n title = list(instance.keys())[0]\n instance = instance.get(title)\n return instance", "def instance(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance\")", "def instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance\")", "def instance(self) -> str:\n return pulumi.get(self, \"instance\")", "def instance(self) -> str:\n return pulumi.get(self, \"instance\")", "def get_instance(self):\n try:\n return self._instance\n except AttributeError:\n self._instance = self._decorated()\n return self._instance", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def get(cls, id_: int):\n query = DBSESSION.query(cls)\n instance = query.get(id_)\n if not instance:\n raise ObjectNotFound(f\"Register of {cls.str_representation} not found for id = {id_}.\")\n return instance" ]
[ "0.7694641", "0.75701267", "0.75683355", "0.7447133", "0.7252449", "0.7242679", "0.7239021", "0.7208093", "0.71733356", "0.71482056", "0.71482056", "0.7138967", "0.70699036", "0.7006299", "0.69852996", "0.69710827", "0.69649774", "0.69454527", "0.69406265", "0.6925822", "0.6866705", "0.6852299", "0.6851537", "0.68156976", "0.6790521", "0.6790394", "0.6790394", "0.6788033", "0.6723616", "0.6703117" ]
0.7944661
0
Add a user to 'prospects' unless the user is the campaign owner or is already linked to 'workers', 'prospects', or 'blacklist'. Also decline to add prospects when the campaign is not active. user A TcsUser instance to link to 'prospects'
def addProspect(self, user):
    if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \
            and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():
        self.prospects.add(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None", "def add_player(self, user):\n # Make sure the user can play\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n raise ValueError(\"Not enough credits to pay entrance fee.\")\n if self.is_user_playing(user):\n raise ValueError(\"User already in tournament.\")\n \n # Handle the money transfer to join the tournament\n user_profile.credits = user_profile.credits - self.entrance_fee\n user_profile.save()\n self.prize_pool = self.prize_pool + self.entrance_fee\n self.save()\n \n # Join the tournament\n new_player = Player(user=user,\n tournament=self,\n credits=self.starting_credits)\n new_player.save()\n return True", "def check_professor(doc_user):\n info = doc_user[\"user_info\"]\n my_sharing_calendar = col_sharing.find_one({\"User\": doc_user[\"_id\"]})\n if info[\"professor\"]:\n logger.info('{}: sharing calendar start'.format(\n doc_user[\"user_id\"]))\n my_sharing_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_sharing.insert_one(my_sharing_calendar)\n return True\n \n return False", "def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True", "def add_talk(talk):\n # Check if this user is already registered\n exists = check_attendee_exists(talk.userId, talk.profile)\n if not exists[0]:\n return False\n\n talk.put()\n return True", "def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)", "def create_users(cls):\n for p in Player.objects.exclude(race__can_play=False):\n p.get_extension(GrandChallengeUser)", "def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)", "def test_user_settings_for_subscribing_other_users(self) -> None:\n user_profile = self.example_user(\"cordelia\")\n invitee_user_id = user_profile.id\n realm = user_profile.realm\n\n do_set_realm_property(\n realm, \"create_public_stream_policy\", Realm.POLICY_MEMBERS_ONLY, acting_user=None\n )\n do_set_realm_property(\n realm, \"invite_to_stream_policy\", Realm.POLICY_ADMINS_ONLY, acting_user=None\n )\n do_change_user_role(self.test_user, UserProfile.ROLE_MODERATOR, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream1\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n 
self.assert_json_error(result, \"Insufficient permission\")\n\n do_change_user_role(self.test_user, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user, [\"stream1\"], {\"principals\": orjson.dumps([invitee_user_id]).decode()}\n )\n\n do_set_realm_property(\n realm, \"invite_to_stream_policy\", Realm.POLICY_MODERATORS_ONLY, acting_user=None\n )\n do_change_user_role(self.test_user, UserProfile.ROLE_MEMBER, acting_user=None)\n # Make sure that we are checking the permission with a full member,\n # as full member is the user just below moderator in the role hierarchy.\n self.assertFalse(self.test_user.is_provisional_member)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")\n\n do_change_user_role(self.test_user, UserProfile.ROLE_MODERATOR, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user, [\"stream2\"], {\"principals\": orjson.dumps([invitee_user_id]).decode()}\n )\n self.unsubscribe(user_profile, \"stream2\")\n\n do_set_realm_property(\n realm, \"invite_to_stream_policy\", Realm.POLICY_MEMBERS_ONLY, acting_user=None\n )\n do_change_user_role(self.test_user, UserProfile.ROLE_GUEST, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n self.assert_json_error(result, \"Not allowed for guest users\")\n\n do_change_user_role(self.test_user, UserProfile.ROLE_MEMBER, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([self.test_user.id, invitee_user_id]).decode()},\n )\n self.unsubscribe(user_profile, \"stream2\")\n\n do_set_realm_property(\n realm,\n \"invite_to_stream_policy\",\n Realm.POLICY_FULL_MEMBERS_ONLY,\n acting_user=None,\n )\n do_set_realm_property(realm, \"waiting_period_threshold\", 100000, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")\n\n do_set_realm_property(realm, \"waiting_period_threshold\", 0, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user, [\"stream2\"], {\"principals\": orjson.dumps([invitee_user_id]).decode()}\n )", "def test_add_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_ADD_COACH, self.classrooms[1]))", "def addParticipant(self, participant):\n if len(self.participants) < self.maxParticipants:\n self.participants[participant.discordId] = participant\n else:\n raise ValueError('Max number of participants has been reached')", "def add_user_with_status_granted(caller, user):\r\n if _add_user(user, CourseCreator.GRANTED):\r\n update_course_creator_group(caller, user, True)", "def create_investor(sender, **kwargs):\n u = kwargs[\"instance\"]\n try:\n \n if not InvestorProfile.objects.filter(username=u.username):\n inv = InvestorProfile(username=u.username,user=u)\n inv.save()\n g = DjangoGroup.objects.get(name='Investors') \n g.user_set.add(u)\n except Exception as e:\n print e", "def add_user(request):\n profile = get_object_or_404(UserProfile, user=request.user)\n\n # make sure only managers and admins can add a team\n if profile.level == 'admin' or profile.level == 
'manager':\n\n if request.method == 'POST':\n form = UserProfileForm(request.POST)\n user_email = UserForm(request.POST)\n\n if user_email.is_valid() and form.is_valid():\n user = User.objects.create_user(username=random_username(),\n email=request.POST.get('email'),\n password='EggBox900')\n messages.success(request, 'Profile added successfully')\n\n user.userprofile.first_name = form.data['first_name']\n user.userprofile.last_name = form.data['last_name']\n user.userprofile.company_id = profile.company_id\n # user.userprofile.start_date = form.data['start_date']\n # user.userprofile.end_date = form.data['end_date']\n user.userprofile.level = form.data['level']\n user.userprofile.team = Team.objects.get(pk=form.data['team'])\n user.userprofile.contract_type = form.data['contract_type']\n user.userprofile.contract_percentage = form.data['contract_percentage']\n user.userprofile.agent_goal = form.data['agent_goal']\n user.userprofile.save()\n else:\n messages.error(request, 'Update failed. Please ensure the form is valid.')\n\n users = UserProfile.objects.filter(company_id=profile.company_id)\n\n template = 'profiles/user_management.html'\n context = {\n 'users': users,\n 'profile': profile\n }\n\n return render(request, template, context)\n\n else:\n form = UserProfileForm()\n user_email = UserForm()\n\n template = 'profiles/add_user.html'\n context = {\n 'form': form,\n 'profile': profile,\n 'user_email': user_email\n }\n\n return render(request, template, context)\n else:\n messages.info(request, \"Sorry, you are not authorized to add users. Ask a Manager or Admin.\")\n\n return redirect(reverse('planning', ))", "def add_candidate(self, user):\n weight = (\n self.assignment_related_users.aggregate(models.Max(\"weight\"))[\"weight__max\"]\n or 0\n )\n defaults = {\"weight\": weight + 1}\n self.assignment_related_users.update_or_create(user=user, defaults=defaults)", "def test_can_subscribe_other_users(self) -> None:\n\n def validation_func(user_profile: UserProfile) -> bool:\n user_profile.refresh_from_db()\n return user_profile.can_subscribe_other_users()\n\n self.check_has_permission_policies(\"invite_to_stream_policy\", validation_func)", "def create_player_profile(sender, **kwargs):\n if kwargs.get('created') is True:\n PlayerProfile.objects.create(user=kwargs.get('instance'))", "def create_user_profile(email, **kwargs): # POST\n user_exists = coll(\"users\").find_one({\"_id\": email})\n\n if user_exists:\n return {\"message\": \"User already exists\"}, 400\n\n # NOTE Doesn't make sense for a person to have prizes only a team should have this\n coll(\"users\").insert_one(\n {\n \"_id\": email,\n \"skills\": kwargs[\"skills\"],\n \"prizes\": kwargs[\"prizes\"],\n \"bio\": kwargs[\"bio\"],\n \"github\": kwargs[\"github\"],\n \"interests\": kwargs[\"interests\"],\n \"seriousness\": kwargs[\"seriousness\"],\n \"team_id\": \"\",\n \"hasateam\": False,\n }\n )\n return {\"message\": \"User profile successfully created\"}, 201", "def update_user_profile(sender, instance, created, **kwargs):\n if created:\n GameplanUser.objects.create(user=instance)\n instance.gameplanuser.save()", "def add_user_to_course_cohort(cohort_name, course_id, user):\n if cohort_name is not None:\n cohort = get_cohort_by_name(course_id, cohort_name)\n try:\n add_user_to_cohort(cohort, user)\n except ValueError:\n # user already in cohort, probably because they were un-enrolled and re-enrolled\n logger.exception('Cohort re-addition')", "def save_user_receiver(sender, instance, created, *args, **kwargs):\n 
print(\"profile created\", instance)\n if created:\n new_profile = UserProfile.objects.get_or_create(owner=instance)", "def add_participant(self, address):\n if address in [u.address for u in User.all()]:\n db.run_in_transaction(self._add_participantTx, address)\n xmpp.send_invite(address, self.jid)", "def setup_whitelisted_section():\n setup_unrelated_section()\n\n # whitelist user to the course\n cs61a = Course.objects.get(name=\"CS61A\")\n user = User.objects.get(username=\"demo_user\")\n cs61a.whitelist.add(user)", "def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False", "def create(cls, user_from, user_to, round):\n grand_challenge = cls.objects.create(round=round)\n user_from = user_from.user.get_profile()\n user_to = user_to.user.get_profile()\n grand_challenge.challenge = Challenge.create(user_from.get_extension(ChallengeUser), user_to.get_extension(ChallengeUser))\n grand_challenge.challenge.accept()\n grand_challenge.save()\n return grand_challenge", "def is_attended(value, user: User):\n return value.is_attended(user)", "def test_add_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_ADD_COACH, self.classrooms[1]))", "def test_teams_add_user_to_team_v2(self):\n pass", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)" ]
[ "0.57529235", "0.57101214", "0.56048286", "0.549154", "0.52644926", "0.516734", "0.51544005", "0.50623524", "0.49674854", "0.49344334", "0.48778322", "0.48658597", "0.48227632", "0.481681", "0.4816524", "0.48090467", "0.48052084", "0.47986007", "0.4791924", "0.47789344", "0.47755465", "0.47655156", "0.4752161", "0.47516337", "0.47505718", "0.4746542", "0.4709363", "0.47012112", "0.4699854", "0.46997496" ]
0.8156176
0
Remove the user from the lists of workers and prospects, if applicable, and add the user to the blacklist. Note that adding somebody as a worker removes the person from the blacklist. user A TcsUser instance to link to the blacklist
def addToBlacklist(self, user):
    if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():
        self.blacklist.add(user)
        if self.prospects.filter(pk=user.id).exists():
            self.prospects.remove(user)
        if self.workers.filter(pk=user.id).exists():
            self.workers.remove(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")", "def delete_from_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.delete().where(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id )).execute() \n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "def removeWorker(self, user):\n if user == self.owner:\n return None\n # Without these queries, there's no way to tell if anything actually gets removed.\n # Calling remove() on a user that is not in the set does not raise an error.\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n return self\n return None", "async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")", "def add_to_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.insert().values( user_id=user_id,\n blacklisted_id=blacklist_user_id).execute()\n except sqlalchemy.exc.IntegrityError as e:\n if e.orig.args[0] == 1062 :\n # duplicate entry, don't care !\n pass\n elif e.orig.args[0] == 1452 :\n self.log(e, self.identifier)\n raise egg_errors.UnknownUserOrBadgeIDException\n else:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "async def blacklist_add(self, ctx: commands.Context, target, *, reason: str = \"No reason given.\"):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n guild = None\r\n\r\n try:\r\n check = await self.check_user(target.id, table)\r\n except Exception:\r\n guild = discord.utils.get(self.bot.guilds, id=int(target))\r\n if not guild:\r\n return\r\n\r\n check = await self.check_user(int(target), 
table)\r\n\r\n if not check[0]:\r\n if isinstance(target, discord.User):\r\n await self.add_blacklist(target.id, table, reason)\r\n else:\r\n await self.add_blacklist(int(target), table, reason)\r\n\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n if not isinstance(target, discord.User):\r\n embed = discord.Embed(color=self.bot.colors.red,\r\n description=f\"Your guild / server has been blacklisted. \"\r\n f\"If you wish to know the reason, join the \"\r\n f\"[Support server]({self.bot.invite_url})\")\r\n await guild.owner.send(embed=embed)\r\n await guild.leave()\r\n self.bot.logger.info(f\"Added guild with ID {target} to blacklist.\")\r\n else:\r\n self.bot.logger.info(f\"Added user with ID {target.id} to blacklist\")\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is already blacklisted.\")", "def nuke_users(modeladmin, request, queryset):\n users = None\n form = BlacklistForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})\n contenttype = ContentType.objects.get_for_model(queryset.model)\n # Because we want this action available from comments or user admin lists, sort out content type\n ctype_as_string = unicode(contenttype)\n if ctype_as_string == 'user':\n users = queryset\n if ctype_as_string == 'comment':\n # build list of unique users within comment list.\n users = []\n for comment in queryset:\n if not comment.user in users:\n users.append(comment.user)\n\n if ctype_as_string == 'contact':\n # build list of unique users from contact list.\n users = []\n for c in queryset:\n if c.user and c.user not in users:\n users.append(c.user)\n if not users:\n # we haven't built out a content-type appropriate user list.\n return HttpResponse(\"Error finding content type: %s\" % contenttype)\n\n if 'apply_blacklist' in request.POST: # we're returning from the intermediate page and are ready to do some work.\n form = BlacklistForm(request.POST)\n if form.is_valid():\n reason = form.cleaned_data['reason']\n spammer = form.cleaned_data['is_spammer']\n for user in users:\n # Deactivate user accounts\n # Note: Update is more efficient,\n # but we can't use it because we may have a list (from comments)\n # rather than a proper queryset.\n user.is_active = False\n user.save()\n\n for c in user.comment_comments.all(): # remove their comments from public view.\n if spammer:\n c.delete()\n else:\n c.is_public = False\n c.is_removed = True\n c.save()\n for c in user.contact_set.all(): # and contact messages\n if spammer:\n c.delete()\n else:\n c.publish = False\n c.save()\n # remove their session. -- Is there a more efficient way than looping through all sessions? That can become a mighty big table.\n for s in Session.objects.all():\n decoded_session = s.get_decoded()\n if '_auth_user_id' in decoded_session and decoded_session['_auth_user_id'] == user.id:\n s.delete()\n # and add them to the blacklist\n blacklist = Blacklist(\n user = user,\n blacklister = request.user,\n reason = reason,\n )\n blacklist.save()\n\n if spammer:\n resp_str = 'Any related accounts will still be visible, but related comments have been deleted.'\n else:\n resp_str = 'Any related accounts and comments will still be visible in the admin.'\n\n count = len(users)\n if count == 1:\n modeladmin.message_user(request, \"%s was removed and blocked from the site. %s\" % (users[0].username, resp_str))\n else:\n modeladmin.message_user(request, \"%s users were removed and blocked from the site. 
%s\" % (count, resp_str))\n return HttpResponseRedirect(request.get_full_path())\n else:\n return HttpResponse(\"error!\")\n # We haven't captured intermediate page data. Go there...\n return render(request, 'admin/blacklist.html', {'users': users, 'form': form})", "def blacklist_remove():\n db = unitdata.kv()\n blacklist = db.get(BLACKLIST_KEY, [])\n for device in get_devices():\n try:\n blacklist.remove(device)\n except ValueError:\n raise Error('{}: Device not in blacklist.'.format(device))\n db.set(BLACKLIST_KEY, blacklist)\n db.flush()", "def test_remove_from_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.feature_test.remove_from_blacklist(3)\n self.assertFalse(3 in Feature(\"testing\").blacklist)", "async def blacklist(self, ctx, add_or_remove: AddOrRemove = None, id: int = 0):\n # view\n if add_or_remove is None or not id:\n return await ctx.send(f\"```py\\n{self.bot._blacklist}\\n```\")\n\n # add\n elif add_or_remove is True:\n if id not in self.bot._blacklist:\n self.bot._blacklist.add(id)\n else:\n return await ctx.send(\"That id is already blacklisted!\")\n # remove\n else:\n if id in self.bot._blacklist:\n self.bot._blacklist.remove(id)\n else:\n return await ctx.send(\"That id is not blacklisted!\")\n\n # confirm\n self.bot.dump_blacklist()\n await ctx.send(\"Done!\")", "def update_exam_blacklist(sender, instance, **kwargs):\n exams = Exam.objects.filter(\n course_instance__instructors=instance.instructor)\n if instance.permission_allowed is False:\n exams.exclude(blacklisted=True).update(blacklisted=True)\n else:\n for exam in exams:\n if exam.has_permission():\n exam.blacklisted = False\n exam.save()", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def BUM(tw, user, action):\n\n if (user in twStart.WHITE_LIST_USERS):\n return\n\n if(action == \"B\"):\n print(_(\"Blocked: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.blocks.create(user_id=usrId, skip_status=1, include_entities=False)\n return\n elif (action == \"M\"):\n print(_(\"Muted: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.users.mutes(user_id=usrId)\n return\n elif(action == \"U\"):\n print(_(\"Unfollowed: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.friendships.destroy(user_id=usrId)\n return", "def addProspect(self, user):\n if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \\\n and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():\n self.prospects.add(user)\n return self\n return None", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])", "def addWhitelist(self, user, guildId):\n flag = self.con.addUserToWhitelist(user, guildId)\n\n if flag:\n self.whitelist[str(guildId)].append(user)\n\n return flag", "def anti_bot(self, message):\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n bot_creation_date = self._get_creation_date(msg_list[1])\n viewers = self.ts.fetch_chatters_from_API()['viewers']\n mod_list = self.ts.get_mods()\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n whitelist = json.load(f)\n for viewer in viewers:\n if self._get_creation_date(viewer) == bot_creation_date and viewer not in whitelist:\n self.ts.send_message('/ban {}'.format(viewer))\n mod_str = ', 
'.join(mod_list)\n self._add_to_whisper_queue(viewer, 'We\\'re currently experiencing a bot attack. If you\\'re a human and were accidentally banned, please whisper a mod: {}'.format(mod_str))", "def delWhitelist(self, user, guildId):\n flag = False\n\n for item in self.whitelist[str(guildId)]:\n if str(user) == item:\n flag = True\n break\n\n if flag:\n if self.con.removeUserOfWhitelist(user, guildId):\n self.whitelist[str(guildId)].remove(user)\n\n return flag", "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "async def oauth_whitelist(self, ctx, target: Union[Role, utils.User]):\n whitelisted = self.bot.config[\"oauth_whitelist\"]\n\n # target.id is not int??\n if target.id in whitelisted:\n whitelisted.remove(target.id)\n removed = True\n else:\n whitelisted.append(target.id)\n removed = False\n\n await self.bot.config.update()\n\n embed = Embed(color=self.bot.main_color)\n embed.title = \"Success\"\n\n if not hasattr(target, \"mention\"):\n target = self.bot.get_user(target.id) or self.bot.modmail_guild.get_role(\n target.id\n )\n\n embed.description = (\n f\"{'Un-w' if removed else 'W'}hitelisted \" f\"{target.mention} to view logs.\"\n )\n\n await ctx.send(embed=embed)", "def whitelist(self, message):\n user = self.ts.get_user(message)\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n try:\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n holder_list = json.load(f)\n except json.decoder.JSONDecodeError:\n holder_list = []\n if msg_list[1] not in holder_list:\n holder_list.append(msg_list[1])\n with codecs.open('whitelist.json', 'w', 'utf-8') as f:\n json.dump(holder_list, f, ensure_ascii=False)\n self._add_to_whisper_queue(user, '{} has been added to the whitelist'.format(msg_list[1]))\n else:\n self._add_to_whisper_queue(user, '{} is already in the whitelist!'.format(msg_list[1]))", "async def blacklist_member(self, ctx, *members):\n successes = []\n fails = []\n for member_arg in members:\n try:\n member = await commands.MemberConverter().convert(ctx, member_arg)\n except commands.errors.BadArgument:\n fails.append(f\"Cannot find member {member_arg}\")\n else:\n if member == ctx.author:\n fails.append(\"You cannot blacklist yourself!\")\n continue\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO blacklisted_member (user_id, guild_id)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n user_id = VALUES(user_id)\n \"\"\",\n member.id,\n ctx.guild.id,\n )\n try:\n self.bot.cache.blacklist[str(ctx.guild.id)][\"member\"].add(member.id)\n except KeyError:\n self.bot.cache.blacklist[str(ctx.guild.id)] = {\n \"member\": {member.id},\n \"command\": set(),\n }\n successes.append(f\"Blacklisted {member.mention}\")\n\n await util.send_tasks_result_list(ctx, successes, fails)", "def user_disappears(self, user):\n pass", "async def cmd_galtoguserwl(self, ctx, user_id): \n\n # ===== CHECK IF INPUT IS VALID\n try:\n user_id = int(user_id.replace(\"<\", '').replace(\"@\", '').replace(\"!\", '').replace(\">\", ''))\n except (IndexError, ValueError):\n await 
ctx.send_help('galtoguserwl', delete_after=Gallery.delete_after)\n return \n\n # ===== REMOVE OR ADD USER TO THE WHITELIST\n ret_msg = \"\"\n\n if user_id in self.cogset['user_wl']:\n self.cogset['user_wl'].remove(user_id)\n ret_msg = f'<@{user_id} has been **removed** from the gallery whitelist.'\n \n else:\n self.cogset['user_wl'].append(user_id)\n ret_msg = f'<@{user_id} has been **added** to the gallery whitelist.'\n\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=ret_msg, delete_after=Gallery.delete_after)\n return", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "def blacklist_meme(self, id):\n cursor = self.conn.cursor()\n cursor.execute(f\"update memes set blacklisted = 1 where meme_id = ?\", (id, ))\n self.conn.commit()\n cursor.close()" ]
[ "0.74218553", "0.7352311", "0.7122453", "0.67325133", "0.6435446", "0.6364521", "0.6361342", "0.6294082", "0.6188603", "0.58658487", "0.5860538", "0.58456814", "0.58305186", "0.58287203", "0.57763344", "0.57541144", "0.56883603", "0.5684058", "0.55988926", "0.55928737", "0.5575431", "0.5563341", "0.55434424", "0.5528555", "0.5486386", "0.54816455", "0.54713535", "0.54643005", "0.5445286", "0.5443467" ]
0.81987846
0
Remove the user from 'prospects' and 'blacklist', if applicable, and add the user to 'workers'. Note that adding somebody as a worker removes the person from the blacklist.

user
    A TcsUser instance to link to workers
def addWorker(self, user):
    if (user != self.owner) and not self.workers.filter(pk=user.id).exists():
        self.workers.add(user)
        if self.prospects.filter(pk=user.id).exists():
            self.prospects.remove(user)
        if self.blacklist.filter(pk=user.id).exists():
            self.blacklist.remove(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None", "def removeWorker(self, user):\n if user == self.owner:\n return None\n # Without these queries, there's no way to tell if anything actually gets removed.\n # Calling remove() on a user that is not in the set does not raise an error.\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n return self\n return None", "def addProspect(self, user):\n if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \\\n and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():\n self.prospects.add(user)\n return self\n return None", "async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "def add_to_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.insert().values( user_id=user_id,\n blacklisted_id=blacklist_user_id).execute()\n except sqlalchemy.exc.IntegrityError as e:\n if e.orig.args[0] == 1062 :\n # duplicate entry, don't care !\n pass\n elif e.orig.args[0] == 1452 :\n self.log(e, self.identifier)\n raise egg_errors.UnknownUserOrBadgeIDException\n else:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible\n except Exception as e:\n self.log(e, 
self.identifier)\n raise egg_errors.QueryNotPossible", "def copy_to_user(self, user):\n user_pod = user.profile.get_user_pod()\n checklist_copy = self\n checklist_copy.pk = None\n checklist_copy.collaborators.clear()\n checklist_copy.save()\n\n # Copy all the checklist entries, but don't save the checked\n # state or any of the optional details - the new checklist\n # should be \"blank\"\n copied_entries = []\n for entry in self.entries:\n item_copy = ChecklistEntry(plant_name=entry.plant_name,\n checklist=checklist_copy)\n copied_entries.append(item_copy)\n\n ChecklistEntry.objects.bulk_create(copied_entries)\n\n # Assign ownership of the new checklist to the user\n ownership = ChecklistCollaborator(collaborator=user_pod,\n checklist=checklist_copy, is_owner=True)\n ownership.save()", "def test_teams_remove_user_from_team_v2(self):\n pass", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)", "async def blacklist_add(self, ctx: commands.Context, target, *, reason: str = \"No reason given.\"):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n guild = None\r\n\r\n try:\r\n check = await self.check_user(target.id, table)\r\n except Exception:\r\n guild = discord.utils.get(self.bot.guilds, id=int(target))\r\n if not guild:\r\n return\r\n\r\n check = await self.check_user(int(target), table)\r\n\r\n if not check[0]:\r\n if isinstance(target, discord.User):\r\n await self.add_blacklist(target.id, table, reason)\r\n else:\r\n await self.add_blacklist(int(target), table, reason)\r\n\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n if not isinstance(target, discord.User):\r\n embed = discord.Embed(color=self.bot.colors.red,\r\n description=f\"Your guild / server has been blacklisted. 
\"\r\n f\"If you wish to know the reason, join the \"\r\n f\"[Support server]({self.bot.invite_url})\")\r\n await guild.owner.send(embed=embed)\r\n await guild.leave()\r\n self.bot.logger.info(f\"Added guild with ID {target} to blacklist.\")\r\n else:\r\n self.bot.logger.info(f\"Added user with ID {target.id} to blacklist\")\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is already blacklisted.\")", "def test_teams_remove_user_from_team_v1(self):\n pass", "def nuke_users(modeladmin, request, queryset):\n users = None\n form = BlacklistForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})\n contenttype = ContentType.objects.get_for_model(queryset.model)\n # Because we want this action available from comments or user admin lists, sort out content type\n ctype_as_string = unicode(contenttype)\n if ctype_as_string == 'user':\n users = queryset\n if ctype_as_string == 'comment':\n # build list of unique users within comment list.\n users = []\n for comment in queryset:\n if not comment.user in users:\n users.append(comment.user)\n\n if ctype_as_string == 'contact':\n # build list of unique users from contact list.\n users = []\n for c in queryset:\n if c.user and c.user not in users:\n users.append(c.user)\n if not users:\n # we haven't built out a content-type appropriate user list.\n return HttpResponse(\"Error finding content type: %s\" % contenttype)\n\n if 'apply_blacklist' in request.POST: # we're returning from the intermediate page and are ready to do some work.\n form = BlacklistForm(request.POST)\n if form.is_valid():\n reason = form.cleaned_data['reason']\n spammer = form.cleaned_data['is_spammer']\n for user in users:\n # Deactivate user accounts\n # Note: Update is more efficient,\n # but we can't use it because we may have a list (from comments)\n # rather than a proper queryset.\n user.is_active = False\n user.save()\n\n for c in user.comment_comments.all(): # remove their comments from public view.\n if spammer:\n c.delete()\n else:\n c.is_public = False\n c.is_removed = True\n c.save()\n for c in user.contact_set.all(): # and contact messages\n if spammer:\n c.delete()\n else:\n c.publish = False\n c.save()\n # remove their session. -- Is there a more efficient way than looping through all sessions? That can become a mighty big table.\n for s in Session.objects.all():\n decoded_session = s.get_decoded()\n if '_auth_user_id' in decoded_session and decoded_session['_auth_user_id'] == user.id:\n s.delete()\n # and add them to the blacklist\n blacklist = Blacklist(\n user = user,\n blacklister = request.user,\n reason = reason,\n )\n blacklist.save()\n\n if spammer:\n resp_str = 'Any related accounts will still be visible, but related comments have been deleted.'\n else:\n resp_str = 'Any related accounts and comments will still be visible in the admin.'\n\n count = len(users)\n if count == 1:\n modeladmin.message_user(request, \"%s was removed and blocked from the site. %s\" % (users[0].username, resp_str))\n else:\n modeladmin.message_user(request, \"%s users were removed and blocked from the site. %s\" % (count, resp_str))\n return HttpResponseRedirect(request.get_full_path())\n else:\n return HttpResponse(\"error!\")\n # We haven't captured intermediate page data. 
Go there...\n return render(request, 'admin/blacklist.html', {'users': users, 'form': form})", "def delete_from_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.delete().where(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id )).execute() \n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "def add_new_user(network, user, games):\n if user not in network:\n network[user] = [[], games]\n return network", "def remove(self, user_id):\n pass", "def create_retirement_request_and_deactivate_account(user):\n # Add user to retirement queue.\n UserRetirementStatus.create_retirement(user)\n\n # Unlink LMS social auth accounts\n UserSocialAuth.objects.filter(user_id=user.id).delete()\n\n # Change LMS password & email\n user.email = get_retired_email_by_email(user.email)\n user.set_unusable_password()\n user.save()\n\n # TODO: Unlink social accounts & change password on each IDA.\n # Remove the activation keys sent by email to the user for account activation.\n Registration.objects.filter(user=user).delete()\n\n # Delete OAuth tokens associated with the user.\n retire_dot_oauth2_models(user)\n AccountRecovery.retire_recovery_email(user.id)", "def add_excl_parts(db, usernames):\n desc = \"Replicating the effect \" + \\\n \"of priming with common vs rare ideas in individual \" + \\\n \"brainstorming with revised interface\"\n exp_id= 'tN33ATDiCukWfj5G7'\n # exps = db.experiments.find()\n exp = db.experiments.find_one({'_id': exp_id})\n\n db.experiments.update({'_id': exp_id},\n {'$set': {'excludeUsers': list(usernames), 'description': desc}})\n # exp['excludeUsers'] = list(usernames)\n exp = db.experiments.find_one({'_id': exp_id})\n print 
exp['excludeUsers']\n print exp['description']", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "async def cmd_galtoguserwl(self, ctx, user_id): \n\n # ===== CHECK IF INPUT IS VALID\n try:\n user_id = int(user_id.replace(\"<\", '').replace(\"@\", '').replace(\"!\", '').replace(\">\", ''))\n except (IndexError, ValueError):\n await ctx.send_help('galtoguserwl', delete_after=Gallery.delete_after)\n return \n\n # ===== REMOVE OR ADD USER TO THE WHITELIST\n ret_msg = \"\"\n\n if user_id in self.cogset['user_wl']:\n self.cogset['user_wl'].remove(user_id)\n ret_msg = f'<@{user_id} has been **removed** from the gallery whitelist.'\n \n else:\n self.cogset['user_wl'].append(user_id)\n ret_msg = f'<@{user_id} has been **added** to the gallery whitelist.'\n\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=ret_msg, delete_after=Gallery.delete_after)\n return", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])" ]
[ "0.7682945", "0.6766191", "0.6598563", "0.62174505", "0.60659224", "0.5727882", "0.56629395", "0.5472015", "0.54637945", "0.54543555", "0.53860176", "0.53835195", "0.53404295", "0.5299187", "0.5298164", "0.5247873", "0.5234306", "0.5230454", "0.52074", "0.51751196", "0.5164737", "0.51634574", "0.51532143", "0.5134601", "0.51339114", "0.5132001", "0.51275533", "0.508116", "0.5071606", "0.5069833" ]
0.7764532
0
Return active constituent voters who have not been contacted since the last election and have not been served to a supporter in the last two days. Don't limit the size of the result set here; let APIs do that.
def getVotersToContact(self):
    two_days_ago = date.today() - timedelta(2)
    year_ago = date.today() - timedelta(365)
    return self.voters.filter(
        Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),
        Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),
        campaignstovoters__is_active=True,
        is_active=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def getVotersToDial(self):\n return self.getVotersToContact().exclude(\n (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),\n (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))", "def getUnconfirmedVolunteers(self, query):\n query = Volunteer.query(Volunteer.confirmed == False)\n return query", "async def get_non_voters(self, guild: discord.Guild, uservotes: dict):\n\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for member in guild.members:\n if player_role in member.roles:\n userkey = f\"{member.name}#{member.discriminator}\"\n if userkey not in uservotes:\n uservotes[userkey] = \"No vote\"\n\n return uservotes", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def find_outdated_game_dates(self):\n state = 'preview.gameData.status.detailedState'\n old = self._db.Games.find({state : {'$nin' : ['Final']}})\n return set([x['date'] for x in old])", "def find_users_missing_standup():\n token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']\n sc = SlackClient(token)\n channels = sc.api_call('channels.list')['channels']\n standup = (i for i in channels if i['name'] == SLACK_CHANNEL).next()\n members = standup['members']\n messages = sc.api_call('channels.history', channel=standup['id'])['messages']\n messages_within_last_10_hours = filter(check_in_date_range, messages) \n users_posted = (i['user'] for i in messages_within_last_10_hours if\n 'user' in i.keys())\n difference = set(members).difference(users_posted)\n return difference", "def get_voters():", "def get_voters():", "def get_current_visitors():\n return Visitor.objects.filter(acknowledged=False).order_by(\"arrival_time\")", "def get_expired_invoices(self):\n return self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n )", "def restricted_teams(self, user):\n return []", "def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])", "def get_available_invitees(self):\n return User.objects.exclude(pk=self.request.user.pk)", "def upcoming_meetups_query(cls):\r\n # Warning, this 
timestamp inequality is actually done as a string comparison\r\n # in the db for some reason. BUT, since epoch seconds won't get another digit\r\n # for another 275 years, we're good for now...\r\n return Meetup._query(Meetup.c.timestamp > time.time() - g.meetup_grace_period, data=True, sort='_date')", "def has_victim(self):\n # first-party\n from tcex.api.tc.v3.victims.victim_filter import VictimFilter\n\n victims = VictimFilter(Tql())\n self._tql.add_filter('hasVictim', TqlOperator.EQ, victims, TqlType.SUB_QUERY)\n return victims", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def get_recent_matches(self, limit=None):\n matches = (Match.objects\n .filter(Q(winner=self) | Q(loser=self))\n .order_by('-played_time'))\n return matches[:limit or MATCH_RESULT_LIMIT]", "def get_active_deposits():\n skip = 0\n graphql_client = GraphQLClient('https://api.thegraph.com/subgraphs/name/miracle2k/all-the-keeps')\n members = GqlQuery().fields(['address']).query('members').generate()\n bondedECDSAKeep = GqlQuery().fields(['totalBondAmount', members]).query('bondedECDSAKeep').generate()\n deposits_query = GqlQuery().fields(['id', 'lotSizeSatoshis', bondedECDSAKeep]).query('deposits', input={\n \"first: 1000 skip: $skip where\": \"{currentState: ACTIVE}\"}).operation('query', name='GetActiveDeposits',\n input={\"$skip\": \"Int!\"}).generate()\n\n params = {\"skip\": skip}\n result = jsonpickle.decode(graphql_client.execute(deposits_query, variables=params))[\"data\"][\"deposits\"]\n deposits = result\n while len(result) == 1000:\n params[\"skip\"] += 1000\n result = jsonpickle.decode(graphql_client.execute(deposits_query, variables=params))[\"data\"][\"deposits\"]\n deposits += result\n return deposits", "def get_recent_matches(self, limit=None):\n return (Match.objects\n .filter(company=self)\n .order_by('-played_time')[:limit or MATCH_RESULT_LIMIT]\n )", "def get_users_with_missing_data() -> Set[str]:\n users_data = {user[\"_source\"][\"VENDOR_UUID\"] for user in Handlers.elastic_handler.get_all_today_data(\n _type=\"status\",\n date_start=dt.date.today() + dt.timedelta(days=1),\n date_end=dt.date.today() + dt.timedelta(days=7),\n )}\n\n all_tokens = Handlers.token_handler.get_all_today_data(_type=\"token\")\n to_dict = {dict_[\"_source\"][\"VENDOR_UUID\"]: dict_[\"_source\"][\"TOKEN\"] for dict_ in all_tokens}\n\n return set(dict(filter(lambda item_tup: item_tup[0] not in users_data, to_dict.items())).values())", "def eligible(cls, lost_count):\n return cls.base_query().filter(lost=lost_count)", "def get_queryset(self):\n return Person.objects.filter(expiry_date__gt=timezone.now())", "def getAvailableTimeslots(self, allTimeslots) -> [Timeslot]:\r\n # List with all Timeslots any of the Teachers is not available at.\r\n notAvailableTimeslotsTeachers = flatMap(lambda t: t.not_available_timeslots, self.teachers)\r\n # notAvailableTimeslotsTeachers = [item for sublist in map(lambda t: t.not_available_timeslots, self.teachers) for item in sublist]\r\n # If Lesson can only take place on forenoon, create list with all afternoon timeslots.\r\n if self.course.only_forenoon:\r\n notAvailableTimeslotsForenoon = list(filter(lambda t: t.number not in Timeslot.getForenoonTimeslotNumbers(), allTimeslots))\r\n else:\r\n notAvailableTimeslotsForenoon = []\r\n\r\n timeslots = [x for x in allTimeslots if x not in (notAvailableTimeslotsTeachers + notAvailableTimeslotsForenoon)]\r\n if 
self.available_timeslots: # If list is not empty. Else no restrictions.\r\n timeslots = [x for x in timeslots if x in self.available_timeslots]\r\n\r\n return timeslots", "def newbies(self):\n newness = datetime.now() - timedelta(days=self.DAYS_FOR_NEWBIE_CHECK)\n newbies = (\n self.valid_choices.filter(\n Q(roster__accounthistory__start_date__gte=newness)\n & Q(roster__accounthistory__end_date__isnull=True)\n )\n .distinct()\n .order_by(\"db_key\")\n )\n return list(newbies)", "def get_unresolved_future_prices():\n #TODO this is inefficient, hits the db A LOT\n latest_bitcoin_time = get_latest_bitcoin_time()\n\n potentially_unresolved = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time\n #TODO would like a __gt condition somehow\n )\n\n unresolved_future_prices = []\n for p in potentially_unresolved:\n has_no_returned_amounts_from_before_window = Returned_Amount.objects.filter(to_prediction__future_price=p, from_received_amount__time__lt=F('from_received_amount__prediction__future_price__time_window_closes')).count() == 0\n if has_no_returned_amounts_from_before_window:\n has_received_amounts_from_before_window = Received_Amount.objects.filter(prediction__future_price=p, time__lt=F('prediction__future_price__time_window_closes')).count() > 0\n if has_received_amounts_from_before_window:\n bitcoin_price_exists = Bitcoin_Price.objects.filter(time=p.time_to_match_price).count() == 1\n if bitcoin_price_exists:\n unresolved_future_prices.append(p)\n\n return unresolved_future_prices\n\n \"\"\"\n The following commented-out method:\n - assumes that there is always a bitcoin_price for every minute before the\n last bitcoin_price\n - assumes that every future_prediction before the last returned_amount has\n been evaluated\n ...I am not willing to make these assumptions\n \n latest_bitcoin_time = get_latest_bitcoin_time()\n\n try:\n latest_returned_amount = Returned_Amount.objects.order_by('-from_received_amount__prediction__future_price__time_to_match_price')[0]\n latest_returned_time = latest_returned_amount.from_received_amount.prediction.future_price.time_to_match_price\n except IndexError:\n latest_returned_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\n unresolved_future_prices = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time,\n time_to_match_price__gt=latest_returned_time\n )\n\n return unresolved_future_prices\n \"\"\"", "def get_queryset(self):\n unitlist = get_units_visible_to_user(self.request.user)\n\n return Candidate.objects.filter(\n appointments__committee__unit__in=unitlist,\n )", "def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)" ]
[ "0.6028861", "0.5758767", "0.5515115", "0.54826623", "0.5425473", "0.5403842", "0.53461355", "0.5314975", "0.5251976", "0.5220554", "0.5220554", "0.5193499", "0.51809436", "0.51725066", "0.517217", "0.5158285", "0.51561767", "0.5152371", "0.51470834", "0.51421833", "0.51222056", "0.51121193", "0.5101381", "0.5098493", "0.5082552", "0.5066987", "0.50647473", "0.5043818", "0.5035207", "0.5031032" ]
0.65486276
0
Return active constituent voters with valid phone contact information who have not been contacted since the last election. Don't limit the size of the result set here; let APIs do that.
def getVotersToDial(self):
    return self.getVotersToContact().exclude(
        (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),
        (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)", "def get_recent_contacts(user, limit=5, timespan_days=14) -> typing.List[Contact]:\n timespan_recent = datetime.now().astimezone() - timedelta(days=timespan_days)\n contacts_recent = (\n Contact.objects.filter(interactions__was_at__gt=timespan_recent)\n .filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_recent)", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def get_last_conversations(self):\n email_token = auth.current_user()[0]\n user_data, last_messages = self.friend_database.get_conversations(email_token)\n last_messages = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in last_messages]\n for i in range(len(last_messages)):\n last_messages[i][\"timestamp\"] = last_messages[i][\"timestamp\"].isoformat()\n response = []\n for i in range(len(last_messages)):\n response.append({\"user\": user_data[i], \"last_message\": last_messages[i]})\n return json.dumps(response), 200", "def imps_by_me(self):\n return self.caller.roster.accounthistory_set.last().initiated_contacts.all()", "def get_frequent_contacts(user, limit=5) -> typing.List[Contact]:\n contacts_frequent = (\n Contact.objects.filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_frequent)", "def previous_imps_by_me(self):\n return FirstContact.objects.filter(\n from_account__in=self.caller.roster.previous_history\n )", "def get_all_candidates(self) -> list:", "def get_allowed_vos():\n return get_vos(filter_by_existing_users(filter_out_bans(read_mapfiles(), read_banfile())))", "def getValidCertifications(self):\n certs = []\n today = date.today()\n for c in self.getCertifications():\n validfrom = c.getValidFrom() if c else None\n validto = c.getValidTo() if validfrom else None\n if not validfrom or not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if (today >= validfrom and today <= validto):\n certs.append(c)\n return certs", "def active_comics():\n # FUTURE: Should not include ended comics?\n 
return Comic.objects.exclude(active=False)", "def get_explicit_community_match(self) -> list:\n return self.matching", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def get_candidates(data):\n return data.groups[\"Candidates\"].objects", "def get_active(self):\n return self.get_challenges().filter(status='A')", "def get_clients_to_be_reactivated(file=\"db.json\") -> List[Client]:\n with TinyDB(file) as db:\n query = Query()\n result = db.search(query[\"rem date\"].test(contact_now))\n output = []\n for client in result:\n output.append(Client(client[\"first name\"], client[\"last name\"],\n client[\"last visit\"], client[\"rem date\"],\n client[\"email\"]\n ))\n return output", "def candidates(self):\n return self.related_users.all()", "def candidates_all():\n return jsonify(candidate.get_candidates())", "def get_recipients(self):\n return [\n self.obj.activity.owner\n ] + [\n slot_participant.participant.user for slot_participant\n in self.obj.slot_participants.all()\n if (\n slot_participant.status == 'registered' and\n slot_participant.participant.status == 'accepted'\n )\n ]", "def teammates(self):\n return [\n p for p in self.roster.participants\n if p.participant_id != self.participant_id\n ]", "def ldap_get_live_onfloor():\n members = []\n onfloor = _ldap_get_group_members('onfloor')\n for member in onfloor:\n if ldap_get_roomnumber(member) and not ldap_is_eboard(member):\n members.append(member)\n\n return members", "def customers_presence(self):\n return self._customers_presence", "def get_candidates(self, cloud_name, jobs, count, return_only_all_idle=False):\n\n asg_info = self.phantom_client.get_autoscale_groups_info(self.phantom_client.asg.name)\n all_instances_info = asg_info[self.phantom_client.asg.name]['instances']\n instances = self.phantom_client.get_alive_instnaces(all_instances_info)\n\n localjobs = copy.copy(jobs)\n\n idle_list = []\n nonidle_list = []\n for instance in instances:\n if instances[instance]['cloud_name'] != cloud_name:\n continue\n job_matching_found = False\n for job in localjobs.list:\n if instances[instance]['public_dns'] == job.node:\n #nonidle_list.append( (instance, job.running, instances[instance]) )\n nonidle_list.append( (instance, job.progress, instances[instance]) )\n\n localjobs.list.remove(job)\n job_matching_found = True\n break\n if not job_matching_found:\n idle_list.append( (instance, instances[instance]) )\n\n # Truncate idle list if needed (in case there are more idle instances than count)\n # Does not do anything if count >= len(idle_list)\n\n if return_only_all_idle:\n # DONE if this flag is set\n return idle_list\n\n idle_list = idle_list[:count]\n\n if idle_list:\n idle_list_str = \"\"\n for instance in idle_list:\n idle_list_str += \"%s:%s,\" % (instance[0], instance[1]['public_dns'])\n LOG.info(\"OO found idle candidates for termination in %s: %s\" % (cloud_name, idle_list_str))\n\n # Sort by the run time in the decreasing order\n sorted_nonidle_list = sorted(nonidle_list, key=operator.itemgetter(1), reverse=True)\n\n remaining_count = count - len(idle_list)\n # Truncate sorted non-idle list if needed (in case remaining_count < len(sorted_nonidle_list))\n sorted_nonidle_list = sorted_nonidle_list[:remaining_count]\n\n sorted_nonidle_list_instances_only = []\n if sorted_nonidle_list:\n nonidle_list_str = \"\"\n for atuple in sorted_nonidle_list:\n nonidle_list_str += \"%s:%s:%s,\" % 
(atuple[0], atuple[2]['public_dns'], atuple[1])\n sorted_nonidle_list_instances_only.append((atuple[0], atuple[2] ))\n LOG.info(\"OO found non-idle candidates for termination in %s: %s\" % (cloud_name, nonidle_list_str))\n\n total_found = len(idle_list)+len(sorted_nonidle_list_instances_only)\n if not total_found == count:\n LOG.info(\"OO can't supply enough (%d) instances for termination. Found only %d\", count, total_found)\n\n return idle_list, sorted_nonidle_list_instances_only", "def retrieve_exact_commutes(self):\n for destination in self.tenants:\n try:\n results = retrieve_exact_commute_rent_algorithm(self.homes[:NUMBER_OF_EXACT_COMMUTES_COMPUTED],\n destination,\n destination.commute_type,\n with_traffic=destination.traffic_option)\n\n # Store the results to the homes\n for i in range(len(results)):\n duration_seconds = results[i][0][0]\n distance_meters = results[i][0][1]\n if duration_seconds is not None and distance_meters is not None:\n self.homes[i].exact_commute_times[destination] = int(duration_seconds / 60)\n\n except Distance_Matrix_Exception as e:\n print(\"Caught: \" + e.__class__.__name__)", "def clients_with_team_access(self):\n from lastuser_core.models.client import CLIENT_TEAM_ACCESS\n return [cta.client for cta in self.client_team_access if cta.access_level == CLIENT_TEAM_ACCESS.ALL]", "def test_special_contacts(self):\n\n vcards = []\n\n # Generate a contact with no email address\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with the same name but different phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with no name\n current_contact = bt_contacts_utils.VCard()\n current_contact.email = \"{}@gmail.com\".format(\n bt_contacts_utils.generate_random_string())\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with random characters in its name\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = bt_contacts_utils.generate_random_string()\n current_contact.last_name = bt_contacts_utils.generate_random_string()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n bt_contacts_utils.create_new_contacts_vcf_from_vcards(\n self.contacts_destination_path, PSE_CONTACTS_FILE, vcards)\n\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n\n return self.connect_and_verify(phone_numbers_added)", "def list_valid(self):\n contacts = AccountHistory.objects.claimed_impressions(self.caller.roster)\n if \"list\" in 
self.switches:\n if \"previous\" in self.switches:\n contacts = AccountHistory.objects.filter(\n contacted_by__in=self.caller.roster.previous_history\n )\n self.msg(\n \"{wCharacters you have written first impressions of:{n %s\"\n % \", \".join(str(ob.entry) for ob in contacts)\n )\n return\n qs = AccountHistory.objects.unclaimed_impressions(self.caller.roster)\n if \"outstanding\" in self.switches:\n impressions = self.imps_of_me.filter(private=False, from_account__in=qs)\n authors_and_imps = [\n '{c%s{n: \"%s\"' % (ob.writer, ob.summary) for ob in impressions\n ]\n self.msg(\n \"First Impressions you have not yet reciprocated: \\n%s\"\n % \"\\n\".join(authors_and_imps)\n )\n return\n location = \"\"\n if \"here\" in self.switches:\n location = \"at your location \"\n qs = qs.filter(entry__character__db_location=self.caller.location)\n # filter out masked people\n qs = [\n ob\n for ob in qs\n if ob.entry.player.username.capitalize() == str(ob.entry.character)\n ]\n players = sorted(\n set(ob.entry.player for ob in qs), key=lambda x: x.username.capitalize()\n )\n self.msg(\n \"{wPlayers %syou haven't written a first impression for yet:{n %s\"\n % (location, \", \".join(str(ob) for ob in players))\n )" ]
[ "0.6920536", "0.57854915", "0.5222731", "0.50926673", "0.5040607", "0.502312", "0.50023216", "0.48708686", "0.48509404", "0.48265207", "0.48217788", "0.48114735", "0.47936308", "0.47893497", "0.47839564", "0.47823417", "0.47818604", "0.4778862", "0.47737798", "0.47727492", "0.47644523", "0.47436103", "0.4732465", "0.47127", "0.47012612", "0.4696725", "0.4692097", "0.46916696", "0.4673781", "0.4667727" ]
0.671149
1
Remove the user from 'workers' or 'prospects', if applicable.

user
    A TcsUser instance to remove from workers
def removeWorker(self, user):
    if user == self.owner:
        return None
    # Without these queries, there's no way to tell if anything actually gets removed.
    # Calling remove() on a user that is not in the set does not raise an error.
    if self.workers.filter(pk=user.id).exists():
        self.workers.remove(user)
        return self
    if self.prospects.filter(pk=user.id).exists():
        self.prospects.remove(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def remove(self, user_id):\n pass", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)", "def remove_judge(contest, user):\n _remove_role(contest, user, pcm.Judge)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def _purge_user(self, user):\n self.user_order.remove(user)\n del self.user_queue[user]\n del self.user_skip[user]", "def test_teams_remove_user_from_team_v1(self):\n pass", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network", "def test_teams_remove_user_from_team_v2(self):\n pass", "def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return 
http.Request('DELETE', url), parsers.parse_empty", "def delete_user(self, user):\n self.delete(user)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def remove_user(self, user):\n\n data = user.to_json()\n key = \"%s:%s\" % (self.channel_id, user.username)\n\n logging.info(data)\n # remove our users timestamp\n affected = self.redis_server.zrem(ENVIRONMENT['REDIS_PREFIX'] + 'users_timestamp',key)\n logging.info(\"removed user timestamp(%d): %s\" % (affected, key))", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "async def kick(self, user: User):\n coro = self._state.remove_team_member(self.id, user.id)\n await coro", "def remove_users(self, *users):\r\n pass", "def removeUserId(self, user_id):\n self.__register_user_ids.discard(user_id)", "def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)", "def remove_user(self, user_id):\n if user_id in self:\n user = self[user_id]\n del self[user_id]\n return user" ]
[ "0.6894938", "0.68542784", "0.685336", "0.67998946", "0.6638343", "0.6393456", "0.63634795", "0.6293484", "0.6282741", "0.6280759", "0.6247314", "0.62332475", "0.6231962", "0.6227879", "0.61912465", "0.6178004", "0.6158775", "0.6148308", "0.6134449", "0.6126714", "0.6120114", "0.60983396", "0.6096013", "0.606499", "0.5955821", "0.5939528", "0.5925723", "0.5921348", "0.5916311", "0.59012145" ]
0.7817377
0
Return the number of voters a user has contacted for the campaign.
def voterContactCount(self, user):
    return self.votercontact_set.filter(user=user).count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents", "def nay_voter_cnt(self):\n\n return len(self._nay_voters())", "def present_voter_cnt(self):\n\n return len(self._present_voters())", "def people_count(self):\n return len(self.__users)", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def abstain_voter_cnt(self):\n\n return len(self._abstain_voters())", "def yay_voter_cnt(self):\n\n return len(self._yay_voters())", "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "async def _vote_count(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await self.get_vote_channel(guild)\n if isinstance(channel, str):\n return await ctx.send(channel)\n\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"I couldn't identify a voting channel. Please specify one explicitly.\"\n ))\n else:\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"That channel has too many messages!\"\n \" Please ask a host for manual vote count.\"\n ))\n\n if len(history) < 1:\n return await ctx.send(_(\"{} is empty.\").format(channel.mention))\n\n user_votes = {}\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for message in history:\n author = message.author\n if player_role not in author.roles:\n continue\n vote = self.get_vote_from_message(message)\n if not vote:\n continue\n user_votes[f\"{author.name}#{author.discriminator}\"] = vote\n\n user_votes = await self.get_non_voters(guild, user_votes)\n\n votes = {}\n for user in user_votes:\n val = user_votes[user].capitalize()\n try:\n votes[val].append(user)\n except KeyError:\n votes[val] = [user]\n\n # max votes first\n votes = dict(sorted(\n votes.items(), key=lambda item: len(item[1]), reverse=True\n ))\n\n # Pop and add stuff back to dict for ordering purpose.\n try:\n votes[\"VTNL\"] = votes.pop(\"Vtnl\")\n except KeyError:\n pass\n try:\n votes[\"No vote\"] = votes.pop(\"No vote\")\n except KeyError:\n pass\n\n txt = \"\"\n\n for i, vote in enumerate(votes, start=1):\n voters = votes[vote]\n\n if vote == \"VTNL\":\n txt += _(\"\\n\\n**{}** - {} ({})\").format(vote, len(voters), \", \".join(voters))\n elif vote == \"No vote\":\n txt += _(\"\\n\\n**Not voting** - {} ({})\").format(len(voters), \", \".join(voters))\n else:\n txt += _(\"\\n{}. 
**{}** - {} ({})\").format(i, vote, len(voters), \", \".join(voters))\n\n title = _(\"Vote Count\")\n\n embed = discord.Embed(\n color=0x00CDFF, title=title,\n description=_(\"__Counting from {} channel.__\\n\\n{}\").format(\n channel.mention, txt.strip()\n )\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n f\"**{title}**\\n\\n__Counting from {channel.mention}\"\n f\" channel.__\\n\\n{txt.strip()}\"\n )", "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")", "def get_voters():", "def get_voters():", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def get_number_of_char_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'CharitableSponsor'])\n return n_agents", "def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def bounced_member_count(self):\n return self._bounced_member_count", "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def get_amount_users() -> User:\n return User.objects.all().count()", "async def membercount(ctx, *args):\n if ctx.message.channel.is_private:\n await bot.delete_message(ctx.message)\n return\n\n g = ctx.message.server\n\n gid = g.id\n membs = str(len(g.members))\n membs_on = str(len([m for m in g.members if not m.status == Status.offline]))\n users = str(len([m for m in g.members if not m.bot]))\n users_on = str(len([m for m in g.members if not m.bot and not m.status == Status.offline]))\n bots = str(len([m for m in g.members if m.bot]))\n bots_on = str(len([m for m in g.members if m.bot and not m.status == Status.offline]))\n created = str(g.created_at)\n \n em = Embed(title=\"Membercount\")\n em.description = \"```\\n\" \\\n \"Members: %s (%s)\\n\" \\\n \" Users: %s (%s)\\n\" \\\n \" Bots: %s (%s)\\n\" \\\n \"Created: %s\\n\" \\\n \"```\" % (membs, membs_on, users, users_on, bots, bots_on, created)\n\n await client.send_message(ctx.message.channel, embed=em)\n await client.delete_message(ctx.message)", "def candidate_count(self):\n return self.candidate_set.count()", "def candidate_count(self):\n return self.candidate_set.count()", "def getNumVassals(self, iPlayer):\n\t\tiCounter = 0\n\t\tfor iCiv in range(con.iNumPlayers):\n\t\t\tif iCiv != iPlayer:\n\t\t\t\tif gc.getPlayer(iCiv).isAlive():\n\t\t\t\t\tif gc.getTeam(gc.getPlayer(iCiv).getTeam()).isVassal(iPlayer):\n\t\t\t\t\t\tiCounter += 1\n\t\treturn iCounter", "def num_votes(self):\n return sum(self.votes_per_count)", "async def vouch(self, ctx, user: discord.Member=None):\n\n if user:\n if user.id == self.bot.user.id:\n user = ctx.message.author\n response = \"- thanks for vouching for me, your robot overlord.\"\n await self.bot.say(user.mention + 
response)\n\n elif user.id == ctx.message.author.id:\n response = \"- you can't vouch for yourself, you silly goose\"\n await self.bot.say(user.mention + response)\n\n else:\n # see if this author has previously vouched for this user.\n for item in self.vouchers:\n if item['VOUCHER'] == ctx.message.author.display_name:\n if item['USER'] == user.display_name:\n response = \" you already vouched for this user\"\n await self.bot.say(ctx.message.author.mention +\n response)\n return\n\n # check if USER has already been vouched, record the new name\n for item in self.vouchers:\n if item['USER'] == user.display_name:\n if not item['VOUCHER'] == \\\n ctx.message.author.display_name:\n # case: we have a USER who has already been vouched\n # vouched for again, by a different discord member\n item['VOUCHER'] = item['VOUCHER'] + \", \" + \\\n ctx.message.author.display_name\n fileIO(\"data/vouchers/vouchers.json\", \"save\",\n self.vouchers)\n await self.bot.say(ctx.message.author.mention +\n \", recorded.\")\n await self.bot.say(user.display_name +\n \" now has multple vouches.\")\n return\n\n # record the vouching\n self.vouchers.append({\"VOUCHER\": ctx.message.author.display_name,\n \"USER\": user.display_name, \"ID\": user.id,\n \"DATE\": str(\"{:%B %d, %Y}\".format(\n datetime.datetime.now()))})\n fileIO(\"data/vouchers/vouchers.json\", \"save\", self.vouchers)\n response = \" - your voucher for \" + user.mention + \\\n \" has been recorded.\"\n await self.bot.say(ctx.message.author.mention + response)\n\n else:\n response = \"Usage: !vouch <user>\"\n await self.bot.say(response)", "def get_members_count(self, *args, **kwargs):\n return self.bot.get_chat_members_count(self.id, *args, **kwargs)" ]
[ "0.6371157", "0.63698155", "0.63018495", "0.60611516", "0.6054053", "0.6027923", "0.6018428", "0.59854454", "0.5949555", "0.59482515", "0.5914123", "0.58427924", "0.57395315", "0.57395315", "0.57389605", "0.5730802", "0.57079923", "0.57007176", "0.56978345", "0.5668029", "0.5659213", "0.56583387", "0.5647654", "0.560794", "0.5596477", "0.5596477", "0.5586592", "0.55668133", "0.5557891", "0.55402887" ]
0.7715844
0
Returns an indented representation of the nested dictionary.
def pretty_repr(self, num_spaces=4): def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output", "def format_dict(dictionary, depth=0):\n tab = \" \" * 4\n string = \"{\\n\"\n for key, val in dictionary.items():\n string += depth * tab \n string += \"{}: \".format(key)\n if type(val) is dict:\n string += format_dict(val, depth + 1)\n \n else:\n if type(val) is str:\n fmt = \"'{}'\\n\"\n else:\n fmt = \"{}\\n\"\n string += fmt.format(val)\n string += (depth) * tab + '}\\n'\n return string", "def pretty_print(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key) + \":\")\n if isinstance(value, dict):\n pretty_print(value, indent + 1)\n else:\n print('\\t' * (indent + 1) + str(value))", "def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)", "def prettyPrintDictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(\"{ }\")\r\n return\r\n\r\n # Recursive case\r\n stream.write(\"{\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n keys.sort()\r\n for key in keys : # Sorted order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(\"}\")", "def pretty(d, indent=0):\n\tret_str = ''\n\tfor key, value in d.items():\n\n\t\tif isinstance(value, collections.Mapping):\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\n'\n\t\t\tret_str = ret_str + pretty(value, indent + 1)\n\t\telse:\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\t' * (indent + 1) + ' => ' + str(value) + '\\n'\n\n\treturn ret_str", "def dict_pretty_print(D: dict, indent_lvl=0):\n print(\"Using 3 decimal places.\")\n base_indent = indent_lvl * \" \"\n indent = (indent_lvl+2)*\" \"\n print(f\"{base_indent}\" + \"{\")\n for key, value in D.items():\n print(f\"{indent}{key}: \", end=\"\")\n if type(value) is dict:\n print(\"\")\n dict_pretty_print(value, indent_lvl + 2)\n else:\n print(f\"{value:.3f}\")\n print(f\"{base_indent}\" + \"}\")", "def pretty_print(dictionary: dict):\n return 
json.dumps(dictionary, indent=4)", "def print_dict_tree(d, max_depth=None, indent=0):\n def _recurse(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict) and indent != max_depth:\n print(); _recurse(value, indent + 1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))\n \n return _recurse(d)", "def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return", "def prettyPrintODictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n global OTabRepr\r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(OTabEmpty[OTabRepr]) # \"o{ }\"\r\n return\r\n\r\n # Recursive case\r\n stream.write(OTabLeft[OTabRepr]) # \"o{\"\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n for key in keys : # Insertion order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\"(\"+repr(key)+\", \")\r\n else :\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\")\")\r\n \r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(OTabRight[OTabRepr]) # \"}\"\r", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def format_dictionary(dct, indent=4):\n return json.dumps(dct, indent=indent, sort_keys=True)", "def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))", "def pprint(self):\n import json\n return json.dumps(OrderedDict(self.items()), indent=4)", "def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))", "def _pretty_print(self, json_dict):\n if self.prettyprint:\n return \"\\n\" + json.dumps(json_dict, indent=self.indent)\n return json.dumps(json_dict)", "def tree_view(dictionary, level=0, sep=\"| \"):\n return \"\".join([\"{0}{1}\\n{2}\".format(sep * level, k,\n tree_view(v, level + 1, sep=sep) if isinstance(v, dict)\n else \"\") for k, v in dictionary.items()])", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def prettify(tree, indent=0):\n for key, value in six.iteritems(tree):\n if key == FILE_MARKER:\n if value:\n print((' ' * indent + str(value)))\n else:\n print((' ' * indent + str(key)))\n if isinstance(value, dict):\n prettify(value, indent+1)\n else:\n print((' ' * (indent+1) + str(value)))", "def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))", "def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)", "def pretty(d, indent=0):\n sp = \" \"\n t = \"\"\n \n if isinstance(d, dict):\n l = len(d)\n c = 0\n t += \"<type 'dict'>:{\\n\"\n for key, value in 
d.items():\n t += sp * (indent + 1) + \"'\" + str(key) + \"':\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"}\"\n elif isinstance(d, list):\n l = len(d)\n c = 0\n t += \"<type 'list'>:[\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"]\"\n elif isinstance(d, tuple):\n l = len(d)\n c = 0\n t += \"<type 'tuple'>:(\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \")\"\n else:\n t += str(type(d)) + \":'\" + str(d) + \"'\"\n \n return t", "def _pretty_print(value, indent=''):\n keys = list(value.keys())\n keys.sort()\n for k in keys:\n v = value[k]\n if type(v) == dict:\n print(\"%s%s:\"%(indent, k))\n _pretty_print(v, indent+' ')\n elif type(v) == str:\n if '\\n' in v:\n print(indent+'%s: |'%k)\n for l in v.split('\\n'):\n print(indent+' '+l)\n else:\n print(\"%s%s: %s\"%(indent, k, v))\n else:\n dump = yaml.dump(v)\n # #1617\n # newer versions of python-yaml append the '...' document end\n # syntax. as YAML functions fine w/o it, and as it is\n # confusing to users who are just getting a single scalar, we\n # strip it\n if dump.endswith('\\n...\\n'):\n dump = dump[:-4]\n \n sys.stdout.write(\"%s%s: %s\"%(indent, k, dump))", "def ppdict(d):\n print '{'\n keys=d.keys()\n keys.sort()\n for k in keys:\n spacing=\" \" * (16-(len(repr(k))+1))\n print \"%s:%s%s,\" % (repr(k),spacing,repr(d[k]))\n print '}'", "def dumps(self, indent=1):\n str_keys_dict = OrderedDict({str(k): v for k, v in self.items()})\n for k, v in str_keys_dict.items():\n if isinstance(v, dict):\n str_keys_dict[k] = OrderedDict({str(k1): v1 for k1, v1 in v.items()})\n for k1, v1 in str_keys_dict[k].items():\n if isinstance(v1, dict):\n str_keys_dict[k][k1] = OrderedDict({str(k2): v2 for k2, v2 in v1.items()})\n return json.dumps(str_keys_dict, indent=indent)", "def print_recursive(value, indent=0):\n tabs = lambda count: '' + str(' ' * (indent + count))\n if isinstance(value, dict):\n to_print = '{}{}'.format(tabs(1), '{')\n for key, item in value.iteritems():\n to_print += '\\n{}{}:\\n{}'.format(tabs(2), key, print_recursive(item, indent + 2))\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', '}')\n if isinstance(value, list):\n to_print = '{}['.format(tabs(1))\n for item in value:\n to_print += '\\n' + print_recursive(item, indent + 1)\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', ']')\n if isinstance(value, str) or isinstance(value, unicode):\n return tabs(1) + '\\'' + value + '\\''\n if len(str(value)) > 0:\n return tabs(1) + str(value) + ''\n return ''", "def _walk(self, d, depth=0):\n\n output = ''\n indent = 3\n header_width = 35 - depth*indent\n\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n output += \"\".ljust(depth * indent)+k+'\\n'\n output += self._walk(v, depth + 1)\n else:\n if isinstance(v, np.ndarray):\n # np array or matrix\n shape = v.shape\n if len(shape) == 1:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"array (%d)\" % (v.shape[0]) + '\\n'\n\n elif len(shape) == 2:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"matrix (%d,%d)\" % (v.shape[0], v.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], 
str):\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : \" + str(item) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], np.ndarray):\n # List of arrays\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n if len(item.shape) == 1:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : array (%d)\" % (item.shape[0]) + '\\n'\n\n elif len(item.shape) == 2:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : matrix (%d,%d)\" % (item.shape[0], item.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], dict):\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent) + \"[\"+str(item_id)+\"]\" + '\\n'\n output += self._walk(item, depth + 2)\n\n else:\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : \" + str(v) + '\\n'\n\n return output", "def _pretty_json_dump(d):\n return json.dumps(d, sort_keys=True, indent=3)", "def pretty_repr(x: Any, num_spaces: int = 4) -> str:\n\n if isinstance(x, FrozenDict):\n return x.pretty_repr()\n else:\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return pretty_dict(x)" ]
[ "0.73564094", "0.7016583", "0.7004065", "0.69742304", "0.69219863", "0.6862406", "0.68234503", "0.6813462", "0.6663069", "0.6650337", "0.66487944", "0.6608814", "0.65994126", "0.65836185", "0.6566666", "0.6555802", "0.6501829", "0.6487838", "0.6477041", "0.6438375", "0.6435267", "0.6265773", "0.62417954", "0.62247425", "0.619704", "0.61966306", "0.6168665", "0.61645025", "0.6160097", "0.6127049" ]
0.70187014
1
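The record above pairs its docstring with the `FrozenDict.pretty_repr` method. As a quick, hedged illustration of the output that implementation produces, here is a minimal sketch; the `flax.core` import path and the dictionary contents are assumptions made purely for the example.

```python
# Minimal sketch of FrozenDict.pretty_repr as implemented in the record above.
# The import path and the example values are assumptions for illustration.
from flax.core import FrozenDict  # assumed import path

params = FrozenDict({'dense': {'kernel': [1, 2], 'bias': [0]}})
print(params.pretty_repr())
# Produces (per the implementation shown above, 4-space indent by default):
# FrozenDict({
#     dense: {
#         kernel: [1, 2],
#         bias: [0],
#     },
# })
```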
Create a new FrozenDict with additional or replaced entries.
def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def new_dict(key, value, n_keys=0):\n # With JIT disabled, ignore all arguments and return a Python dict.\n return dict()", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def impl_new_dict(key, value, n_keys=0):\n if any([\n not isinstance(key, Type),\n not isinstance(value, Type),\n ]):\n raise TypeError(\"expecting *key* and *value* to be a numba Type\")\n\n keyty, valty = key, value\n\n def imp(key, value, n_keys=0):\n if n_keys < 0:\n raise RuntimeError(\"expecting *n_keys* to be >= 0\")\n dp = _dict_new_sized(n_keys, keyty, valty)\n _dict_set_method_table(dp, keyty, valty)\n d = _make_dict(keyty, valty, dp)\n return d\n\n return imp", "def _mask_dict(self, value):\n\n return MaskedDict(value)", "def copy(self):\n return AttrDict(dict(self).copy())", "def make_globals(\n self, d: t.Optional[t.MutableMapping[str, t.Any]]\n ) -> t.MutableMapping[str, t.Any]:\n if d is None:\n d = {}\n\n return ChainMap(d, self.globals)", "def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def 
__init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def copy(self):\n return pdict(dict.copy(self))", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def fillDict(valDict, nowDate=datetime.now()):\n copyDict = copy.deepcopy(valDict)\n copyDict[names.year] = nowDate.year\n copyDict[names.month] = nowDate.month\n copyDict[names.day] = nowDate.day\n return copyDict", "def memodict(f):\r\n class memodict(defaultdict):\r\n def __missing__(self, key):\r\n ret = self[key] = f(key)\r\n return ret\r\n return memodict().__getitem__", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def extend(d, k, v):\n\tn = d.copy()\n\tn[k] = v\n\treturn n", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def __post_init__(self) -> None:\n setattr(self, _FROZEN, True)", "def frozenset(self) -> frozenset:\n return frozenset(self)", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover" ]
[ "0.7146128", "0.68393993", "0.65283275", "0.6507282", "0.5816068", "0.5484915", "0.5304193", "0.5286141", "0.5152492", "0.5124631", "0.5105132", "0.50970227", "0.5096764", "0.5051437", "0.5042281", "0.49775112", "0.49529138", "0.4934736", "0.49311805", "0.48632023", "0.48579502", "0.48420447", "0.48328725", "0.4832583", "0.48117724", "0.48107445", "0.4801841", "0.477287", "0.47656885", "0.4764035" ]
0.70710015
1
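As with the previous record, a short usage sketch may help: the `copy` method above returns a new `FrozenDict` with entries added or overridden while leaving the original untouched. The import path and values below are illustrative assumptions.

```python
# Usage sketch for FrozenDict.copy from the record above.
from flax.core import FrozenDict  # assumed import path

base = FrozenDict({'a': 1, 'b': 2})
updated = base.copy({'b': 20, 'c': 3})   # add 'c', replace 'b'

assert updated == FrozenDict({'a': 1, 'b': 20, 'c': 3})
assert base == FrozenDict({'a': 1, 'b': 2})  # original FrozenDict is unchanged
```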
Deep copy unfrozen dicts to make the dictionary FrozenDict safe.
def _prepare_freeze(xs: Any) -> Any: if isinstance(xs, FrozenDict): # we can safely ref share the internal state of a FrozenDict # because it is immutable. return xs._dict # pylint: disable=protected-access if not isinstance(xs, dict): # return a leaf as is. return xs # recursively copy dictionary to avoid ref sharing return {key: _prepare_freeze(val) for key, val in xs.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def _app_cache_deepcopy(obj):\n if isinstance(obj, dict):\n return dict((_app_cache_deepcopy(key), _app_cache_deepcopy(val))\n for key, val in obj.items())\n elif isinstance(obj, list):\n return list(_app_cache_deepcopy(val) for val in obj)\n elif isinstance(obj, SortedDict):\n return deepcopy(obj)\n return obj", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def dict2frozenset(d):\n return frozenset(d.items())", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def copy(self):\n return pdict(dict.copy(self))", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def __copy__(self):\n d = dict()\n 
d.update(self.items())\n return d", "def copy_forward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._forward_mapping)", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def __deepcopy__(self, memo):\n from copy import deepcopy\n return self.__class__(deepcopy(self.items(), memo), self.strict)", "def args_frozen(self):\n return {k: v for k, v in self.args.items() if k not in self._traversable}", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def copy(self):\n return AttrDict(dict(self).copy())", "def deep_copy(old_dict, parent=None, depth=None, main=None):\n\n # Is this a copy starting from the top level?\n if isinstance(old_dict, configobj.ConfigObj):\n new_dict = configobj.ConfigObj('',\n encoding=old_dict.encoding,\n default_encoding=old_dict.default_encoding,\n interpolation=old_dict.interpolation)\n else:\n # No. It's a copy of something deeper down. If no parent or main is given, then\n # adopt the parent and main of the incoming dictionary.\n new_dict = configobj.Section(parent if parent is not None else old_dict.parent,\n depth if depth is not None else old_dict.depth,\n main if main is not None else old_dict.main)\n for entry in old_dict:\n # Avoid interpolation by using the version of __getitem__ from dict\n old_value = dict.__getitem__(old_dict, entry)\n if isinstance(old_value, configobj.Section):\n new_value = deep_copy(old_value, new_dict, new_dict.depth+1, new_dict.main)\n elif isinstance(old_value, list):\n # Make a copy\n new_value = list(old_value)\n elif isinstance(old_value, tuple):\n # Make a copy\n new_value = tuple(old_value)\n else:\n # It's a scalar\n new_value = old_value\n new_dict[entry] = new_value\n return new_dict", "def flatten(self):\n flat = {}\n for d in self.dicts:\n flat.update(d)\n return flat", "def get_pure_data_copy(self):\n import copy\n data=copy.copy(self)\n data.xp = data.xp.get_pure_data_copy()\n data.timetable = data.timetable.get_pure_data_copy() \n return data", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def deepupdate(self, other, copy=False):\n for k in other:\n if isinstance(other[k], self.__class__):\n if not k in self:\n self[k] = self.__class__()\n elif isinstance(self[k], self.__class__):\n pass\n elif isinstance(self[k], dict):\n self[k] = self.__class__(self[k]).rconvert()\n else:\n self[k] = self.__class__()\n self[k].deepupdate(other[k])\n else:\n if copy: self[k] = copymod.deepcopy(other[k])\n else: self[k] = other[k]\n return self" ]
[ "0.71733785", "0.7037678", "0.65864396", "0.6474158", "0.62573695", "0.6152454", "0.61428803", "0.6087053", "0.6052837", "0.60174745", "0.5997487", "0.59899086", "0.5918377", "0.5899443", "0.5898595", "0.5892593", "0.5735094", "0.572419", "0.5684948", "0.56120795", "0.5609732", "0.55691975", "0.5562032", "0.555424", "0.5553709", "0.55536914", "0.55452317", "0.5534845", "0.5526414", "0.5512912" ]
0.7742294
0
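`_prepare_freeze` above is an internal helper, but its effect is easy to show: plain dicts are copied recursively when frozen, so later mutation of the source does not leak into the `FrozenDict`. The sketch below assumes the usual `flax.core.freeze` entry point calls into this helper.

```python
# Behavioural sketch of what _prepare_freeze (above) gives freeze():
# nested plain dicts are copied rather than reference-shared.
from flax.core import freeze  # assumed import path

src = {'outer': {'inner': 1}}
frozen = freeze(src)
src['outer']['inner'] = 999       # mutate the original plain dict

assert frozen['outer']['inner'] == 1  # the frozen copy is unaffected
```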
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` mutable by transforming it into (nested) dict.
def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]: if isinstance(x, FrozenDict): # deep copy internal state of a FrozenDict # the dict branch would also work here but # it is much less performant because jax.tree_util.tree_map # uses an optimized C implementation. return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore elif isinstance(x, dict): ys = {} for key, value in x.items(): ys[key] = unfreeze(value) return ys else: return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def dict2frozenset(d):\n return frozenset(d.items())", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def fl_unfreeze_form(ptr_flform):\n _fl_unfreeze_form = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_form\", \\\n None, [cty.POINTER(xfdata.FL_FORM)], \\\n \"\"\"void fl_unfreeze_form(FL_FORM * form) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flform)\n _fl_unfreeze_form(ptr_flform)", "def unflatten_dict(flat):\n 
unflattened = dict()\n\n for key, value in sorted(flat.items(), key=_key_order):\n if '__' in key:\n key, subkey = key.split('__', 1)\n subkey, name = subkey.rsplit('__', 1)\n\n if name.isdigit():\n column_index = int(name)\n row_index = int(subkey)\n\n array = unflattened.setdefault(key, list())\n\n if len(array) == row_index:\n row = list()\n array.append(row)\n elif len(array) == row_index + 1:\n row = array[row_index]\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n if len(row) == column_index:\n row.append(value)\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n else:\n subdict = unflattened.setdefault(key, dict())\n if subkey.isdigit():\n subkey = int(subkey)\n\n inner = subdict.setdefault(subkey, dict())\n inner[name] = value\n\n else:\n unflattened[key] = value\n\n return unflattened", "def _unparse_dict(d, strategies=None):\n\n def _unparse_val(val):\n for instance_type, func in strategies:\n if isinstance(val, instance_type):\n return func(val)\n else:\n return val\n\n strategies = strategies or []\n out = dict()\n for k, v in d.items():\n if isinstance(v, dict):\n v = _unparse_dict(v, strategies=strategies)\n elif isinstance(v, list):\n v = [_unparse_val(val) for val in v]\n elif isinstance(v, tuple):\n v = tuple(_unparse_val(val) for val in v)\n else:\n v = _unparse_val(v)\n out[k] = v\n return out", "def invert_dict(d):\r\n if isinstance(d, dict):\r\n temp = d\r\n else:\r\n temp = dict(d)\r\n result = {}\r\n for key, val in temp.iteritems():\r\n if val not in result:\r\n result[val] = []\r\n result[val].append(key)\r\n return result", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n new_dct[key] = dct[key]\n\n return new_dct", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}", "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def fl_unfreeze_all_forms():\n _fl_unfreeze_all_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_all_forms\", \\\n None, [], \\\n \"\"\"void fl_unfreeze_all_forms() \"\"\")\n library.check_if_flinitialized()\n _fl_unfreeze_all_forms()", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to that last level, and key to\n # the final key 
in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base", "def rconvert(self):\n for k in self:\n if isinstance(self[k], dict):\n if not isinstance(self[k], AttrDict):\n self[k] = AttrDict(self[k])\n self[k].rconvert()\n return self", "def InvertDict(dict_in):\n return dict(zip(dict_in.values(), dict_in.keys()))", "def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]", "def _rev_dict(d):\n return {v: k for k, v in d.items()}", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)" ]
[ "0.678866", "0.63871247", "0.63075334", "0.6243246", "0.61387753", "0.61324066", "0.5833921", "0.56678116", "0.5610028", "0.5581237", "0.547987", "0.528664", "0.5166779", "0.51488274", "0.5131935", "0.513004", "0.5127286", "0.51143354", "0.5108335", "0.5104038", "0.50658894", "0.5040167", "0.50181496", "0.4999344", "0.49814385", "0.49768424", "0.49741423", "0.4973419", "0.49233168", "0.491484" ]
0.80311483
0
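A minimal round-trip sketch for the `unfreeze` implementation above; import path assumed as before.

```python
# unfreeze() returns an ordinary mutable (nested) dict that can be
# edited and frozen again.
from flax.core import freeze, unfreeze  # assumed import path

frozen = freeze({'layer': {'w': 1.0}})
mutable = unfreeze(frozen)
mutable['layer']['w'] = 2.0        # plain dicts are mutable again
assert freeze(mutable)['layer']['w'] == 2.0
```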
Create a new dict with additional and/or replaced entries. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.copy`.
def copy( x: Union[FrozenDict, Dict[str, Any]], add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict( {} ), ) -> Union[FrozenDict, Dict[str, Any]]: if isinstance(x, FrozenDict): return x.copy(add_or_replace) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x new_dict.update(add_or_replace) return new_dict raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def extend_dict(source_dict, diff=None, deep=False):\n if deep:\n new_dict = deepcopy(source_dict)\n else:\n new_dict = copy(source_dict)\n\n if diff:\n new_dict.update(diff)\n return new_dict", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def updated_with(orig_dict, *new_values):\n newdict = dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict", "def dict_merge(base, upd, inplace=False):\n assert quacks_like_dict(base), quacks_like_dict(upd)\n dst = base if inplace else deepcopy(base)\n\n stack = [(dst, upd)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst", "def override_dict_values(d1, d2):\n new = d1.copy()\n for k, v in d2.items():\n if isinstance(v, dict):\n new[k] = override_dict_values(new[k], d2[k])\n else:\n new[k] = v\n\n return new", "def add_to_dict(from_dict, to_dict):\n for k, v in list(from_dict.items()):\n if hasattr(v, 'copy') and callable(getattr(v, 'copy')):\n to_dict[k] = v.copy()\n else:\n to_dict[k] = v", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def merge_dict(d: dict, overwrite=False, inplace=False, **kwargs):\n nd = dict([(k, v) for k, v in d.items()] + [(k, v) for k, v in kwargs.items() if overwrite or k not in d])\n if inplace:\n d.update(nd)\n return d\n return nd", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def extend(primary: Mapping, *others: Mapping, in_place=False):\n others = flatten(others)\n if not in_place:\n primary = dict(primary or {})\n for other in others:\n if other is None:\n continue\n for key, value in other.items():\n primary[key] = value\n return primary", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def overwrite_dict(dict_base, dict_new, base_path=None):\n assert isinstance(dict_new, dict)\n for k in dict_new:\n # Add the current key to the path\n k_path = str(k) if base_path is None else f'{base_path}.{str(k)}'\n # Make sure that the key in the new dictionary matches one from the base dictionary\n assert k in dict_base, f'Could not find path {k_path} in the base dictionary'\n # Check that the types match between the base dictionary entry and the new one\n if dict_base[k] is not None:\n assert isinstance(type(dict_base[k]), type(dict_new[k])), \\\n 'The types at {} in the base dictionary do 
not match (expected {}, got {})'.format(\n k_path, str(type(dict_base[k])), str(type(dict_new[k])))\n # Recursively replace dictionary entries\n if isinstance(dict_base[k], dict):\n overwrite_dict(dict_base[k], dict_new[k], k_path)\n else:\n # Simply copy over leaf entries\n dict_base[k] = dict_new[k]", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def update_dict(original_dict, new_dict):\n if new_dict == None: return original_dict\n for k in new_dict:\n if k not in original_dict:\n original_dict[k] = []\n original_dict[k].append(new_dict[k])\n else: original_dict[k].append(new_dict[k])\n return original_dict", "def mergedict(x, y):\n z = x.copy()\n z.update(y)\n return z", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def _merge(old_dict, new_dict):\n dict3 = old_dict.copy()\n for k, v in new_dict.items():\n if k in dict3:\n dict3[k].append(v)\n else:\n dict3[k] = [v]\n return dict3", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def copy(self):\n return pdict(dict.copy(self))", "def update_dict(new,old):", "def add_to_dict(source_dict=None, working_dict=None, new_key=None, new_dict=None):\n if source_dict is None or working_dict is None or new_key is None or new_dict is None:\n raise RuntimeError(\"Invalid arguments passed, one of is == None.\")\n\n if working_dict[new_key] is None:\n working_dict[new_key] = new_dict\n else:\n working_dict[new_key].update(new_dict)\n\n return source_dict.update(working_dict)", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def merge_dict(own: dict, other: dict) -> dict:\n for element in other:\n if own.get(element, None) is None:\n own[element] = other[element]\n else:\n raise ValueError('Conflicting kwargs')\n return own" ]
[ "0.7882248", "0.69423527", "0.6703827", "0.66988987", "0.65337425", "0.6381466", "0.6357913", "0.63407815", "0.63111323", "0.6289166", "0.6279252", "0.626923", "0.62294686", "0.61992794", "0.6063225", "0.60579437", "0.6037078", "0.6023079", "0.5997253", "0.59942675", "0.5949621", "0.5936017", "0.5930166", "0.5910017", "0.5903298", "0.59001744", "0.5896106", "0.5849969", "0.5841193", "0.58295816" ]
0.8017656
0
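The module-level `copy` above accepts either a `FrozenDict` or a plain dict and returns the same container type. A hedged sketch, assuming `copy` is exported from `flax.core` (the case in newer Flax releases):

```python
# copy() mirrors FrozenDict.copy but also works on plain dicts.
from flax.core import FrozenDict, copy  # assumed import path / newer Flax

assert copy({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert copy(FrozenDict({'a': 1}), {'b': 2}) == FrozenDict({'a': 1, 'b': 2})
```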
Create a new dict where one entry is removed. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pop`.
def pop( x: Union[FrozenDict, Dict[str, Any]], key: str ) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]: if isinstance(x, FrozenDict): return x.pop(key) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x value = new_dict.pop(key) return new_dict, value raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def dict_pop(d, key):\n return d.pop(key)", "def remove_element( self, dictionary, key):\n\n _dict = dictionary.copy()\n _dict.pop(key, None)\n return _dict", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def clear_dict(d: dict) -> dict:\n # TODO delete if not used\n return {k: v for k, v in d.items() if v is not None}", "def popitem(self):\n return super(ReadOnlyDict, self).popitem()", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def pop(self, key, d=None):\n if self._can_del(key):\n r = dict.pop(self, key, d)\n self._post_del(key)\n return r\n else:\n raise Exception('Cannot `pop`, deletion of key \"{}\" failed.'.format(key))", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def _dpop(dictionary, key, default=None):\n try:\n ret = dictionary[key]\n del dictionary[key]\n except KeyError:\n ret = default\n\n return ret", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def __delitem__(self, key):\n if not self._set:\n raise TypeError('This dict is read-only')\n return self._set(key, None)", "def cut(d, k):\n\tif isinstance(d, dict):\n\t\tn = d.copy()\n\t\tif k in n:\n\t\t\tdel n[k]\n\t\treturn n\n\treturn [v for v in d if v != k]", "def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n 
new_dct[key] = dct[key]\n\n return new_dct", "def _remove_keys(results: dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def remove_from_multidict(d: MultiDict, key: str, item: typing.Any):\n # works by popping all, removing, then re-adding into\n i = d.popall(key, [])\n if item in i:\n i.remove(item)\n\n for n in i:\n d.add(key, n)\n\n return d", "def removeDic(dic, key):\n pass", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)", "def remove(enforcer_dict, key):\n del enforcer_dict['f']\n assert other.keystring == 'abcde'\n assert other.valuesum == 15\n\n enforcer_dict['a'] = 2\n assert other.keystring == 'bcdea'\n assert other.valuesum == 16\n\n enforcer_dict.clear()\n assert other.keystring == ''\n assert other.valuesum == 0", "def delete_dict_entries(dictionary, entries):\n\n for key in entries:\n if key in dictionary:\n del dictionary[key]\n\n return dictionary\n # parameters = {key: parameters[key] for key in parameters if key not in del_parameter}" ]
[ "0.70482343", "0.67293966", "0.6490357", "0.6465873", "0.61706996", "0.6144822", "0.6001672", "0.59998095", "0.5967065", "0.59639794", "0.5955744", "0.59308225", "0.588004", "0.58719283", "0.58055025", "0.57446265", "0.57119524", "0.56699175", "0.5650597", "0.56066054", "0.55868727", "0.55837643", "0.5558249", "0.5529116", "0.55180746", "0.55026454", "0.5492859", "0.5492513", "0.54797816", "0.5472006" ]
0.7112404
0
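Likewise, the module-level `pop` above returns the reduced mapping together with the removed value, leaving its input untouched. Import path assumed, variable names illustrative.

```python
# pop() is handy for splitting off one collection, e.g. batch statistics.
from flax.core import freeze, pop  # assumed import path / newer Flax

variables = freeze({'params': {'w': 1}, 'batch_stats': {'mean': 0}})
rest, batch_stats = pop(variables, 'batch_stats')

assert 'batch_stats' not in rest
assert batch_stats == {'mean': 0}
```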
Returns an indented representation of the nested dictionary. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pretty_repr`. If x is any other dtype, this function will return `repr(x)`.
def pretty_repr(x: Any, num_spaces: int = 4) -> str: if isinstance(x, FrozenDict): return x.pretty_repr() else: def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return pretty_dict(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return f'FrozenDict({pretty_dict(self._dict)})'", "def pretty(d, indent=0):\n\tret_str = ''\n\tfor key, value in d.items():\n\n\t\tif isinstance(value, collections.Mapping):\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\n'\n\t\t\tret_str = ret_str + pretty(value, indent + 1)\n\t\telse:\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\t' * (indent + 1) + ' => ' + str(value) + '\\n'\n\n\treturn ret_str", "def dict_pretty_print(D: dict, indent_lvl=0):\n print(\"Using 3 decimal places.\")\n base_indent = indent_lvl * \" \"\n indent = (indent_lvl+2)*\" \"\n print(f\"{base_indent}\" + \"{\")\n for key, value in D.items():\n print(f\"{indent}{key}: \", end=\"\")\n if type(value) is dict:\n print(\"\")\n dict_pretty_print(value, indent_lvl + 2)\n else:\n print(f\"{value:.3f}\")\n print(f\"{base_indent}\" + \"}\")", "def pretty_print(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key) + \":\")\n if isinstance(value, dict):\n pretty_print(value, indent + 1)\n else:\n print('\\t' * (indent + 1) + str(value))", "def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))", "def prettyPrintDictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(\"{ }\")\r\n return\r\n\r\n # Recursive case\r\n stream.write(\"{\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n keys.sort()\r\n for key in keys : # Sorted order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(\"}\")", "def print_dict_tree(d, max_depth=None, indent=0):\n def _recurse(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict) and indent != max_depth:\n print(); _recurse(value, indent + 1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))\n \n return _recurse(d)", "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output", "def prettyPrintODictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n global OTabRepr\r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(OTabEmpty[OTabRepr]) # \"o{ }\"\r\n return\r\n\r\n # Recursive case\r\n stream.write(OTabLeft[OTabRepr]) # \"o{\"\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n for key in keys : # Insertion order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n if OTabRepr == 0 :\r\n 
stream.write(\"(\"+repr(key)+\", \")\r\n else :\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\")\")\r\n \r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(OTabRight[OTabRepr]) # \"}\"\r", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def pretty(d, indent=0):\n sp = \" \"\n t = \"\"\n \n if isinstance(d, dict):\n l = len(d)\n c = 0\n t += \"<type 'dict'>:{\\n\"\n for key, value in d.items():\n t += sp * (indent + 1) + \"'\" + str(key) + \"':\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"}\"\n elif isinstance(d, list):\n l = len(d)\n c = 0\n t += \"<type 'list'>:[\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"]\"\n elif isinstance(d, tuple):\n l = len(d)\n c = 0\n t += \"<type 'tuple'>:(\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \")\"\n else:\n t += str(type(d)) + \":'\" + str(d) + \"'\"\n \n return t", "def format_dict(dictionary, depth=0):\n tab = \" \" * 4\n string = \"{\\n\"\n for key, val in dictionary.items():\n string += depth * tab \n string += \"{}: \".format(key)\n if type(val) is dict:\n string += format_dict(val, depth + 1)\n \n else:\n if type(val) is str:\n fmt = \"'{}'\\n\"\n else:\n fmt = \"{}\\n\"\n string += fmt.format(val)\n string += (depth) * tab + '}\\n'\n return string", "def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)", "def _pretty_print(value, indent=''):\n keys = list(value.keys())\n keys.sort()\n for k in keys:\n v = value[k]\n if type(v) == dict:\n print(\"%s%s:\"%(indent, k))\n _pretty_print(v, indent+' ')\n elif type(v) == str:\n if '\\n' in v:\n print(indent+'%s: |'%k)\n for l in v.split('\\n'):\n print(indent+' '+l)\n else:\n print(\"%s%s: %s\"%(indent, k, v))\n else:\n dump = yaml.dump(v)\n # #1617\n # newer versions of python-yaml append the '...' document end\n # syntax. 
as YAML functions fine w/o it, and as it is\n # confusing to users who are just getting a single scalar, we\n # strip it\n if dump.endswith('\\n...\\n'):\n dump = dump[:-4]\n \n sys.stdout.write(\"%s%s: %s\"%(indent, k, dump))", "def print_recursive(value, indent=0):\n tabs = lambda count: '' + str(' ' * (indent + count))\n if isinstance(value, dict):\n to_print = '{}{}'.format(tabs(1), '{')\n for key, item in value.iteritems():\n to_print += '\\n{}{}:\\n{}'.format(tabs(2), key, print_recursive(item, indent + 2))\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', '}')\n if isinstance(value, list):\n to_print = '{}['.format(tabs(1))\n for item in value:\n to_print += '\\n' + print_recursive(item, indent + 1)\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', ']')\n if isinstance(value, str) or isinstance(value, unicode):\n return tabs(1) + '\\'' + value + '\\''\n if len(str(value)) > 0:\n return tabs(1) + str(value) + ''\n return ''", "def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return", "def pretty_print(dictionary: dict):\n return json.dumps(dictionary, indent=4)", "def pretty_print(name, input, val_width=40, key_width=0):\n\n # root\n pretty_str = name + ': {\\n'\n\n # determine key width\n for key in input.keys(): key_width = max(key_width, len(str(key)) + 4)\n\n # cycle keys\n for key in input.keys():\n\n val = input[key]\n\n # round values to 3 decimals..\n if type(val) == np.ndarray: val = np.round(val, 3).tolist()\n\n # difficult formatting\n val_str = str(val)\n if len(val_str) > val_width:\n val_str = pprint.pformat(val, width=val_width, compact=True)\n val_str = val_str.replace('\\n', '\\n{tab}')\n tab = ('{0:' + str(4 + key_width) + '}').format('')\n val_str = val_str.replace('{tab}', tab)\n\n # more difficult formatting\n format_str = '{0:' + str(4) + '}{1:' + str(key_width) + '} {2:' + str(val_width) + '}\\n'\n pretty_str += format_str.format('', key + ':', val_str)\n\n # close root object\n pretty_str += '}'\n\n return pretty_str", "def pretty_print(data, indent=4):\n if type(data) == dict:\n print(json.dumps(data, indent=indent, sort_keys=True))\n else:\n print(data)", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def dumps(data):\n def _dump(d, indent=0):\n for key, value in six.iteritems(d):\n if isinstance(value, dict):\n yield '%s%s {\\n' % (' ' * indent, _escape(key))\n for subs in _dump(value, indent + 2):\n yield subs\n yield '%s}\\n' % (' ' * indent)\n elif isinstance(value, list):\n yield '%s%s = {\\n' % (' ' * indent, _escape(key))\n for subvalue in value:\n if type(subvalue) == dict:\n yield '%s{\\n' % (' ' * (indent + 2))\n for subs in _dump(subvalue, indent + 4):\n yield subs\n yield '%s}\\n' % (' ' * (indent + 2))\n else:\n yield '%s%s\\n' % (' ' * (indent + 2),\n _escape(subvalue))\n\n yield '%s}\\n' % (' ' * indent)\n elif type(value) == bool:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value).lower()))\n else:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value)))\n return ''.join(list(_dump(data)))", "def _pretty_json_dump(d):\n return json.dumps(d, sort_keys=True, indent=3)", "def print_data(d, indent=0):\n prefix = indent * 
' '\n for k in sorted(d):\n v = d[k]\n k = prefix + str(k)\n if isinstance(v, dict):\n print(k)\n print_data(v, indent + 1)\n else:\n if k.endswith('cent'):\n v = ' '.join(\n str(tuple(int(j) if j.is_integer() else j for j in i))\n for i in v\n )\n elif isinstance(v, np.ndarray):\n v = str(v).replace('\\n', '')\n print(k, '=', v)", "def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))", "def pretty_repr(src, indent=0, no_indent_start=False, max_indent=20):\n if _simple(src) or indent >= max_indent:\n indent = 0 if no_indent_start else indent\n if isinstance(src, (six.binary_type, six.text_type)):\n if isinstance(src, six.binary_type):\n string = src.decode(\n encoding='utf-8',\n errors='backslashreplace'\n )\n prefix = 'b'\n else:\n string = src\n prefix = 'u'\n return _formatters['text'](\n spc='',\n indent=indent,\n prefix=prefix,\n string=string\n )\n return _formatters['simple'](\n spc='',\n indent=indent,\n val=src\n )\n if isinstance(src, dict):\n prefix, suffix = '{', '}'\n result = ''\n max_len = len(max([repr(key) for key in src])) if src else 0\n for key, val in src.items():\n result += _formatters['dict'](\n spc='',\n indent=indent + 4,\n size=max_len,\n key=key,\n val=pretty_repr(val, indent + 8, no_indent_start=True)\n )\n return (\n '\\n{start:>{indent}}'.format(\n start=prefix,\n indent=indent + 1\n ) +\n result +\n '\\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)\n )\n if isinstance(src, list):\n prefix, suffix = '[', ']'\n elif isinstance(src, tuple):\n prefix, suffix = '(', ')'\n else:\n prefix, suffix = '{', '}'\n result = ''\n for elem in src:\n if _simple(elem):\n result += '\\n'\n result += pretty_repr(elem, indent + 4) + ','\n return (\n '\\n{start:>{indent}}'.format(\n start=prefix,\n indent=indent + 1) +\n result +\n '\\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)\n )", "def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)", "def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))", "def format_dictionary(dct, indent=4):\n return json.dumps(dct, indent=indent, sort_keys=True)", "def tree_view(dictionary, level=0, sep=\"| \"):\n return \"\".join([\"{0}{1}\\n{2}\".format(sep * level, k,\n tree_view(v, level + 1, sep=sep) if isinstance(v, dict)\n else \"\") for k, v in dictionary.items()])", "def _pretty_print(self, json_dict):\n if self.prettyprint:\n return \"\\n\" + json.dumps(json_dict, indent=self.indent)\n return json.dumps(json_dict)" ]
[ "0.7736835", "0.7247312", "0.7021937", "0.69802797", "0.69656223", "0.6918987", "0.6832074", "0.68211126", "0.67912984", "0.678275", "0.67453086", "0.6687921", "0.66458195", "0.654063", "0.6504709", "0.6453231", "0.64472985", "0.63632375", "0.6358067", "0.6356899", "0.63254225", "0.62625194", "0.625399", "0.6248269", "0.61995083", "0.61536145", "0.61233455", "0.6115827", "0.6093083", "0.6032953" ]
0.83444524
0
Load a subset of the COCO dataset.
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_names=None, class_map=None, return_coco=False, auto_download=False): if auto_download is True: self.auto_download(dataset_dir, subset, year) coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year)) if subset == "minival" or subset == "valminusminival": subset = "val" image_dir = "{}/{}{}".format(dataset_dir, subset, year) # Select class_ids from class_names: if class_names: class_ids = sorted(coco.getCatIds(catNms=class_names)) # Load all classes or a subset? if not class_ids: # All classes class_ids = sorted(coco.getCatIds()) # All images or a subset? if class_ids: image_ids = [] for id in class_ids: imgs = [] # list of images to add to image_ids # Select at most COCO_IMAGES_PER_OBJECT and select only the images # that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them: for imgid in list(coco.getImgIds(catIds=[id])): if len(imgs) >= COCO_IMAGES_PER_OBJECT: break if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid], catIds=class_ids, iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE: imgs.append(imgid) image_ids.extend(imgs) #image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT]) # Remove duplicates image_ids = list(set(image_ids)) else: # All images image_ids = list(coco.imgs.keys()) # Add classes for i in class_ids: self.add_class("coco", i, coco.loadCats(i)[0]["name"]) # Add images for i in image_ids: #print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))) self.add_image( "coco", image_id=i, path=os.path.join(image_dir, coco.imgs[i]['file_name']), width=coco.imgs[i]["width"], height=coco.imgs[i]["height"], annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))) if return_coco: return coco
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def load_cifar100(data_path=None, data_home=None, subsets=None,\n label_mode='fine'):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'cifar-100-python.tar.gz'\n url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n\n label_mode = _utils.validate_option(label_mode, ['fine', 'coarse'],\n name='label_mode')\n \n X, Y = [], []\n with arlib.open(data_path) as ar:\n for subset in subsets:\n if subset == 'training':\n name = [x for x in ar.member_names if x.endswith('train')]\n elif subset == 'test':\n name = [x for x in ar.member_names if x.endswith('test')]\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n assert len(name) == 1\n name = name[0]\n tmp = _load_cifar_batch(ar.open_member(name, 'rb'),\n label_key=label_mode + '_labels')\n X.append(tmp[0])\n Y.append(tmp[1])\n return np.concatenate(X), np.concatenate(Y)", "def load_occupancy_dataset(trainsize=500, testsize=1000):\n filename = 'datasets/numericsequence.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset", "def generate_coco_dataset(args):\n\targs.data_root = Path(args.data_root)\n\targs.save_root = Path(args.save_root)\n\targs.save_root.mkdir()\n\n\tgenerate_coco_dataset_sub(args, 'train', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'train', 'B', args.cat2)\n\tgenerate_coco_dataset_sub(args, 'val', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'val', 'B', args.cat2)", "def load_dataset(self, subset):\n assert subset in ('train', 'val')\n\n # Add classes\n for id, name in self.class_mapper.items():\n self.add_class('nodule', id, name)\n\n # Add images\n self.df = self.df_all[self.df_all['subset'] == subset]\n\n image_ids = set()\n for row in self.df.itertuples():\n image_id = (row.seriesuid, row.coordZ)\n path = os.path.join(cur_dir, 'data', 'train', '{}_{}.npy'.format(row.seriesuid, row.coordZ))\n if image_id in image_ids:\n continue\n self.add_image(\"nodule\", image_id=image_id, path=path)\n image_ids.add(image_id)", "def load_data(filen, model):\n mass_sel = select_bin(model.fit_var, *model.fit_range)\n selections = [mass_sel]\n for var, bounds in model.get_load_vars():\n selections.append(\n select_bin(var, *[float(v) for v in bounds.split(',')]))\n\n load_vars = ['{costh,phi}_HX_fold'] + collect_requirements(selections)\n\n return apply_selections(get_dataframe(filen, columns=load_vars),\n selections)", "def loadSubset(self, loadsubset):\n libxml2mod.xmlParserSetLoadSubset(self._o, loadsubset)", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 
'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_cifar10(data_path=None, data_home=None, subsets=None):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'cifar-10-python.tar.gz'\n url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n X, Y = [], []\n with arlib.open(data_path) as ar:\n for subset in subsets:\n if subset == 'training':\n for i in range(1, 6):\n mname = [x for x in ar.member_names\n if x.endswith('data_batch_'+str(i))]\n assert len(mname) == 1\n mname = mname[0]\n tmp = _load_cifar_batch(ar.open_member(mname,'rb'))\n X.append(tmp[0])\n Y.append(tmp[1])\n elif subset == 'test':\n mname = [x for x in ar.member_names if x.endswith('test_batch')]\n assert len(mname) == 1\n mname = mname[0]\n tmp = _load_cifar_batch(ar.open_member(mname, 'rb'))\n X.append(tmp[0])\n Y.append(tmp[1])\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n return np.concatenate(X), np.concatenate(Y)", "def __init__(self, image_set, root_path, data_path, category='all', task='detection'):\n super(coco, self).__init__('COCO', image_set, root_path, data_path)\n self.root_path = root_path\n self.data_path = data_path\n self.category = category\n self.task = task\n self.name = self.name + '_' + category\n # deal with data name\n view_map = {'minival2014': 'val2014',\n 'valminusminival2014': 'val2014'}\n self.data_name = view_map[image_set] if image_set in view_map else image_set", "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", 
\"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def load_subset_data(data_path, subset_name, timesteps):\n\n selected_subset_paths = subset_paths(os.path.join(data_path, subset_name))\n selected_subset_arrays = subset_arrays(selected_subset_paths)\n\n load_selected_timesteps = lambda x: np.load(x)\n\n if timesteps is not None:\n selected_subset_timesteps = load_selected_timesteps(timesteps)\n else:\n selected_subset_timesteps = np.array(range(int(np.sum(selected_subset_arrays[\"seq_lens\"]))))\n\n return selected_subset_arrays, selected_subset_timesteps", "def load_susy(trainsize=500, testsize=1000):\n filename = 'datasets/susysubset.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset", "def F_subset_S5PHCHO(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_OFFL_L2__HCHO___'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n # not sure about cloud fraction\n # the time_utc string is empty?! why are you doing this to the user!\n data_fields = ['/PRODUCT/SUPPORT_DATA/INPUT_DATA/cloud_fraction_crb',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def get_coco_dataset():\n ds = AttrDict()\n # classes = [\n # '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n # 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n # 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n # 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n # 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n # 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n # 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n # 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n # 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n # 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n # 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n # 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n # 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n # ]\n # classes = ['__background__', 'lane']\n #\n base_classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 
'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n classes = ['__background__',\n 'guard rail',\n # 'car',\n 'dashed',\n 'solid',\n 'solid solid',\n 'dashed dashed',\n 'dashed-solid',\n 'solid-dashed',\n 'yellow dashed',\n 'yellow solid',\n 'yellow solid solid',\n 'yellow dashed dashed',\n 'yellow dashed-solid',\n 'yellow solid-dashed',\n 'boundary',\n 'fork_line',\n 'fork_edge',\n 'arrow_s',\n 'arrow_r',\n 'arrow_l',\n 'arrow_lr',\n 'arrow_inclined_r',\n 'arrow_r_s',\n 'arrow_l_s',\n 'sidewalk',\n 'handrail'\n ]\n base_classes.extend(classes[1:])\n classes = base_classes\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def load_dataset():\n try:\n data_path = ROOT_PATH.joinpath('data', 'Complete_TAVG_Daily_LatLong1_1880.nc')\n ds = xarray.open_dataset(data_path)\n return ds\n except FileNotFoundError:\n raise", "def set_data_subset(self, subset):\n self.data_subset = subset", "def generate_coco_dataset_sub(args, idx1, idx2, cat):\n\tdata_path = args.data_root / '{}2017'.format(idx1)\n\tanno_path = args.data_root / 'annotations/instances_{}2017.json'.format(idx1)\t# eg. anno_path is \"datasets/COCO/annotations/instances_train2017.json\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# or \"datasets/COCO/annotations/instances_val2017.json\"\n\tcoco = COCO(anno_path) # COCO API\n\n\n\timg_path = args.save_root / '{}{}'.format(idx1, idx2)\t\t# eg. img_path is \"datasets/shp2gir_coco/trainA\" or \"datasets/shp2gir_coco/trainB\"\n\tseg_path = args.save_root / '{}{}_seg'.format(idx1, idx2)\t# eg. img_path is \"datasets/shp2gir_coco/trainA_seg\" or \"datasets/shp2gir_coco/trainB_seg\"\n\timg_path.mkdir()\t\t\t\t\t\t\t\t\t\t\t# they are empty, therefore mkdir()s\n\tseg_path.mkdir()\n\n\tcat_id = coco.getCatIds(catNms=cat)\t\t# cat is \"sheep\" or \"giraffe\",get the category's id\n\timg_id = coco.getImgIds(catIds=cat_id)\t# get the ids of sheep/giraffe images,获得所有绵羊的图片id,或者所有长颈鹿的图片id\n\timgs = coco.loadImgs(img_id)\t\t\t# 获得所有绵羊的图片(很多张),或者所有长颈鹿的图片\n\n\t# tqdm表示进度条,progress\n\t# refer:https://tqdm.github.io/\n\tpb = tqdm(total=len(imgs))\n\tpb.set_description('{}{}'.format(idx1, idx2))\n\tfor img in imgs:\n\t\tann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_id)\t# get annotation'id\n\t\tanns = coco.loadAnns(ann_ids)\t\t\t\t\t\t\t\t# get the annotation(many)\n\n\t\tcount = 0\n\t\tfor i in range(len(anns)):\t\t\t\t# 真正从标签生成mask的地方。\n\t\t\tseg = coco.annToMask(anns[i])\t\t# annotation to mask, the type is array now\n\t\t\tseg = Image.fromarray(seg * 255)\t# turn the seg array to seg image,each pix multi 255. 
why?\n\t\t\tseg = resize(seg, args.image_size)\t# resize the seg image\n\t\t\t# np.sum\n\t\t\tif np.sum(np.asarray(seg)) > 0:\t\t\t\t\t\t\t\t# 保存seg\n\t\t\t\tseg.save(seg_path / '{}_{}.png'.format(pb.n, count))\t# pb.n 表示?\n\t\t\t\tcount += 1\n\n\t\tif count > 0: # at least one instance exists\n\t\t\timg = Image.open(data_path / img['file_name'])\n\t\t\timg = resize(img, args.image_size)\n\t\t\timg.save(img_path / '{}.png'.format(pb.n))\n\n\t\tpb.update(1)\n\tpb.close()", "def load_data(subset: str):\n df_train = pd.read_csv(f\"{DATA_PATH}/train_durations_per_speaker.csv\")\n df_test = pd.read_csv(f\"{DATA_PATH}/val_durations_per_speaker.csv\")\n df_global = pd.read_csv(f\"{DATA_PATH}/global_durations_per_speaker.csv\")\n if (subset == \"train\"):\n df = df_train\n elif (subset == \"val\"):\n df = df_test\n else:\n df = df_global\n return df", "def load_cityscapes(self, dataset_dir, subset):\n self.class_labels = {\n 'unlabeled':0,\n 'ego vehicle':1, \n 'rectification border':2,\n 'out of roi':3, \n 'static':4, \n 'dynamic':5, \n 'ground':6, \n 'road':7, \n 'sidewalk':8, \n 'parking':9, \n 'rail track':10, \n 'building':11, \n 'wall':12, \n 'fence':13, \n 'guard rail':14, \n 'bridge':15, \n 'tunnel':16, \n 'pole':17, \n 'polegroup':18, \n 'traffic light':19, \n 'traffic sign':20, \n 'vegetation':21, \n 'terrain':22, \n 'sky':23, \n 'person':24, \n 'rider':25, \n 'car':26, \n 'truck':27, \n 'bus':28, \n 'caravan':29, \n 'trailer':30, \n 'train':31, \n 'motorcycle':32, \n 'bicycle':33, \n 'license plate':34}\n \n annotation_dir = dataset_dir + 'gtFine_trainvaltest/' + subset + '_all.json'\n self.image_info = json.load(open(annotation_dir, 'r'))\n \n # Add classes\n for i in range(len(self.class_labels)):\n self.add_class(\"cityscape\", i, list(self.class_labels.keys())[i])", "def load_subset(self, vocab):\n if self.reserve_zero:\n vocab.insert(0, '__ZERO__')\n if self.allow_oov:\n vocab.insert(self.oov_index, '__OUT_OF_VOCAB__')\n indices = []\n for word in vocab:\n try:\n indices.append(self._index_dict[word])\n except KeyError:\n indices.append(self.oov_index)\n else:\n indices = [self._index_dict[word] for word in vocab]\n matrix = self.matrix[indices]\n return Vectors(matrix=matrix, vocab=vocab)", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if 
ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def loadData():\n datfile = glob.glob(DATA_PATH + 'consolidated.npy')\n return np.load(datfile[0])", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def F_subset_S5PNO2(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_RPRO_L2__NO2____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/cloud_fraction_crb_nitrogendioxide_window',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n 
'/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo_nitrogendioxide_window',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time_utc',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time_utc',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def get_coco_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell 
phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_dataset_cifar10():\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n return (x_train, y_train), (x_test, y_test)", "def load_cifar():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader", "def load_data(path=None, num_words=None, skip_top=0,\n maxlen=None, seed=113,\n start_char=1, oov_char=2, index_from=3, **kwargs):\n # Legacy support\n if 'nb_words' in kwargs:\n warnings.warn('The `nb_words` argument in `load_data` '\n 'has been renamed `num_words`.')\n num_words = kwargs.pop('nb_words')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n if path is None:\n path = '/home/lupeng/neural-network/data/codeforces_full.pkl'\n f = load_pickle(path)\n xs = f['datas']\n ys = f['labels']\n \n if start_char is not None:\n xs = [[start_char] + [w + index_from for w in x] for x in xs]\n elif index_from:\n xs = [[w + index_from for w in x] for x in xs]\n\n if maxlen:\n new_xs = []\n new_ys = []\n for x, y in zip(xs, ys):\n if len(x) < maxlen:\n new_xs.append(x)\n new_ys.append(y)\n xs = new_xs\n ys = new_ys\n if not xs:\n raise ValueError('After filtering for sequences shorter than maxlen=' +\n str(maxlen) + ', no sequence was kept. 
'\n 'Increase maxlen.')\n if not num_words:\n num_words = max([max(x) for x in xs])\n\n # by convention, use 2 as OOV word\n # reserve 'index_from' (=3 by default) characters:\n # 0 (padding), 1 (start), 2 (OOV)\n if oov_char is not None:\n xs = [[oov_char if (w >= num_words or w < skip_top) else w for w in x] for x in xs]\n else:\n new_xs = []\n for x in xs:\n nx = []\n for w in x:\n if w >= num_words or w < skip_top:\n nx.append(w)\n new_xs.append(nx)\n xs = new_xs\n\n train_data,train_label,test_data,test_label = get_balanced_data(xs, ys)\n\n np.random.seed(seed)\n np.random.shuffle(train_data)\n np.random.seed(seed)\n np.random.shuffle(train_label)\n \n np.random.seed(2*seed)\n np.random.shuffle(test_data)\n np.random.seed(2*seed)\n np.random.shuffle(test_label)\n \n \n x_train = np.array(train_data)\n y_train = np.array(train_label)\n\n x_test = np.array(test_data)\n y_test = np.array(test_label)\n\n return (x_train, y_train), (x_test, y_test)" ]
[ "0.62512195", "0.6004767", "0.5874558", "0.5784675", "0.57171506", "0.56779504", "0.5658244", "0.5657759", "0.56274396", "0.5618919", "0.5617201", "0.55943984", "0.55781955", "0.55633026", "0.55596197", "0.5551278", "0.54980606", "0.54674935", "0.5462153", "0.54568654", "0.5433031", "0.54185146", "0.54123944", "0.54052603", "0.54035753", "0.5396853", "0.5372947", "0.53500956", "0.53428036", "0.534053" ]
0.64328516
0
Updates this store's current state with incoming data from the network. data should be a mapping containing 'metacontacts', 'order', and 'info' structures (see comment at top of file)
def update_data(self, data): rebuild = False # This method needs to substitute some defaultdicts for the normal # dictionaries that come back from the server. # Metacontact information #if data['metacontacts'] mc_dict = data.get('metacontacts', {}) if not isinstance(mc_dict, dict): log.critical('invalid metacontacts dictionary') mc_dict = {} # Contact information like SMS numbers and email addresses. self.info = defaultdict(dict) si = self.info if 'info' in data: for (k, v) in data['info'].iteritems(): if isinstance(k, str): cmpk = k.decode('utf8') else: cmpk = k if not isinstance(cmpk, unicode): continue if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot) for prot in protocols.iterkeys())): if any(v.values()): si[k] = v for c, v in si.iteritems(): for attr in ('email', 'sms'): if attr in v: self.contact_info_changed(c, attr, v[attr]) self.metacontacts = MetaContactManager(self, mc_dict) if hasattr(self, 'new_sorter'): on_thread('sorter').call(self.new_sorter.removeAllContacts) rebuild = True # Manual ordering of groups try: self.order = deepcopy(data['order']) self.order['groups'] = list(oset(self.order['groups'])) contacts = self._filtered_contacts() self.order['contacts'] = defaultdict(list) self.order['contacts'].update(contacts) except Exception: log.critical('error receiving order') self._init_order() # note: loading tofrom data from the network is deprecated. this data # now goes out to disk. see save/load_local_data if 'tofrom' in data and isinstance(data['tofrom'], dict) and \ 'im' in data['tofrom'] and 'email' in data['tofrom']: self.dispatch.set_tofrom(deepcopy(data['tofrom'])) if rebuild: self.rebuild() self.update_order()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_state_notification(self, data):\n\n self.channel_data.update(data)\n\n # synchronize DataManager data with processed update & entity data\n self.sync_data_update_ha()", "def update(self, data):\n logging.info('update state', data)\n self._client.update_state(data)\n\n # Also locally update our state so things aren't out of sync\n self._state.update(data)", "def update(self, data):\n self.data.update(data)", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]\n self._attributes = self.data_service.attributes[self._json_key]", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data\n self._attributes = self.data_service.attributes", "def update_data():\n pass", "def update(self, data):\n return self._data.update(data)", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]", "def update(self):\n self.data_service.update()\n attr = self.data_service.attributes.get(self._json_key)\n self._state = attr[\"soc\"]", "def update(self,data):\r\n data = data.split(':',1)\r\n\r\n self.log('Signal','Received an update: %s...' % repr(data)[0:10],'update')\r\n \r\n #print \"*** local: \" + repr(data)\r\n \r\n if data[0] == 'Results':\r\n\r\n self.log('Signal','The local node returned these passwords: %s' % repr(data[1]),'update')\r\n\r\n self.addResult(data[1])\r\n elif data[0] == 'Bench':\r\n self.log('Signal','The local node returned these benches: %s' % repr(data[1]),'update')\r\n \r\n self.addBench(data[1])\r\n\r\n elif data[0] == 'Work':\r\n if data[1] == 'Done':\r\n self.finished += 1\r\n if self.finished >= len(self.nodes):\r\n self.runningWork = False\r\n self.log('Signal','Finished working','update')\r\n\r\n notification = 'Work:Done'\r\n self.notifyObservers(notification)", "def _update_data(self, data, update_original=False):\n self._data.update(dict((key, self._deserialize(key, value))\n for key, value in data.items()))\n\n if update_original:\n self._original_data = copy.deepcopy(self._data)", "def update(self, data):\n return data", "def update(self):\n self._data.update()\n\n self._state = self._data.get_value(self._type)", "def update(self, data):\n\n if not isinstance(data, (dict, list, set)):\n raise TypeError(\"Unsupported type\")\n\n if self.payload_undefined:\n\n if isinstance(data, dict):\n self._attr = {}\n elif isinstance(data, set):\n self._attr = set()\n elif isinstance(data, list):\n self._attr = []\n\n if not self.is_payload(type(data)):\n p_type = str(type(self._attr))\n d_type = str(type(data))\n msg = (\n f\"The type of the update data '{d_type}' doesn't match current payload's \"\n f\"type: '{p_type}'\"\n )\n raise TypeError(msg)\n\n if self.is_payload(dict):\n for k, v in data.items():\n if isinstance(v, dict):\n self._attr[k] = Pinnate(v)\n else:\n self._attr[k] = v\n\n elif self.is_payload(list):\n\n for v in data:\n if isinstance(v, dict):\n self._attr.append(Pinnate(v))\n else:\n self._attr.append(v)\n\n elif self.is_payload(set):\n\n for v in data:\n if isinstance(v, dict):\n self._attr.add(Pinnate(v))\n else:\n self._attr.add(v)", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = 
self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit", "def update(self, data):\n if not isinstance(data, list): data = [data] # otherwise no conversion is necessary\n master = Handler.ALL_VERS_DATA\n for record in data:\n #print(record)\n for k,v in iteritems(record): # ensure record contents aretyped appropriately\n try: record[k] = int(v)\n except ValueError: record[k] = v\n try: label = record[\"label\"] # verify this record has the required 'label' key\n except KeyError:\n raise ValueError(\"Must provide a valid label argument. Given:%s%s\"%(\\\n os.linesep, (\"%s \"%(os.linesep)).join(\n [\"%15s:%s\"%(k,v) for k,v in iteritems(kwargs)]\n )))\n try: masterLabel = master[label] # identify the already existing record that matches this to-be-updated record, if any\n except KeyError: # master hasn't been defined yet\n master[label] = record\n self._updated = True # a new record should also be saved\n continue\n for k,v in iteritems(record): # determine whether master needs to be updated\n try:\n if masterLabel[k] == v: continue # whether an entry in the record needs to be updated (doesn't match)\n except KeyError: pass # this condition means that k is a new key, so the record must be updated\n self._updated = True\n try: master[label].update(record) # index each record by its label\n except KeyError: break", "def update_from_dict(self, data: dict) -> \"Device\":\n if \"info\" in data and data[\"info\"]:\n self.info = Info.from_dict(data[\"info\"])\n\n if \"locations\" in data and data[\"locations\"]:\n locations = [Location.from_dict(location) for location in data[\"locations\"]]\n self.locations = locations\n\n return self", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n raise NotImplementedError", "def update_from_dict(self, data: dict):\n self.algo = data[\"algo\"]\n self.version = data[\"sbx\"]\n self.next_session = data[\"next\"]\n self.last_session = data[\"last\"]\n self.past_quality = unpack_int_list(data[\"pastq\"])\n\n # Revert to length of past_quality if reps are not set\n possible_rep = len(self.past_quality)\n self.actual_repetitions = data.get(\"reps\", possible_rep)\n\n # Other keys are used by algorithm\n self.algo_state = data.copy()\n for required_key in REQUIRED_FIELDS:\n del self.algo_state[required_key]", "def update_current_data(self, data):\n if self.current_data is not None:\n current_results = self.get_results()\n self._history.append((self.current_data, current_results))\n\n self.current_data = data", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. 
\")", "def _async_process_data(self):\n _LOGGER.debug(\"Update switch called\")\n\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = data[\"relay\"]\n\n self.async_write_ha_state()", "def set_state(self, sync_data):\n # Send an echo update message immediately\n if JUPYTER_WIDGETS_ECHO:\n echo_state = {}\n for attr, value in sync_data.items():\n if attr in self.keys and self.trait_metadata(attr, 'echo_update', default=True):\n echo_state[attr] = value\n if echo_state:\n echo_state, echo_buffer_paths, echo_buffers = _remove_buffers(echo_state)\n msg = {\n 'method': 'echo_update',\n 'state': echo_state,\n 'buffer_paths': echo_buffer_paths,\n }\n self._send(msg, buffers=echo_buffers)\n\n # The order of these context managers is important. Properties must\n # be locked when the hold_trait_notification context manager is\n # released and notifications are fired.\n with self._lock_property(**sync_data), self.hold_trait_notifications():\n for name in sync_data:\n if name in self.keys:\n from_json = self.trait_metadata(name, 'from_json',\n self._trait_from_json)\n self.set_trait(name, from_json(sync_data[name], self))", "def update(self, data: bytes):\n self.send(data)" ]
[ "0.6476063", "0.62110347", "0.61491877", "0.6079043", "0.6079043", "0.6079043", "0.6079043", "0.60150313", "0.59856397", "0.592851", "0.5848428", "0.58295083", "0.58125436", "0.579534", "0.5732395", "0.5716034", "0.56988144", "0.5688092", "0.5683766", "0.5636451", "0.5636451", "0.56280696", "0.5590911", "0.55697817", "0.5553052", "0.554236", "0.5535166", "0.55267394", "0.5511158", "0.55028915" ]
0.70325893
0
Error in label: only whitespace allowed, no tabs; if checked label differs, raise an error
def CheckLabel(Line):
    for i in Line:
        if i == '\t': #can't detect leading tabs, stops at the first \
            raise InputError(Line,"malformed input")
        elif i != ' ':
            break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)", "def checkLabel(label):\n\n label = str(label)\n if not label:\n raise ValueError('label cannot be empty string')\n\n label = str(label)\n\n if not label:\n raise ValueError('label cannot be empty string')\n\n if not label[0].isalpha():\n raise ValueError('label must start with a letter')\n\n if not (''.join(label.split('_'))).isalnum():\n raise ValueError('label may contain alphanumeric characters and '\n 'underscore, {0} is not valid'.format(label))\n\n if isReserved(label):\n raise ValueError('{0} is a reserved word and cannot be used '\n 'as a label'.format(repr(label)))\n\n if label in READONLY:\n raise AttributeError('{0} is read-only'.format(label))\n\n return label", "def test_is_valid_label_value_invalid_input():\n # test length violations\n assert not is_valid_label_value(value=f\"{'v' * 64}\") # value too long\n # test first character violations (not alphanum)\n assert not is_valid_label_value(value=\"-\")\n assert not is_valid_label_value(value=\"-a\")\n assert not is_valid_label_value(value=\".b\")\n assert not is_valid_label_value(value=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_value(value=\"a-\")\n assert not is_valid_label_value(value=\"b.\")\n assert not is_valid_label_value(value=\"c \")\n assert not is_valid_label_value(value=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_value(value=\"a$$a\")\n assert not is_valid_label_value(value=\"b b\")", "def checkcontent(label, c):\n if len(c) > 0:\n raise ValueError(\"{} with content={}\".format(label, c))", "def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False", "def test_is_valid_label_value_valid_input():\n # test valid label values\n assert is_valid_label_value(value=None)\n assert is_valid_label_value(value=\"\")\n assert is_valid_label_value(value=\"l0L\")\n assert is_valid_label_value(value=\"L-l\")\n assert is_valid_label_value(value=\"L.L\")\n assert is_valid_label_value(value=\"l_4\")\n assert is_valid_label_value(value=\"4-you\")\n assert is_valid_label_value(value=\"You.2\")", "def want_label(self, op):\n return self.want_line(r'\\s*\\S*(%s)\\S*\\:.*' % (op))", "def test_whitespace(self):\n self.assertRaises(ParseException, self.flag.parseString, ' ')", "def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_label(self):\n try:\n t = self.OntTerm(label='diffuse')\n raise AssertionError(f'should fail {t!r}')\n except TypeError:\n pass", "def test_arg_env_invalid(self, dfparser, instruction, label):\n dfparser.lines = [\"FROM fedora\\n\",\n \"{0} v=v\\n\".format(instruction),\n \"LABEL TEST={0}\\n\".format(label)]\n try:\n dfparser.labels['TEST']\n except KeyError:\n pass", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def validateLabel(cls, label: str, labeling_version: int) -> bool:\r\n\r\n return 
len(label.split('.')) in [2, 3]", "def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, None)\n _G_label_3\n \"\"\"))", "def test_label_not_in_config(self):\n with self.assertRaisesRegex(\n ValueError, 'The config \\'Label\\' field should contain the positive'\n ' class label.'):\n self.ci.run_with_metadata(\n indexed_inputs=self.dataset.indexed_examples,\n model=self.model,\n dataset=self.dataset,\n )", "def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')", "def test_whitespace_not_used_if_layout():\n grammar = \"\"\"\n S: 'a' 'b';\n LAYOUT: 'k' | EMPTY;\n \"\"\"\n g = Grammar.from_string(grammar)\n parser = Parser(g)\n with pytest.raises(ParseError):\n parser.parse('a b')", "def validate_label(self, label):\n if label != self.label:\n raise KeypointsSchemaError(\n \"Label '%s' does not match keypoints schema\" % label\n )", "def test_issue_remove_label(self):\n pass", "def _is_label(self) -> bool:\n return self.lines[self.counter].startswith(\"(\") and self.lines[\n self.counter\n ].endswith(\")\")", "def t_error(t):\n print(\"Illegal character '%s'\" % repr(t.value[0]))\n t.lexer.skip(1)", "def catch_tabs(self):\n lnum = 1\n for line in self.text:\n cnum = line.find(\"\\t\")\n if 0 <= cnum:\n self.errmsg(\"TAB detected in input. Please use spaces.\",\n pos=(lnum,cnum))\n lnum += 1", "def check_sanity(self):\n # ensure numeric labels\n try:\n list(map(int, flatten(self.labels[:1])))\n except ValueError as ve:\n error(\"Non-numeric label encountered: {}\".format(ve))\n except TypeError as ve:\n warning(\"Non-collection labelitem encountered: {}\".format(ve))", "def label(cls) -> str:\n return \"!lobotomy.error\"", "def labels_validation(ele,actultext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(ele))\r\n print \"Current label returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(actultext)+\" label does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if actultext.strip() == text_heading.strip():\r\n print (str(actultext)+\" label has been found!!!\")\r\n else:\r\n print(\"Sorry!!!lable has been mismatched,it should be \"+str(actultext))\r\n print (\"label shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def test_invalid_input_tag(self):\r\n with self.assertRaisesRegexp(Exception, \"Error in xml\"):\r\n self.check_group('checkboxtextgroup', 'invalid', 'checkbox')", "def verify_labeled(self, d_stmt, table):\n d_label = d_stmt.find_first(\"p_name\")\n if d_label:\n self.label = d_label.value\n table.check_table(d_stmt.linespan, Symbol(self.label, DanaType(\"label\")))", "def test_info_whitespace():\n pytest.raises(SaltInvocationError, mac_group.info, \"white space\")", "def is_label_definition(line):\n\n return line.startswith(\"LABEL \")", "def test_is_valid_label_key_invalid_input():\n # test length violations\n assert 
not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")" ]
[ "0.67585087", "0.63081664", "0.6136866", "0.6121706", "0.6079675", "0.6045867", "0.5986271", "0.5970314", "0.5936593", "0.5910567", "0.58990806", "0.58863115", "0.5881466", "0.5871345", "0.58665997", "0.5764735", "0.57456005", "0.5732533", "0.5719328", "0.57080424", "0.56995803", "0.56872946", "0.5654367", "0.5654298", "0.5642969", "0.5623617", "0.5618477", "0.56180114", "0.5611361", "0.55977714" ]
0.7484639
0
parsing a given text file containing labels and sequences load file, tidy it, process each line in the file return the labels and sequences as list[tuple(string,string)]
def ParseSeqFile(FilePath): SeqFile = rSeqFile(FilePath) TidyFile = TidyLines(SeqFile) result = [] for line in TidyFile: t = ( ProcessLine(line) ) result.append(t) return(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels", "def readSequences(lines):\n seqs = []\n label = None\n seq_lines = []\n for line in lines:\n line = line.strip() # strip off white space\n if not line: # skip empty lines\n continue\n if line.startswith(';'): # ignore comment lines\n continue\n # check for start of next sequence:\n if line.startswith('>'): # label line\n # first, store the previous sequence if we had one:\n if seq_lines:\n seqs.append(Sequence(label, ''.join(seq_lines)))\n seq_lines = []\n # get the label (name) for the next sequence\n label = line[1:].strip()\n else:\n # collect all lines with sequence information for this sequence:\n seq_lines.append(line)\n # take care of the last sequence in the file\n seqs.append(Sequence(label, ''.join(seq_lines)))\n return seqs", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def fasta(path):\n label = None\n sequence = None\n with open(path, 'r') as data:\n for line in data:\n line = line.strip()\n if line.startswith('>'):\n if label and sequence:\n yield (label, sequence)\n label = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if label and sequence:\n yield (label, sequence)", "def read_data(input_file):\n\n def process_line(labels, words):\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append((l, w))\n words = []\n labels = []\n return words, labels, lines\n\n rf = open(input_file, 'r')\n lines = [];\n words = [];\n labels = []\n for line in rf:\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n # here we dont do \"DOCSTART\" check\n\n if len(line.strip()) == 0: # and words[-1] == '.'\n words, labels, lines = process_line(labels, words)\n words.append(word)\n labels.append(label)\n rf.close()\n return lines", "def read_processed_data_from_file(file, encoding='latin1'):\n\n with open(file, encoding=encoding) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n labeled_texts = []\n n = len(lines) - 1\n for i, line in enumerate(lines):\n print(f'\\rLoading review {i} of {n}', end='')\n if line == '':\n continue\n tagged_words = re.findall(r'(.+?\\\\.+?) 
', line)\n label = re.findall(r'#(\\d+.\\d)#', line)[0]\n labeled_texts.append((tagged_words, label))\n print()\n return labeled_texts", "def read_file(filename):\n reads = []\n labels = []\n\n with open(filename) as f:\n content = f.readlines()\n\n for line in content:\n _, read, label = re.sub('[null\\t\\n\\[\\]\\\"]', '', line).replace(' ', '').split(',')\n reads.append(read)\n labels.append(label)\n \n return reads, labels", "def _process(self, file: bytes) -> Sequence[List[Tuple[str]]]:\n train_data = file[: -2 * self.num_eval_symbols]\n val_data = file[-2 * self.num_eval_symbols: -self.num_eval_symbols]\n test_data = file[-self.num_eval_symbols:]\n\n symbol = '' if self.remove_end_of_line else str(ord('\\n'))\n train = ' '.join([str(c) if c != ord('\\n') else symbol for c in train_data])\n val = ' '.join([str(c) if c != ord('\\n') else symbol for c in val_data])\n test = ' '.join([str(c) if c != ord('\\n') else symbol for c in test_data])\n\n return [(train,)], [(val,)], [(test,)]", "def seqs_from_file(ids, file_lines):\r\n\r\n for label, seq in parse_fasta(file_lines):\r\n\r\n if id_from_fasta_label_line(label) in ids:\r\n yield label, seq", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def recover_original_data(data_path, sequence_pairs):\n # initialize variables\n num_labels = 0\n num_sequences = 0\n num_correct_labels = 0\n num_correct_sequences = 0\n with open(data_path, \"r\") as input_file:\n # sequence of workds in each sentence\n word_sequence = []\n # gold/original labels for each word in each sentence\n gold_label_sequence = []\n # prediction labels for each word in each sentence\n pred_label_sequence = []\n for line in input_file:\n # split line into tokens\n tokens = line.split()\n # check if line is not empty\n if tokens:\n # a label exists\n num_labels += 1\n # the word is the first token\n word = tokens[0]\n # the original label is the second token\n gold_label = tokens[1]\n # the prediction label is the third token\n pred_label = tokens[2]\n # check if prediction equals to real label\n if pred_label == gold_label:\n num_correct_labels += 1\n # build the sequence of words, labels, and predictions for each sentence\n word_sequence.append(word)\n gold_label_sequence.append(gold_label)\n pred_label_sequence.append(pred_label)\n # line is empty\n else:\n # count number of sequences (=sentences)\n num_sequences += 1\n # check if word_sequence is empty\n if word_sequence:\n sequence_pairs.append([word_sequence, gold_label_sequence])\n # check if we predicted correctly the whole sequence\n if pred_label_sequence == gold_label_sequence:\n num_correct_sequences += 1\n # flush lists for next sequence\n word_sequence = []\n gold_label_sequence = []\n pred_label_sequence = []\n # here is the case where the file does not end with an empty line\n # repeat the process for the last sequence of the file\n if word_sequence:\n num_sequences += 1\n sequence_pairs.append([word_sequence, gold_label_sequence])\n if pred_label_sequence == gold_label_sequence:\n num_correct_sequences += 1\n # calculate per instance (=word) accuracy and per sequence (=sentence) accuracy\n per_instance_accuracy = float(num_correct_labels) / num_labels * 100\n per_sequence_accuracy = float(num_correct_sequences) / num_sequences * 100\n return per_instance_accuracy, per_sequence_accuracy", "def 
convert_bmes_to_sequence_tagging(source_file: str, output_file: str):\n # 1. read all lines and split it to sentences\n sentences: List[str] = []\n labels: List[str] = []\n with open(source_file, 'r+', encoding='utf-8') as f:\n\n # 1. 一个文件中的token和labels\n sentence_tokens, sentence_labels = [], []\n for line in f:\n line = line.strip()\n if not line:\n sentences.append(sentence_tokens)\n labels.append(sentence_labels)\n sentence_tokens, sentence_labels = [], []\n else:\n line_tokens, line_labels = read_line(line)\n\n sentence_tokens.extend(line_tokens)\n sentence_labels.extend(line_labels)\n\n assert len(sentences) == len(labels)\n \n # 2. write tokens and labels to the file\n with open(output_file, 'w+', encoding='utf-8') as f:\n\n for index in range(len(sentences)):\n tokens, sentence_labels = sentences[index], labels[index]\n\n items = [\n '###'.join([tokens[i], sentence_labels[i]]) for i in range(len(tokens))]\n\n f.write('\\t'.join(items) + '\\n')", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text", "def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row", "def parse_data(filename):\r\n labels = []\r\n documents = []\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n values = line.split()\r\n label = values[0]\r\n document = []\r\n for wordCount in values[1:]:\r\n parsed = wordCount.split(':')\r\n word = parsed[0]\r\n count = int(parsed[1])\r\n document.append((word, count))\r\n labels.append(label)\r\n documents.append(document)\r\n return (labels, documents)", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n 
all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def get_sequence_list(files):\n sequence_list = []\n for i in range(0,len(files)):\n with open(files[i], \"r\") as fasta_file:\n fasta_seq_all = fasta_file.read()\n \n\n fasta_seq_all = fasta_seq_all.split(\">\")\n\n for j in range(0, len(fasta_seq_all)):\n fasta_seq = fasta_seq_all[j]\n if len(fasta_seq) > 2:\n \n fasta_seq = fasta_seq.splitlines()\n label = _format_label(files[i], fasta_seq.pop(0))\n format_fasta_seq = []\n for k in range(0,len(fasta_seq)):\n try:\n if fasta_seq[k][0] == \"\\n\":\n break\n \n format_fasta_seq.append(fasta_seq[k])\n except:\n break\n format_fasta_seq = \"\".join(format_fasta_seq)\n format_fasta_seq.strip()\n if len(format_fasta_seq) > 2:\n sequence_list.append(Sequence(format_fasta_seq, label))\n \n return sequence_list", "def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y", "def parse_labels(file: str) -> ndarray:\n rows = []\n with open(file, 'r', encoding='utf-8') as f:\n for row in f:\n rows.append(row.strip())\n return array(rows)", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content 
in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def _read_conll(cls, input_file):\n #def read_conll(input_file):\n sents = []\n sent, labels = [], []\n for line in open(input_file):\n if line.startswith(\"# sent_id\"):\n current_id = line.strip().split(\" = \")[1]\n elif line.strip() == \"\":\n if len(sent) > 0:\n sents.append((current_id, sent, labels))\n sent, labels = [], []\n else:\n token, label = line.strip().split(\"\\t\")\n sent.append(token)\n labels.append(label)\n return sents", "def load_dataset(file_handle) -> list:\n output = []\n lines = file_handle.readlines()\n name = None\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n if line.startswith(\">\"):\n if name:\n output.append(sequence)\n name = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if name:\n output.append(sequence)\n \n return output", "def p_and_l_from(files):\n if isinstance(files, str):\n files = [files]\n paths = []\n labels = []\n for file in files:\n print(f'read {file}')\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split(' ')\n paths.append(line[0])\n labels.append(int(line[1]))\n return [paths, labels]" ]
[ "0.67697734", "0.6637289", "0.65705514", "0.65653694", "0.6479006", "0.64705706", "0.64496464", "0.6351888", "0.6281873", "0.6273862", "0.62096", "0.6180452", "0.6150285", "0.6150257", "0.61439294", "0.612267", "0.6109716", "0.6100851", "0.60979813", "0.60872257", "0.60840577", "0.6075221", "0.6075019", "0.60727876", "0.6065708", "0.6061248", "0.6037546", "0.60311437", "0.5999951", "0.5987098" ]
0.6663826
1
Return 'p1' if the current player is Player 1, and 'p2' if the current player is Player 2.
def get_current_player_name(self) -> str: if self.p1_turn: return 'p1' return 'p2'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def other_player(cls, player):\n return 0 if player == 1 else 1", "def checkval(self, P1, P2, winningval):\n if P1 == winningval:\n return \"Player 1\"\n elif P2 == winningval:\n return \"Player 2\"", "def get_current_player(player_one_turn):\n \n # Get appropriate player whether the parameter is True or False\n if player_one_turn == True:\n return 'Player One'\n return 'Player Two'", "def otherPlayer(cls, player):\n return 0 if player == 1 else 1", "def other_player(self, player):\n if player == self.__opponent:\n return self.__pid\n else:\n return self.__opponent", "def get_player(self, number):\n num = int(number)\n assert (num in [1, 2])\n return self.player_1 if num == 1 else self.player_2", "def switch_player(current, player1, player2):\r\n if current == player1:\r\n return player2\r\n else:\r\n return player1", "def player_css_class(p1, p2, cp=None):\n return (\"self_player\" if p1 is p2 else \"other_player\") + (\n \" current_player\" if p1 is cp else \"\")", "def game(p1,p2): \n if (p1 =='piedra' and p2=='tijera') or (p1 == 'tijera' and p2 == 'papel')or(p1 == 'papel' and p2 == 'piedra'):\n return 'gana p1'\n elif(p1==p2):\n return'empate'\n else:\n return'gana p2'", "def __get_other_player(self):\n return engine.Engine.game_manager.players[(self.current_player_index + 1) % 2]", "def next_player(current_player=\"None\"):\n if current_player == \"None\":\n return random.choice([\"Player 1\", \"Player 2\"])\n elif current_player == \"Player 1\":\n return \"Player 2\"\n else:\n return \"Player 1\"", "def __negated_player(self, player):\n\t\treturn self.PLAYER2 if self.current_player == self.PLAYER1 else self.PLAYER1", "def other_player(self):\n return self.get_others_in_group()[0]", "def goesFirst(player1, player2):\r\n first = input('who goes first ? 
'+ Player.get_name (player1) +' or '+Player.get_name (player2)+' ?')\r\n if first == Player.get_name(player2) :\r\n return player2\r\n elif first == Player.get_name(player1) :\r\n return player1\r\n else:\r\n return goesFirst(player1, player2)", "def get_next_player(current_player: Optional[str]) -> str:\n if current_player == c.X:\n return c.O\n else:\n return c.X", "def swap_player(self):\n\n # if player 1 then switch to player 2\n if self.current_player == 1:\n self.current_player += 1\n else:\n self.current_player -= 1\n self.playing_player = self.players[self.current_player]\n return self.playing_player", "def other(player):\n return 1 - player", "def other(player):\n return 1 - player", "def other(player):\n return 1 - player", "def win(player1, player2):\n if(player1 == 1 and player2 == 3) or (player1 == 2 and player2 == 1) \\\n or (player1 == 3 and player2 == 2):\n return True", "def player(self, state, current_player):\r\n\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n if new_piece:\r\n return player\r\n else:\r\n return current_player", "def switch_player(player):\n if player == PLAYERX:\n return PLAYERO\n else:\n return PLAYERX", "def switch_player(player):\n if player == PLAYERX:\n return PLAYERO\n else:\n return PLAYERX", "def get_player2_mark(p1_mark):\r\n if p1_mark == 2:\r\n return markers[0]\r\n else:\r\n return markers[1]", "def opponent(self, player):\r\n # player = core.BLACK (can do this for any static var)\r\n if player == core.BLACK:\r\n return core.WHITE\r\n else:\r\n return core.BLACK", "def get_player(self):\n return 2 - int((np.sum(self.state) % 2))", "def determine_winner(score1, score2):\n if score1 == score2:\n return 'tie'\n elif score1 == 21:\n return 'player1'\n elif score2 == 21:\n return 'player2'\n elif score1 > 21 or score2 > 21:\n if score1 > 21 and score2 > 21:\n if score1 - 21 < score2 - 21:\n return 'player1'\n else:\n return 'player2'\n elif score2 < 21 < score1:\n return 'player2'\n elif score1 < 21 < score2:\n return 'player1'\n elif score1 < 21 and score2 < 21:\n if score1 - score2 > 0:\n return 'player1'\n else:\n return 'player2'\n else:\n return None", "def SelectPlayer(self):\n\n player = input(data['player'])\n if player == \"1\":\n return 0\n elif player == \"2\":\n return 1\n else:\n return 'invalid'", "def other_player_status(p: models.Player):\n return {'id': p.id,\n 'name': p.name,\n 'tricks': p.tricks,\n 'cards': p.card_count,\n 'bid': p.bid}", "def prepare(p1, p2):\n n1 = recv_msg(p1)\n n2 = recv_msg(p2)\n\n out('The name of player 1 is ' + n1)\n out('The name of player 2 is ' + n2)\n\n send_msg(p1, n2)\n send_msg(p2, n1)\n\n return False" ]
[ "0.7322427", "0.719487", "0.71915984", "0.71570975", "0.7128614", "0.7037134", "0.7009901", "0.69278514", "0.6833396", "0.6795355", "0.6771366", "0.6737287", "0.64921343", "0.6477016", "0.63291126", "0.63171285", "0.62973", "0.62973", "0.62973", "0.62903047", "0.6243002", "0.6198836", "0.6198836", "0.6189466", "0.61864525", "0.6171854", "0.616602", "0.6156774", "0.6140084", "0.6124493" ]
0.73625857
0
Return whether move is a valid move for this GameState.
def is_valid_move(self, move: Any) -> bool: return move in self.get_possible_moves()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def valid_bool(self):\n return bool(self.piece.validate_move(self.board, self))", "def is_valid_move(self, move):\n if type(move) == str:\n move = int(move)\n\n return move in self.get_possible_moves()", "def valid_move(self, player, move):\n return (True)", "def check_move(self, move):\n\n if str(move) in self.moves_made:\n return False\n return True", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "def isValidMove(self, move: Move) -> bool:\n # TODO: How do we determine the move type?\n # Some form of duck typing?\n minigame_move_classes = {\n \"BuyPrivateCompany\": \"BuyPrivateCompanyMove\",\n \"BiddingForPrivateCompany\": \"BuyPrivateCompanyMove\",\n }\n return minigame_move_classes.get(self.minigame_class) == move.__class__.__name__", "def is_move_valid(self, direction, reference_board=None):\n # Verify a left move does not take you off the board.\n if (direction == \"l\"):\n if (self._current_loc.get_column() == 0):\n return False\n # Verify an up move does not take you off the board.\n elif (direction == \"u\"):\n # Verify the move does not take you off the board.\n if (self._current_loc.get_row() == 0):\n return False\n # Verify a right move does not take you off the board.\n elif (direction == \"r\"):\n current_row = self._current_loc.get_row()\n max_column_number = len(self._untraversed_board[current_row])\n if self._current_loc.get_column() + 1 == max_column_number:\n return False\n # Verify a down move does not take you off the board.\n elif (direction == \"d\"):\n if self._current_loc.get_row() + 1 == len(self._untraversed_board):\n return False\n else:\n assert False, \"Invalid move direction.\"\n\n # Get the new location for a move in the specified direction.\n new_location = self._calculate_move_location(direction)\n new_row = new_location.get_row()\n new_col = new_location.get_column()\n # Verify the space is available\n if(reference_board is None):\n return BoardPath._untraversed_board[new_row][new_col] != \"#\"\n else:\n return reference_board[new_row][new_col] != \"#\"", "def legal_move(self, move, state = None):\n if state is None:\n state = copy(self.state)\n else:\n state = copy(state)\n return state[move // state.shape[0], move % state.shape[0]] == 0", "def valid_move(self, player, move):\n if self.rounds < len(self.players):\n if ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or not (True in [(pt in player.corners) for pt in move])):\n return (False)\n else:\n return (True)\n\n elif ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or (self.board).adj(player, move)\n or not (self.board).corner(player, move)):\n return (False)\n\n else:\n return (True)", "def move_valid(move):\n return True", "def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return 
False\r\n \r\n return True", "def is_valid_move(self, position, dest_square):\n if self.symbol.isupper() and position.turn != 'w':\n return False\n elif self.symbol.islower() and position.turn != 'b':\n return False\n elif dest_square not in self.calculate_scope(position):\n return False\n else:\n return True", "def _isvalidmove(self, from_, to_):\n if self.board[from_].occupant is None:\n print(\"Moving from empty square\")\n return False\n piece = self.board[from_].occupant\n\n if piece.color != self.to_move:\n print(\"Wrong color\")\n return False\n\n if self.is_checked:\n if piece.notation != 'K':\n print(\"King is checked!\")\n return False\n\n diff = (\n to_cartesian(to_)[0] - to_cartesian(from_)[0],\n to_cartesian(to_)[1] - to_cartesian(from_)[1]\n )\n if not piece.hopping:\n if self.board.isblocked(from_, to_):\n print(\"Move blocked by other pieces\")\n return False\n\n if self.board[to_].occupant is not None:\n if piece.color == self.board[to_].occupant.color:\n print(\"Cannot capture friendly\")\n return False\n\n if diff not in piece.get_captures():\n print(\"Invalid piece capture\")\n return False\n\n if diff not in piece.get_moves():\n print(\"Invalid piece move\")\n return False\n\n return True", "def check_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n # 1. moving out of the bar\n # 2. check if the source is of the valid player\n # 3. check if the destination is valid\n\n board.set_player_perspective(player)\n\n # 1.\n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n if board.bar[player] < 1:\n return False\n\n if not board.valid_dest(fields_to_move - 1):\n return False\n\n return True\n\n # 2.\n if not board.valid_source(spike_index):\n return False\n # 3.\n dest_spike_index = spike_index + fields_to_move\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board.all_at_home()\n \n return board.valid_dest(dest_spike_index)", "def is_valid_move(self, position: Point) -> bool:\n\t\tif self.tiles[position.x][position.y] == 0:\n\t\t\treturn True\n\t\treturn False", "def is_valid(self, move):\r\n return move > 10 and move < 89", "def move_is_valid(self, pos):\n\n if (not isinstance(pos, tuple) or len(pos) != 2 or \n not isinstance(pos[0], int) or not isinstance(pos[1], int)):\n return False\n y, x = pos\n if (y >= 0 and y < self.size and x >= 0 and x < self.size and \n self.board[pos] == HexBoard.EMPTY):\n return True\n else:\n return False", "def is_valid(move):\n return isinstance(move, int) and move in Othello.squares()", "def has_valid_move(self, cur_square, board):\n coords = cur_square.coords\n neighbor_list = [tuple(map(sum, zip(coords, offset))) for offset in self._offsets]\n return self.has_valid_move_in_list(coords, neighbor_list, board)", "def checkValidMove(self, move):\n boardCopy = copy.deepcopy(self)\n tilesChange = False\n if move == Move.UP:\n boardCopy.moveUp()\n elif move == Move.DOWN:\n boardCopy.moveDown()\n elif move == Move.LEFT:\n boardCopy.moveLeft()\n elif move == Move.RIGHT:\n boardCopy.moveRight()\n else:\n raise ValueError('Invalid Move was input')\n \n for i in range(4):\n for j in range(4):\n if boardCopy.getTile(i,j) != self.getTile(i,j):\n tilesChange = True\n del(boardCopy)\n return tilesChange", "def is_move_valid(self, from_row, from_col, to_row, to_col):\n # check is taking own piece?\n if self._is_taking_own_piece(from_row, from_col, to_row, to_col):\n return False\n\n piece = self.board.squares[from_row][from_col]\n if piece == ChessPiece.W_ROOK or piece == ChessPiece.B_ROOK:\n return 
self.is_rook_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_KNIGHT or piece == ChessPiece.B_KNIGHT:\n return self.is_knight_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_BISHOP or piece == ChessPiece.B_BISHOP:\n return self.is_bishop_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_QUEEN or piece == ChessPiece.B_QUEEN:\n return self.is_queen_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_KING or piece == ChessPiece.B_KING:\n return self.is_king_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_PAWN or piece == ChessPiece.B_PAWN:\n return self.is_pawn_move_valid(from_row, from_col,\n to_row, to_col)", "def validate_move(move):\n if move[0] in cc.VALID_RANKS and move[1] in cc.VALID_RANKS:\n valid = True\n else:\n valid = False\n return valid", "def can_move(self):\n return self.movement", "def is_valid_move(self, board, fieldy, fieldx):\n if isinstance(board[fieldy][fieldx], Piece):\n return False\n if self.posy - fieldy == self.direction and abs(self.posx - fieldx) == 1:\n return True\n else:\n return False", "def is_valid_move(self, somerow, somecol):\n bool_1 = self.board[somerow][somecol] != 1\n bool_2 = self.num_queens_placed < self.size \n bool_3 = self.attack(somerow, somecol)\n return bool_1 and bool_2 and bool_3", "def check_valid_move(grid: np.ndarray, current_position: tuple, move: tuple) -> bool:\n # getting coordinates for moved position\n moved_position = tuple(np.add(current_position, move))\n\n def compare_coordinates(a: tuple, b: tuple) -> bool:\n \"\"\"\n Helper function to compare coordinates\n Checks if a is smaller than b\n \"\"\"\n return all(np.array(a) < np.array(b))\n\n # checking if coordinates are inside the array (between (0,0) and (N,N))\n if compare_coordinates((0, 0), moved_position) and compare_coordinates(moved_position, grid.shape):\n # checking if the coordinates are not on the obstacle\n if grid[moved_position] == 'x':\n return False\n else:\n return True\n else:\n return False", "def is_moving(self) -> bool:\n return self.orders and self.orders[0].ability.id is AbilityId.MOVE", "def validate_move(move, player_board):\n select_row = move.select_row\n select_col = move.select_col\n \n player_board_rows = player_board.shape[0]\n player_board_cols = player_board.shape[1]\n \n if select_row >= player_board_rows or select_row < 0 or \\\n select_col >= player_board_cols or select_col < 0 or \\\n player_board[select_row][select_col] != -1:\n return False\n \n return True", "def is_move_valid(move: Move, board: Board, whites_turn: bool) -> bool:\n if out_of_bounds(move[0]) == True or out_of_bounds(move[1]) == True:\n return False\n \n if move[0] == move[1]:\n return False\n\n if is_current_players_piece(piece_at_position(move[0], board), False) and whites_turn == True:\n return False\n elif is_current_players_piece(piece_at_position(move[0], board), True) and whites_turn == False:\n return False\n\n\n if piece_at_position(move[1], board) in WHITE_PIECES and whites_turn == True:\n return False\n elif piece_at_position(move[1], board) in BLACK_PIECES and whites_turn == False:\n return False\n\n\n if move[1] not in get_possible_moves(move[0], board):\n return False\n\n\n test_board = board\n test_board = update_board(test_board, move)\n if is_in_check(test_board, True) and whites_turn == True:\n return False\n elif is_in_check(test_board, False) and whites_turn == False:\n return False\n\n return True" ]
[ "0.8282215", "0.8074329", "0.80526775", "0.7981014", "0.78986144", "0.78754056", "0.7871537", "0.78681695", "0.7756859", "0.7713244", "0.7703341", "0.76398927", "0.7610206", "0.75776756", "0.75525403", "0.75510347", "0.7432144", "0.73722756", "0.7332168", "0.72985274", "0.72787625", "0.7255667", "0.72550565", "0.72518146", "0.72038835", "0.7189443", "0.716191", "0.716111", "0.7146476", "0.714388" ]
0.8471639
0
Return an estimate in interval [LOSE, WIN] of best outcome the current player can guarantee from state self.
def rough_outcome(self) -> float: # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE # pick move based on this may not be optimal but better than random # return 1 if win immediately # return -1 if all states reachable will result the other player win # return 0 if otherwise ??? what the fuck does this mean # look two states forward pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rough_outcome(self) -> float:\n if is_win(self):\n return 1\n elif is_lose(self):\n return -1\n return 0", "def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move", "def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move", "def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move", "def reward(self, winner):\n if winner == self.side:\n return self.win\n elif winner == VALUES.NOT_FINISHED:\n return self.not_finished\n elif winner == VALUES.DRAW:\n return self.draw\n else:\n return self.lose", "def calc_winner(self):\n pass", "def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))", "def rough_outcome(self) -> float:\n\n if self.p1_turn:\n name = '2'\n else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = (self.get_possible_moves() == []) or \\\n (count >= 0.5 * len(self.claim))\n\n result = []\n if over:\n return -1\n else:\n for move in self.get_possible_moves():\n new_state = self.make_move(move)\n if new_state.rough_outcome() == -1:\n result.append(1)\n else:\n result.append(0)\n if 1 in result:\n return 1\n return -1", "def __status(self):\r\n if self.__currentCell == 
self.storageCell:\r\n return Status.WIN\r\n\r\n if self.__totalReward < self.__rewardThreshold: # force end of game after to much loss\r\n return Status.LOSE\r\n\r\n return Status.PLAYING", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def winner(self):\n state = self._state['visible']\n if state['reserve'][0] < 1:\n return 1\n elif state['reserve'][1] < 1:\n return 0\n return -1", "def winner(self):\n\n if self.home_score > self.away_score:\n return HOME\n elif self.home_score < self.away_score:\n return VISITOR\n else:\n return TIE", "def evaluateWinner(self):\n\t\tif self.pots[-1] == 0:\n\t\t\tself.pots.pop()\n\t\tlivePlayers = self.getLivePlayers()\t\n\t\tfor i in range(len(self.pots)):\n\t\t\tplayers = self.getPlayersInPot(i, livePlayers)\n\t\t\tevaluations = []\n\t\t\tfor x in players:\n\t\t\t\tcombined = x.hand + self.communityCards\n\t\t\t\tevaluations.append((x, self.evaluator.getRankOfSeven(\tcombined[0], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[1], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[2], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[3], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[4], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[5], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[6] )))\n\t\t\twinners = self.getWinners(evaluations, i)\n\t\t\tself.handOutMoney(winners, i)\n\t\t\tself.potwinQ.append(winners[0].name)", "def _determine_outcome(\n self,\n accept: AcceptanceCriterion,\n best: State,\n curr: State,\n cand: State,\n ) -> Outcome:\n outcome = Outcome.REJECT\n\n if accept(self._rnd_state, best, curr, cand): # accept candidate\n outcome = Outcome.ACCEPT\n\n if cand.objective() < curr.objective():\n outcome = Outcome.BETTER\n\n if cand.objective() < best.objective(): # candidate is new best\n logger.info(f\"New best with objective {cand.objective():.2f}.\")\n outcome = Outcome.BEST\n\n return outcome", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. 
opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def predict_winner(self):\n\t\tif len(self.players) > 1:\n\t\t\t# TODO: convert to using of max() function\n\t\t\twinner = self.players[0]\n\t\t\tfor player in self.players:\n\t\t\t\tif player.wr > winner.wr:\n\t\t\t\t\twinner = player\n\t\t\treturn winner\n\t\telse:\n\t\t\treturn None", "def Pwin(state):\n # Assumes opponent also plays with optimal strategy\n p, me, you, pending = state\n if me + pending >= goal:\n return 1\n elif you >= goal:\n return 0\n else:\n return max(Q_pig(state, action, Pwin) for action in pig_actions(state))", "def utility(self, state, player):\n if state.isWin() or state.isLose():\n return state.getScore()\n\n # In case of cycle.\n if player == PACMAN:\n return INFINITY\n else:\n return -INFINITY", "def get_winner(self) -> int:\n return self._win_state", "def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]", "def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0", "def showBestGainWon(self) :\n bestGainWon = 0\n for level in self.level_history :\n bestGainWon = level.profit if bestGainWon < level.profit else bestGainWon\n Scenario.messageGetBestGainWon(bestGainWon)", "def Pwin(state):\n # Assumes opponent also plays with optimal strategy.\n (p, me, you, pending) = state\n if me + pending >= goal:\n return 1\n elif you >= goal:\n return 0\n else:\n return max(Q_pig(state, action, Pwin)\n for action in pig_actions(state))", "def evaluate(self, mode=0):\r\n winner = self.determine_winner()\r\n if winner:\r\n return winner * self.WIN_SCORE\r\n\r\n if mode == 1:\r\n return self.centre_priority_evaluate()\r\n elif mode == 2:\r\n return 0.5 * (self.centre_priority_evaluate() + self.piece_evaluate())\r\n else:\r\n return self.piece_evaluate()", "def evaluate_state(state):\n\n my_score = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n other_score = get_action_score(state.action[0], state.action[1], state.player, state.occupied)\n \n return max(my_score, other_score)", "def showWorstGainWon(self) :\n worstGainWon = self.level_history[0].profit\n for level in self.level_history :\n worstGainWon = level.profit if ((worstGainWon > level.profit) and (level.result == 1)) else worstGainWon\n Scenario.messageGetWorstGainWon(worstGainWon)", "def get_winner(state):\n state_val = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n if state_val == 100:\n return state.action_player\n elif len(state.available_moves) == 0:\n return 0\n else:\n return -1", "def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * 
ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result", "def evaluate_board(self, board):\n \n win_score = 100\n win_or_loss_score = 50\n lose_score = 0\n \n if board.win_for(self.opponent()):\n return lose_score\n if board.win_for(self.side):\n return win_score\n if not board.win_for(self.side) or not board.win_for(self.opponent()):\n return win_or_loss_score", "def minimax_decision(gameState):\n value = -sys.maxsize\n best_value = -sys.maxsize\n best_move = None\n legal_moves = gameState.get_legal_moves()\n for move in legal_moves:\n game = gameState.forecast_move(move)\n value = max(value, min_value(game))\n if value > best_value:\n best_value = value\n best_move = move\n return best_move" ]
[ "0.7338502", "0.7059583", "0.7059583", "0.7059583", "0.7020939", "0.6829376", "0.6702647", "0.6694379", "0.66560304", "0.6624065", "0.6616302", "0.6588654", "0.6566183", "0.6555869", "0.6547012", "0.6531186", "0.6518109", "0.6501664", "0.6488798", "0.6458663", "0.6427033", "0.64219874", "0.6408247", "0.6403664", "0.6400599", "0.63933843", "0.6387703", "0.6387276", "0.6378572", "0.6365341" ]
0.73625016
0
Set common fields in layer to addressing dictonary.
def set_address_values(layer): cursor = arcpy.SearchCursor(layer) for row in cursor: layer_fields = arcpy.ListFields(layer) for x in range(len(layer_fields)): layer_fields[x] = layer_fields[x].name for key in address_dict: if key in layer_fields and address_dict.get(key) is None: address_dict[key] = row.getValue(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_attrs(self, **kwargs) -> None:\n self._obj.coords[GEO_MAP_COORD].attrs.update(**kwargs)", "def update_asop_dict(asop_dict,region,coords,color,all_settings):\n # Set unique color\n asop_dict['color'] = color\n\n # Apply any general user settings\n asop_dict['grid_desc'] = all_settings.get('grid','native')\n asop_dict['grid_type'] = all_settings.get('grid','native')\n asop_dict['region_name'] = region\n asop_dict['region_desc'] = region.replace('_',' ')\n asop_dict['region'] = coords\n\n # Edit dx for region\n mean_lat = np.mean(coords[0:2])\n asop_dict['dx'] = asop_dict['dx'] * np.cos(np.radians(mean_lat))\n all_settings.pop('infile','') # key not allowed\n for key in asop_dict:\n if key in all_settings:\n asop_dict[key] = all_settings[key]\n\n # Apply any specific file settings\n infile = os.path.basename(asop_dict['infile'])\n file_settings = settings.get(infile,{})\n file_settings.pop('infile','') # key not allowed\n file_settings.pop('region','')\n if file_settings:\n for key in file_settings:\n asop_dict[key] = file_settings[key]\n if 'legend_name' not in file_settings:\n asop_dict['legend_name'] = asop_dict['name'].replace('_',' ')\n\n print('---> Final data dictionary:')\n print(json.dumps(asop_dict, sort_keys=True, indent=2))\n\n return asop_dict", "def __setAttributes(self):\n values = {\"f\":\"json\"}\n layerInfo = self._getEsriRESTJSON(self.url,values)\n #Geometry Type\n geometryType = getGeometryType(layerInfo['geometryType'])\n self.geometryType = geometryType\n #Name\n name=arcpy.ValidateTableName(layerInfo['name'])\n self.name=name\n #Spatial Reference - both the wkid and the arcpy SpatialReference object\n #in case it's in a wkt\n try:\n wkid = layerInfo['extent']['spatialReference']['wkid']\n except:\n wkid = 4326\n sr = arcpy.SpatialReference()\n sr.factoryCode = int(wkid)\n sr.create()\n self.sr = sr\n self.wkid = wkid\n #field used to update the feature class are a subset of all the fields in a feature class\n fields = layerInfo['fields']\n updateFields = []\n for field in fields:\n if (field['type'] in ['esriFieldTypeOID','esriFieldTypeGeometry','esriFieldTypeGUID'] or 'shape' in field['name'].lower() or field['name'] in self.userFields):\n pass\n else:\n updateFields.append(field)\n updateFields.insert(0, {\"name\":'Shape@', \"type\":\"esriFieldTypeGeometry\"})\n self.updateFields = updateFields\n #Max values\n if layerInfo.has_key('maxRecordCount'):\n self.maxRecordCount = int(layerInfo['maxRecordCount'])\n else:\n self.maxRecordCount = 1000", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def overwrite_field(self,cells=None,edges=None,source='depth_max',target='depth_mean'):\n if cells is not None:\n self.cells[target][cells]=self.cells[source][cells]\n if edges is not None:\n self.edges[target][edges]=self.edges[source][edges]", "def __setattr__(self, k, v):\n if k[:1] != '_' and \\\n not k in ('dimensions', 'typecode'):\n if k not in self._ncattrs:\n self._ncattrs += (k, )\n object.__setattr__(self, k, v)", "def __init__(self):\n\n for layer in self._layer_class_map:\n setattr(self, layer, self._layer_class_map[layer]())", "def _configure(self):\n from .topology import FieldBase\n\n Component._configure(self)\n\n mapBasis = {\n \"simplex\": FieldBase.SIMPLEX_BASIS,\n \"tensor\": FieldBase.TENSOR_BASIS,\n \"default\": FieldBase.DEFAULT_BASIS,\n }\n self.cellBasis = mapBasis[self.inventory.cellBasisStr]\n\n mapSpace = {\n \"polynomial\": FieldBase.POLYNOMIAL_SPACE,\n \"point\": FieldBase.POINT_SPACE,\n }\n 
self.feSpace = mapSpace[self.inventory.feSpaceStr]\n return", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def set_org_and_space_dicts(self, org_dict, space_dict):\n self._space = space_dict\n self._org = org_dict\n return self", "def __init__ (self, d):\n try:\n self.__dict__.update (d.__dict__)\n except:\n self.__dict__.update (d)", "def prepareMapping(self, layer, scheme):\n mapping = {}\n mapping['geometry'] = layer.geom_type.name\n for field_name, layer_field in mappingScheme.items():\n field = self.getModel(layer)._meta.get_field(field_name)\n if isinstance(layer_field, dict):\n subMapping = {}\n layer_fields = layer_field\n for rel_field_name, layer_field in layer_fields.items():\n if layer_field in layer.fields:\n subMapping[rel_field_name] = layer_field\n if subMapping:\n mapping[field_name] = subMapping\n elif layer_field in layer.fields:\n mapping[field_name] = layer_field\n if not field.null and field_name not in mapping:\n raise ValueError('%s does not exist on layer' % layer_field)\n return mapping", "def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True", "def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)", "def _set_attributes(self):", "def __extract_common_attrs(self, raw_data: Dict) -> None:\n for attr in self.COMMON_ATTRS:\n if attr not in self.ATTRS and attr in raw_data:\n setattr(self, attr, raw_data[attr])", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def set_properties(struct):", "def _set_default_init_field_attributes(self, n_dims=None):\n\n # we use the module defaults for the datasets to initialize them\n field_feature_shapes = dict(FIELD_FEATURE_SHAPES)\n field_feature_dtypes = dict(FIELD_FEATURE_DTYPES)\n\n\n # get the number of coordinates of positions. If there is a\n # main_reps then we have to set the number of atoms to that,\n # if not we count the number of atoms in the topology\n if self._main_rep_idxs is None:\n self._n_coords = json_top_atom_count(self.topology)\n self._main_rep_idxs = list(range(self._n_coords))\n else:\n self._n_coords = len(self._main_rep_idxs)\n\n # get the number of dimensions as a default\n if n_dims is None:\n self._n_dims = N_DIMS\n\n # feature shapes for positions and positions-like fields are\n # not known at the module level due to different number of\n # coordinates (number of atoms) and number of dimensions\n # (default 3 spatial). 
We set them now that we know this\n # information.\n # add the postitions shape\n field_feature_shapes[POSITIONS] = (self._n_coords, self._n_dims)\n # add the positions-like field shapes (velocities and forces) as the same\n for poslike_field in POSITIONS_LIKE_FIELDS:\n field_feature_shapes[poslike_field] = (self._n_coords, self._n_dims)\n\n # set the attributes\n self._field_feature_shapes = field_feature_shapes\n self._field_feature_dtypes = field_feature_dtypes", "def __init__(self, dict1):\n self.__dict__.update(dict1)", "def __setattr__(self, key, value):\n if isinstance(value, DotDict) and key != '_parent':\n value.__dict__['_parent'] = weakref.proxy(self)\n super(DotDictWithAcquisition, self).__setattr__(key, value)", "def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))", "def _set_attrs(ds, **attrs_map):\n for key in attrs_map:\n val = attrs_map[key] # Use Python 2/3 agnostic style\n ds.attrs[key] = val", "def update(self, other=[], **kwargs):\n if ismapping(other):\n other = other.items()\n\n for key, value in other:\n self[key] = value\n\n for key, value in kwargs.items():\n self[key] = value", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def _extend_network_dict_provider(self, context, network, bindings=None):\n if 'id' not in network:\n return\n if not bindings:\n bindings = nsx_db.get_network_bindings(context.session,\n network['id'])\n\n # With NSX plugin, \"normal\" overlay networks will have no binding\n if bindings:\n # Network came in through provider networks API\n network[pnet.NETWORK_TYPE] = bindings[0].binding_type\n network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid\n network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id", "def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n self.__dict__[key] = val\n\n if 'scale_params' in self.__dict__.keys():\n self.scale_params.set_params(dic)\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n self.atmospheric_params.set_params(dic)\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n self.atemperature_params.set_params(dic)\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n self.oceanic_params.set_params(dic)\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n self.ground_params.set_params(dic)\n\n if 'otemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)\n\n if 'gtemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n self.gotemperature_params.set_params(dic)", "def set_standard_attrs(da):\n da.coords[\"lat\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"latitude\"),\n (\"units\", \"degrees_north\"),\n (\"axis\", \"Y\"),\n (\"long_name\", \"latitude\"),\n (\"out_name\", \"lat\"),\n (\"stored_direction\", \"increasing\"),\n (\"type\", \"double\"),\n (\"valid_max\", \"90.0\"),\n (\"valid_min\", \"-90.0\"),\n ]\n )\n da.coords[\"lon\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"longitude\"),\n (\"units\", \"degrees_east\"),\n (\"axis\", \"X\"),\n (\"long_name\", \"longitude\"),\n (\"out_name\", \"lon\"),\n (\"stored_direction\", \"increasing\"),\n 
(\"type\", \"double\"),\n (\"valid_max\", \"180.0\"),\n (\"valid_min\", \"-180.0\"),\n ]\n )\n da.coords[\"depth_coord\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"depth\"),\n (\"units\", \"m\"),\n (\"axis\", \"Z\"),\n (\"long_name\", \"ocean depth coordinate\"),\n (\"out_name\", \"lev\"),\n (\"positive\", \"down\"),\n (\"stored_direction\", \"increasing\"),\n (\"valid_max\", \"12000.0\"),\n (\"valid_min\", \"0.0\"),\n ]\n )\n da.coords[\"time\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"time\"),\n (\"axis\", \"T\"),\n (\"long_name\", \"time\"),\n (\"out_name\", \"time\"),\n (\"stored_direction\", \"increasing\"),\n ]\n )\n da.coords[\"time\"].encoding[\"units\"] = \"days since '1900-01-01'\"\n\n return da", "def update_general(info, key, val):\n\n info[\"model_params\"][key] = val", "def __config_attributes(self):\n self.__name = self.__data[self.__code][\"airportName\"]\n self.__country = Country(name=self.__data[self.__code][\"countryName\"],\n code=self.__data[self.__code][\"countryCode\"])\n try:\n self.__city = self.__data[self.__code][\"city\"]\n except Exception:\n self.__city = ''" ]
[ "0.57628345", "0.55964243", "0.55274314", "0.5520741", "0.5513947", "0.54532826", "0.54528964", "0.5352825", "0.53380233", "0.52978295", "0.52767116", "0.5263793", "0.5233308", "0.52296996", "0.51820993", "0.51604235", "0.51503444", "0.51213574", "0.5110379", "0.51047695", "0.50988513", "0.508823", "0.506627", "0.5055422", "0.50306296", "0.5018073", "0.50153625", "0.50129914", "0.50034714", "0.5000164" ]
0.6680822
0
Get AWS ECS task information. For the purpose of getting the EC2 instance id for a given AWS ECS task name, for now, only the 'containerInstanceArn' is fetched from the AWS ECS task.
def get_tasks_information( task: str, list_tasks: str, cluster=CLUSTER_NAME, client=None, region=REGION, ): if not client: session = boto3.session.Session() client = session.client("ecs", region) try: # Get all tasks in the cluster. cluster_tasks = client.list_tasks(cluster=cluster)["taskArns"] logger.debug(f"[CLUSTERTASKS]: '{cluster_tasks}'.") tasks = client.describe_tasks(cluster=cluster, tasks=cluster_tasks)[ "tasks" ] logger.debug(f"[TASKS]: '{tasks}'.") # Filter for given task name. # Get instance id, container_instances = [] task_name = "" for task_ in tasks: task_definition = task_.get("taskDefinitionArn", "") if list_tasks: container_instances.append(task_definition) continue container_instance_arn = task_.get("containerInstanceArn", None) if container_instance_arn: if not list_tasks: if re.search(task, task_definition): container_instances.append(container_instance_arn) task_name = task_definition break else: container_instances.append(container_instance_arn) if list_tasks: return "\n".join(container_instances) instances = describe_instances_with_cluster( container_instances=container_instances, cluster=cluster, client=client, region=region, ) if not instances: return "" logger.info(f"Instance '{instances[0]}' runs task '{task_name}'.") return instances[0] except (botocore.exceptions.ClientError) as e: # TODO: Check right error code. if e.response["Error"]["Code"] == "ClusterNotFoundException": logger.error(f"Cluster '{cluster}' not found: {str(e)}.") else: logger.error(f"Error: {str(e)}") sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(profile, cluster, tasks):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"tasks\"] = tasks\n return client.describe_tasks(**params)", "def get_task_details(self) -> task.TaskMetadata:\n return task.TaskMetadata(\n name=self.name,\n description=self.task_data[\"description\"],\n keywords=self.task_data[\"keywords\"],\n max_input_length_per_query=self.task_data[\"max_input_length\"],\n max_queries=self.task_data[\"max_queries\"],\n )", "def get_task_info(self):\n\n print()\n employee_name = self.task.get_employee_name()\n task_name = self.task.get_task_name()\n mins = self.task.get_time_spent()\n notes = self.task.get_notes()\n date = self.task.get_date()\n\n task = {\n 'employee_name': employee_name,\n 'task_name': task_name,\n 'mins': mins,\n 'notes': notes,\n 'date': date\n }\n\n return task", "def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")", "def createRunTaskDefinition(options):\n\n # ECS Cluster to connect to\n ecsCluster = options.cluster\n # One-off task parameters\n refTaskDefName = options.from_task\n containerCommand = options.command\n containerEntrypoint = options.entrypoint\n containerImage = options.image\n oneOffTaskName = options.task_name\n oneOffTaskLaunchType = options.launch_type\n oneOffTaskNetsId = options.networks_id\n oneOffTaskSgsId = options.security_groups_id\n # Container log group name and log stream prefix for CloudWatch\n oneOffTaskContainerLogGroup = f\"/ecs/{oneOffTaskName}\"\n oneOffTaskContainerLogStreamPrefix = \"ecs\"\n\n # Check if the network configuration is provided when the launch type is FARGATE\n if oneOffTaskLaunchType == \"FARGATE\" and (not oneOffTaskNetsId or not oneOffTaskSgsId):\n print(\"Error: for launch type 'FARGATE' the network configuration must be provided using the `--networks-id` and `--security-groups-id` flags.\")\n sys.exit(1)\n\n # Get the latest active task definition from refTaskDefName\n latestActiveTaskDef = ecs.describe_task_definition(\n taskDefinition=refTaskDefName\n )\n\n # Remove unnecessary keys from the task definition\n # See https://github.com/aws/aws-cli/issues/3064#issuecomment-504681953\n del latestActiveTaskDef['taskDefinition']['taskDefinitionArn']\n del latestActiveTaskDef['taskDefinition']['revision']\n del latestActiveTaskDef['taskDefinition']['status']\n # This key is only present when are required some attributes such as S3 environment files\n try:\n del latestActiveTaskDef['taskDefinition']['requiresAttributes']\n except KeyError:\n pass\n del latestActiveTaskDef['taskDefinition']['compatibilities']\n del latestActiveTaskDef['ResponseMetadata']\n # Added in recent versions of boto3 (1.17.64). 
For backward compatibility we use exceptions\n try:\n del latestActiveTaskDef['taskDefinition']['registeredAt']\n except KeyError:\n pass\n try:\n del latestActiveTaskDef['taskDefinition']['registeredBy']\n except KeyError:\n pass\n\n # Get the secrets, environment files and environment variables for the first container\n containerSecrets = latestActiveTaskDef['taskDefinition']['containerDefinitions'][0].get('secrets', None)\n containerEnvFiles = latestActiveTaskDef['taskDefinition']['containerDefinitions'][0].get('environmentFiles', None)\n containerEnv = latestActiveTaskDef['taskDefinition']['containerDefinitions'][0].get('environment', None)\n # Get the execution role ARN for the task\n execRoleArn = latestActiveTaskDef['taskDefinition'].get('executionRoleArn', None)\n\n if oneOffTaskLaunchType == \"EC2\":\n # Build the one-off task definition for EC2\n oneOffTaskDef = {\n \"executionRoleArn\": execRoleArn,\n \"containerDefinitions\": [\n {\n \"environmentFiles\": [],\n \"secrets\": [],\n \"environment\": [],\n \"entryPoint\": [],\n \"portMappings\": [],\n \"command\": containerCommand,\n \"cpu\": 128,\n \"memory\": 400,\n \"memoryReservation\": 300,\n \"volumesFrom\": [],\n \"image\": containerImage,\n \"name\": oneOffTaskName,\n \"logConfiguration\": {\n \"logDriver\": \"awslogs\",\n \"options\": {\n \"awslogs-group\": oneOffTaskContainerLogGroup,\n \"awslogs-region\": awsRegion,\n \"awslogs-stream-prefix\": oneOffTaskContainerLogStreamPrefix\n }\n }\n }\n ],\n \"family\": oneOffTaskName\n }\n else:\n # Build the one-off task definition for Fargate\n oneOffTaskDef = {\n \"executionRoleArn\": execRoleArn,\n \"containerDefinitions\": [\n {\n \"environmentFiles\": [],\n \"secrets\": [],\n \"environment\": [],\n \"entryPoint\": [],\n \"portMappings\": [],\n \"command\": containerCommand,\n \"cpu\": 128,\n \"memory\": 400,\n \"memoryReservation\": 300,\n \"volumesFrom\": [],\n \"image\": containerImage,\n \"name\": oneOffTaskName,\n \"logConfiguration\": {\n \"logDriver\": \"awslogs\",\n \"options\": {\n \"awslogs-group\": oneOffTaskContainerLogGroup,\n \"awslogs-region\": awsRegion,\n \"awslogs-stream-prefix\": oneOffTaskContainerLogStreamPrefix\n }\n }\n }\n ],\n \"family\": oneOffTaskName,\n \"networkMode\": \"awsvpc\",\n \"requiresCompatibilities\": [\n \"FARGATE\"\n ],\n \"cpu\": \"256\",\n \"memory\": \"512\"\n }\n\n # Update task definition with optionals keys\n if containerEntrypoint:\n oneOffTaskDef['containerDefinitions'][0].update({\"entryPoint\": containerEntrypoint.split(' ')})\n\n if containerEnvFiles:\n oneOffTaskDef['containerDefinitions'][0].update({\"environmentFiles\": containerEnvFiles})\n\n if containerSecrets:\n oneOffTaskDef['containerDefinitions'][0].update({\"secrets\": containerSecrets})\n\n if containerEnv:\n oneOffTaskDef['containerDefinitions'][0].update({\"environment\": containerEnv})\n\n # Create a new task revision for the one-off task\n response = ecs.register_task_definition(**oneOffTaskDef)\n\n # Get the one-off task definition ARN\n oneOffTaskDefArn = response['taskDefinition']['taskDefinitionArn']\n\n print(f\"==> Created the task definition: {oneOffTaskDefArn}\")\n\n # Create the one-off task container CloudWatch Log Group if does not exists\n print(\"\\n\" + createCloudWatchLogGroup(logGroupName=oneOffTaskContainerLogGroup))\n\n # Run the one-off task with the created task definition (oneOffTaskDefArn)\n if oneOffTaskLaunchType == \"EC2\":\n response = ecs.run_task(\n cluster=ecsCluster,\n taskDefinition=oneOffTaskDefArn\n )\n else:\n response = 
ecs.run_task(\n cluster=ecsCluster,\n taskDefinition=oneOffTaskDefArn,\n launchType='FARGATE',\n networkConfiguration={\n 'awsvpcConfiguration': {\n 'subnets': oneOffTaskNetsId,\n 'securityGroups': oneOffTaskSgsId,\n 'assignPublicIp': 'DISABLED'\n }\n }\n )\n\n # Get the one-off run task ARN\n oneOffTaskRunArn = response['tasks'][0]['taskArn']\n\n print(f\"\\n==> Executed task ARN: {oneOffTaskRunArn}\")\n print(\"\\nWaiting for the task to finishes...\")\n\n # Wait until the one-off task is stopped\n # The poll is every 6 seconds by default and the maximun number of attempts to be made is 100\n waiter = ecs.get_waiter('tasks_stopped')\n waiter.wait(\n cluster=ecsCluster,\n tasks=[\n oneOffTaskRunArn\n ]\n )\n\n # Get the output of the stopped task\n response = ecs.describe_tasks(\n cluster=ecsCluster,\n tasks=[\n oneOffTaskRunArn\n ]\n )\n\n # Get the container exit status code and its reason\n oneOffTaskExitCode = response['tasks'][0]['containers'][0].get('exitCode')\n oneOffTaskExitCodeReason = response['tasks'][0]['containers'][0].get('reason')\n\n # Get the one-off task stopped reason\n oneOffTaskStopeedReason = response['tasks'][0].get('stoppedReason')\n\n if oneOffTaskExitCode == 0 and not oneOffTaskExitCode:\n print(\"\\n==> The one-off task process has finished correctly!!\")\n printContainerOutput(logGroupName=oneOffTaskContainerLogGroup, taskArn=oneOffTaskRunArn)\n sys.exit()\n else:\n print(\"\\n==> The one-off task has failed!!\")\n print(f\"Container exit code: {oneOffTaskExitCode}\")\n print(f\"Container exit reason: {oneOffTaskExitCodeReason}\")\n print(f\"Stopped reason: {oneOffTaskStopeedReason}\")\n printContainerOutput(logGroupName=oneOffTaskContainerLogGroup, taskArn=oneOffTaskRunArn)\n sys.exit(1)", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._mpis.task_id", "def get_task_result(self, task_name):\n logging.info(f\"Getting task: {task_name}\")\n if task_name in self._container:\n logging.info(\"Success!\")\n return self._container[task_name].result\n logging.error(f\"Could not find task: {task_name}\")\n raise TaskNotFoundException(f\"Could not find task: {task_name}\")", "def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info", "def get_task_metadata(self, task):\n return self._gdb_interface.get_task_metadata(task)", "def get_task_uuid(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskUuid', self.handle)", "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == 
\"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)", "def list_ecs_task_definitions():\n tasks = ECS_MANAGER.list_ecs_task_definitions()\n if tasks:\n print(str_sep)\n print(\"Listing task definitions available in {}\".format(SESSION.region_name.upper()))\n print(\"{:50}{:20}\".format('Task', 'Version'))\n print(str_sep)\n\n for task in tasks['taskDefinitionArns']:\n if len(task) > 0:\n task_name, version = task.rsplit(\"/\", 1)[1].split(\":\")\n print(\"{:50}{:20}\".format(task_name, version))", "def get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)", "def task(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"task\")", "async def get_task_result(task_id: TaskId):", "def getTaskName(self):\n return self._taskName", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def _mesos_task_info(self, submissionId):\n agent_id = agent_hostname = agent_port = framework_id = container_id = None\n get_state = self.driver.getState()['get_state']\n get_tasks = get_state['get_tasks']\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks', [])\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n agent_id = task['agent_id']['value']\n framework_id = task['framework_id']['value']\n\n if agent_id is not None:\n get_agents = get_state['get_agents']\n agents = get_agents['agents']\n agents_list = list(filter(lambda x: x['agent_info']['id']['value'] == agent_id, agents))\n if len(agents_list) > 0:\n agent = agents_list[0]\n agent_hostname = agent['agent_info']['hostname']\n agent_port = agent['agent_info']['port']\n agent_driver = MesosOperatorAgentDriver('{}:{}'.format(agent_hostname, agent_port))\n containers = agent_driver.getContainers()['get_containers']['containers']\n containers_list = list(filter(lambda x: x['executor_id']['value'] == submissionId, containers))\n if len(containers_list) > 0:\n container = containers_list[0]\n container_id = container['container_id']['value']\n\n return agent_id, agent_hostname, str(agent_port), framework_id, container_id", "def get_task_metadata(self, task):\n metadata_record = self._read_transaction(tx.get_task_metadata, task=task)\n return _reconstruct_metadata(metadata_record)", "def task_get(context, task_id, session=None, force_show_deleted=False):\n task_ref = _task_get(context, task_id, session=session,\n force_show_deleted=force_show_deleted)\n return _task_format(task_ref, task_ref.info)", "def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def get_task_id(self, position):\n task_id = self.stn.get_task_id(position)\n if task_id:\n return task_id\n else:\n raise TaskNotFound" ]
[ "0.6348864", "0.598766", "0.5797471", "0.5780299", "0.57383114", "0.56886894", "0.56886894", "0.56886894", "0.56886894", "0.5631942", "0.55174756", "0.5450972", "0.5419298", "0.54183954", "0.54114175", "0.5400899", "0.53903484", "0.53873485", "0.5380392", "0.53680474", "0.53631324", "0.53631324", "0.53631324", "0.53631324", "0.53631324", "0.5350871", "0.5314721", "0.52899784", "0.52896404", "0.5289018" ]
0.68460375
0
Gives a message indicating whether someone is tall enough for the attraction.
def lang_genoeg(lengte): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substantiate():", "def cliquer_sur_unité(self):", "def makeGerund(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split()\r\n for x in LoW: \r\n if 'ing' in x and x not in self.gerund: \r\n self.gerund[x] = 1\r\n elif 'ing' in x and x in self.gerund: \r\n self.gerund[x] += 1\r\n return self.gerund", "def translate_leet(phrase):", "def gk_g_checker(self, seq):\n seq = re.sub(r'гк', r'хк', seq)\n return seq", "def question_new_translate():", "def degibber(self):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def retranslate(self):\r\n pass", "def retranslate(self):\r\n pass", "def gibber(self): \n for x in self.consonants:\n if (x in self.sentence):\n \t self.sentence = self.sentence.replace(x, x+'o'+unicode(x).lower())", "def elegir_ventana(self):\r\n pass", "def gerundify(verb):\n if verb.endswith(\"e\"):\n verb = verb[:-1]\n\n if random() < 0.4:\n if (\n not verb.startswith(\"a\")\n and not verb.startswith(\"e\")\n and not verb.startswith(\"i\")\n and not verb.startswith(\"o\")\n and not verb.startswith(\"u\")\n ):\n verb = \"a-\" + verb\n\n return verb + \"ing\"", "def test_i18n28(self):\n output = self.engine.render_to_string('i18n28', {'anton': 'α', 'berta': 'β'})\n self.assertEqual(output, 'α + β')", "def test_i18n28(self):\n output = self.engine.render_to_string('i18n28', {'anton': 'α', 'berta': 'β'})\n self.assertEqual(output, 'α + β')", "def alpha(self):\r\n return self.unif[17]", "def test_i18n28(self):\n output = self.engine.render_to_string(\"i18n28\", {\"anton\": \"α\", \"berta\": \"β\"})\n self.assertEqual(output, \"α + β\")", "def nalichtingstijd(self):\n return self._nalichtingstijd.get_waarde()", "def get_translation(self):", "def test_i18n17(self):\n output = self.engine.render_to_string(\"i18n17\", {\"anton\": \"α & β\"})\n self.assertEqual(output, \"α &amp; β\")", "def test_i18n17(self):\n output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})\n self.assertEqual(output, 'α &amp; β')", "def test_i18n17(self):\n output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})\n self.assertEqual(output, 'α &amp; β')", "def gold():\r\n price = give_price_websites_1(\"https://www.tgju.org/profile/geram18\")\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"gold(per gram) : \" + format(price/10000000, '.3f') + \" mTomans\"\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" هزارتومان\" + format(price/10000000, '.3f') + \"طلا : \"", "def genlangs(self):\r\n raise NotImplementedError", "def translate():\n pass", "def feature_dict(sent, i):\n palabra=sent[i] #suponinedo que al menos tiene una palabra\n especiales= [\"á\",\"é\",\"í\",\"ó\",\"ú\", \"ü\"] #solo chequeo minusculas porque pregunto sobre el lower del string\n\n #sobre la anterior\n if i==0: #primera de la oracion\n alower=\"\"\n aistitle=False\n aisupper=False\n aisnumeric=False\n aisplural=False\n #aunder=False\n aislower=False\n aespecial=False\n else:\n alower = sent[i-1].lower()\n aistitle = sent[i-1].istitle()\n aisupper = sent[i-1].isupper()\n aisnumeric = sent[i-1].isnumeric()\n aisplural= (sent[i-1][-1:].lower() == 's')\n #aunder= (sent[i-1].find('_') >= 0)\n aislower = sent[i-1].islower()\n aespecial = (1 in [c in sent[i-1].lower() for c in especiales]),\n\n #sobre la proxima\n if i==len(sent)-1: #si es la ultima\n plower = \"\"\n pistitle = False\n pisupper = False\n pisnumeric = False\n pisplural= False\n 
#punder=False\n pislower = False\n pespecial = False\n else:\n plower = sent[i + 1].lower()\n pistitle = sent[i + 1].istitle()\n pisupper = sent[i + 1].isupper()\n pisnumeric = sent[i + 1].isnumeric()\n pisplural= (sent[i + 1][-1:].lower() == 's')\n #punder = (sent[i + 1].find('_') >= 0)\n pislower = sent[i + 1].islower()\n pespecial = (1 in [c in sent[i+1].lower() for c in especiales]),\n\n return {\n 'lower': palabra.lower(),\n 'istitle': palabra.istitle(),\n 'isupper': palabra.isupper(),\n 'isnumeric': palabra.isnumeric(),\n 'isplural': (palabra[-1:].lower() == 's'),\n #'under': (palabra.find('_') >= 0),\n 'islower': palabra.islower(),\n 'especial': (1 in [c in palabra.lower() for c in especiales]),\n 'alower': alower,\n 'aistitle': aistitle,\n 'aisupper': aisupper,\n 'aisnumeric': aisnumeric,\n 'aisplural': aisplural,\n #'aunder': aunder,\n 'aespecial': aespecial,\n 'aislower': aislower,\n 'plower': plower,\n 'pistitle': pistitle,\n 'pisupper': pisupper,\n 'pisnumeric': pisnumeric,\n 'pisplural': pisplural,\n #'punder': punder,\n 'pislower': pislower,\n 'pespecial': pespecial,\n }" ]
[ "0.6085675", "0.60812867", "0.5822387", "0.5812007", "0.56937695", "0.56634116", "0.5626631", "0.56246907", "0.56246907", "0.56246907", "0.56246907", "0.56246907", "0.5621488", "0.5621488", "0.5549839", "0.55467236", "0.5458848", "0.5457804", "0.5457804", "0.54571193", "0.54569125", "0.5455175", "0.5446671", "0.53993636", "0.5370489", "0.5370489", "0.53680474", "0.53564864", "0.5345982", "0.53434145" ]
0.7161606
0
Add a Pseudocode Operation to the currently active buffer.
def AddPseudoCode(self, pcode): self.buffers[self.buffergrade].append(pcode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_code(self, code):\n self.code += code", "def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1 + n2}')\n self._cursor += 4\n return", "def add_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 + n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} + {n2} = {n1 + n2}')\n return", "def _add_to_buffer(self, data):\n for byte in data:\n self.next_fn(byte) \n self._parse_cmds()", "def addOp(self, op):\n self.operations << op", "def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1", "def append(self, char):\n self.sequence += char", "def addop(self, mask, target, args):\n\n self.set_user(args)\n yield \"Added operator.\"", "def append(self,instr):\n self.instructions.append(instr)", "def add(text):\n orig = dispb[\"text\"]\n new = orig + text\n ops = [\"+\",\"-\",\"*\",\"/\"]\n # conditions\n # length 21\n if len(new) > 21:\n dispb[\"text\"] = orig\n return 0\n \n # one calc at a time\n if len(orig) > 0:\n if (orig[-1] in ops) & (text in ops):\n dispb[\"text\"] = orig\n return 0\n\n dispb[\"text\"] = new\n return 0", "def start(self, pos = 0, lib_call = False) -> None:\n from utils.instructions import instructions\n self.scope_push()\n self.pos = pos\n while self.pos < len(self.code.instructions):\n self.pos += 1 + instructions[self.code.get_instruction(self.pos)].run(self, self.code, self.pos + 1)", "def add_char(self, char):\n if self.pos >= self.line_length():\n self.buffer.append_char(char, self.line)\n else:\n self.buffer.insert_char(char, self.line, self.pos)\n \n self.pos += 1\n self.has_changes = True", "def add_code(self, s):\n self.code += ' ' * self.indent + s + '\\n'", "def make_codes(self):\n\t\troot = heapq.heappop(self.heap)#obtenemos la raiz del arbol\n\t\tcurrent_code = \"\"\n\t\tself.make_codes_helper(root, current_code)", "def _insert_op(self, op):", "def iadd(state: State) -> State:\n cell = state.array[state.index] or 0\n return state._replace(acc=state.acc + cell)", "def add_op(self, expr):\n from cascada.bitvector import operation\n assert isinstance(expr, operation.Operation)\n assert not self.contain_op(expr)\n name = \"{}{}\".format(self.id_prefix, self.counter)\n self.counter += 1\n identifier = core.Variable(name, expr.width)\n self.table[identifier] = expr\n\n return identifier", "def AddOperation(self, op):\n self._operations.append(op)", "def add_op(self, op):\n self._operations.append(op)", "def asm(self, text):\n self.text.append(text)", "def open_pseudocode(self, *args):\n return _ida_hexrays.Hexrays_Hooks_open_pseudocode(self, *args)", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def advance(self):\n self.pos += 1\n if self.pos > len(self.syntax) - 1:\n self.current_char = None\n else:\n self.current_char = self.syntax[self.pos]", "def add(\n self,\n state: np.ndarray,\n action: np.ndarray,\n reward: np.float64,\n next_state: np.ndarray,\n done: float,\n ):\n data = (state, action, reward, next_state, done)\n\n if len(self.buffer) == self.buffer_size:\n self.buffer[self.idx] = data\n self.idx = 
(self.idx + 1) % self.buffer_size\n else:\n self.buffer.append(data)", "def render(self, code_proxy):\n code_proxy.bytecode.add(opcode.opmap[self.opname])", "def operate_cipher(self):", "def process(opcode):\n opcode.process()", "def operator_c(buf, input_line, pos1, pos2, overwrite=False):\n operator_d(buf, input_line, pos1, pos2, overwrite)\n set_mode(\"INSERT\")", "def advance(self):\n if self.instr is not None:\n self.instr.opcode = self.instr.binary[25:]\n if opcode_decode[self.instr.opcode] == 'R-type':\n self.decode_rtype()\n elif opcode_decode[self.instr.opcode] == 'I-type' or opcode_decode[self.instr.opcode] == 'Load':\n self.decode_itype()\n else:\n raise SyntaxError(\"Invalid opcode\")", "def incr_operand(self):\n pass" ]
[ "0.5870351", "0.5767791", "0.57653064", "0.5667283", "0.5466743", "0.54478973", "0.52387804", "0.52152646", "0.5208209", "0.5186463", "0.51665425", "0.51497793", "0.51468796", "0.5108849", "0.5103262", "0.50743014", "0.5067011", "0.50647295", "0.5024439", "0.50223887", "0.49839905", "0.49773774", "0.49730378", "0.49691683", "0.4950089", "0.4927656", "0.49242634", "0.49180722", "0.49170488", "0.48808628" ]
0.7316713
0
Increment the BufferGrade and initialize a new empty buffer.
def IndentBuffer(self): self.buffergrade += 1 self.buffers[self.buffergrade] = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def _initialize_buffers(self) -> None:", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def _refresh_buffers(self) -> None:", "def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t", "def initBuffer(self, env):\n cnt = 0\n while len(self.memory) < self.memory.capacity:\n cnt += 1\n print(\"\\rWarmup Buffer [{:d}]\".format(cnt), end=\"\")\n s = env.reset()\n actionIdx, actionIdxTuple = self.select_action(s, explore=True)\n s_, r, done, info = env.step(actionIdxTuple)\n self.store_transition(s, actionIdx, r, s_, info)\n print(\"\\n => Warmup Buffer Ends\")", "def reset(self):\n self._buffer.fill(0)", "def reset(self):\r\n self.buffer = np.zeros(self.nBins)\r\n self.counter = 0", "def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)", "def __init__(self, capacity, alpha, beta_i, beta_f, beta_anneal,\n weight_offset):\n self.weight_offset = weight_offset\n self.alpha = alpha\n\n assert beta_i < beta_f, \"Beta update assumes beta_i < beta_f\"\n self.beta = beta_i\n self.beta_f = beta_f\n self.beta_update = (beta_f - beta_i) / beta_anneal\n\n self.experiences = WeightedRingBuf(capacity)\n # ids of experiences that haven't been used for training yet.\n self.unplayed_experiences = deque(maxlen=capacity)", "def updateGACount(self):\n self.ga_count += 1", "def _fill_buffer(self, in_data, *args, **kwargs):\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def _fill_buffer(self, in_data, *args, **kwargs):\r\n self._buff.put(in_data)\r\n return None, pyaudio.paContinue", "def bufferCnt():\n if(reset == 1):\n bufferCounter.next = 0\n else:\n if(decimationRatio > 0):\n if(bufferCounter == (decimationRatio-1)):\n bufferCounter.next = 0\n else:\n bufferCounter.next = bufferCounter + 1", "def create_buffers(self):", "def next_buffer(self):\n selected_window = self.selected_window()\n selected_window.set_buffer(self._find_next_buffer(selected_window.buffer()))", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):\n buff.put(in_data)\n return None, pyaudio.paContinue", "def __init__(self, buffer_size, random_seed=None):\n self.buffer_size = buffer_size\n self.count = 0\n self.oldPos = 0\n self.currPos = 0\n self.full = False\n self.buffer = []\n self.featCount = 3\n random.seed(random_seed)\n self.useSubBuffer = False", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def add(self, 
grad_shard_id):\n self.timeline.start(\"add\")\n self.timeline.start(\"get_buffers\")\n oid = ray.pyarrow.plasma.ObjectID(grad_shard_id)\n grads = ray.worker.global_worker.plasma_client.get(oid)\n self.timeline.end(\"get_buffers\")\n self.accumulated += grads\n self.acc_counter += 1\n self.timeline.end(\"add\")", "def _init_buffers(self, v, n, _):\n super()._init_buffers(v, n, _)\n\n self.vbos.append(gl.glGenBuffers(1))\n\n # init VBO 2 - dynamic color data\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n loc = self.get_attribute_location(\"carried\")\n gl.glEnableVertexAttribArray(loc)\n gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))\n gl.glVertexAttribDivisor(loc, 1)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)", "def reset_output_buffer(self):\n self._main_buffer = BufferUtils.create_buffer()\n self._secondary_buffer = BufferUtils.create_buffer()", "def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))", "def DeIndentBuffer(self):\n if self.buffergrade == 0:\n raise Exception(\"You can't deindent more.\")\n self.buffergrade -= 1\n tmp = self.buffers[self.buffergrade + 1]\n del self.buffers[self.buffergrade + 1]\n return tmp" ]
[ "0.58674264", "0.5741333", "0.5509345", "0.54187745", "0.537456", "0.53408396", "0.5315984", "0.5291879", "0.52720207", "0.5246771", "0.5140495", "0.51366794", "0.51127", "0.5102545", "0.5096625", "0.5072083", "0.5029141", "0.5018385", "0.4994677", "0.4994677", "0.4994677", "0.49749547", "0.49570173", "0.49432805", "0.49428412", "0.4932598", "0.49317503", "0.49314246", "0.49311882", "0.49127075" ]
0.730155
0
Decrement the BufferGrade and pop out the previously active buffer.
def DeIndentBuffer(self): if self.buffergrade == 0: raise Exception("You can't deindent more.") self.buffergrade -= 1 tmp = self.buffers[self.buffergrade + 1] del self.buffers[self.buffergrade + 1] return tmp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrease(self):\n self.score -= self.score", "def decrement(self):\n self.data[self.pointer] -= 1\n self.data[self.pointer] %= 256", "def IndentBuffer(self):\n self.buffergrade += 1\n self.buffers[self.buffergrade] = []", "def RemoveGrade(self, grade):\n if not self.__data['g'].HasKey(grade.ID):\n raise NonExistentItemIDError(\"Grade does not exist.\")\n self.__data['g'].RemoveItems([grade.ID])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()", "def back(self):\n self.position -= 1", "def goBackInTime(self):\n if (len(self.history) == 0):\n return\n notBusy, notVisible = self.history.pop()\n for cell in notVisible:\n for item in cell[0] + cell[1]:\n self.canvas.delete(item)\n for x, y in notBusy:\n self.gridBusy[x][y] = 0\n self.onBoard -= 1\n self.refreshScore()", "def popBuffer(self):\n return self.ecg_buffer.get()", "def decrement_frame(self, increment=1, freeze_cursor=False):\n if self.current_frame > 0 or self.selected_index < self.frame_size - increment:\n self.current_frame -= increment\n\n process_result = self.__process_selected_change(True, freeze_cursor)\n if process_result:\n self.current_frame += increment", "def pop(self):\n while self.number > self.maxlength:\n self.buffer.popleft()\n self.number -= 1", "def release(self):\n if self.points > 0 and self.waiting:\n self.points = self.points - 1\n d = self.waiting.pop(0)\n d.callback(self)", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def decrement_max_gain(self):\r\n while self.max_gain > -self.pmax:\r\n self.max_gain -= 1\r\n if len(self[self.max_gain]) != 0:\r\n break", "def dec_greediness(self):\n self._greediness -= 1", "def decrement(self, x, y):\n self.field.add(x, y, -1)\n self.depth += 1", "def dec(self, by=1):\n assert by > 0\n self.counter -= by\n if self.counter <= 0:\n # Don't leave self.counter < 0, that will screw things up in\n # future calls.\n self.counter = 0\n # Transitioning from nonzero to 0 means wait() need no longer wait.\n self.event.send()", "def bass_decrease():\n request_command(tv_command=TVCommand.bassDecrease)", "def cb_minus(event):\n delta_alpha = pm_rate\n # Decrease Alpha \n sAlpha0.set_val( np.clip(sAlpha0.val - delta_alpha, alpha_min[0], alpha_max[0]) )\n sAlpha1.set_val( np.clip(sAlpha1.val - delta_alpha, alpha_min[1], alpha_max[1]) )\n sAlpha2.set_val( np.clip(sAlpha2.val - delta_alpha, alpha_min[2], alpha_max[2]) )\n print(\"---\")", "def dec( self ):\n if self.count > 0: self.count -= 1", "def backspace(self) -> None:\n if self.index:\n self.buffer.delete(self.index - 1)\n self.index -= 1", "def pop_focus(self):\n self._focus.pop()", "def decrement(self, stats, sample_rate=1):\n self.update_stats(stats, -1, sample_rate=sample_rate)", "def decrement_misses_remaining(self):\n self.misses_remaining -=1", "def dec(self):\n self._numBooksOut -= 1", "def cancel(self):\n self.blackened = self.blackened_history[-1]\n self.blackened_history.pop()\n if self.victory:\n self.victory = False\n self.blackened_history_size -= 1", "def remove_cell(self, cell: Cell):\r\n assert isinstance(cell, Cell)\r\n cell.bucket().remove(cell)\r\n if self[self.max_gain] == cell.bucket() and len(cell.bucket()) == 0:\r\n self.decrement_max_gain()\r\n cell.bucket_num = None", "def pop(self):\r\n return self.buff.pop(-1)", "def pop_current_line(self):\n self.current_line.pop()", "def pop_from_deque(self):", "def bkg_subtract(self, analyte, bkg, ind=None):\n\n if 'bkgsub' not in self.data.keys():\n self.data['bkgsub'] = 
{}\n\n self.data['bkgsub'][analyte] = self.focus[analyte] - bkg\n\n if ind is not None:\n self.data['bkgsub'][analyte][ind] = np.nan\n\n return", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num" ]
[ "0.62998307", "0.6187696", "0.6057455", "0.5972686", "0.58809793", "0.578995", "0.5724787", "0.57013", "0.5689314", "0.5660951", "0.5610543", "0.5608327", "0.55659264", "0.55524766", "0.5540197", "0.55076224", "0.5456718", "0.5449712", "0.5409593", "0.54038453", "0.53746724", "0.5361388", "0.5355655", "0.53549105", "0.5351609", "0.52295595", "0.52095085", "0.52076095", "0.5201225", "0.5176964" ]
0.71859
0
Get a reference to the currently activated buffer.
def RefBuffer(self): return self.buffers[self.buffergrade]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_buffer(self):\n return self.layout.current_buffer", "def getBuffer(self):\n return self.buffer", "def buffer_backend(cls, *args, **kwargs):\n return cls._buffer_context", "def current_buffer_app(self):\n return self.session.current_buffer", "def buffer(self):\n return self._buffer", "def reward_buffer(self):\n return self._reward_buffer", "def __getattr__(self, name):\n if name == 'buffer':\n return self.__buffer\n raise AttributeError", "def current_buffer(self, no_minibuffer=False):\n return \\\n self._mini_buffer \\\n if self.mini_buffer_state and not no_minibuffer else \\\n self.selected_window().buffer()", "def get_buffername(self):\n return self.__buffername", "def GetMainBuffer(self):\n tmp = self.buffers[0]\n self.buffers[0] = []\n return tmp", "def _get_buffer(self):\n return memoryview(self._write_buffer)[: self._buffer_seek]", "def _determine_context_buffer(self,s):\n try: return self.buffers[inspect.stack()[2][3]]\n except KeyError: return self.buffers['default']", "def getPixelsBuffer(self):\n\t\treturn self.leds", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def grab_frame(self):\n with self._buflock:\n if self._buffer is None:\n return None\n buf = self._buffer.tostring()\n return buf", "def buffer_get():\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n\n return buffer", "def _get_activate(self):\n return self.__activate", "def create_buffers(self):", "def _get_input_buffer(self):\n return ConsoleWidget._get_input_buffer(self)", "def getBufferedData(self):\n if not self.ringBuffer: # first time when buffer is empty\n return np.zeros((1, self.windowLength, self.sensorChannels)) \n return np.array(self.ringBuffer)", "def get_buffer(self, i):\n\n if i not in range(1, self.NUM_BUFFERS + 1):\n raise IndexError(\"Error: Could not get buffer %d. Must be \"\n \"between 1 and 9\" % i)\n return self.m_param[i]", "def activate(self):\n # Send command\n self._glir.command('FRAMEBUFFER', self._id, True)\n # Associate canvas now\n canvas = get_current_canvas()\n if canvas is not None:\n canvas.context.glir.associate(self.glir)", "def _is_buffered(self):\n return self.buffered or type(self)._buffer_context", "def popBuffer(self):\n return self.ecg_buffer.get()", "def GetBitmapFocus(self):\n\n return self.bmpFocus", "def buffer(self) -> np.ndarray:\n return np.array(self._image_data, copy=False)", "def get_focus(self):\n return self._get_at(self._current)", "def get_signalBufferHostPointer(self):\n return self.GPU_bufSignalTime_cpu_handle" ]
[ "0.70962286", "0.69201726", "0.67237633", "0.6671908", "0.6613722", "0.62484485", "0.61964005", "0.61714095", "0.61454296", "0.61443", "0.6099056", "0.60289884", "0.5986562", "0.5955822", "0.5955822", "0.5955822", "0.59430313", "0.58911574", "0.5882561", "0.5830065", "0.58167636", "0.57994443", "0.5758549", "0.5744728", "0.5742756", "0.57116264", "0.5705924", "0.5680586", "0.5651987", "0.56310374" ]
0.7871913
0
Track a code indentation index for successive utilization.
def TrackIfIndex(self, index): self.indentindex.append(index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increase_code_indent(self) -> None:\n self._parent_node.increase_code_indent()", "def _increaseindentation(self):\n self._indentlist.append(self._curindent)\n if not self._equalsigns[-1]:\n self._curindent = self._curindent + self._indent", "def getIndentationLevel(self, code_line):\n print(\"the code line : \", code_line)\n return len(code_line) - len(code_line.lstrip(\" \"))", "def indentation(self, pad, linepad, lang='c++', *args):\n pad.edit_separator()\n if lang == 'c++':\n curr = pad.get('1.0', GUI.INSERT)\n till_end = pad.get('1.0', GUI.END)\n indent = max(curr.count(\"{\") - curr.count('}'), 0)\n diff = till_end.count('{') - till_end.count('}')\n pad.insert(GUI.INSERT, ' ' * indent)\n cordinate = map(int, pad.index(GUI.INSERT).split('.'))\n if diff > 0:\n pad.insert(GUI.INSERT, '\\n' + ' ' * 4 * max(indent - 1, 0) + '}')\n pad.mark_set(GUI.INSERT, '%d.%d' % (cordinate[0], cordinate[1]))\n if lang == 'py':\n coordinates1 = map(int, pad.index(GUI.INSERT).split('.'))\n if coordinates1[0] != 1:\n coordinates = str(coordinates1[0] - 1) + '.0'\n r = pad.get(coordinates, coordinates + 'lineend')\n letters = list(str(r))\n cnt = 0\n # find indentation level\n for i in letters:\n if i == ' ':\n cnt += 1\n else:\n break\n cnt = cnt / 4\n # check if indentation increasing keywords present\n f = 0\n for i in keywords['py']['loops']:\n if i in r:\n f = 1\n break\n\n if f:\n pad.insert(GUI.INSERT, (' ' * (cnt + 1) * 4))\n else:\n pad.insert(GUI.INSERT, (' ' * (cnt) * 4))\n self.linenumber(pad, linepad)", "def indent(self):\n self.indent_level += self.INDENT_STEP", "def indent(self):\n self.indent_level += self.INDENT_STEP", "def indent_code(self, code):\n\n if isinstance(code, string_types):\n code_lines = self.indent_code(code.splitlines(True))\n return ''.join(code_lines)\n\n tab = \" \"\n inc_token = ('{', '(', '{\\n', '(\\n')\n dec_token = ('}', ')')\n\n code = [ line.lstrip(' \\t') for line in code ]\n\n increase = [ int(any(map(line.endswith, inc_token))) for line in code ]\n decrease = [ int(any(map(line.startswith, dec_token)))\n for line in code ]\n\n pretty = []\n level = 0\n for n, line in enumerate(code):\n if line == '' or line == '\\n':\n pretty.append(line)\n continue\n level -= decrease[n]\n pretty.append(\"%s%s\" % (tab*level, line))\n level += increase[n]\n return pretty", "def determine_indentation(self):\n # Ensuring NEWLINE tokens are actually specified as such\n if self.current.tokenum != NEWLINE and self.current.value == \"\\n\":\n self.current.tokenum = NEWLINE\n\n # I want to change dedents into indents, because they seem to screw nesting up\n if self.current.tokenum == DEDENT:\n self.current.tokenum, self.current.value = self.convert_dedent()\n\n if (\n self.after_space\n and not self.is_space\n and (not self.in_container or self.just_started_container)\n ):\n # Record current indentation level\n if not self.indent_amounts or self.current.scol > self.indent_amounts[-1]:\n self.indent_amounts.append(self.current.scol)\n\n # Adjust indent as necessary\n while self.adjust_indent_at:\n self.result[self.adjust_indent_at.pop()] = (\n INDENT,\n self.indent_type * (self.current.scol - self.groups.level),\n )\n\n # Roll back groups as necessary\n if not self.is_space and not self.in_container:\n while not self.groups.root and self.groups.level >= self.current.scol:\n self.finish_hanging()\n self.groups = self.groups.parent\n\n # Reset indentation to deal with nesting\n if self.current.tokenum == INDENT and not self.groups.root:\n self.current.value = 
self.current.value[self.groups.level :]", "def addIndentationLevel(self, original_line, trace_call):\n # apply same level of indentation\n number_spaces = self.getIndentationLevel(original_line)\n print(\"step 3 spaces : \", number_spaces)\n \n # copy the original trace_call in the new_trace_call using\n # the correct number of spaces\n new_trace_call = []\n index_new_trace_call = 0\n for trace_line in trace_call:\n # calculate new size of the trace_line\n added_space_length = len(trace_line) + number_spaces\n # append spaces at the beginning of the line\n new_trace_call.append(trace_line.rjust(added_space_length)) \n index_new_trace_call = index_new_trace_call + 1\n return new_trace_call", "def indent(self, n):\n self._ind = max(0, self._ind + n)", "def enter(self):\n self.indent += 1", "def addIndents(self, prevLevel=0):\n for num in range(len(self)):\n nextLevel = 0\n if num + 1 < len(self):\n nextLevel = self[num + 1].level\n prevLevel = self[num].addIndents(prevLevel, nextLevel)", "def linenum(self):\n return self.source_frame_stack.linenum()", "def get_function_indent(line: str) -> int:\n first_function_entrance = line.index('def')\n indents = line[:first_function_entrance]\n indents_space_count = len(indents)\n return indents_space_count", "def addIndents(self, prevLevel, nextLevel):\n for num in range(self.level - prevLevel):\n self.textLines[0] = u'<div>%s' % self.textLines[0]\n for num in range(self.level - nextLevel):\n self.textLines[-1] = u'%s</div>' % self.textLines[-1]\n return self.level", "def __enter__():\n IndentedLogger._indent_level += 1\n return IndentedLogger", "def indentation(self, indent: str) -> None:\n self._indent = indent\n self._update()", "def indent_level(self):\n return len(self._tagstack) - 1", "def menu_indentation(self, event=None):\n self.parentPanel.indentation_guides(event)", "def processindentation( lexer, blanks ):\r\n indentsize = blanks and len( blanks ) or 0\r\n \r\n indentlevel = len(lexer.levels)\r\n if ( indentsize > lexer.levels[-1] ):\r\n lexer.levels.append( indentsize )\r\n lexer.pendingtokens.append( create_indent( indentlevel ) )\r\n else:\r\n while ( indentsize < lexer.levels[-1] ):\r\n lexer.levels.pop()\r\n lexer.pendingtokens.append( create_dedent( indentlevel ) )", "def highlight_source(linenumber, index, lines, offset=None):\n # The following if statements are left-over diagnostic\n # from the hack to integrate into Idle.\n # they are harmless tests which could potentially be useful.\n if lines is None:\n return \"\", \"\"\n if index is None:\n print(\"problem in highlight_source(): index is None\")\n index = 0\n\n # The weird index arithmetic below is based on the information returned\n # by Python's inspect.getinnerframes()\n\n new_lines = []\n problem_line = \"\"\n nb_digits = len(str(linenumber + index))\n no_mark = \" {:%d}: \" % nb_digits\n with_mark = \" -->{:%d}: \" % nb_digits\n if offset is not None:\n offset_mark = \" \" * (8 + nb_digits + offset) + \"^\"\n i = linenumber - index\n\n for line in lines:\n if i == linenumber:\n num = with_mark.format(i)\n problem_line = line\n new_lines.append(num + line.rstrip())\n if offset is not None:\n new_lines.append(offset_mark)\n break\n else:\n num = no_mark.format(i)\n new_lines.append(num + line.rstrip())\n i += 1\n return \"\\n\".join(new_lines), problem_line", "def test_reset_limit_on_indent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = -1\n self.assertRaises(ValueError, 
indenter.indent)\n indenter.indentation = 0\n indenter.indent()\n indenter.indentation = +1\n indenter.indent()\n indenter.indentation = +2\n indenter.indent()", "def indent(self):\n self.x_pos += 10", "def insert_indent(event):\n env = XSH.env\n event.cli.current_buffer.insert_text(env.get(\"INDENT\"))", "def addIndent( self, increment=0 ):\n self.context.append( self.context[-1] )\n self.log_indent.debug( \"addIndent {!s}: {!r}\".format(self.lastIndent, self.context) )", "def indent(self, increment=1):\n # increase the indentation level\n self._level += increment\n # and adjust the margin filler\n self.leader = self._indenter * self._level\n # all done\n return self", "def AutoIndent(self):\n cpos = self.GetCurrentPos()\n\n # Check if a special purpose indenter has been registered\n if self._code['indenter'] is not None:\n self.BeginUndoAction()\n self._code['indenter'](self, cpos, self.GetIndentChar())\n self.EndUndoAction()\n else:\n # Default Indenter\n line = self.GetCurrentLine()\n text = self.GetTextRange(self.PositionFromLine(line), cpos)\n if text.strip() == u'':\n self.AddText(self.GetEOLChar() + text)\n self.EnsureCaretVisible()\n return\n indent = self.GetLineIndentation(line)\n i_space = indent / self.GetTabWidth()\n ndent = self.GetEOLChar() + self.GetIndentChar() * i_space\n txt = ndent + ((indent - (self.GetTabWidth() * i_space)) * u' ')\n self.AddText(txt)\n\n self.EnsureCaretVisible()", "def __editIndent(self):\n self.activeWindow().indentLineOrSelection()", "def delta_indent(self, delta=1):\n self.manual_push += delta", "def fix_indents(self):\n indent_map = list(map(self._get_indent, self.config_lines_str))\n fixed_indent_map = []\n for i in range(len(indent_map)):\n if i == 0:\n ### Assume the first line is not indented\n fixed_indent_map.append(0)\n continue\n if indent_map[i] == 0:\n fixed_indent_map.append(0)\n continue\n # If indent is same preceding line, copy its indent\n if indent_map[i] == indent_map[i-1]:\n fixed_indent_map.append(fixed_indent_map[-1])\n # If indent is higher that preceding line, increase by one\n elif indent_map[i] > indent_map[i-1]:\n fixed_indent_map.append(fixed_indent_map[-1]+1)\n # If indent is lower that preceding l\n elif indent_map[i] < indent_map[i-1]:\n fixed_indent_map.append(fixed_indent_map[-1]-1)\n for i, val in enumerate(fixed_indent_map):\n self.config_lines_str[i] = \" \"*val + self.config_lines_str[i].strip()\n #print(val, \"'{}'\".format(self.config_lines_str[i]))" ]
[ "0.61810654", "0.60863245", "0.5817759", "0.5709027", "0.5614261", "0.5614261", "0.5520226", "0.55129415", "0.54851073", "0.54282254", "0.53754544", "0.53624535", "0.532262", "0.52931166", "0.5262821", "0.52572817", "0.5244937", "0.52138895", "0.5192445", "0.5161051", "0.51534206", "0.5143496", "0.5142681", "0.5134809", "0.51189923", "0.5105761", "0.5100565", "0.5091507", "0.50707287", "0.50539064" ]
0.70099694
0
Pop (get and remove) the last code indentation index tracked.
def PopIfIndex(self): return self.indentindex.pop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decreaseindentation(self):\n self._curindent = self._indentlist.pop()", "def pop(self) -> int:\n return self.stack.pop()", "def pop(self) -> int:\n return self._stack.pop()", "def pop(self) -> int:\n for i in range(len(self.stack) - 1):\n self.stack.append(self.stack.pop())\n return self.stack.pop()", "def pop_current_line(self):\n self.current_line.pop()", "def indentation_level(self):\n return self._indentation_levels[-1]", "def pop(self) -> int:\n self._maybe_prepare_output_stack()\n return self._output_stack.pop()", "def pop(self) -> int:\n tmp = self.stack[-1]\n for i in range(len(self.stack)-1):\n self.stack.append(self.stack.popleft())\n self.stack.popleft()\n return tmp", "def DeIndentBuffer(self):\n if self.buffergrade == 0:\n raise Exception(\"You can't deindent more.\")\n self.buffergrade -= 1\n tmp = self.buffers[self.buffergrade + 1]\n del self.buffers[self.buffergrade + 1]\n return tmp", "def dedent(self):\n self._indent_first_line.pop()\n return self._indentation_levels.pop()", "def pop(self):\n try:\n frame = self.stack.pop()\n return frame[0]\n except:\n pass", "def scope_pop(self) -> None:\n self.scope_stack.popleft()", "def pop(self) -> int:\n self.move()\n return self.outStack.pop()", "def pop_at(self, index):\n if len(self.stacks[index]) < 1:\n return\n popped = self.stacks[index].pop()\n if index == len(self.stacks)-1:\n return popped\n for i in range(index, len(self.stacks)-1):\n # append to last operation\n self.stacks[i].append(self.stacks[i+1].pop(0))\n if len(self.stacks[-1]) < 1:\n self.stacks.pop()\n return popped", "def pop(self):\n return self.stack.pop(0)", "def pop_last(self):\n self.pop_item(-1)", "def pop(self):\n if not self.isEmpty():\n self.top -= 1\n return self.stack.pop()\n else:\n raise Exception(\"Stack Underflow\")", "def pop(self, index=-1):\n if not self.stack:\n raise ReversePolishCalcError, \"Stack is empty\"\n try:\n del self.stack[index]\n except IndexError:\n errmsg = \"Cannot pop element '%s' from stack\" % index\n raise ReversePolishCalcError, errmsg\n return self.stack", "def pop(self):\n old = self.stack.pop()\n if self.stack:\n self.current = self.stack.pop()\n else:\n self.current = None\n return old", "def pop(self):\n return self.stack.pop()", "def pop(self) -> int:\r\n return self.items.pop(0)", "def pop(self) -> int:\n tmp = list()\n while self.stack:\n tmp.append(self.stack.pop())\n \n ret = tmp.pop()\n self.head = tmp[-1] if tmp else None\n while tmp:\n self.stack.append(tmp.pop())\n \n print(self.stack)\n return ret", "def GetIfIndex(self):\n return self.indentindex[-1]", "def pop(self):\n\n return self.stack.pop()", "def pop(self):\n return self.the_stack.pop()", "def pop(self):\n item = self.stack[-1]\n self.stack = self.stack[:-1]\n return item", "def pop(self):\n return self._stack.pop()", "def pop(self):\n stack = self.stack\n if len(stack)>1:\n stack.pop()\n self._setspaces()", "def rollback(self):\n if len(self.__stack) == 0:\n raise EmptyStackException()\n self.__current_pos = self.__stack[-1][0]\n self.line = self.__stack[-1][1]\n self.linePos = self.__stack[-1][2]\n self.__stack = self.__stack[:-1]", "def pop(self):\n if self.stack:\n return self.stack.pop()" ]
[ "0.687736", "0.662047", "0.65572536", "0.64603764", "0.6362599", "0.6348772", "0.63447845", "0.630998", "0.6291612", "0.6259269", "0.624546", "0.61962366", "0.6184571", "0.61161476", "0.611117", "0.60742784", "0.6037302", "0.60337037", "0.60090023", "0.59744895", "0.5953556", "0.5936488", "0.5899665", "0.58855796", "0.58819073", "0.587673", "0.5873068", "0.5872587", "0.5872319", "0.5849548" ]
0.802693
0
Initialization of protected Operation Object attribute for subclasses.
def __init__(self): self._OPERATION = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.operations = {}", "def _init(self):\n raise NotImplementedError", "def __init__(self):\r\n self.operation_map = {}", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def _init(self):\n pass", "def initialize(cls):", "def __init__ (self):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self):\n raise NotImplementedError()", "def __init__(self):\n self.sharedRef=self\n #raise NotImplementedError, \"This is abstract class. No instance allowed.\"\n # Despite Shared is abstract, its children would inherit the constructor.", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self, osi):\n self.osi = osi\n self._parameters = [self.op_type]\n self.to_process(osi)", "def __init__(self):\n self.inputs = []\n self.op = None\n self.const_attr = None\n self.name = \"\"" ]
[ "0.69042325", "0.6817324", "0.66968143", "0.65770996", "0.65770996", "0.6513839", "0.6513839", "0.6513839", "0.6513839", "0.64662194", "0.6441285", "0.642362", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.6404217", "0.63641566", "0.6345559", "0.6332557", "0.6332557", "0.6332557", "0.6332557", "0.6332557", "0.6329752" ]
0.7562495
0
Get the Operation Object generated by the command.
def getOp(self): return self._OPERATION
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_operation_obect(self, method):\n pass", "def get_operation(\n self,\n ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_operation\" not in self._stubs:\n self._stubs[\"get_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/GetOperation\",\n request_serializer=operations_pb2.GetOperationRequest.SerializeToString,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"get_operation\"]", "def get_operation(\n self,\n ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_operation\" not in self._stubs:\n self._stubs[\"get_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/GetOperation\",\n request_serializer=operations_pb2.GetOperationRequest.SerializeToString,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"get_operation\"]", "def get_operation(\n self,\n ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_operation\" not in self._stubs:\n self._stubs[\"get_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/GetOperation\",\n request_serializer=operations_pb2.GetOperationRequest.SerializeToString,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"get_operation\"]", "def operation(self, name):\n\n try:\n return self.operations[name]\n except KeyError:\n return self.operation_not_found(name)", "def operation(cls):\n return relationship.many_to_one(cls, 'operation')", "def operation(cls):\n return relationship.many_to_one(cls, 'operation')", "def GetOperation(\n self,\n request: google.longrunning.operations_pb2.GetOperationRequest,\n context: grpc.ServicerContext,\n ) -> google.longrunning.operations_pb2.Operation:", "def get_operagion(self):\n if self.OP_GID not in self._data_dict:\n return None\n return dao.get_operation_by_gid(self._data_dict.get(self.OP_GID, None))", "def current_operation(self):\n return self._current_operation", "def current_operation(self):\n return self._current_operation", "def getOperation(self):\n return _libsbml.FluxBound_getOperation(self)", "def cloudflare_waf_get_operation_command(client: Client, operation_id) -> CommandResults:\n response = client.cloudflare_waf_get_operation_request(operation_id)\n output = response['result']\n\n readable_output = 'The command was executed successfully'\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='CloudflareWAF.Operation',\n outputs_key_field='id',\n outputs=output,\n raw_response=output\n )", "def command(self):\n return self.package(\"SyntaxObjects\").Command", "def get_operations(self):\n return self.operations[:] # Returns a copy instead of actual attribute", "def offending_op(self):\r\n return type(self.r.owner.op)", "def get_operation_old(operation_name):\n op = 
operations_api.get_operation(operation_name)\n return op", "def get_operation_by_name(operation_name: str) -> Operation:\n client = vmwareengine_v1.VmwareEngineClient()\n request = GetOperationRequest()\n request.name = operation_name\n return client.get_operation(request)", "def op(self):\n return self.__op", "def op(self):\n return self.__op", "def get_command(self):\n req_type = type(self.req)\n\n if req_type == ureq.CreateEntryRequest:\n return commands.CreateCommand(self.req.results)\n elif req_type == ureq.ReadEntryRequest:\n return commands.ReadCommand(self.req.results)\n elif req_type == ureq.UpdateEntryRequest:\n return commands.UpdateCommand(self.req.results)\n elif req_type == ureq.DeleteEntryRequest:\n return commands.DeleteCommand(self.req.results)", "def op(self):\n\n return self._op", "def get_command(self, object_name, user_key = None):\n\t\treturn self.get_object('command',object_name, user_key = user_key)", "def deserialize(cls, payload):\n return operations_pb2.Operation.FromString(payload)", "def operation(self) -> str:\n return self._operation", "def get_operation(project_id: str, region: str, operation_id: str) -> Operation:\n return get_operation_by_name(\n f\"projects/{project_id}/locations/{region}/operations/{operation_id}\"\n )", "def operation_command(self, persist=False):\n pass", "def op(self):\n return self.getop(self.pc)", "def getCommand(self):\n return self.__cmd", "def get_command(self):\n return self.c_dict['COMMAND']" ]
[ "0.6874343", "0.67712283", "0.67712283", "0.67712283", "0.67545724", "0.6746998", "0.6746998", "0.66104776", "0.65664905", "0.6563017", "0.6563017", "0.6541429", "0.6439676", "0.64120996", "0.64114374", "0.640891", "0.64030427", "0.6372547", "0.6367869", "0.6367869", "0.6332308", "0.63110846", "0.62660116", "0.62420034", "0.62235296", "0.62012535", "0.61448616", "0.6118598", "0.6110245", "0.6091709" ]
0.71553755
0
Creates a temporary image for manipulation, and handles optional RGB conversion.
def _create_tmp_image(self, content): content.seek(0) image = Image.open(content) if self.force_rgb and image.mode not in ('L', 'RGB', 'RGBA'): image = image.convert('RGB') return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def temporary_unsupported_image(self):\n image = Image.new('RGB', (1, 1))\n tmp_file = tempfile.NamedTemporaryFile(suffix='.ppm')\n image.save(tmp_file, 'ppm')\n # important because after save(),\n # the fp is already at the end of the file\n tmp_file.seek(0) # retrieves the created temp file\n return tmp_file", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def temporary_image(self):\n\n image = Image.new('RGB', (1, 1))\n tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')\n image.save(tmp_file, 'jpeg')\n # important because after save(),\n # the fp is already at the end of the file\n tmp_file.seek(0) # retrieves the created temp file\n return tmp_file", "def test_write_rgb(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 3)\n img_save_val = (255 * img).astype(np.uint8)\n writer_obj = PILWriter(output_dtype=np.uint8)\n writer_obj.set_data_array(img, channel_dim=-1)\n writer_obj.write(image_name)\n out = np.asarray(Image.open(image_name))\n out = np.moveaxis(out, 0, 1)\n np.testing.assert_allclose(out, img_save_val)", "def _prepare_image(image, img_lib_name, img_mod, img_save_path):\n # Pillow\n if img_lib_name == \"pil\":\n pil_image = image.copy()\n\n try:\n chan = pil_image.split()\n\n if len(chan) == 4:\n pil_image = img_mod.merge(\"RGB\", (chan[0], chan[1], chan[2]))\n\n try:\n pil_image.save(img_save_path, \"BMP\")\n except IOError as e:\n _warn(\n \"_prepare_image: (Pillow) Could not save the image to '{0}'. \"\n \"I/O Error ({1}): {2}.\".format(img_save_path, e.errno, e.strerror)\n )\n except Exception as e:\n _warn(\n \"_prepare_image: (Pillow) Unable to split and convert \"\n \"the image to RGB. Error: {0}.\".format(e)\n )\n finally:\n del pil_image\n\n # wxPython\n elif img_lib_name == \"wx\":\n wx_image = image.Copy()\n\n try:\n # No idea if 'ClearAlpha' can raise an exception\n if wx_image.HasAlpha():\n wx_image.ClearAlpha()\n\n try:\n wx_image.SaveFile(img_save_path, img_mod.BITMAP_TYPE_BMP)\n except IOError as e:\n _warn(\n \"_prepare_image: (wxPython) Could not save the image to '{0}'. \"\n \"I/O Error({1}): {2}.\".format(img_save_path, e.errno, e.strerror)\n )\n except Exception as e:\n _warn(\n \"_prepare_image: (wxPython) Unable to remove the alpha channel \"\n \"from the image. Error: {0}.\".format(e)\n )\n finally:\n del wx_image\n\n # PyQt/PySide\n elif img_lib_name == \"qt\":\n qt_image = img_mod(image)\n\n try:\n if qt_image.hasAlphaChannel():\n qt_image = qt_image.convertToFormat(img_mod.Format_RGB32)\n\n try:\n # Save the image with max quality\n qt_image.save(img_save_path, \"BMP\", 100)\n except Exception as e:\n _warn(\n \"_prepare_image: (PyQt/PySide) Could not save the image to \"\n \"'{0}'. 
Error: {1}.\".format(img_save_path, e)\n )\n except Exception as e:\n _warn(\n \"_prepare_image: (PyQt/PySide) Unable to convert the image to RGB.\"\n \"Error: {0}.\".format(e)\n )\n finally:\n del qt_image\n\n # OpenCV\n elif img_lib_name == \"cv\":\n cv_image = image.copy()\n\n # OpenCV 'imwrite' require a valid file extension\n img_save_path_bmp = \"{0}.bmp\".format(img_save_path)\n\n try:\n if len(cv_image.shape) > 2 and cv_image.shape[2] == 4:\n rt, th = img_mod.threshold(\n cv_image[:, :, 3], 254, 255, img_mod.THRESH_BINARY\n )\n cv_image = img_mod.bitwise_not(\n img_mod.bitwise_not(cv_image[:, :, :3], mask=th)\n )\n\n if img_mod.imwrite(img_save_path_bmp, cv_image):\n try:\n os.rename(img_save_path_bmp, img_save_path)\n except OSError as e:\n _warn(\n \"_prepare_image: (OpenCV) Could not rename the image \"\n \"file from '{0}' to '{1}'. Error: {2}.\".format(\n img_save_path_bmp, img_save_path, e\n )\n )\n else:\n _warn(\n \"_prepare_image: (OpenCV) Could not save the image to \"\n \"'{0}'.\".format(img_save_path_bmp)\n )\n except Exception as e:\n _warn(\n \"_prepare_image: (OpenCV) Unable to remove the alpha channel \"\n \"from the image. Error: {0}.\".format(e)\n )\n finally:\n del cv_image", "def _create_image(self):\n if hasattr(self, '_image') and self._image:\n return self._image\n try:\n command = \"tex2im -b transparent -t cyan\"\n subprocess.run([*command.split(), self._formula])\n except Exception as e:\n import traceback\n print(traceback.format_exc())\n return None\n # tex2im converts to out.png by default\n img = Image.open('out.png').convert('RGBA')\n # create a new rgba image to blend the latex with the alpha\n subprocess.run([\"rm\", \"out.png\"])\n return img", "def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def new_test_image():\n warnings.warn(DeprecationWarning(\n \"new_test_image() is deprecated in favour of the get_sample_image() \"\n \"context manager.\"), stacklevel=2)\n image_name = 'test-{}.png'.format(uuid.uuid4())\n image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))\n ImageDraw.Draw(image)\n byte_io = BytesIO()\n image.save(byte_io, 'png')\n byte_io.seek(0)\n return image_name, ContentFile(byte_io.read(), image_name)", "def create_colorful_test_image(self):\n ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)\n ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)\n ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)\n imr = np.concatenate((ch255, ch128, ch128), axis=2)\n img = np.concatenate((ch255, ch255, ch0), axis=2)\n imb = np.concatenate((ch255, ch0, ch255), axis=2)\n imw = np.concatenate((ch128, ch128, ch128), axis=2)\n imu = np.concatenate((imr, img), axis=1)\n imd = np.concatenate((imb, imw), axis=1)\n image = np.concatenate((imu, imd), axis=0)\n return image", "def imageprepare(argv):\n im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. 
Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def imageprepare(image_data):\n im = Image.open(io.BytesIO(image_data))\n im = remove_transparency(im)\n im = im.resize((28,28))\n width = float(im.size[0])\n height = float(im.size[1])\n new_image = Image.new('L', (28, 28), 255) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if nheight == 0: # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n new_image.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if nwidth == 0: # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n new_image.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # new_image = ImageOps.invert(new_image)\n\n tv = list(new_image.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva, new_image", "def imageprepare(argv):\r\n im = Image.open(argv).convert('L')\r\n width = float(im.size[0])\r\n height = float(im.size[1])\r\n newImage = Image.new('L', (28, 28), (255)) #creates white canvas of 28x28 pixels\r\n \r\n if width > height: #check which dimension is bigger\r\n #Width is bigger. Width becomes 20 pixels.\r\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\r\n if (nheight == 0): #rare case but minimum is 1 pixel\r\n nheight = 1 \r\n # resize and sharpen\r\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\r\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\r\n else:\r\n #Height is bigger. Heigth becomes 20 pixels. 
\r\n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\r\n if (nwidth == 0): #rare case but minimum is 1 pixel\r\n nwidth = 1\r\n # resize and sharpen\r\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\r\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\r\n \r\n #newImage.save(\"sample.png\")\r\n\r\n tv = list(newImage.getdata()) #get pixel values\r\n \r\n #normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [ (255-x)*1.0/255.0 for x in tv] \r\n #print(tva)\r\n return tva", "def create_temporary_image(image):\n\n temp = tempfile.NamedTemporaryFile()\n temp.write(image)\n temp.seek(0)\n\n return temp", "def imageprepare(self,argv):\r\n\t\tim = Image.open(argv).convert('L')\r\n\t\twidth = float(im.size[0])\r\n\t\theight = float(im.size[1])\r\n\t\tnewImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\r\n\r\n\t\tif width > height: # check which dimension is bigger\r\n\t\t\t# Width is bigger. Width becomes 20 pixels.\r\n\t\t\tnheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\r\n\t\t\tif nheight == 0: # rare case but minimum is 1 pixel\r\n\t\t\t\tnheight = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twtop = int(round(((28 - nheight) / 2), 0)) # caculate horizontal pozition\r\n\t\t\tnewImage.paste(img, (4, wtop)) # paste resized image on white canvas\r\n\t\telse:\r\n\t\t\t# Height is bigger. Heigth becomes 20 pixels.\r\n\t\t\tnwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\r\n\t\t\tif (nwidth == 0): # rare case but minimum is 1 pixel\r\n\t\t\t\tnwidth = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\r\n\t\t\tnewImage.paste(img, (wleft, 4)) # paste resized image on white canvas\r\n\r\n\t\t# newImage.save(\"sample.png\")\r\n\r\n\t\ttv = list(newImage.getdata()) # get pixel values\r\n\r\n\t\t# normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n\t\ttva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n\t\treturn tva", "def createRGBImage(self, filepath, width=None, outdir=None):\n print('[createRGBImage] filepath, outdir', filepath, outdir)\n\n index = 0\n rgb_data = []\n\n # Read binary file\n binary_data = self.getBinaryData(filepath)\n\n # Create R,G,B pixels\n while (index + 3) < len(binary_data):\n R = binary_data[index]\n G = binary_data[index+1]\n B = binary_data[index+2]\n index += 3\n rgb_data.append((R, G, B))\n\n size = self.get_size(len(rgb_data), width)\n image = Image.new('RGB', size)\n image.putdata(rgb_data)\n if width > 0:\n image = image.resize((width, width))\n if outdir is not None:\n self.save_file(filepath, image, size, 'RGB', width, outdir)\n # print('np.array(image)', np.array(image).shape)\n return np.array(image)/255.0", "def create_image(self):\n\n self._image = 255 * np.ones((self._height, self._width, 3), np.uint8)", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def recreate_image(im_as_var):\n recreated_im = im_as_var.data.numpy()[0]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n # recreated_im = np.round(recreated_im * 255)\n return recreated_im", "def test_write_lossless_rgb(tmp_path):\n\n temp_file = str(tmp_path / \"temp.webp\")\n # temp_file = \"temp.webp\"\n\n pil_image = hopper(\"RGBA\")\n\n mask = Image.new(\"RGBA\", (64, 64), (128, 128, 128, 128))\n # Add some partially transparent bits:\n pil_image.paste(mask, (0, 0), mask)\n\n pil_image.save(temp_file, lossless=True)\n\n with Image.open(temp_file) as image:\n image.load()\n\n assert image.mode == \"RGBA\"\n assert image.size == pil_image.size\n assert image.format == \"WEBP\"\n image.load()\n image.getdata()\n\n assert_image_equal(image, pil_image)", "def test_no_rgb_colorspace(self):\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"broken_colorspace.gif\")\n self._upload_photo(user, file_path)", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. \n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def recompute_final_image(self):\n if self._image is None:\n self.final_image = None\n return\n if isinstance(self._image, np.ndarray):\n if self._image.dtype == np.float and np.any(self._image > 1):\n im = self._image / 255\n else:\n im = self._image\n if self.cmap is not None:\n im = cm.get_cmap(self.cmap)(im)\n im = PIL.Image.fromarray((im * 255).astype(np.uint8))\n else: # we hope it is a PIL image or equivalent\n im = self._image\n im = im.convert('RGBA')\n if self.make_square:\n new_size = max(im.width, im.height)\n im = im.resize((new_size, new_size), PIL.Image.NEAREST)\n if self.resolution is not None:\n if self.resolution.size == 1:\n im = im.resize((self.resolution, self.resolution),\n PIL.Image.NEAREST)\n else:\n im = im.resize(self.resolution,\n PIL.Image.NEAREST)\n if self.circ_cut is not None:\n middle = np.array(im.size) / 2\n x = np.arange(im.size[0]) - middle[0] + 0.5\n x = x / np.max(np.abs(x))\n y = np.arange(im.size[1]) - middle[1] + 0.5\n y = y / np.max(np.abs(y))\n yy, xx = np.meshgrid(y, x)\n r = np.sqrt(xx ** 2 + yy ** 2)\n alpha = np.empty(r.shape)\n alpha[r > 1] = 0\n alpha[r <= self.circ_cut] = 1\n val = (r > self.circ_cut) & (r <= 1)\n alpha[val] = (\n 0.5 + 0.5 * np.cos(\n np.pi * (r[val] - self.circ_cut)\n / (1 - self.circ_cut)))\n alpha = alpha.T * np.array(im.getchannel('A'))\n alpha = PIL.Image.fromarray(np.uint8(alpha))\n im.putalpha(alpha)\n if self.col is not None:\n if self.border_type is None:\n pass\n elif self.border_type == 'alpha':\n bg_alpha = np.array(im.getchannel('A'))\n bg_alpha = bg_alpha > 0\n bg_alpha = PIL.Image.fromarray(255 * np.uint8(bg_alpha))\n bg = PIL.Image.new('RGBA', im.size, color=self.col)\n bg.putalpha(bg_alpha)\n im = PIL.Image.alpha_composite(bg, im)\n elif self.border_type == 'pad':\n im = PIL.ImageOps.expand(\n im,\n border=self.border_width,\n fill=self.col)\n elif self.border_type == 'conv':\n im = PIL.ImageOps.expand(\n im,\n border=self.border_width,\n 
fill=(0, 0, 0, 0))\n bg_alpha = im.getchannel('A')\n bg_alpha = bg_alpha.filter(PIL.ImageFilter.BoxBlur(\n self.border_width))\n bg_alpha = np.array(bg_alpha)\n bg_alpha = 255 * np.uint8(bg_alpha > 0)\n bg_alpha = PIL.Image.fromarray(bg_alpha)\n bg = PIL.Image.new('RGBA', im.size, color=self.col)\n bg.putalpha(bg_alpha)\n im = PIL.Image.alpha_composite(bg, im)\n self.final_image = im", "def blank_image(height, width):\n all_green = create_uniform_image(height, width, [0, 255, 0])\n return all_green", "def save_file(self, _filename):\n imgsize = (self.__resolution[0], self.__resolution[1])\n print imgsize\n\n if(self.__resolution[2] == 1):\n # grayscale -> convert to RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n # duplicate the channels\n ucharcol = (255 * col[0], 255 * col[0], 255 * col[0])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 3):\n # RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n ucharcol = (255 * col[0], 255 * col[1], 255 * col[2])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 4):\n # RGBA\n bg_white = (255, 255, 255, 255)\n img = Image.new(\"RGBA\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = 255 * self.get_color((x, y))\n ucharcol = (int(col[0]), int(col[1]), int(col[2]), int(col[3]))\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n else:\n raise StandardError, ('supported number of channels are 1, 3, and 4, only.')\n\n img.save(_filename)", "def __make_png(self, abspath_img_rgb):\n if not os.path.exists(DIR_PNG):\n os.makedirs(DIR_PNG)\n\n outsize = '{}%'.format(OUTSIZE_RGB)\n img_name_rgb = os.path.basename(abspath_img_rgb)\n suffix_extension_tif = Utils.get_suffix_tif(img_name_rgb)\n img_png = img_name_rgb.replace(suffix_extension_tif, '.png')\n path_img_png = os.path.join(DIR_PNG, img_png)\n\n command = \"gdal_translate -ot byte -of PNG -outsize {} {} \" \\\n \"-a_nodata 0 -q {} {}\".format(\n outsize, outsize, abspath_img_rgb, path_img_png\n )\n os.system(command)\n return os.path.join(DIR_PNG_TO_DB, img_png)", "def prepare_image(im):\n width, height = im.size\n if width > 256 or height > 256:\n factor = 256.0 / max(width, height)\n im = im.resize((int(factor * width), int(factor * height)),\n Image.BILINEAR)\n return im" ]
[ "0.67979735", "0.6447679", "0.6425846", "0.6331001", "0.6304116", "0.6232688", "0.6175286", "0.60538095", "0.6020988", "0.6015573", "0.59706867", "0.59613866", "0.5947222", "0.5861616", "0.5856823", "0.5853021", "0.5808916", "0.5805031", "0.57975703", "0.5789881", "0.57089484", "0.57057846", "0.57044923", "0.56997156", "0.5680265", "0.5663609", "0.56351167", "0.5620828", "0.56049573", "0.5595417" ]
0.70588124
0
Renders the image. Override this method when creating a custom renderer.
def _render(self, image): raise NotImplementedError('Override this method to render images!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self) -> None:\n if self.native_rendering:\n self._render()\n else:\n self.renderer.render_image(self.get_rendered_image())", "def _render(self):\n self.dirty = False\n self.image = self.font.render(self._text, self.aa, self.color_fg)\n self.rect = self.image.get_rect()", "def render(self):\n\n self.desert_image.render()\n self.cannon_image.render()\n self.play_button.render()\n self.escape_label.render()", "def display(self):\n display(self.image)", "def render_image(self,\n frame=None,\n factor=4,\n antialias=True,\n trim=False,\n transparent=False):\n if frame is not None:\n self.frame = frame\n params = dict(\n factor=factor,\n antialias=antialias,\n trim=trim,\n transparent=transparent)\n self._remote_call('_exportImage', target='Widget', kwargs=params)", "def render(self):\n if self.frame_pos:\n self.pos = [\n self.frame_pos[0] + self.position[0] - (self.size[0] / 2),\n self.frame_pos[1] + self.position[1] - (self.size[1] / 2),\n ]\n if self.variable_text:\n self.image = self.fontA.render(self.text, 1, self.color)", "def display(self, image):\n raise NotImplementedError()", "def __draw_image(self):\n if self.image_name is not None:\n img = mpimg.imread(self.image_name)\n extent = (0.5, self.xmax+0.5, -0.5, self.ymax-0.5)\n self.ax.imshow(img, extent=extent, origin='lower',\n alpha=self.image_alpha)", "def draw(self):\n if self.dirty or (self.image is None):\n self._render()\n self.screen.blit(self.image, self.rect)", "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def draw(self):\n self.write_image()\n self.update()", "def render(self):\n raise NotImplementedError(\"Renderer is an abstract class\")", "def generateImage(self):\n self.image = self.font.render(self.text, True, self.color)\n self.rect = self.image.get_rect()\n self.rect.center = self.xy", "def render(self):\n raise RenderNotImplemented('Render function is not implemented.')", "def draw(self):\n self.screen.blit(self.image, self.rect)", "def draw(self, surface):\r\n surface.blit(self.image, self.rect)", "def draw(self, surface):\n surface.blit(self.image, self.rect)", "def draw(self, surface):\n surface.blit(self.image, self.rect)", "def Draw(self):\n\t\tGameImage.Draw(self, self.coords)", "def render(self, output, image_size,\n face_colors=(\"#477984\", \"#EEAA4D\", \"#74C3F2\")):\n pass", "def render(self, screen):\n # print(\"Drawing scene {}\".format(self.imgname))\n screen.fill(self.color)", "def render(self):\n raise NotImplementedError", "def generate_image(self):\n pass", "def render(self, mode='human'):\n self.rendering_mode = mode\n\n if self.viewer is None:\n self.viewer = EnvViewer(self, offscreen=self.offscreen)\n\n self.enable_auto_render = not self.offscreen\n\n # If the frame has already been rendered, do nothing\n if self.should_update_rendering:\n self.viewer.display()\n\n if mode == 'rgb_array':\n image = self.viewer.get_image()\n if not self.viewer.offscreen:\n self.viewer.handle_events()\n self.viewer.handle_events()\n return image\n elif mode == 'human':\n if not self.viewer.offscreen:\n self.viewer.handle_events()\n self.should_update_rendering = False", "def process_image(self):\n pass", "def _render(self) -> None:\n pass", "def render(self):\n raise NotImplementedError()", "def render(self, **kwargs):\n return Draw.MolToImage(self._state, **kwargs)", "def render_image(self, rgbobj, dst_x, dst_y):\n self.logger.debug(\"redraw pixmap=%s\" % (self.pixmap))\n if self.pixmap is None:\n return\n self.logger.debug(\"drawing to pixmap\")\n\n # Prepare array for 
rendering\n arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8)\n (height, width) = arr.shape[:2]\n\n return self._render_offscreen(self.pixmap, arr, dst_x, dst_y,\n width, height)", "def draw(self):\n self.screen.blit(self.msg_image, self.msg_image_rect)" ]
[ "0.83629304", "0.7514363", "0.738157", "0.71521425", "0.7143713", "0.7124317", "0.7027804", "0.69824153", "0.697108", "0.69224477", "0.69172174", "0.69109416", "0.68451834", "0.676605", "0.6748223", "0.66155213", "0.660075", "0.660075", "0.65852135", "0.64857286", "0.64756054", "0.6467235", "0.64649606", "0.6449069", "0.639439", "0.6379378", "0.6378728", "0.6329882", "0.6296905", "0.62766397" ]
0.86215574
0
Normalize, pad and batch the input images.
def preprocess_image(self, batched_inputs): images = [x.to(self.device) for x in batched_inputs] norms = [self.normalizer(x) for x in images] size = (norms[0].shape[1],norms[0].shape[2]) images = ImageList.from_tensors(norms, self.backbone.size_divisibility) return images, size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def preprocess(imgs):\n imgs_p = np.ndarray((len(imgs), img_rows, img_cols), dtype=np.float32)\n for i in range(len(imgs)):\n imgs_p[i] = imgs[i].reshape((img_rows, img_cols))/255.\n\n imgs_p = imgs_p[..., np.newaxis]\n\n # Perform data normalization\n mean = imgs_p.mean()\n std = imgs_p.std()\n imgs_p -= mean\n imgs_p /= std\n\n return imgs_p", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].float().to(self.device) for x in batched_inputs]\n images = [self.normalizer(img) for img in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def process_batch(self, inputs):\n for key, ipt in inputs.items():\n inputs[key] = ipt.to(self.device)\n\n # we only feed the image with frame_id 0 through the depth encoder\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0])\n outputs = self.models[\"depth\"](features)\n\n outputs.update(self.predict_poses(inputs, features))\n\n self.generate_images_pred(inputs, outputs)\n losses = self.compute_losses(inputs, outputs)\n\n return outputs, losses", "def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')", "def _preprocessing(self, input_image):\n if self.resize:\n input_image = self._np_resize_image(input_image,\n self.input_size,\n dtype='int')\n image = self._np_transpose(input_image)\n image = self._np_normalize(image)\n image = self._np_flip_n_cat(image)\n return image", "def __call__(self, in_data):\n # There are five data augmentation steps\n # 1. Color augmentation\n # 2. Random expansion\n # 3. Random cropping\n # 4. Resizing with random interpolation\n # 5. Random horizontal flipping\n if self.count % 10 == 0 and self.count % self.batchsize == 0 and self.count != 0:\n self.i += 1\n i = self.i % len(self.dim)\n self.output_shape = (self.dim[i], self.dim[i])\n # print(self.count, self.i, self.output_shape)\n self.count += 1\n\n img, bbox, label = in_data\n\n # 1. Color augmentation\n img = random_distort(img, brightness_delta=32,\n contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5,\n hue_delta=25)\n\n # Normalize. 
range is [0, 1]\n img /= 255.0\n\n _, H, W = img.shape\n scale = np.random.uniform(0.25, 2)\n random_expand = np.random.uniform(0.8, 1.2, 2)\n net_h, net_w = self.output_shape\n out_h = net_h * scale # random_expand[0]\n out_w = net_w * scale # random_expand[1]\n if H > W:\n out_w = out_h * (float(W) / H) * np.random.uniform(0.8, 1.2)\n elif H < W:\n out_h = out_w * (float(H) / W) * np.random.uniform(0.8, 1.2)\n\n out_h = int(out_h)\n out_w = int(out_w)\n\n img = resize_with_random_interpolation(img, (out_h, out_w))\n bbox = transforms.resize_bbox(bbox, (H, W), (out_h, out_w))\n\n if out_h < net_h and out_w < net_w:\n img, param = expand(img, out_h=net_h, out_w=net_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n else:\n out_h = net_h if net_h > out_h else int(out_h * 1.05)\n out_w = net_w if net_w > out_w else int(out_w * 1.05)\n img, param = expand(img, out_h=out_h, out_w=out_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n\n img, param = crop_with_bbox_constraints(\n img, bbox, return_param=True,\n crop_height=net_h, crop_width=net_w)\n bbox, param = transforms.crop_bbox(\n bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],\n allow_outside_center=False, return_param=True)\n label = label[param['index']]\n\n\n # 5. Random horizontal flipping # OK\n img, params = transforms.random_flip(\n img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(\n bbox, self.output_shape, x_flip=params['x_flip'])\n\n # Preparation for Yolov2 network\n bbox[:, ::2] /= self.output_shape[0] # y\n bbox[:, 1::2] /= self.output_shape[1] # x\n\n num_bbox = len(bbox)\n len_max = max(num_bbox, self.max_target)\n\n gmap = create_map_anchor_gt(bbox, self.anchors, self.output_shape,\n self.downscale, self.n_boxes, len_max)\n\n out_bbox = np.zeros((len_max, 4), dtype='f')\n out_bbox[:num_bbox] = bbox[:num_bbox]\n out_label = np.zeros((len_max), dtype='i')\n out_label[:num_bbox] = label\n\n gmap = gmap[:self.max_target]\n out_bbox = out_bbox[:self.max_target]\n out_label = out_label[:self.max_target]\n num_array = min(num_bbox, self.max_target)\n\n img = np.clip(img, 0, 1)\n return img, out_bbox, out_label, gmap, np.array([num_array], dtype='i')", "def preprocess_train(im, boxes, classes, inst_masks, mask, input_size, min_size=2,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n ori_im = np.copy(im)\n target_h, target_w = input_size\n\n # ---------- old data_augmentation ----------\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n min_obj_cover = np.random.choice([0.8, 0.9, 1.0])\n # truncted examples may lead to multiple-detections..\n im, inst_masks, mask, boxes, classes = random_aspect_ratio(im, inst_masks, mask, boxes, classes,\n min_aspect_ratio=0.5, max_aspect_ratio=2.0,\n min_obj_cover=min_obj_cover)\n #\n # # r = np.random.randint(0, 3)\n # if np.random.rand() < 0.75:\n # im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n # else:\n # im, inst_masks, mask, boxes, classes = center_crop2fixed_pad(im, inst_masks, mask, boxes, classes, target_w, target_h,\n # min_size=min_size)\n\n # ---------- old data_augmentation ----------\n\n # ---------- none 
data_augmentation ----------\n im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n # ---------- none data_augmentation ----------\n\n # ---------- old data_augmentation ----------\n im = distort_color(im)\n # ---------- old data_augmentation ----------\n\n im = imcv2_recolor(im)\n\n # add this because zeros numpy array will cause errors in torch Dataloader\n inst_masks = np.zeros([1, target_h, target_w], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n\n boxes = np.asarray(boxes, dtype=np.float32)\n return im, boxes, classes, inst_masks, mask, ori_im", "def Batch_Size_Normalization(batch, batch_len, pad_token, batch_size):\n max_length = max(batch_len)\n current_batch_len = len(batch)\n need_more = batch_size-current_batch_len\n if need_more==0:\n return batch\n\n padding_array = np.ones(max_length)*pad_token\n for i in range(need_more):\n batch.append(padding_array)\n return batch", "def pre_process(self, images: Union[np.ndarray, List]) -> np.ndarray:\n images = validate_image(images)\n image_sizes = []\n image_arr = []\n for image in images:\n image_sizes.append(image.shape)\n image = resize(image,\n height=self.in_h,\n width=self.in_w)\n image = normalize(image)\n image_arr.append(image)\n image_arr = np.array(image_arr)\n return image_arr, image_sizes", "def space_to_batch(images, labels, tiles, n_tiles, paddings_image, paddings_tiles, shape_padded_image, shape_padded_label, shape_input, shape_output, b_with_labels=False, b_verbose=False):\n\n # map parse function to each zipped element\n print(paddings_tiles, shape_padded_label, shape_output)\n assert any([a % b <= 0 for a, b in zip(shape_padded_label, shape_output)])\n\n paddings_both = [a + b for a, b in zip(paddings_image, paddings_tiles)]\n shape_padded_both = [a + 2 * b for a, b in zip(shape_padded_image, paddings_tiles)]\n scale_factor = [float(a/b) for a, b in zip(shape_padded_both, shape_padded_image)]\n\n paddings_labels = [(x, x) for x in paddings_tiles] + [(0, 0)]\n paddings_both = [(x, x) for x in paddings_both] + [(0, 0)]\n\n if b_verbose:\n print('Padding/ padding_img: ', paddings_labels, paddings_both, scale_factor)\n logging.info('Using %d patches to predict a whole image', n_tiles)\n\n # process labels into patches\n if b_with_labels:\n # print('labels prior: ', labels)\n labels = tf.pad(labels, paddings_labels)\n labels = tf.expand_dims(labels, axis=0)\n batch_shape = tf.stack([n_tiles, *shape_output, tf.shape(labels)[-1]])\n labels = tf.reshape(labels, batch_shape)\n # print('labels post: ', labels)\n\n # process images into patches\n # Note: a simple reshape is not possible due to the overlapping of inputs\n # map_fn or tf while_loops or sth similar might help\n images = tf.pad(images, paddings_both)\n if b_verbose:\n images = tf.Print(images, [tf.shape(images), tiles], 'Temporary patch shape - before: ', summarize=5)\n\n patches = [None for _ in range(n_tiles)]\n # patch_indices = list(range(n_tiles))\n positions = [None for _ in range(n_tiles)]\n offset_image = [int(x / 2) for x in shape_input]\n idx_tile = 0\n for idx_0 in range(tiles[0]):\n for idx_1 in range(tiles[1]):\n for idx_2 in range(tiles[2]):\n start_pos = [shape_output[0] * idx_0, shape_output[1] * idx_1, shape_output[2] * idx_2, 0]\n positions[idx_tile] = [float(a + b) for a, b in zip(start_pos[0:3], offset_image)]\n patches[idx_tile] = tf.slice(images, start_pos, shape_input + 
[tf.shape(images)[-1]])\n idx_tile += 1\n # images = tf.Print(images, [tf.shape(images), idx_0, idx_1, idx_2, start_pos], 'performed crop at: ')\n\n if b_verbose:\n patches[0] = tf.Print(patches[0], [tf.shape(patches[0])], 'Temporary patch shape - within: ', summarize=5)\n images = tf.stack(patches, axis=0)\n\n positions_t = tf.stack(positions, axis=0)\n positions_t = tf.cast(tf.multiply((tf.divide(positions_t, shape_padded_both) - 0.5) * 2, scale_factor), dtype=tf.float32) # rescale it | account for larger padded size\n if b_verbose:\n images = tf.Print(images, [tf.shape(images)], 'Temporary patch shape - after: ', summarize=5)\n\n return images, labels, positions_t", "def preprocess_image(image, training):\r\n if training:\r\n ### YOUR CODE HERE\r\n hpad = np.zeros((32,4,3))\r\n image = np.hstack((image,hpad))\r\n image = np.hstack((hpad,image))\r\n\r\n vpad = np.zeros((4,40, 3))\r\n image = np.vstack((image, vpad))\r\n image = np.vstack((vpad, image))\r\n\r\n #print(np.shape(image))\r\n # Resize the image to add four extra pixels on each side.\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly crop a [32, 32] section of the image.\r\n # HINT: randomly generate the upper left point of the image\r\n rx = np.random.randint(8)\r\n ry = np.random.randint(8)\r\n crp_img = image[rx:rx+32,ry:ry+32,:]\r\n #print(np.shape(crp_img))\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Randomly flip the image horizontally.\r\n # for i in range(crp_img.shape[0]):\r\n # crp_img[i] = np.fliplr(crp_img[i])\r\n rf = np.random.randint(2)\r\n if(rf == 0):\r\n crp_img = np.fliplr(crp_img)\r\n #print(np.shape(crp_img))\r\n image = crp_img\r\n\r\n\r\n ### YOUR CODE HERE\r\n\r\n ### YOUR CODE HERE\r\n # Subtract off the mean and divide by the standard deviation of the pixels.\r\n cmean = []\r\n cstd = []\r\n for i in range(np.shape(image)[2]):\r\n arr = image[:,:,i]\r\n cmean = np.mean(arr)\r\n cstd = (np.std(arr))\r\n lfn = lambda x : (x-cmean)/cstd\r\n image[:,:,i] = lfn(arr)\r\n #print(np.shape(image))\r\n\r\n ### YOUR CODE HERE\r\n\r\n return image", "def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images", "def _pad_img(self, results):\n pad_val = self.pad_val.get('img', 0)\n for key in results.get('img_fields', ['img']):\n if self.pad_to_square:\n max_size = max(results[key].shape[:2])\n self.size = (max_size, max_size)\n if self.size is not None:\n padded_img = general_ocr.impad(\n results[key], shape=self.size, pad_val=pad_val)\n elif self.size_divisor is not None:\n padded_img = 
general_ocr.impad_to_multiple(\n results[key], self.size_divisor, pad_val=pad_val)\n results[key] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor", "def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[Union[int, float]] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n pad_size: Optional[Dict[str, int]] = None,\n do_convert_rgb: bool = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ):\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_pad = do_pad if do_pad is not None else self.do_pad\n pad_size = pad_size if pad_size is not None else self.pad_size\n pad_size = get_size_dict(pad_size, default_to_square=True)\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"Size and resample must be specified if do_resize is True.\")\n\n if do_rescale and rescale_factor is None:\n raise ValueError(\"Rescale factor must be specified if do_rescale is True.\")\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"Image mean and std must be specified if do_normalize is True.\")\n\n if do_pad and pad_size is None:\n raise ValueError(\"Pad size must be specified if do_pad is True.\")\n\n # PIL RGBA images are converted to RGB\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n original_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images]\n\n if do_resize:\n images = [\n self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n reshaped_input_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_pad:\n images = [\n self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format) for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n encoded_outputs = BatchFeature(\n data={\n \"pixel_values\": images,\n \"original_sizes\": original_sizes,\n \"reshaped_input_sizes\": reshaped_input_sizes,\n },\n tensor_type=return_tensors,\n )\n return encoded_outputs", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def dimension_postprocess(self, chunked_data, original_data, scale=1, padding=True):\r\n\r\n assert len(original_data.shape) == 2, \"data dimension expected to be (xline ,samp_point)\"\r\n assert len(chunked_data.shape) == 3, \"Chunked data dimension expected to be (batch_size, xline, samp_point)\"\r\n\r\n if padding:\r\n if original_data.shape[0] < self.rows:\r\n new_images = []\r\n for data in chunked_data:\r\n new_images.append(data[0:scale * original_data.shape[0], :])\r\n chunked_data = np.array(new_images)\r\n\r\n if original_data.shape[1] < self.cols:\r\n new_images = []\r\n for data in chunked_data:\r\n new_images.append(data[:, 0:scale * original_data.shape[1]])\r\n chunked_data = np.array(new_images)\r\n\r\n new_shape = (\r\n original_data.shape[0] * scale,\r\n original_data.shape[1] * scale\r\n )\r\n reconstruction = np.zeros(new_shape)\r\n x_chunks, y_chunks = self.get_chunks(original_data)\r\n\r\n i = 0\r\n s = scale\r\n for x in x_chunks:\r\n for y in y_chunks:\r\n prior_fill = reconstruction != 0\r\n chunk = np.zeros(new_shape)\r\n chunk[x[0] * s:x[1] * s, y[0] * s:y[1] * s] += chunked_data[i]\r\n chunk_fill = chunk != 0\r\n reconstruction += chunk\r\n reconstruction[prior_fill & chunk_fill] = reconstruction[prior_fill & chunk_fill] / 2\r\n i += 1\r\n return reconstruction", "def _pre_process_images(images, details):\n # If the images are gray-scale, the number of channels (1) must be \"added\" to the size of the samples.\n if details['channels'] == 1:\n img_rows, img_cols = details['sample size']\n\n # The place of the dimension with 1 depends on the backend used by Keras.\n if K.image_data_format() == 'channels_first':\n images = images.reshape(images.shape[0], 1, img_rows, img_cols)\n else:\n images = images.reshape(images.shape[0], img_rows, img_cols, 1)\n\n # Normalize pixel values to be in the interval [0, 1]\n images = images.astype('float32')\n max_bit_value = 2 ** details['bits per sample'] - 1\n images /= max_bit_value\n return 
images", "def main():\n\n # Just grab all files - we'll use try/except to filter\n images = glob.glob(os.path.join(args.input_dir, '*.*'))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for img_file in images:\n print(img_file)\n try:\n np_img = plt.imread(img_file)\n print(np_img.shape)\n img_name = img_file.split(os.sep)[-1]\n new_img_file = os.path.join(args.output_dir, img_name)\n pad_image(np_img, new_img_file)\n except Exception as e:\n print('Warning: {}. Skpping file.'.format(e))\n continue", "def _augment_images(self, images, random_state, parents, hooks):\n nb_images = len(images)\n samples = self.p.draw_samples((nb_images,), random_state=random_state)\n for i in sm.xrange(nb_images):\n if samples[i] == 1:\n if self.axis == 1:\n images[i] = np.fliplr(images[i])\n elif self.axis == 0:\n images[i] = np.flipud(images[i])\n self.samples = samples\n return images", "def process_batch(self, image_batch):\n images = []\n for image_data in image_batch:\n image_resize = cv2.resize(image_data, (0,0), fx=0.5, fy=0.5) #NOTE\n images.append(image_resize)\n\n return np.array(images)", "def batch_image_preprocess(raw_images,\n image_size: Union[int, Tuple[int, int]],\n mean_rgb,\n stddev_rgb,\n batch_size: int = None):\n if not batch_size:\n # map_fn is a little bit slower due to some extra overhead.\n # map_fn -> vectorized_map (fully parallelizes the batch).\n map_fn = functools.partial(\n image_preprocess,\n image_size=image_size,\n mean_rgb=mean_rgb,\n stddev_rgb=stddev_rgb)\n images, scales = tf.vectorized_map(map_fn, raw_images, warn=False)\n images = tf.stop_gradient(tf.cast(images, tf.float32))\n scales = tf.stop_gradient(tf.cast(scales, tf.float32))\n return (images, scales)\n\n # If batch size is known, use a simple loop.\n scales, images = [], []\n for i in range(batch_size):\n image, scale = image_preprocess(raw_images[i], image_size, mean_rgb,\n stddev_rgb)\n scales.append(scale)\n images.append(image)\n images = tf.stack(images)\n scales = tf.stack(scales)\n return (images, scales)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def dimension_preprocess(self, data, padding=True):\r\n\r\n assert len(data.shape) == 2, \"Data dimension expected to be ( xline, samp_point)\"\r\n if padding:\r\n if data.shape[0] < self.rows:\r\n padding = np.ones((self.rows - data.shape[0], data.shape[1]))\r\n data = np.concatenate((data, padding), axis=0)\r\n if data.shape[1] < self.cols:\r\n padding = np.ones((data.shape[0], self.cols - data.shape[1]))\r\n data = np.concatenate((data, padding), axis=1)\r\n x_chunks, y_chunks = self.get_chunks(data)\r\n images = []\r\n for x in x_chunks:\r\n for y in y_chunks:\r\n images.append(\r\n data[x[0]:x[1], y[0]:y[1]]\r\n )\r\n images = np.array(images)\r\n\r\n return images", "def preprocess_batch(self,image_batch):\n if len(image_batch.shape) == 4 and image_batch.shape[1:] != (72,72,1):\n assert (False),'wrong batch shape'\n\n if len(image_batch.shape) == 3:\n if image_batch.shape == (72, 72, 3):\n image_batch = np.mean(image_batch,axis=2).reshape(72,72,1)\n elif image_batch.shape == (72, 72, 1):\n pass\n else:\n assert(False),'wrong batch shape'\n\n return image_batch/255", "def batch_preprocess(self, input_folder, output_folder, padding=20):\n input_files = glob.glob(input_folder + '/*')\n for 
input_path in input_files:\n subject_name = re.search(self.KEY_WORD_FILE, input_path).group()\n output_path = output_folder + '/' + subject_name\n\n data, options = nrrd.read(input_path)\n data, options = self.pad_upper(data, options, padding)\n data, options = self.filter_background_to_air(data, options)\n\n print 'write ' + output_path\n nrrd.write(output_path, data, options) # too slow in Python" ]
[ "0.7097169", "0.66408646", "0.65812963", "0.6465365", "0.6464898", "0.6406874", "0.63755596", "0.63661265", "0.6324502", "0.6318984", "0.63017005", "0.6297598", "0.6261444", "0.62284863", "0.6226007", "0.6213169", "0.6194187", "0.6189247", "0.61866045", "0.6185736", "0.6183467", "0.6160369", "0.6142805", "0.6132306", "0.61175466", "0.61103964", "0.61103964", "0.609539", "0.60671926", "0.6028339" ]
0.67390656
1
Ensure that apigateway v1 and apigateway v2 actions are both present in the ses namespace
def test_services_with_multiple_pages_apigateway(self): # API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html self.assertTrue("apigateway:AddCertificateToDomain" in self.all_actions) self.assertTrue("apigateway:RemoveCertificateFromDomain" in self.all_actions) self.assertTrue("apigateway:SetWebACL" in self.all_actions) # API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html # API Gateway V2 doesn't have any unique actions in but it does have some unique resource types. Let's make sure those resource types are in the IAM Definition. # Resource types unique to API Gateway V2: resource_types = get_arn_types_for_service("apigateway") resource_types = list(resource_types.keys()) self.assertTrue("AccessLogSettings" in resource_types) # Resource types unique to API Gateway V1: self.assertTrue("RestApi" in resource_types)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def test_services_with_multiple_pages_ses(self):\n # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html\n self.assertTrue(\"ses:PutIdentityPolicy\" in self.all_actions)\n # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html\n self.assertTrue(\"ses:ListImportJobs\" in self.all_actions)\n\n results = get_actions_for_service(\"ses\")\n actions = [\n \"ses:CloneReceiptRuleSet\",\n \"ses:CreateConfigurationSetTrackingOptions\",\n \"ses:CreateReceiptFilter\",\n \"ses:CreateReceiptRule\",\n \"ses:CreateReceiptRuleSet\",\n \"ses:CreateTemplate\",\n \"ses:DeleteConfigurationSetTrackingOptions\",\n \"ses:DeleteIdentity\",\n \"ses:DeleteIdentityPolicy\",\n \"ses:DeleteReceiptFilter\",\n \"ses:DeleteReceiptRule\",\n \"ses:DeleteReceiptRuleSet\",\n \"ses:DeleteTemplate\",\n \"ses:DeleteVerifiedEmailAddress\",\n \"ses:DescribeActiveReceiptRuleSet\",\n \"ses:DescribeConfigurationSet\",\n \"ses:DescribeReceiptRule\",\n \"ses:DescribeReceiptRuleSet\",\n \"ses:GetAccountSendingEnabled\",\n \"ses:GetIdentityDkimAttributes\",\n \"ses:GetIdentityMailFromDomainAttributes\",\n \"ses:GetIdentityNotificationAttributes\",\n \"ses:GetIdentityPolicies\",\n \"ses:GetIdentityVerificationAttributes\",\n \"ses:GetSendQuota\",\n \"ses:GetSendStatistics\",\n \"ses:GetTemplate\",\n \"ses:ListIdentities\",\n \"ses:ListIdentityPolicies\",\n \"ses:ListReceiptFilters\",\n \"ses:ListReceiptRuleSets\",\n \"ses:ListTemplates\",\n \"ses:ListVerifiedEmailAddresses\",\n \"ses:PutIdentityPolicy\",\n \"ses:ReorderReceiptRuleSet\",\n \"ses:SendBounce\",\n \"ses:SendBulkTemplatedEmail\",\n \"ses:SendRawEmail\",\n \"ses:SendTemplatedEmail\",\n \"ses:SetActiveReceiptRuleSet\",\n \"ses:SetIdentityDkimEnabled\",\n \"ses:SetIdentityFeedbackForwardingEnabled\",\n \"ses:SetIdentityHeadersInNotificationsEnabled\",\n \"ses:SetIdentityMailFromDomain\",\n \"ses:SetIdentityNotificationTopic\",\n \"ses:SetReceiptRulePosition\",\n \"ses:TestRenderTemplate\",\n \"ses:UpdateAccountSendingEnabled\",\n \"ses:UpdateConfigurationSetReputationMetricsEnabled\",\n \"ses:UpdateConfigurationSetSendingEnabled\",\n \"ses:UpdateConfigurationSetTrackingOptions\",\n \"ses:UpdateReceiptRule\",\n \"ses:UpdateTemplate\",\n \"ses:VerifyDomainDkim\",\n \"ses:VerifyDomainIdentity\",\n \"ses:VerifyEmailAddress\",\n \"ses:VerifyEmailIdentity\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_subscriber_access_for_two_vsg_services(self):", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. 
This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def test_request_with_two_bundles(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n self.assertTrue(validate_request(request, self.policy))", "def test_services_with_multiple_pages_aws_marketplace(self):\n # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems\n # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html\n self.assertTrue(\"aws-marketplace:AcceptAgreementApprovalRequest\" in self.all_actions)\n # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html\n self.assertTrue(\"aws-marketplace:CancelChangeSet\" in self.all_actions)\n # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html\n self.assertTrue(\"aws-marketplace:GetEntitlements\" in self.all_actions)\n # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html\n self.assertTrue(\"aws-marketplace:DescribeBuilds\" in self.all_actions)\n # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html\n self.assertTrue(\"aws-marketplace:BatchMeterUsage\" in self.all_actions)\n # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html\n self.assertTrue(\"aws-marketplace:AssociateProductsWithPrivateMarketplace\" in self.all_actions)\n # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html\n self.assertTrue(\"aws-marketplace:DescribeProcurementSystemConfiguration\" in self.all_actions)\n\n results = get_actions_for_service(\"aws-marketplace\")\n actions = [\n \"aws-marketplace:AcceptAgreementApprovalRequest\",\n \"aws-marketplace:BatchMeterUsage\",\n \"aws-marketplace:CancelAgreementRequest\",\n \"aws-marketplace:CancelChangeSet\",\n \"aws-marketplace:CompleteTask\",\n \"aws-marketplace:DescribeAgreement\",\n \"aws-marketplace:DescribeBuilds\",\n \"aws-marketplace:DescribeChangeSet\",\n \"aws-marketplace:DescribeEntity\",\n \"aws-marketplace:DescribeProcurementSystemConfiguration\",\n \"aws-marketplace:DescribeTask\",\n \"aws-marketplace:GetAgreementApprovalRequest\",\n \"aws-marketplace:GetAgreementRequest\",\n \"aws-marketplace:GetAgreementTerms\",\n \"aws-marketplace:GetEntitlements\",\n \"aws-marketplace:ListAgreementApprovalRequests\",\n \"aws-marketplace:ListAgreementRequests\",\n 
\"aws-marketplace:ListBuilds\",\n \"aws-marketplace:ListChangeSets\",\n \"aws-marketplace:ListEntities\",\n \"aws-marketplace:ListTasks\",\n \"aws-marketplace:MeterUsage\",\n \"aws-marketplace:PutProcurementSystemConfiguration\",\n \"aws-marketplace:RegisterUsage\",\n \"aws-marketplace:RejectAgreementApprovalRequest\",\n \"aws-marketplace:ResolveCustomer\",\n \"aws-marketplace:SearchAgreements\",\n \"aws-marketplace:StartBuild\",\n \"aws-marketplace:StartChangeSet\",\n \"aws-marketplace:Subscribe\",\n \"aws-marketplace:Unsubscribe\",\n \"aws-marketplace:UpdateAgreementApprovalRequest\",\n \"aws-marketplace:UpdateTask\",\n \"aws-marketplace:ViewSubscriptions\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_subscriber_access_if_vsg2_goes_down(self):", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, 
issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies for '{account_id} ({account_name})'\")", "def verifyActionCenterRts():\n pass", "def test_connect_post_namespaced_status_webhooks(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass", "def test_replace_namespaced_route_status(self):\n pass", "def check_script(vouts):\n for vout in [v for v in vouts[::-1] if v['hex'].startswith('6a')]:\n verb = BlockchainSpider.decode_op_return(vout['hex'])\n action = Spoolverb.from_verb(verb).action\n if action in Spoolverb.supported_actions:\n return verb\n raise Exception(\"Invalid ascribe transaction\")", "def test_get_snsname_arn_auth_exception_handling(self, aws_res_mock):\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_snsname_arn\n\n # create a mock SNS client that returns what we tell it to\n client = boto3.client('sns')\n stub = Stubber(client)\n stub.add_client_error('create_topic', service_error_code='AuthorizationError')\n stub.activate()\n\n\n # since firesim manager code doesn't take clients as method parameters\n # now we mock boto3.client to return our stubbed client\n with patch.object(boto3._get_default_session(), 'client', return_value=client) as mock_session:\n topic_arn = get_snsname_arn()\n\n stub.assert_no_pending_responses()\n topic_arn.should.be.none\n\n # TODO we could mock rootLogger.critical to capture it's calls and args and validate that we're seeing the correct \"nice\" message\n\n # make sure get_snsname_arn() actually called out to get a sns\n # client, otherwise we aren't testing what we think we are\n mock_session.assert_called_once_with('sns')\n\n aws_res_mock.assert_called_once()", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def test_subscriber_access_if_vsg1_goes_down(self):", "def enforce(context, action, target, do_raise=True):\n \"\"\"\n ======================================================================================\n context = <xdrs.context.RequestContext object at 0x6dcf050>\n target = {'project_id': u'4537aca4a4a4462fa4c59ad5b5581f00', 'user_id': u'91d732b65831491d8bd952b3111e62dd'}\n action = xdrs:get_algorithms\n ======================================================================================\n \"\"\"\n init()\n \n credentials = context.to_dict()\n \"\"\"\n ======================================================================================\n credentials = {'project_name': u'admin', 'user_id': u'91d732b65831491d8bd952b3111e62dd', 'roles': [u'heat_stack_owner', u'_member_', u'admin'], 'timestamp': '2015-03-10T06:48:40.110653', 'auth_token': 
'MIIT9wYJKoZIhvcNAQcCoIIT6DCCE+QCAQExCTAHBgUrDgMCGjCCEk0GCSqGSIb3DQEHAaCCEj4EghI6eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNS0wMy0xMFQwNjo0ODozOS41MzU2NjEiLCAiZXhwaXJlcyI6ICIyMDE1LTAzLTEwVDA3OjQ4OjM5WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogIjQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc0L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjIvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAiaWQiOiAiMTZiMTVjYzVmZjUwNGNiODlmNTg2NjRlMjdhNjljNjkiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc0L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGUiLCAibmFtZSI6ICJub3ZhIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjk2OTYvIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjk2OTYvIiwgImlkIjogIjFiMjkzYTgxNjk2YjRiN2Y4OTZlYWQ0NjIyYTFjMmExIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6OTY5Ni8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAibmV0d29yayIsICJuYW1lIjogIm5ldXRyb24ifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3Ni92Mi80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgImlkIjogIjNhNzY3OWNjZTdkZjRhY2ZhMTZiM2NhNTJkZGNmYzgyIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3Ni92Mi80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjMiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3NC92MyIsICJpZCI6ICIwYmIxZDFiODhhZmU0MGRhOTNiY2IxNTg0Y2ExN2ZiOSIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjMifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY29tcHV0ZXYzIiwgIm5hbWUiOiAibm92YXYzIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwODAiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MCIsICJpZCI6ICIxZTMyZTE3MmU3OWM0YzVhYTZiNWM3ZjhkNzVhZjRmYiIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwODAifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiczMiLCAibmFtZSI6ICJzd2lmdF9zMyJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo5MjkyIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjkyOTIiLCAiaWQiOiAiM2QxYzc5MjY1MWEwNDljNWE2MWUzNWJmZWZjNGM4OGIiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo5MjkyIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImltYWdlIiwgIm5hbWUiOiAiZ2xhbmNlIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzciLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3NyIsICJpZCI6ICIzOWE0YzA2NDIzYTg0OTNjOTI4ZGExOGY0YTVjY2MxZiIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzcifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAibWV0ZXJpbmciLCAibmFtZSI6ICJjZWlsb21ldGVyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDAvdjEvIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDAvdjEvIiwgImlkIjogIjU1NzBiOGY4MTE0OTRlMWI5NTVkYjZlNTAzZGYyYWZkIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODAwMC92MS8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY2xvdWRmb3JtYX
Rpb24iLCAibmFtZSI6ICJoZWF0LWNmbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YxLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzYvdjEvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAiaWQiOiAiMGExYzhkYTRmMTU2NDk1YWFkMjEzMGUyYzA2OTE5ODIiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YxLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogInZvbHVtZSIsICJuYW1lIjogImNpbmRlciJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4NzczL3NlcnZpY2VzL0FkbWluIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzMvc2VydmljZXMvQ2xvdWQiLCAiaWQiOiAiMDMzZjY3ZTk1MDBjNDljYThmOGIxODkzZTJhN2VkYWYiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4NzczL3NlcnZpY2VzL0Nsb3VkIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImVjMiIsICJuYW1lIjogIm5vdmFfZWMyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDQvdjEvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODAwNC92MS80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCIsICJpZCI6ICI0YmViNjQ0MjUzYWU0NzdmOWU5NDk2ZWVkZDEwOTNhNSIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDQvdjEvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAib3JjaGVzdHJhdGlvbiIsICJuYW1lIjogImhlYXQifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MC8iLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MC92MS9BVVRIXzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgImlkIjogIjNhMTA2MzU0MjYxMDQzMjk5YTVkMjQ3ZTVmMjU5NGQyIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MC92MS9BVVRIXzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogIm9iamVjdC1zdG9yZSIsICJuYW1lIjogInN3aWZ0In0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjM1MzU3L3YyLjAiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6NTAwMC92Mi4wIiwgImlkIjogIjVjNGVlN2MzMTE4NDQyNGM5NDJhMWM1MjgxODU3MmZiIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6NTAwMC92Mi4wIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImlkZW50aXR5IiwgIm5hbWUiOiAia2V5c3RvbmUifV0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJhZG1pbiIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiOTFkNzMyYjY1ODMxNDkxZDhiZDk1MmIzMTExZTYyZGQiLCAicm9sZXMiOiBbeyJuYW1lIjogImhlYXRfc3RhY2tfb3duZXIifSwgeyJuYW1lIjogIl9tZW1iZXJfIn0sIHsibmFtZSI6ICJhZG1pbiJ9XSwgIm5hbWUiOiAiYWRtaW4ifSwgIm1ldGFkYXRhIjogeyJpc19hZG1pbiI6IDAsICJyb2xlcyI6IFsiZDlmZGVlODI1NjE3NGJlNWE3MmFjZGZmNDNkM2VkZDMiLCAiOWZlMmZmOWVlNDM4NGIxODk0YTkwODc4ZDNlOTJiYWIiLCAiN2E1ZTg5MmFiYTE5NDI3NWI3ZjQxZWM4Njg2ZDUwOGYiXX19fTGCAYEwggF9AgEBMFwwVzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBVVuc2V0MQ4wDAYDVQQHDAVVbnNldDEOMAwGA1UECgwFVW5zZXQxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbQIBATAHBgUrDgMCGjANBgkqhkiG9w0BAQEFAASCAQBkwVlwVgYM+mCIXICViGPgW+AZ--Y3NfWjW92GTBqW4keVrPosYxz--b2SVSGqwOHI1xFPqIx1+fzBCcilE5rIuJ3gxAc2VEWl4whMkriqWo6M8YY+GxGJ07h1NZ3Jc9Mrk7RTWPwU9YPilWPSU9sRx4bv+y7XpL8EIEvi+0dvHKgGI+nvqEYVFIf1vYQN5bvSnAgC1rZ9oB0M4Pg1wd47xQcenZL+XOWb8uxUReAvT-lfjXav7DhwUzPgmlY2XpN+9yfhAXAFF0GkokwjncvC5YTILOa41eMUg8ip47+rijNpQ2FuxVpRhQ-xL9it8+vAYkGLqe7eaQylsf0Nu6JJ', 'remote_address': '172.21.7.40', 'quota_class': None, 'is_admin': True, 'tenant': u'4537aca4a4a4462fa4c59ad5b5581f00', 'service_catalog': [{u'endpoints_links': [], u'endpoints': [{u'adminURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'region': u'RegionOne', 
u'publicURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'id': u'0a1c8da4f156495aad2130e2c0691982', u'internalURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00'}], u'type': u'volume', u'name': u'cinder'}], 'request_id': 'req-c0439276-3600-49cb-8de5-680b3f7d735c', 'instance_lock_checked': False, 'project_id': u'4537aca4a4a4462fa4c59ad5b5581f00', 'user_name': u'admin', 'read_deleted': 'no', 'user': u'91d732b65831491d8bd952b3111e62dd'}\n ======================================================================================\n \"\"\"\n\n # Add the exception arguments if asked to do a raise\n extra = {}\n if do_raise:\n extra.update(exc=exception.PolicyNotAuthorized, action=action)\n\n \"\"\"\n ======================================================================================\n action = xdrs:get_algorithms\n target = <xdrs.objects.instance.Instance object at 0x62b4a50>\n credentials = {'project_name': u'admin', 'user_id': u'91d732b65831491d8bd952b3111e62dd', 'roles': [u'heat_stack_owner', u'_member_', u'admin'], 'timestamp': '2015-03-10T06:48:40.110653', 'auth_token': 'MIIT9wYJKoZIhvcNAQcCoIIT6DCCE+QCAQExCTAHBgUrDgMCGjCCEk0GCSqGSIb3DQEHAaCCEj4EghI6eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNS0wMy0xMFQwNjo0ODozOS41MzU2NjEiLCAiZXhwaXJlcyI6ICIyMDE1LTAzLTEwVDA3OjQ4OjM5WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogIjQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc0L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjIvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAiaWQiOiAiMTZiMTVjYzVmZjUwNGNiODlmNTg2NjRlMjdhNjljNjkiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc0L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGUiLCAibmFtZSI6ICJub3ZhIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjk2OTYvIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjk2OTYvIiwgImlkIjogIjFiMjkzYTgxNjk2YjRiN2Y4OTZlYWQ0NjIyYTFjMmExIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6OTY5Ni8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAibmV0d29yayIsICJuYW1lIjogIm5ldXRyb24ifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3Ni92Mi80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgImlkIjogIjNhNzY3OWNjZTdkZjRhY2ZhMTZiM2NhNTJkZGNmYzgyIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3Ni92Mi80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjMiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3NC92MyIsICJpZCI6ICIwYmIxZDFiODhhZmU0MGRhOTNiY2IxNTg0Y2ExN2ZiOSIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjMifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY29tcHV0ZXYzIiwgIm5hbWUiOiAibm92YXYzIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwODAiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MCIsICJpZCI6ICIxZTMyZTE3MmU3OWM0YzVhYTZiNWM3ZjhkNzVhZjRmYiIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwODAifV0sICJlbmRwb2l
udHNfbGlua3MiOiBbXSwgInR5cGUiOiAiczMiLCAibmFtZSI6ICJzd2lmdF9zMyJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo5MjkyIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjkyOTIiLCAiaWQiOiAiM2QxYzc5MjY1MWEwNDljNWE2MWUzNWJmZWZjNGM4OGIiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo5MjkyIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImltYWdlIiwgIm5hbWUiOiAiZ2xhbmNlIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzciLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3NyIsICJpZCI6ICIzOWE0YzA2NDIzYTg0OTNjOTI4ZGExOGY0YTVjY2MxZiIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzcifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAibWV0ZXJpbmciLCAibmFtZSI6ICJjZWlsb21ldGVyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDAvdjEvIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDAvdjEvIiwgImlkIjogIjU1NzBiOGY4MTE0OTRlMWI5NTVkYjZlNTAzZGYyYWZkIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODAwMC92MS8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY2xvdWRmb3JtYXRpb24iLCAibmFtZSI6ICJoZWF0LWNmbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YxLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzYvdjEvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAiaWQiOiAiMGExYzhkYTRmMTU2NDk1YWFkMjEzMGUyYzA2OTE5ODIiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YxLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogInZvbHVtZSIsICJuYW1lIjogImNpbmRlciJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4NzczL3NlcnZpY2VzL0FkbWluIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzMvc2VydmljZXMvQ2xvdWQiLCAiaWQiOiAiMDMzZjY3ZTk1MDBjNDljYThmOGIxODkzZTJhN2VkYWYiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4NzczL3NlcnZpY2VzL0Nsb3VkIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImVjMiIsICJuYW1lIjogIm5vdmFfZWMyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDQvdjEvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODAwNC92MS80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCIsICJpZCI6ICI0YmViNjQ0MjUzYWU0NzdmOWU5NDk2ZWVkZDEwOTNhNSIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjgwMDQvdjEvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAib3JjaGVzdHJhdGlvbiIsICJuYW1lIjogImhlYXQifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MC8iLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MC92MS9BVVRIXzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgImlkIjogIjNhMTA2MzU0MjYxMDQzMjk5YTVkMjQ3ZTVmMjU5NGQyIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODA4MC92MS9BVVRIXzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogIm9iamVjdC1zdG9yZSIsICJuYW1lIjogInN3aWZ0In0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjM1MzU3L3YyLjAiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6NTAwMC92Mi4wIiwgImlkIjogIjVjNGVlN2MzMTE4NDQyNGM5NDJhMWM1MjgxODU3MmZiIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6NTAwMC92Mi4wIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImlkZW50aXR5IiwgIm5hbWUiOiAia2V5c3RvbmUifV0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJhZG1pbiIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiOTFkNzMyYjY1ODMxNDkxZDhiZDk1MmIzMTExZTYyZGQiLCAicm9sZXMiOiBbey
JuYW1lIjogImhlYXRfc3RhY2tfb3duZXIifSwgeyJuYW1lIjogIl9tZW1iZXJfIn0sIHsibmFtZSI6ICJhZG1pbiJ9XSwgIm5hbWUiOiAiYWRtaW4ifSwgIm1ldGFkYXRhIjogeyJpc19hZG1pbiI6IDAsICJyb2xlcyI6IFsiZDlmZGVlODI1NjE3NGJlNWE3MmFjZGZmNDNkM2VkZDMiLCAiOWZlMmZmOWVlNDM4NGIxODk0YTkwODc4ZDNlOTJiYWIiLCAiN2E1ZTg5MmFiYTE5NDI3NWI3ZjQxZWM4Njg2ZDUwOGYiXX19fTGCAYEwggF9AgEBMFwwVzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBVVuc2V0MQ4wDAYDVQQHDAVVbnNldDEOMAwGA1UECgwFVW5zZXQxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbQIBATAHBgUrDgMCGjANBgkqhkiG9w0BAQEFAASCAQBkwVlwVgYM+mCIXICViGPgW+AZ--Y3NfWjW92GTBqW4keVrPosYxz--b2SVSGqwOHI1xFPqIx1+fzBCcilE5rIuJ3gxAc2VEWl4whMkriqWo6M8YY+GxGJ07h1NZ3Jc9Mrk7RTWPwU9YPilWPSU9sRx4bv+y7XpL8EIEvi+0dvHKgGI+nvqEYVFIf1vYQN5bvSnAgC1rZ9oB0M4Pg1wd47xQcenZL+XOWb8uxUReAvT-lfjXav7DhwUzPgmlY2XpN+9yfhAXAFF0GkokwjncvC5YTILOa41eMUg8ip47+rijNpQ2FuxVpRhQ-xL9it8+vAYkGLqe7eaQylsf0Nu6JJ', 'remote_address': '172.21.7.40', 'quota_class': None, 'is_admin': True, 'tenant': u'4537aca4a4a4462fa4c59ad5b5581f00', 'service_catalog': [{u'endpoints_links': [], u'endpoints': [{u'adminURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'region': u'RegionOne', u'publicURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'id': u'0a1c8da4f156495aad2130e2c0691982', u'internalURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00'}], u'type': u'volume', u'name': u'cinder'}], 'request_id': 'req-c0439276-3600-49cb-8de5-680b3f7d735c', 'instance_lock_checked': False, 'project_id': u'4537aca4a4a4462fa4c59ad5b5581f00', 'user_name': u'admin', 'read_deleted': 'no', 'user': u'91d732b65831491d8bd952b3111e62dd'}\n extra = {'action': 'xdrs:get_algorithms', 'exc': <class 'xdrs.exception.PolicyNotAuthorized'>}\n ======================================================================================\n \"\"\"\n return policy.check(action, target, credentials, **extra)", "def test_must_be_associated(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n assoc.release()\n assert assoc.is_released\n assert not assoc.is_established\n with pytest.raises(RuntimeError):\n assoc.send_n_action(None, None, None, None)\n\n scp.shutdown()", "def has_action2(self, feature):\n return feature in self._action2", "def test_zsk_policy_no_bundle_overlap(self):\n signature_algorithm = self._make_signature_algorithm()\n request_policy = f\"\"\"\n <RequestPolicy>\n <ZSK>\n <PublishSafety>P10D</PublishSafety>\n <RetireSafety>P10D</RetireSafety>\n <MaxSignatureValidity>P21D</MaxSignatureValidity>\n <MinSignatureValidity>P21D</MinSignatureValidity>\n <MaxValidityOverlap>P12D</MaxValidityOverlap>\n <MinValidityOverlap>P9D</MinValidityOverlap>\n {signature_algorithm}\n </ZSK>\n </RequestPolicy>\n \"\"\"\n\n bundle1, bundle2, = self._get_two_bundles()\n xml = self._make_request(\n request_policy=request_policy, bundle1=bundle1, bundle2=bundle2\n )\n request = request_from_xml(xml)\n policy = replace(\n self.policy,\n check_bundle_intervals=False, # want to test against ZSK policy, not KSK policy\n check_cycle_length=False, # want to test against ZSK policy, not KSK policy\n )\n with self.assertRaises(KSR_POLICY_SIG_OVERLAP_Violation) as exc:\n validate_request(request, 
policy)\n self.assertEqual(\n 'Bundle \"test-2\" does not overlap with previous bundle \"test-1\" (2019-02-01 00:00:00+00:00 > '\n \"2019-01-22 00:00:00+00:00)\",\n str(exc.exception),\n )", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # 
Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_acknowledge_hmac_validation_failed(client):\n res = client.get(\n \"/v0/acknowledge?fp=splunk_82998ef6bb3db9dff3dsfdsfsdc\" \"&t=97244b15a21f45e002b2e913866ff7545510f9b08dea5241f\"\n )\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass", "def test_services_with_multiple_pages_kinesis_analytics(self):\n # Kinesis Analytics V1\n results = get_actions_for_service(\"kinesisanalytics\")\n actions = [\n \"kinesisanalytics:GetApplicationState\", # Only in v1, not v2\n \"kinesisanalytics:ListApplications\", # In both\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_by_project_and_version_responder_spaces(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def test_services_with_multiple_pages_greengrass(self):\n # Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html\n self.assertTrue(\"greengrass:CreateResourceDefinition\" in self.all_actions)\n # Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html\n self.assertTrue(\"greengrass:CreateComponentVersion\" in self.all_actions)\n results = get_actions_for_service(\"greengrass\")\n actions = [\n \"greengrass:AssociateRoleToGroup\",\n 
\"greengrass:CreateConnectorDefinition\",\n \"greengrass:CreateConnectorDefinitionVersion\",\n \"greengrass:CreateCoreDefinition\",\n \"greengrass:CreateCoreDefinitionVersion\",\n \"greengrass:CreateDeviceDefinition\",\n \"greengrass:CreateDeviceDefinitionVersion\",\n \"greengrass:CreateFunctionDefinition\",\n \"greengrass:CreateFunctionDefinitionVersion\",\n \"greengrass:CreateGroup\",\n \"greengrass:CreateGroupCertificateAuthority\",\n \"greengrass:CreateGroupVersion\",\n \"greengrass:CreateLoggerDefinition\",\n \"greengrass:CreateLoggerDefinitionVersion\",\n \"greengrass:CreateResourceDefinition\",\n \"greengrass:CreateResourceDefinitionVersion\",\n \"greengrass:CreateSoftwareUpdateJob\",\n \"greengrass:CreateSubscriptionDefinition\",\n \"greengrass:CreateSubscriptionDefinitionVersion\",\n \"greengrass:DeleteConnectorDefinition\",\n \"greengrass:DeleteCoreDefinition\",\n \"greengrass:DeleteDeviceDefinition\",\n \"greengrass:DeleteFunctionDefinition\",\n \"greengrass:DeleteGroup\",\n \"greengrass:DeleteLoggerDefinition\",\n \"greengrass:DeleteResourceDefinition\",\n \"greengrass:DeleteSubscriptionDefinition\",\n \"greengrass:DisassociateRoleFromGroup\",\n \"greengrass:Discover\",\n \"greengrass:GetAssociatedRole\",\n \"greengrass:GetBulkDeploymentStatus\",\n \"greengrass:GetConnectorDefinition\",\n \"greengrass:GetConnectorDefinitionVersion\",\n \"greengrass:GetCoreDefinition\",\n \"greengrass:GetCoreDefinitionVersion\",\n \"greengrass:GetDeploymentStatus\",\n \"greengrass:GetDeviceDefinition\",\n \"greengrass:GetDeviceDefinitionVersion\",\n \"greengrass:GetFunctionDefinition\",\n \"greengrass:GetFunctionDefinitionVersion\",\n \"greengrass:GetGroup\",\n \"greengrass:GetGroupCertificateAuthority\",\n \"greengrass:GetGroupCertificateConfiguration\",\n \"greengrass:GetGroupVersion\",\n \"greengrass:GetLoggerDefinition\",\n \"greengrass:GetLoggerDefinitionVersion\",\n \"greengrass:GetResourceDefinition\",\n \"greengrass:GetResourceDefinitionVersion\",\n \"greengrass:GetSubscriptionDefinition\",\n \"greengrass:GetSubscriptionDefinitionVersion\",\n \"greengrass:GetThingRuntimeConfiguration\",\n \"greengrass:ListBulkDeploymentDetailedReports\",\n \"greengrass:ListBulkDeployments\",\n \"greengrass:ListConnectorDefinitionVersions\",\n \"greengrass:ListConnectorDefinitions\",\n \"greengrass:ListCoreDefinitionVersions\",\n \"greengrass:ListCoreDefinitions\",\n \"greengrass:ListDeviceDefinitionVersions\",\n \"greengrass:ListDeviceDefinitions\",\n \"greengrass:ListFunctionDefinitionVersions\",\n \"greengrass:ListFunctionDefinitions\",\n \"greengrass:ListGroupCertificateAuthorities\",\n \"greengrass:ListGroupVersions\",\n \"greengrass:ListGroups\",\n \"greengrass:ListLoggerDefinitionVersions\",\n \"greengrass:ListLoggerDefinitions\",\n \"greengrass:ListResourceDefinitionVersions\",\n \"greengrass:ListResourceDefinitions\",\n \"greengrass:ListSubscriptionDefinitionVersions\",\n \"greengrass:ListSubscriptionDefinitions\",\n \"greengrass:ResetDeployments\",\n \"greengrass:StartBulkDeployment\",\n \"greengrass:StopBulkDeployment\",\n \"greengrass:UpdateConnectorDefinition\",\n \"greengrass:UpdateCoreDefinition\",\n \"greengrass:UpdateDeviceDefinition\",\n \"greengrass:UpdateFunctionDefinition\",\n \"greengrass:UpdateGroup\",\n \"greengrass:UpdateGroupCertificateConfiguration\",\n \"greengrass:UpdateLoggerDefinition\",\n \"greengrass:UpdateResourceDefinition\",\n \"greengrass:UpdateSubscriptionDefinition\",\n \"greengrass:UpdateThingRuntimeConfiguration\"\n ]\n for action in actions:\n 
self.assertTrue(action in results)\n # if action not in results:\n # print(action)", "def test_ingress_returns_envelope_unchanged():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance.ingress('envelope', 'http_headers', 'operation') == ('envelope', 'http_headers')" ]
[ "0.59183055", "0.57958144", "0.5341116", "0.51459986", "0.5056637", "0.5025083", "0.49718618", "0.49709255", "0.49639255", "0.4939532", "0.48318732", "0.47402886", "0.47391623", "0.47184974", "0.4717983", "0.47155645", "0.47113252", "0.4703302", "0.46926475", "0.46769983", "0.46675658", "0.46649936", "0.46610305", "0.46535316", "0.4649807", "0.4649175", "0.46461895", "0.46326602", "0.46007538", "0.45988023" ]
0.5989848
0
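Each row above follows the same layout: a natural-language query, the positive document it should retrieve, a metadata.objective block marking the row as triplet data, thirty hard negatives with their negative_scores, and a document_score plus document_rank for the positive. Purely as an illustration (not part of the dataset itself), the Python sketch below shows one way a parsed row could be turned into (query, positive, negative) triplets; the function name record_to_triplets and the top_k cutoff are hypothetical choices, not something the dataset prescribes.

    def record_to_triplets(record, top_k=5):
        # `record` is assumed to be one row already parsed into a dict with the
        # fields shown above: "query", "document", "negatives", "negative_scores".
        pairs = zip(record["negatives"], (float(s) for s in record["negative_scores"]))
        # Keep the negatives the retriever already scores highest ("hard" negatives).
        hardest = sorted(pairs, key=lambda p: p[1], reverse=True)[:top_k]
        return [(record["query"], record["document"], neg) for neg, _score in hardest]

Keeping only the highest-scoring negatives is one common hard-negative mining choice; the cutoff of five used here is arbitrary.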
Ensure that awsmarketplace actions from all the different awsmarketplace SAR pages are present in the IAM definition.
def test_services_with_multiple_pages_aws_marketplace(self): # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html self.assertTrue("aws-marketplace:AcceptAgreementApprovalRequest" in self.all_actions) # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html self.assertTrue("aws-marketplace:CancelChangeSet" in self.all_actions) # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html self.assertTrue("aws-marketplace:GetEntitlements" in self.all_actions) # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html self.assertTrue("aws-marketplace:DescribeBuilds" in self.all_actions) # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html self.assertTrue("aws-marketplace:BatchMeterUsage" in self.all_actions) # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html self.assertTrue("aws-marketplace:AssociateProductsWithPrivateMarketplace" in self.all_actions) # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html self.assertTrue("aws-marketplace:DescribeProcurementSystemConfiguration" in self.all_actions) results = get_actions_for_service("aws-marketplace") actions = [ "aws-marketplace:AcceptAgreementApprovalRequest", "aws-marketplace:BatchMeterUsage", "aws-marketplace:CancelAgreementRequest", "aws-marketplace:CancelChangeSet", "aws-marketplace:CompleteTask", "aws-marketplace:DescribeAgreement", "aws-marketplace:DescribeBuilds", "aws-marketplace:DescribeChangeSet", "aws-marketplace:DescribeEntity", "aws-marketplace:DescribeProcurementSystemConfiguration", "aws-marketplace:DescribeTask", "aws-marketplace:GetAgreementApprovalRequest", "aws-marketplace:GetAgreementRequest", "aws-marketplace:GetAgreementTerms", "aws-marketplace:GetEntitlements", "aws-marketplace:ListAgreementApprovalRequests", "aws-marketplace:ListAgreementRequests", "aws-marketplace:ListBuilds", "aws-marketplace:ListChangeSets", "aws-marketplace:ListEntities", "aws-marketplace:ListTasks", "aws-marketplace:MeterUsage", "aws-marketplace:PutProcurementSystemConfiguration", "aws-marketplace:RegisterUsage", "aws-marketplace:RejectAgreementApprovalRequest", "aws-marketplace:ResolveCustomer", "aws-marketplace:SearchAgreements", "aws-marketplace:StartBuild", "aws-marketplace:StartChangeSet", "aws-marketplace:Subscribe", "aws-marketplace:Unsubscribe", "aws-marketplace:UpdateAgreementApprovalRequest", "aws-marketplace:UpdateTask", "aws-marketplace:ViewSubscriptions", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def test_services_with_multiple_pages_apigateway(self):\n # API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html\n self.assertTrue(\"apigateway:AddCertificateToDomain\" in self.all_actions)\n self.assertTrue(\"apigateway:RemoveCertificateFromDomain\" in self.all_actions)\n self.assertTrue(\"apigateway:SetWebACL\" in self.all_actions)\n # API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html\n # API Gateway V2 doesn't have any unique actions in but it does have some unique resource types. 
Let's make sure those resource types are in the IAM Definition.\n # Resource types unique to API Gateway V2:\n resource_types = get_arn_types_for_service(\"apigateway\")\n resource_types = list(resource_types.keys())\n self.assertTrue(\"AccessLogSettings\" in resource_types)\n # Resource types unique to API Gateway V1:\n self.assertTrue(\"RestApi\" in resource_types)", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n 
\"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_get_actions_with_arn_type_and_access_level_case_5(self):\n\n output = get_actions_with_arn_type_and_access_level(\n \"s3\", \"object\", \"List\"\n )\n self.assertTrue(\"s3:ListMultipartUploadParts\" in output)", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass", "def legal_actions(self):\n raise NotImplementedError", "def get_legal_actions(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))", "def test_get_actions_with_arn_type_and_access_level_case_4(self):\n desired_output = [\n 'secretsmanager:ListSecrets'\n ]\n output = 
get_actions_with_arn_type_and_access_level(\n \"secretsmanager\", \"*\", \"List\"\n )\n self.assertListEqual(desired_output, output)", "def _get_legal_actions(self):\n raise NotImplementedError", "def decide_place(self, action):\n pass", "def action_space(self, curr_state):\n # Action space - allowed (position, value) combinations for the agent and environment given the current state\n\n agent_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0]))\n env_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1]))\n return (agent_actions, env_actions)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_preview_action_spaces(self):\n pass", "def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())", "def test_services_with_multiple_pages_kinesis_analytics(self):\n # Kinesis Analytics V1\n results = get_actions_for_service(\"kinesisanalytics\")\n actions = [\n \"kinesisanalytics:GetApplicationState\", # Only in v1, not v2\n \"kinesisanalytics:ListApplications\", # In both\n ]\n for action in actions:\n self.assertTrue(action in results)", "def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)", "def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)", "def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)", "def get_legal_actions(self, block_=None):\n return self._get_move_actions(block_) + self._get_mine_actions() + \\\n self._get_placement_actions(block_)", "def get_action_meanings(self) -> list[str]:\n keys = ale_py.Action.__members__.values()\n values = ale_py.Action.__members__.keys()\n mapping = dict(zip(keys, values))\n return [mapping[action] for action in self._action_set]", "def _get_placement_actions(self, exclude=None):\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({\n 'func': '_place',\n 'args': (pos,),\n 'kwargs': {'exclude': exclude}\n })\n\n return rtn", "def test_autocomplete_locations_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/locations/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name',\n 'slug', 'text_for_apartments_search',\n 'text_for_complexes_search', 'type_name']\n # ac_keys_full = ac_keys + [\"metro_stations\"]\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n # self.check_list_item_keys(ac[\"ancestors\"], ac_keys_full)\n self.assertIsInstance(ac['id'], int)\n self.assertIsInstance(ac['is_region'], bool)\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['prepositional_name'], str)\n self.assertIsInstance(ac['slug'], str)\n 
self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None)))\n self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None)))\n self.assertIsInstance(ac['type_name'], str)", "def test_intent_support(self):\n dispatcher = self.get_dispatcher()\n for intent in self.get_intents():\n self.assertIsNot(dispatcher(intent), None)", "def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # -------------------------------\n # S3 Bucket for Manifests\n # -------------------------------\n\n qs_gov_bucket = s3.Bucket(\n self,\n id=f\"{cf.PROJECT}-ManifestBucket\",\n )\n bucket_name = qs_gov_bucket.bucket_name\n\n # -------------------------------\n # IAM\n # -------------------------------\n\n list_roles_policy = iam.ManagedPolicy(\n self,\n id=f\"{cf.PROJECT}-ListRolesPolicy\",\n description=None,\n managed_policy_name=None,\n path=\"/\",\n statements=[\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\"*\"],\n actions=[\"iam:ListRoles\", \"iam:ListAccountAliases\"],\n )\n ],\n )\n\n federated_quicksight_policy = iam.ManagedPolicy(\n self,\n id=f\"{cf.PROJECT}-FederatedQuickSightPolicy\",\n managed_policy_name=f\"{cf.PROJECT}-FederatedQuickSightPolicy\",\n statements=[\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\n f\"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}\"\n ],\n actions=[\"sts:AssumeRoleWithSAML\"],\n conditions={\n \"StringEquals\": {\n \"saml:aud\": \"https://signin.aws.amazon.com/saml\"\n }\n },\n )\n ],\n )\n\n okta_federated_principal = iam.FederatedPrincipal(\n federated=f\"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}\",\n assume_role_action=\"sts:AssumeRoleWithSAML\",\n conditions={\n \"StringEquals\": {\"SAML:aud\": \"https://signin.aws.amazon.com/saml\"}\n },\n )\n\n federated_quicksight_role = iam.Role(\n self,\n id=f\"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}\",\n role_name=f\"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}\",\n assumed_by=okta_federated_principal,\n description=\"Allow Okta to Federate Login & User Creation to QuickSight\",\n managed_policies=[federated_quicksight_policy],\n )\n\n\n iam.User(\n self,\n id=f\"{cf.PROJECT}-OktaSSOUser\",\n user_name=f\"{cf.PROJECT}-OktaSSOUser\",\n managed_policies=[list_roles_policy],\n )\n\n\n # -------------------------------\n # Lambda Functions\n # -------------------------------\n\n # iam role for Lambdas\n\n qs_governance_policy = iam.ManagedPolicy(\n self,\n id=f\"{cf.PROJECT}-QuickSightGovernancePolicy\",\n managed_policy_name=f\"{cf.PROJECT}-QuickSightGovernancePolicy\",\n statements=[\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\n f\"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.OKTA_SECRET}*\"\n ],\n actions=[\n \"secretsmanager:GetSecretValue\",\n ],\n ),\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[\"*\"],\n actions=[\"quicksight:*\", \"ds:*\"],\n ),\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n resources=[f\"arn:aws:s3:::{bucket_name}/*\"],\n actions=[\"s3:Get*\", \"s3:Put*\"],\n ),\n ],\n )\n\n quicksight_permission_mapping_role = iam.Role(\n self,\n id=f\"{cf.PROJECT}-QuickSightPermissionMappingRole\",\n assumed_by=iam.ServicePrincipal(\"lambda.amazonaws.com\"),\n managed_policies=[\n iam.ManagedPolicy.from_aws_managed_policy_name(\n \"service-role/AWSLambdaBasicExecutionRole\"\n ),\n qs_governance_policy,\n ],\n )\n\n # Lambdas\n\n get_okta_info_lambda = _lambda.Function(\n self,\n id=f\"{cf.PROJECT}-GetOktaInfo\",\n 
handler=\"get_okta_info.handler\",\n role=quicksight_permission_mapping_role,\n runtime=_lambda.Runtime.PYTHON_3_8,\n code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, \"pkg\")),\n function_name=f\"{cf.PROJECT}-GetOktaInfo\",\n environment={\n \"OKTA_SECRET\": cf.OKTA_SECRET,\n \"OKTA_ROLE_NAME\": cf.OKTA_ROLE_NAME,\n \"QS_GOVERNANCE_BUCKET\": bucket_name,\n \"QS_USER_GOVERNANCE_KEY\": cf.QS_USER_GOVERNANCE_KEY,\n },\n memory_size=256,\n timeout=core.Duration.seconds(180),\n )\n\n # Lamda Okta to QuickSight Mappers\n\n qs_user_governance_lambda = _lambda.Function(\n self,\n id=f\"{cf.PROJECT}-QSUserGovernance\",\n handler=\"qs_user_gov.handler\",\n role=quicksight_permission_mapping_role,\n runtime=_lambda.Runtime.PYTHON_3_8,\n code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, \"pkg\")),\n function_name=f\"{cf.PROJECT}-QSUserGovernance\",\n environment={\n \"OKTA_ROLE_NAME\": f\"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}\",\n \"QS_GOVERNANCE_BUCKET\": bucket_name,\n \"QS_USER_GOVERNANCE_KEY\": cf.QS_USER_GOVERNANCE_KEY,\n \"OKTA_GROUP_QS_PREFIX\": cf.OKTA_GROUP_QS_PREFIX,\n \"QS_ADMIN_OKTA_GROUP\": cf.QS_ADMIN_OKTA_GROUP,\n \"QS_AUTHOR_OKTA_GROUP\": cf.QS_AUTHOR_OKTA_GROUP,\n \"QS_READER_OKTA_GROUP\": cf.QS_READER_OKTA_GROUP\n },\n memory_size=256,\n timeout=core.Duration.seconds(180),\n )\n\n qs_asset_governance_lambda = _lambda.Function(\n self,\n id=f\"{cf.PROJECT}-QSAssetGovernance\",\n handler=\"qs_asset_gov.handler\",\n role=quicksight_permission_mapping_role,\n runtime=_lambda.Runtime.PYTHON_3_8,\n code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, \"pkg\")),\n function_name=f\"{cf.PROJECT}-QSAssetGovernance\",\n environment={\n \"QS_GOVERNANCE_BUCKET\": bucket_name,\n \"QS_ASSET_GOVERNANCE_KEY\": cf.QS_ASSET_GOVERNANCE_KEY,\n },\n memory_size=256,\n timeout=core.Duration.seconds(180),\n )\n\n # -------------------------------\n # Events\n # -------------------------------\n\n qs_user_governance_lambda.add_event_source(\n lambda_event_sources.S3EventSource(\n bucket=qs_gov_bucket,\n events=[s3.EventType.OBJECT_CREATED],\n filters=[s3.NotificationKeyFilter(prefix=cf.QS_USER_GOVERNANCE_KEY)],\n )\n )\n\n qs_asset_governance_lambda.add_event_source(\n lambda_event_sources.S3EventSource(\n bucket=qs_gov_bucket,\n events=[s3.EventType.OBJECT_CREATED],\n filters=[s3.NotificationKeyFilter(prefix=cf.QS_ASSET_GOVERNANCE_KEY)],\n )\n )\n\n lambda_schedule = events.Schedule.rate(core.Duration.days(1))\n get_okta_info_target = events_targets.LambdaFunction(\n handler=get_okta_info_lambda\n )\n events.Rule(\n self,\n id=f\"{cf.PROJECT}-GetOktaInfoScheduledEvent\",\n description=\"The once per day CloudWatch event trigger for the Lambda\",\n enabled=True,\n schedule=lambda_schedule,\n targets=[get_okta_info_target],\n )\n\n # -------------------------------\n # S3 Object Deployment - QS Asset Manifest\n # -------------------------------\n\n asset_manifest_deploy = s3_deploy.BucketDeployment(\n self,\n id=f\"{cf.PROJECT}-AssetManifestDeploy\",\n sources=[s3_deploy.Source.asset(\n os.path.join(cf.PATH_ROOT, 'qs_config')\n )],\n destination_bucket=qs_gov_bucket\n )", "def get_actions(self, request):\n actions = super().get_actions(request)\n if not settings.PUBLISHER_CODE:\n del actions['create_cwr']\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n 
assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"" ]
[ "0.58062184", "0.57554114", "0.5695702", "0.5673701", "0.5447731", "0.5414008", "0.53181934", "0.5090683", "0.5057209", "0.49688128", "0.49623215", "0.49491638", "0.49443293", "0.49354288", "0.4900723", "0.48934472", "0.48312107", "0.48288488", "0.47950754", "0.4768656", "0.4768656", "0.47588596", "0.47535774", "0.46872535", "0.46716824", "0.46657667", "0.46551266", "0.4650297", "0.46221343", "0.46129328" ]
0.66148233
0
Ensure that greengrass v1 and greengrass v2 actions are both present in the greengrass namespace
def test_services_with_multiple_pages_greengrass(self): # Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html self.assertTrue("greengrass:CreateResourceDefinition" in self.all_actions) # Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html self.assertTrue("greengrass:CreateComponentVersion" in self.all_actions) results = get_actions_for_service("greengrass") actions = [ "greengrass:AssociateRoleToGroup", "greengrass:CreateConnectorDefinition", "greengrass:CreateConnectorDefinitionVersion", "greengrass:CreateCoreDefinition", "greengrass:CreateCoreDefinitionVersion", "greengrass:CreateDeviceDefinition", "greengrass:CreateDeviceDefinitionVersion", "greengrass:CreateFunctionDefinition", "greengrass:CreateFunctionDefinitionVersion", "greengrass:CreateGroup", "greengrass:CreateGroupCertificateAuthority", "greengrass:CreateGroupVersion", "greengrass:CreateLoggerDefinition", "greengrass:CreateLoggerDefinitionVersion", "greengrass:CreateResourceDefinition", "greengrass:CreateResourceDefinitionVersion", "greengrass:CreateSoftwareUpdateJob", "greengrass:CreateSubscriptionDefinition", "greengrass:CreateSubscriptionDefinitionVersion", "greengrass:DeleteConnectorDefinition", "greengrass:DeleteCoreDefinition", "greengrass:DeleteDeviceDefinition", "greengrass:DeleteFunctionDefinition", "greengrass:DeleteGroup", "greengrass:DeleteLoggerDefinition", "greengrass:DeleteResourceDefinition", "greengrass:DeleteSubscriptionDefinition", "greengrass:DisassociateRoleFromGroup", "greengrass:Discover", "greengrass:GetAssociatedRole", "greengrass:GetBulkDeploymentStatus", "greengrass:GetConnectorDefinition", "greengrass:GetConnectorDefinitionVersion", "greengrass:GetCoreDefinition", "greengrass:GetCoreDefinitionVersion", "greengrass:GetDeploymentStatus", "greengrass:GetDeviceDefinition", "greengrass:GetDeviceDefinitionVersion", "greengrass:GetFunctionDefinition", "greengrass:GetFunctionDefinitionVersion", "greengrass:GetGroup", "greengrass:GetGroupCertificateAuthority", "greengrass:GetGroupCertificateConfiguration", "greengrass:GetGroupVersion", "greengrass:GetLoggerDefinition", "greengrass:GetLoggerDefinitionVersion", "greengrass:GetResourceDefinition", "greengrass:GetResourceDefinitionVersion", "greengrass:GetSubscriptionDefinition", "greengrass:GetSubscriptionDefinitionVersion", "greengrass:GetThingRuntimeConfiguration", "greengrass:ListBulkDeploymentDetailedReports", "greengrass:ListBulkDeployments", "greengrass:ListConnectorDefinitionVersions", "greengrass:ListConnectorDefinitions", "greengrass:ListCoreDefinitionVersions", "greengrass:ListCoreDefinitions", "greengrass:ListDeviceDefinitionVersions", "greengrass:ListDeviceDefinitions", "greengrass:ListFunctionDefinitionVersions", "greengrass:ListFunctionDefinitions", "greengrass:ListGroupCertificateAuthorities", "greengrass:ListGroupVersions", "greengrass:ListGroups", "greengrass:ListLoggerDefinitionVersions", "greengrass:ListLoggerDefinitions", "greengrass:ListResourceDefinitionVersions", "greengrass:ListResourceDefinitions", "greengrass:ListSubscriptionDefinitionVersions", "greengrass:ListSubscriptionDefinitions", "greengrass:ResetDeployments", "greengrass:StartBulkDeployment", "greengrass:StopBulkDeployment", "greengrass:UpdateConnectorDefinition", "greengrass:UpdateCoreDefinition", "greengrass:UpdateDeviceDefinition", "greengrass:UpdateFunctionDefinition", "greengrass:UpdateGroup", "greengrass:UpdateGroupCertificateConfiguration", 
"greengrass:UpdateLoggerDefinition", "greengrass:UpdateResourceDefinition", "greengrass:UpdateSubscriptionDefinition", "greengrass:UpdateThingRuntimeConfiguration" ] for action in actions: self.assertTrue(action in results) # if action not in results: # print(action)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def has_action2(self, feature):\n return feature in self._action2", "def greengrass_v2(self) -> Optional[pulumi.Input['GatewayGreengrassV2Args']]:\n return pulumi.get(self, \"greengrass_v2\")", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. 
This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def verifyActionCenterRts():\n pass", "def test_10_unsupported_actions(self):\n\n def __count_pulled_packages(pth):\n self.pkgrepo(\"list -F tsv -H -s {0}\".format(pth))\n return len(self.output.splitlines())\n\n def __check_errout(pfmri):\n s1 = \"invalid action in package {0}\".format(pfmri)\n s2 = \"Malformed action in package '{0}'\".format(pfmri)\n self.assert_(s1 in self.errout or s2 in self.errout,\n \"{0} not in error\".format(pfmri))\n\n def __empty_repo(uri, arg_string):\n if uri.startswith(\"http://\"):\n rurl = self.dcs[4].get_repo_url()\n self.pkgrepo(\"remove -s {0} '*'\".format(rurl))\n # Refresh the depot to get it to realize that\n # the catalog has changed.\n self.dcs[4].refresh()\n elif arg_string:\n portable.remove(uri)\n else:\n self.pkgrepo(\"remove -s {0} '*'\".format(uri))\n\n\n def __test_rec(duri, arg_string, pfmris):\n self.debug(\"\\n\\nNow pkgrecv'ing to {0}\".format(duri))\n\n # It's necessary to use the -D option below because\n # otherwise pkgrecv will fail because the manifest\n # doesn't validate.\n\n novalidate = \"-D manifest_validate=Never \"\n # Check that invalid action attributes don't cause\n # tracebacks.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n # Check that other packages are retrieved and the exit\n # code reflects partial success.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.rurl1 = self.dcs[1].get_repo_url()\n repo = self.dcs[1].get_repo()\n rd = repo.get_pub_rstore()\n pfmri = fmri.PkgFmri(self.published[4])\n mp = rd.manifest(pfmri)\n\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = original_txt.replace(\"type=require\", \"type=foo\")\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n rpth = tempfile.mkdtemp(dir=self.test_root)\n self.pkgrepo(\"create {0}\".format(rpth))\n adir = tempfile.mkdtemp(dir=self.test_root)\n\n # The 
__empty repo function above assumes that the only http uri\n # used is the one for depot number 4.\n dest_uris = ((rpth, \"\"), (self.durl4, \"\"),\n (os.path.join(adir, \"archive.p5p\"), \"-a\"))\n for duri, arg_string in dest_uris:\n __test_rec(duri, arg_string, [self.published[4]])\n\n # Test that multiple packages failing are handled correctly.\n for i in range(5, 7):\n pfmri = fmri.PkgFmri(self.published[i])\n mp = rd.manifest(pfmri)\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = \"foop\\n\" + original_txt\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n for duri, arg_string, in dest_uris:\n __test_rec(duri, arg_string, self.published[4:7])", "def check_type(self):\n if self.action < 0 or self.action >= len(_action_args_dict):\n raise GameActionError('Invalid action type ({0})'.format(self.action))", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n 
\"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_by_project_and_version_responder_spaces(self):\n pass", "def check(self):\n # get the data from shotgun\n app = self.parent.app\n context = app.context\n # get asset type\n filters = [[\"id\", \"is\", context.entity[\"id\"]]]\n fields = [\"sg_asset_type\"]\n assetType = app.shotgun.find_one(\n \"Asset\", filters=filters, fields=fields)[\"sg_asset_type\"]\n # get step short name\n filters = [[\"id\", \"is\", context.step[\"id\"]]]\n fields = [\"short_name\"]\n stepShortName = app.shotgun.find_one(\n \"Step\", filters=filters, fields=fields)[\"short_name\"]\n\n try:\n assetNode = gNodes.getTopGNode()\n except:\n assetNode = None\n\n if assetNode:\n metadataCode = assetNode.grid_code.get()\n metadataAssetType = assetNode.grid_type.get(asString=True)\n metadataPipeStep = assetNode.grid_pipeStep.get(asString=True)\n if not 
(assetType == metadataAssetType and\n stepShortName == metadataPipeStep and\n context.entity[\"name\"] == metadataCode):\n self.status = self.errorMode\n self.addError(\"Context and asset node metadata don't match\")\n self.errorMessage = \"Context and asset node metadata don't match\"\n else:\n self.status = \"OK\"\n else:\n self.status = \"OK\"", "def __init__(__self__, *,\n greengrass: Optional[pulumi.Input['GatewayGreengrassArgs']] = None,\n greengrass_v2: Optional[pulumi.Input['GatewayGreengrassV2Args']] = None):\n if greengrass is not None:\n pulumi.set(__self__, \"greengrass\", greengrass)\n if greengrass_v2 is not None:\n pulumi.set(__self__, \"greengrass_v2\", greengrass_v2)", "def test_lti20_rest_good_dispatch(self):\r\n for ginput, expected in self.GOOD_DISPATCH_INPUTS:\r\n self.assertEquals(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)", "def check_if_can_evolve(self):\n # This sounds similar to generate actions\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_by_project_and_version_responder(self):\n pass", "def is_valid_git_action(action):\n\n return action in GIT_ACTIONS", "def different_actions(old_action: PersistentAction, new_action: PersistentAction) -> bool:\n if Invocation.different_required(old_action.required, new_action.required):\n return True\n\n if old_action.command != new_action.command:\n if old_action.command is None:\n old_action_kind = \"a phony command\"\n else:\n old_action_kind = \"the command: \" + \" \".join(old_action.command)\n\n if new_action.command is None:\n new_action_kind = \"a phony command\"\n else:\n new_action_kind = \"the command: \" + \" \".join(new_action.command)\n\n Logger.why(f\"Must run actions because changed {old_action_kind} \" f\"into {new_action_kind}\")\n return True\n\n return False", "def test_mr_green_genes(self):\n self.validate_goal_for('game-20130104-075510-644e1cc8.html',\n u'cyncat',\n 'MrGreenGenes')", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def legal_actions(self):\n raise NotImplementedError", "def test_request_with_two_bundles(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n self.assertTrue(validate_request(request, self.policy))", "def test_subscriber_access_if_vsg2_goes_down(self):", "def actions():\n pass", "def test_subscriber_access_for_two_vsg_services(self):", "def check_global_request(self, kind, msg):\n return False", "def check(self):\r\n for action in self._actions:\r\n action.check()", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def check_stability(self):", "def test_action_independence_multiple(self):\n DST1, DST2 = ('SET_FIELD', ('IPV4_DST', 0x1)), ('SET_FIELD', ('IPV4_DST', 0x2))\n SRC1, SRC2 = ('SET_FIELD', ('IPV4_SRC', 0x1)), ('SET_FIELD', ('IPV4_SRC', 0x2))\n OUT1, OUT2 = ('OUTPUT', 1), ('OUTPUT', 2)\n n1 = 
normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 0x0, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DST1, SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=0)\n ], match_redundancy=True)\n \"\"\"\n dst:1, src:2 -> output:1, dst:2, src:1, output:2\n dst:0/31 -> dst:1, src:2, output:1, dst:2, src:1, output:2\n \"\"\"\n n2 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 1, None),\n ('IPV4_SRC', 2, None)]),\n instructions=inst_from_acts([OUT1, DST2, SRC1, OUT2])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x0, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DST1, SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=0)\n ], match_redundancy=True)\n \"\"\"\n dst:1 -> src:2, output:1, dst:2, src:1, output:2\n dst:0/31 -> dst:1, src:2, output:1, dst:2, src:1, output:2\n \"\"\"\n n3 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 1, None)]),\n instructions=inst_from_acts([SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x0, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DST1, SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=0)\n ], match_redundancy=True)\n\n self.assertTrue(check_equal(n1, n2))\n self.assertTrue(check_equal(n2, n3))\n self.assertTrue(check_equal(n1, n3))" ]
[ "0.62013024", "0.5185163", "0.51849663", "0.50805354", "0.50188154", "0.49678308", "0.49544063", "0.49272656", "0.49225903", "0.49052584", "0.4849131", "0.48472935", "0.47611517", "0.4737386", "0.47336262", "0.46869755", "0.46836528", "0.4678428", "0.46226096", "0.46140948", "0.4605737", "0.46050668", "0.45999947", "0.4576194", "0.45543835", "0.45410335", "0.45342276", "0.45339355", "0.4508958", "0.44931182" ]
0.6528508
0
Ensure that elb v1 and elb v2 actions are both present in the elasticloadbalancing namespace
def test_services_with_multiple_pages_elb(self): results = get_actions_for_service("elasticloadbalancing") actions = [ "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", "elasticloadbalancing:AttachLoadBalancerToSubnets", "elasticloadbalancing:ConfigureHealthCheck", "elasticloadbalancing:CreateAppCookieStickinessPolicy", "elasticloadbalancing:CreateLBCookieStickinessPolicy", "elasticloadbalancing:CreateLoadBalancerListeners", "elasticloadbalancing:CreateLoadBalancerPolicy", "elasticloadbalancing:DeleteLoadBalancerListeners", "elasticloadbalancing:DeleteLoadBalancerPolicy", "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancerPolicies", "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", "elasticloadbalancing:DetachLoadBalancerFromSubnets", "elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:RegisterInstancesWithLoadBalancer", "elasticloadbalancing:SetLoadBalancerListenerSSLCertificate", "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def test_request_with_two_bundles(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n self.assertTrue(validate_request(request, self.policy))", "def test_match_endpoints():\n\n service_names = [\n \"iap-ingress-kfctl-8c9b.endpoints.kubeflow-ci-deployment.cloud.goog\",\n ]\n\n for s in service_names:\n assert cleanup_ci.is_match(s, patterns=cleanup_ci.E2E_PATTERNS)", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. 
entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def test_box_actions_out_of_bound(env: gym.Env):\n env.reset(seed=42)\n\n oob_env = gym.make(env.spec.id, disable_env_checker=True)\n oob_env.reset(seed=42)\n\n assert isinstance(env.action_space, spaces.Box)\n dtype = env.action_space.dtype\n upper_bounds = env.action_space.high\n lower_bounds = env.action_space.low\n\n for i, (is_upper_bound, is_lower_bound) in enumerate(\n zip(env.action_space.bounded_above, env.action_space.bounded_below)\n ):\n if is_upper_bound:\n obs, _, _, _, _ = env.step(upper_bounds)\n oob_action = upper_bounds.copy()\n oob_action[i] += np.cast[dtype](OOB_VALUE)\n\n assert oob_action[i] > upper_bounds[i]\n oob_obs, _, _, _, _ = oob_env.step(oob_action)\n\n assert np.alltrue(obs == oob_obs)\n\n if is_lower_bound:\n obs, _, _, _, _ = env.step(\n lower_bounds\n ) # `env` is unwrapped, and in new step API\n oob_action = lower_bounds.copy()\n oob_action[i] -= np.cast[dtype](OOB_VALUE)\n\n assert oob_action[i] < lower_bounds[i]\n oob_obs, _, _, _, _ = oob_env.step(oob_action)\n\n assert np.alltrue(obs == oob_obs)\n\n env.close()", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n 
\"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def check_deployment(version_stack_name, app_name):\n\n print(\"Polling Target Group ({}) until a successful state is reached...\".format(version_stack_name))\n elbv2 
= boto3.client('elbv2')\n waiter = elbv2.get_waiter('target_in_service')\n cloudformation = boto3.client('cloudformation')\n response = cloudformation.describe_stack_resources(\n StackName=version_stack_name,\n LogicalResourceId='ALBTargetGroup'\n )\n target_group = response['StackResources'][0]['PhysicalResourceId']\n start_time = datetime.datetime.now()\n try:\n waiter.wait(TargetGroupArn=target_group)\n except botocore.exceptions.WaiterError:\n print('Health check did not pass!')\n response = cloudformation.describe_stack_resources(\n StackName=version_stack_name,\n LogicalResourceId='ECSService'\n )\n service = response['StackResources'][0]['PhysicalResourceId']\n print('Outputting events for service {}:'.format(service))\n response = cloudformation.describe_stack_resources(\n StackName=\"ECS-{}\".format(app_name),\n LogicalResourceId='ECSCluster'\n )\n cluster = response['StackResources'][0]['PhysicalResourceId']\n ecs = boto3.client('ecs')\n response = ecs.describe_services(\n cluster=cluster,\n services=[service]\n )\n for event in [x['message'] for x in response['services'][0]['events']]:\n print(event)\n# print('Deleting CloudFormation stack...')\n# response = cloudformation.delete_stack(\n# StackName=\"MV-{realm}-{app_name}-{version}-{env}\".format(env=os.environ['ENV'], app_name=os.environ['ECS_APP_NAME'], version=os.environ['BUILD_VERSION'], realm=os.environ['REALM'])\n# )\n# waiter = cf.get_waiter('stack_delete_complete')\n# waiter.wait(\n# StackName=\"MV-{realm}-{app_name}-{version}-{env}\".format(env=os.environ['ENV'], app_name=os.environ['ECS_APP_NAME'], version=os.environ['BUILD_VERSION'], realm=os.environ['REALM'])\n# )\n# print('CloudFormation stack deleted.')\n elapsed_time = datetime.datetime.now() - start_time\n print('Health check passed in {}'.format(elapsed_time))\n print(\"Done.\")", "def test_redeploy_edges(self):\n pass", "def test_action_independence_multiple(self):\n DST1, DST2 = ('SET_FIELD', ('IPV4_DST', 0x1)), ('SET_FIELD', ('IPV4_DST', 0x2))\n SRC1, SRC2 = ('SET_FIELD', ('IPV4_SRC', 0x1)), ('SET_FIELD', ('IPV4_SRC', 0x2))\n OUT1, OUT2 = ('OUTPUT', 1), ('OUTPUT', 2)\n n1 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 0x0, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DST1, SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=0)\n ], match_redundancy=True)\n \"\"\"\n dst:1, src:2 -> output:1, dst:2, src:1, output:2\n dst:0/31 -> dst:1, src:2, output:1, dst:2, src:1, output:2\n \"\"\"\n n2 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 1, None),\n ('IPV4_SRC', 2, None)]),\n instructions=inst_from_acts([OUT1, DST2, SRC1, OUT2])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x0, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DST1, SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=0)\n ], match_redundancy=True)\n \"\"\"\n dst:1 -> src:2, output:1, dst:2, src:1, output:2\n dst:0/31 -> dst:1, src:2, output:1, dst:2, src:1, output:2\n \"\"\"\n n3 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 1, None)]),\n instructions=inst_from_acts([SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x0, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DST1, SRC2, OUT1, DST2, SRC1, OUT2])),\n Rule(priority=0)\n ], match_redundancy=True)\n\n self.assertTrue(check_equal(n1, n2))\n self.assertTrue(check_equal(n2, n3))\n self.assertTrue(check_equal(n1, n3))", "def validate_availability_zones(self, context, resource_type,\n availability_zones):", "def has_action2(self, feature):\n return feature in self._action2", 
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_by_project_and_version_responder_spaces(self):\n pass", "def verifyActionCenterRts():\n pass", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def test_services_with_multiple_pages_aws_marketplace(self):\n # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems\n # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html\n self.assertTrue(\"aws-marketplace:AcceptAgreementApprovalRequest\" in self.all_actions)\n # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html\n self.assertTrue(\"aws-marketplace:CancelChangeSet\" in self.all_actions)\n # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html\n self.assertTrue(\"aws-marketplace:GetEntitlements\" in self.all_actions)\n # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html\n self.assertTrue(\"aws-marketplace:DescribeBuilds\" in self.all_actions)\n # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html\n self.assertTrue(\"aws-marketplace:BatchMeterUsage\" in self.all_actions)\n # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html\n self.assertTrue(\"aws-marketplace:AssociateProductsWithPrivateMarketplace\" in self.all_actions)\n # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html\n self.assertTrue(\"aws-marketplace:DescribeProcurementSystemConfiguration\" in self.all_actions)\n\n results = get_actions_for_service(\"aws-marketplace\")\n actions = [\n \"aws-marketplace:AcceptAgreementApprovalRequest\",\n \"aws-marketplace:BatchMeterUsage\",\n \"aws-marketplace:CancelAgreementRequest\",\n \"aws-marketplace:CancelChangeSet\",\n \"aws-marketplace:CompleteTask\",\n \"aws-marketplace:DescribeAgreement\",\n \"aws-marketplace:DescribeBuilds\",\n \"aws-marketplace:DescribeChangeSet\",\n \"aws-marketplace:DescribeEntity\",\n \"aws-marketplace:DescribeProcurementSystemConfiguration\",\n \"aws-marketplace:DescribeTask\",\n \"aws-marketplace:GetAgreementApprovalRequest\",\n \"aws-marketplace:GetAgreementRequest\",\n \"aws-marketplace:GetAgreementTerms\",\n \"aws-marketplace:GetEntitlements\",\n \"aws-marketplace:ListAgreementApprovalRequests\",\n \"aws-marketplace:ListAgreementRequests\",\n \"aws-marketplace:ListBuilds\",\n \"aws-marketplace:ListChangeSets\",\n \"aws-marketplace:ListEntities\",\n \"aws-marketplace:ListTasks\",\n \"aws-marketplace:MeterUsage\",\n \"aws-marketplace:PutProcurementSystemConfiguration\",\n \"aws-marketplace:RegisterUsage\",\n \"aws-marketplace:RejectAgreementApprovalRequest\",\n 
\"aws-marketplace:ResolveCustomer\",\n \"aws-marketplace:SearchAgreements\",\n \"aws-marketplace:StartBuild\",\n \"aws-marketplace:StartChangeSet\",\n \"aws-marketplace:Subscribe\",\n \"aws-marketplace:Unsubscribe\",\n \"aws-marketplace:UpdateAgreementApprovalRequest\",\n \"aws-marketplace:UpdateTask\",\n \"aws-marketplace:ViewSubscriptions\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def check_eapi(self, eapi):\n\t\treturn True", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg2'))", "def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def check(self):\n illegalNamespaces = list()\n\n prog = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}:$\")\n\n for assetNode in pm.ls(type=\"gAsset\"):\n if assetNode.isReferenced() and not prog.match(assetNode.namespace()):\n illegalNamespaces.append(assetNode)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s has a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s asset(s) have a illegal namespace\" % (\n len(illegalNamespaces))", "def test_create_role_binding_restriction_for_all_namespaces(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass", "def test_action_independence_single(self):\n SF1, OUT = ('SET_FIELD', ('IPV4_DST', 0x01010101)), ('OUTPUT', 
6)\n DEC_TTL = ('DEC_NW_TTL', None)\n # 0.1.1.0/30 -> ip:1.1.1.1, output:1\n n1 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 0x01010100, 0xFFFFFFFE)]),\n instructions=inst_from_acts([SF1, OUT])),\n Rule(priority=0)\n ])\n # 1.1.1.1/32 -> output:1\n # 1.1.1.0/31 -> ip:1.1.1.1, output:1\n n2 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 0x01010101, None)]),\n instructions=inst_from_acts([OUT])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x01010100, 0xFFFFFFFE)]),\n instructions=inst_from_acts([SF1, OUT])),\n Rule(priority=0)\n ])\n # 1.1.1.0/32 -> ip:1.1.1.1, output1\n # 1.1.1.0/31 -> output:1\n n3 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 0x01010100, None)]),\n instructions=inst_from_acts([SF1, OUT])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x01010100, 0xFFFFFFFE)]),\n instructions=inst_from_acts([OUT])),\n Rule(priority=0)\n ])\n n4 = normalise([\n Rule(priority=10,\n match=Match([('IPV4_DST', 0x01010101, None)]),\n instructions=inst_from_acts([OUT])),\n Rule(priority=9,\n match=Match([('IPV4_DST', 0x01010100, 0xFFFFFFFE)]),\n instructions=inst_from_acts([DEC_TTL, SF1, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n2))\n self.assertFalse(check_equal(n1, n4))\n self.assertTrue(check_equal(n2, n3))\n self.assertTrue(check_equal(n1, n3))", "def test_ec2_up(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def check_host_activation(cls, ksm_merge_across_nodes):\n testflow.step(\"Deactivate the host %s\", sla_conf.HOSTS[0])\n assert ll_hosts.deactivate_host(\n positive=True,\n host=sla_conf.HOSTS[0],\n host_resource=sla_conf.VDS_HOSTS[0]\n )\n cls.update_merge_across_nodes_parameter(\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )\n assert ll_hosts.activate_host(\n positive=True,\n host=sla_conf.HOSTS[0],\n host_resource=sla_conf.VDS_HOSTS[0]\n )\n testflow.step(\n \"%s: wait until KSM merge across nodes will be equal to %s\",\n sla_conf.VDS_HOSTS[0], ksm_merge_across_nodes\n )\n assert sla_helpers.wait_for_numa_aware_ksm_status(\n resource=sla_conf.VDS_HOSTS[0],\n expected_value=ksm_merge_across_nodes\n )", "def check(self):\n BadNamespaces = list()\n\n for namespace in pm.listNamespaces():\n BadNamespaces.append(namespace)\n\n if not BadNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = namespace\n for namespace in BadNamespaces:\n self.addError(\"namespace %s exist\" % namespace)\n self.errorMessage = \"%s namespace\" % (len(BadNamespaces))", "def test_subscriber_access_if_vsg2_goes_down(self):", "def test_validate_bookstore_endpoint():\n expected = {\n \"bookstore_valid\": False,\n \"publish_valid\": False,\n \"archive_valid\": False,\n \"clone_valid\": True,\n }\n settings = BookstoreSettings(s3_endpoint_url=\"\")\n assert validate_bookstore(settings) == expected", "def verifyActionCenterFirewall():\n pass" ]
[ "0.7524008", "0.5502818", "0.53605366", "0.5155062", "0.50617534", "0.5045359", "0.49514994", "0.4864293", "0.48232004", "0.47804296", "0.47697622", "0.4720676", "0.47133136", "0.4713231", "0.47080573", "0.4707765", "0.47045755", "0.46887946", "0.46873748", "0.4683964", "0.4671751", "0.4657946", "0.4651486", "0.46361938", "0.46213722", "0.46195677", "0.46189997", "0.4603296", "0.45958737", "0.45919067" ]
0.5852793
1
Ensure that lex v1 and lex v2 actions are both present in the lex namespace
def test_services_with_multiple_pages_lex(self): # Lex V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlex.html self.assertTrue("lex:DeleteUtterances" in self.all_actions) # Lex V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlexv2.html self.assertTrue("lex:ListBotLocales" in self.all_actions) results = get_actions_for_service("lex") actions = [ "lex:CreateIntentVersion", "lex:CreateSlotTypeVersion", "lex:DeleteBotChannelAssociation", "lex:DeleteIntentVersion", "lex:DeleteSlotTypeVersion", "lex:GetBot", "lex:GetBotAlias", "lex:GetBotAliases", "lex:GetBotChannelAssociation", "lex:GetBotChannelAssociations", "lex:GetBotVersions", "lex:GetBots", "lex:GetBuiltinIntent", "lex:GetBuiltinIntents", "lex:GetBuiltinSlotTypes", "lex:GetExport", "lex:GetImport", "lex:GetIntent", "lex:GetIntentVersions", "lex:GetIntents", "lex:GetMigration", "lex:GetMigrations", "lex:GetSlotType", "lex:GetSlotTypeVersions", "lex:GetSlotTypes", "lex:GetUtterancesView", "lex:PostContent", "lex:PostText", "lex:PutBot", "lex:PutBotAlias", "lex:PutIntent", "lex:PutSlotType", "lex:StartMigration", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_grammar_actions():\n grammar = \"\"\"\n S: A B C;\n @nonterm_action\n C: A B;\n A: \"a\";\n @term_action\n B: \"b\";\n \"\"\"\n\n called = [False, False]\n\n def nonterm_action(_, __):\n called[0] = True\n\n def term_action(_, __):\n called[1] = True\n\n my_actions = {\n \"nonterm_action\": nonterm_action,\n \"term_action\": term_action,\n }\n\n g = Grammar.from_string(grammar)\n p = Parser(g, actions=my_actions)\n assert p.parse(\"a b a b\")\n assert all(called)", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def dispatch(lex):\n\n if lex.intent == \"BasicHelp\":\n return help_user(lex)\n else:\n return not_understood(lex)", "def InitActionCheck(initActionList, init):\n for actions in initActionList:\n action_class = getNameFromIRI(actions.is_a[0].iri)\n # if the action is a SpeedAction class\n if action_class == \"SpeedAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n target_speed = actions.has_target_speed[0]\n ontology_transition_dynamics = actions.has_transition_dynamics[0]\n xosc_transition_dynamics = checkTransitionDynamics(ontology_transition_dynamics)\n init.add_init_action(action_entity_ref, xosc.AbsoluteSpeedAction(target_speed, xosc_transition_dynamics))\n continue\n #if the action is TeleportAction\n if action_class == \"TeleportAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n # if the action has position as parameter set\n s: int = 0\n offset = 0\n lane_id = 0\n road_id = 0\n if len(actions.has_position) != 0:\n position = actions.has_position[0]\n if len(position.has_s) != 0:\n s = position.has_s[0]\n\n if len(position.has_offset) != 0:\n offset = position.has_offset[0]\n\n if len(position.has_lane_id) != 0:\n lane_id = position.has_lane_id[0]\n\n if len(position.has_road_id) != 0:\n road_id = position.has_road_id[0]\n\n init.add_init_action(action_entity_ref, xosc.TeleportAction(xosc.LanePosition(s, offset, lane_id, road_id)))\n continue\n if action_class == \"EnvironmentAction\": # if the action is an EnvironmentAction\n xosc_environment_action = checkEnvironmentAction(actions)\n init.add_global_action(xosc_environment_action)\n return init", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not 
progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))", "def verifyActionCenterRts():\n pass", "def __initSpellingActions(self):\n self.spellingActGrp = createActionGroup(self)\n \n self.spellCheckAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Check spelling'),\n UI.PixmapCache.getIcon(\"spellchecking.png\"),\n QCoreApplication.translate(\n 'ViewManager', 'Check &spelling...'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Shift+F7\", \"Spelling|Spell Check\")),\n 0,\n self.spellingActGrp, 'vm_spelling_spellcheck')\n self.spellCheckAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Perform spell check of current editor'))\n self.spellCheckAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Check spelling</b>\"\"\"\n \"\"\"<p>Perform a spell check of the current editor.</p>\"\"\"\n ))\n self.spellCheckAct.triggered.connect(self.__spellCheck)\n self.spellingActions.append(self.spellCheckAct)\n \n self.autoSpellCheckAct = E5Action(\n QCoreApplication.translate(\n 'ViewManager', 'Automatic spell checking'),\n UI.PixmapCache.getIcon(\"autospellchecking.png\"),\n QCoreApplication.translate(\n 'ViewManager', '&Automatic spell checking'),\n 0, 0,\n self.spellingActGrp, 'vm_spelling_autospellcheck', True)\n self.autoSpellCheckAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', '(De-)Activate automatic spell checking'))\n self.autoSpellCheckAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Automatic spell checking</b>\"\"\"\n \"\"\"<p>Activate or deactivate the automatic spell checking\"\"\"\n \"\"\" function of all editors.</p>\"\"\"\n ))\n self.autoSpellCheckAct.setChecked(\n Preferences.getEditor(\"AutoSpellCheckingEnabled\"))\n self.autoSpellCheckAct.triggered.connect(\n self.__setAutoSpellChecking)\n self.spellingActions.append(self.autoSpellCheckAct)\n \n self.__enableSpellingActions()", "def test_unsupported_action(self):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n\n status = notify_external_apps(instance=lang, action=\"TEST\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n status = notify_external_apps(instance=lang, action=\"DELETE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n status = notify_external_apps(instance=lang, action=\"CREATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)", "def test_lti20_rest_good_dispatch(self):\r\n for ginput, expected in self.GOOD_DISPATCH_INPUTS:\r\n self.assertEquals(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)", "def test_parses_ambiguous_grammars(self):\n lexed_positive = [\n Token(\n value=\"Hegh\",\n token_type=AKT.VERB,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=AKT.BE,\n line_number=0,\n ),\n ]\n self.assertTrue(parse(AmbiguousKlingonGrammar, lexed_positive))\n\n lexed_negative = [\n Token(\n value=\"Hegh\",\n token_type=AKT.VERB,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=AKT.BE,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=AKT.BE,\n 
line_number=0,\n ),\n ]\n self.assertTrue(parse(AmbiguousKlingonGrammar, lexed_negative))", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def actions():\n pass", "def legal_actions(self):\n raise NotImplementedError", "def _syn_common_checks(self: SynopsisImpl, linter: Linter, cursor: Cursor, docstring: PetscDocStringImpl) -> None:\n items = self.items\n name_loc, symbol_name = items['name']\n assert name_loc is not None # pacify type checkers\n self._check_symbol_matches_synopsis_name(docstring, cursor, name_loc, symbol_name)\n self._check_synopsis_description_separator(docstring, name_loc.start.line)\n self._check_blurb_length(docstring, cursor, items['blurb'])\n return", "def actions() -> None:\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def testverb(self):\r\n from pydsl.Parser.Parser import parse, parser_factory\r\n tokelist = [x.content for x in EncodingLexer('utf8')(p0good)]\r\n self.assertTrue(parse(productionset0, tokelist , \"default\"))\r\n self.assertTrue(parse(productionset0, tokelist , \"lr0\"))\r\n self.assertTrue(parse(productionset0, tokelist , \"ll1\"))\r\n tokelist = [x.content for x in EncodingLexer('utf8')(p0bad)]\r\n self.assertFalse(parse(productionset0, tokelist , \"default\"))\r\n self.assertFalse(parse(productionset0, tokelist , \"lr0\"))\r\n self.assertFalse(parse(productionset0, tokelist , \"ll1\"))", "def _check_tokens(number_token=None, name_token=None, gpe_token=None):\n assert number_token is None or number_token == number_token.lower(), \\\n \"Tokens need to be lowercase: %s\" % number_token\n assert 
name_token is None or name_token == name_token.lower(), \\\n \"Tokens need to be lowercase: %s\" % name_token\n assert gpe_token is None or gpe_token == gpe_token.lower(), \\\n \"Tokens need to be lowercase: %s\" % gpe_token", "def validate_syntax(self):\n resolves_present = False\n uses_present = False\n if not self.wf.get('workflow', None):\n pu.fail('A workflow block must be present\\n')\n else:\n for _, wf_block in dict(self.wf['workflow']).items():\n if wf_block.get('resolves', None):\n resolves_present = True\n if not resolves_present:\n pu.fail('[resolves] attribute must be present\\n')\n if not self.wf.get('action', None):\n pu.fail('Atleast one action block must be present\\n')\n else:\n for _, a_block in self.wf['action'].items():\n if a_block.get('uses', None):\n uses_present = True\n if not uses_present:\n pu.fail('[uses] attribute must be present\\n')", "def test_extra_roles(modpath):\n retcode, out = flake8(\n join(modpath, \"RST304/sphinx-roles.py\"),\n roles=\"need,need_incoming\",\n )\n assert not retcode, out", "def get_legal_actions(self):\n pass", "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def test_get_actions_with_arn_type_and_access_level_case_4(self):\n desired_output = [\n 'secretsmanager:ListSecrets'\n ]\n output = get_actions_with_arn_type_and_access_level(\n \"secretsmanager\", \"*\", \"List\"\n )\n self.assertListEqual(desired_output, output)", "def test_multiple_aclhooks_2(self):\n self._test_hook_approval_sequence([True, None], True)", "def test_issue4104(en_lookup_nlp):\n words = [\"dry\", \"spun\", \"spun-dry\"]\n doc = Doc(en_lookup_nlp.vocab, words=words)\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert [token.lemma_ for token in doc] == [\"dry\", \"spin\", \"spin-dry\"]", "def test_create_local_resource_access_review_for_all_namespaces(self):\n pass", "def test_resource_actions(self):\n test_resource = ResourceTypeName.get()\n expected_actions = sorted(['rt:get', 'rt:put', 'rt:update', 'rt:delete'])\n self.app.post(\n f'/v1/resource/{test_resource}',\n data=json.dumps({'actions': expected_actions}),\n headers=admin_headers)\n\n # Get the actions for a resource type\n resp = self.app.get(f'/v1/resource/{test_resource}/actions', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n actions = json.loads(resp.body)['actions']\n self.assertEqual(actions, expected_actions)\n\n # Delete actions from a resource type\n modify_actions = expected_actions[-2:]\n resp = self.app.delete(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n actions = sorted(json.loads(resp.body)['actions'])\n self.assertEqual(actions, expected_actions[:2])\n\n # OK returned when deleting actions not part of a resource type\n resp = self.app.delete(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n\n # Put actions into a resource type\n resp = self.app.put(f'/v1/resource/{test_resource}/actions',\n 
data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n actions = sorted(json.loads(resp.body)['actions'])\n self.assertEqual(actions, expected_actions)\n\n # OK returned when putting actions already a part of a resource type.\n resp = self.app.put(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)", "def test_unknown_action(self):\n exit_string = actions.main([\"foo\"])\n self.assertEqual(\"Action foo undefined\", exit_string)" ]
[ "0.5523519", "0.54551274", "0.52533966", "0.4947026", "0.49150628", "0.4877916", "0.4869475", "0.4861803", "0.47912464", "0.47661808", "0.47431847", "0.47416365", "0.4730279", "0.47081596", "0.46722156", "0.46685877", "0.46628264", "0.46338037", "0.463026", "0.46180958", "0.45894623", "0.45869774", "0.45716003", "0.45601535", "0.45562217", "0.45410743", "0.45358202", "0.45188215", "0.4517557", "0.45168594" ]
0.6062302
0
Ensure that Kinesis Analytics V1 actions are present in the kinesisanalytics namespace
def test_services_with_multiple_pages_kinesis_analytics(self): # Kinesis Analytics V1 results = get_actions_for_service("kinesisanalytics") actions = [ "kinesisanalytics:GetApplicationState", # Only in v1, not v2 "kinesisanalytics:ListApplications", # In both ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def test_services_with_multiple_pages_ses(self):\n # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html\n self.assertTrue(\"ses:PutIdentityPolicy\" in self.all_actions)\n # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html\n self.assertTrue(\"ses:ListImportJobs\" in self.all_actions)\n\n results = get_actions_for_service(\"ses\")\n actions = [\n \"ses:CloneReceiptRuleSet\",\n \"ses:CreateConfigurationSetTrackingOptions\",\n \"ses:CreateReceiptFilter\",\n \"ses:CreateReceiptRule\",\n \"ses:CreateReceiptRuleSet\",\n \"ses:CreateTemplate\",\n \"ses:DeleteConfigurationSetTrackingOptions\",\n \"ses:DeleteIdentity\",\n \"ses:DeleteIdentityPolicy\",\n \"ses:DeleteReceiptFilter\",\n \"ses:DeleteReceiptRule\",\n \"ses:DeleteReceiptRuleSet\",\n \"ses:DeleteTemplate\",\n \"ses:DeleteVerifiedEmailAddress\",\n \"ses:DescribeActiveReceiptRuleSet\",\n \"ses:DescribeConfigurationSet\",\n \"ses:DescribeReceiptRule\",\n \"ses:DescribeReceiptRuleSet\",\n \"ses:GetAccountSendingEnabled\",\n \"ses:GetIdentityDkimAttributes\",\n \"ses:GetIdentityMailFromDomainAttributes\",\n \"ses:GetIdentityNotificationAttributes\",\n \"ses:GetIdentityPolicies\",\n \"ses:GetIdentityVerificationAttributes\",\n \"ses:GetSendQuota\",\n \"ses:GetSendStatistics\",\n \"ses:GetTemplate\",\n \"ses:ListIdentities\",\n \"ses:ListIdentityPolicies\",\n \"ses:ListReceiptFilters\",\n \"ses:ListReceiptRuleSets\",\n \"ses:ListTemplates\",\n \"ses:ListVerifiedEmailAddresses\",\n \"ses:PutIdentityPolicy\",\n \"ses:ReorderReceiptRuleSet\",\n \"ses:SendBounce\",\n \"ses:SendBulkTemplatedEmail\",\n \"ses:SendRawEmail\",\n \"ses:SendTemplatedEmail\",\n \"ses:SetActiveReceiptRuleSet\",\n \"ses:SetIdentityDkimEnabled\",\n \"ses:SetIdentityFeedbackForwardingEnabled\",\n \"ses:SetIdentityHeadersInNotificationsEnabled\",\n \"ses:SetIdentityMailFromDomain\",\n \"ses:SetIdentityNotificationTopic\",\n \"ses:SetReceiptRulePosition\",\n \"ses:TestRenderTemplate\",\n \"ses:UpdateAccountSendingEnabled\",\n \"ses:UpdateConfigurationSetReputationMetricsEnabled\",\n \"ses:UpdateConfigurationSetSendingEnabled\",\n \"ses:UpdateConfigurationSetTrackingOptions\",\n \"ses:UpdateReceiptRule\",\n \"ses:UpdateTemplate\",\n \"ses:VerifyDomainDkim\",\n \"ses:VerifyDomainIdentity\",\n \"ses:VerifyEmailAddress\",\n \"ses:VerifyEmailIdentity\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n 
self.assertTrue(item in output)", "def verifyActionCenterRts():\n pass", "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n 
\"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def aws_es_os_coginto_authentication_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for response in describe_es_os_domains(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(response,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n esDomainName = response[\"DomainStatus\"][\"DomainName\"]\n esVersion = response[\"DomainStatus\"][\"ElasticsearchVersion\"]\n domainId = response[\"DomainStatus\"][\"DomainId\"]\n domainArn = response[\"DomainStatus\"][\"ARN\"]\n try:\n cognitoEnabledCheck = response[\"DomainStatus\"][\"CognitoOptions\"][\"Enabled\"]\n except:\n cognitoEnabledCheck = False\n # this is a failing check\n if cognitoEnabledCheck is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-cognito-auth-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": 
awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.2] OpenSearch/AWS ElasticSearch Service domains should use Cognito authentication for Kibana\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" does not use Cognito authentication for Kibana. Refer to the remediation instructions if this configuration is not intended\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your domain should use Cognito authentication for Kibana refer to the Amazon Cognito Authentication for Kibana section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion,\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 IA-1\",\n \"NIST SP 800-53 Rev. 4 IA-2\",\n \"NIST SP 800-53 Rev. 4 IA-4\",\n \"NIST SP 800-53 Rev. 4 IA-5\",\n \"NIST SP 800-53 Rev. 4 IA-8\",\n \"NIST SP 800-53 Rev. 4 PE-2\",\n \"NIST SP 800-53 Rev. 
4 PS-3\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.9.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-cognito-auth-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.2] OpenSearch/AWS ElasticSearch Service domains should use Cognito authentication for Kibana\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" uses Cognito authentication for Kibana.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your domain should use Cognito authentication for Kibana refer to the Amazon Cognito Authentication for Kibana section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion,\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 IA-1\",\n \"NIST SP 800-53 Rev. 4 IA-2\",\n \"NIST SP 800-53 Rev. 4 IA-4\",\n \"NIST SP 800-53 Rev. 4 IA-5\",\n \"NIST SP 800-53 Rev. 4 IA-8\",\n \"NIST SP 800-53 Rev. 4 PE-2\",\n \"NIST SP 800-53 Rev. 
4 PS-3\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.9.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies for '{account_id} ({account_name})'\")", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass", "def test_must_be_associated(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = ae.start_server(\n 
(\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n assoc.release()\n assert assoc.is_released\n assert not assoc.is_established\n with pytest.raises(RuntimeError):\n assoc.send_n_action(None, None, None, None)\n\n scp.shutdown()", "def exists_intent_action(self, intent_keyword):\n pass", "def test_get_snsname_arn_auth_exception_handling(self, aws_res_mock):\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_snsname_arn\n\n # create a mock SNS client that returns what we tell it to\n client = boto3.client('sns')\n stub = Stubber(client)\n stub.add_client_error('create_topic', service_error_code='AuthorizationError')\n stub.activate()\n\n\n # since firesim manager code doesn't take clients as method parameters\n # now we mock boto3.client to return our stubbed client\n with patch.object(boto3._get_default_session(), 'client', return_value=client) as mock_session:\n topic_arn = get_snsname_arn()\n\n stub.assert_no_pending_responses()\n topic_arn.should.be.none\n\n # TODO we could mock rootLogger.critical to capture it's calls and args and validate that we're seeing the correct \"nice\" message\n\n # make sure get_snsname_arn() actually called out to get a sns\n # client, otherwise we aren't testing what we think we are\n mock_session.assert_called_once_with('sns')\n\n aws_res_mock.assert_called_once()", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_snapshot_variables_action_spaces(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)", "def _is_s3_notif(event):\n return (\n event.get(\"Records\")\n and isinstance(event.get(\"Records\"), list)\n and \"s3\" in event.get(\"Records\")[0]\n )", "def check_script(vouts):\n for vout in [v for v in vouts[::-1] if v['hex'].startswith('6a')]:\n verb = BlockchainSpider.decode_op_return(vout['hex'])\n action = Spoolverb.from_verb(verb).action\n if action in Spoolverb.supported_actions:\n return verb\n raise Exception(\"Invalid ascribe transaction\")", "def check_snstopicpolicy_crossaccount(self, snsitem):\n #(region, account, arn, aws_object) = audit_object\n #\"Principal\": { \"AWS\": \"*\" }\n # \"AWS\": \"arn:aws:iam::027213240437:root\"\n policy = snsitem.config.get('SNSPolicy', {})\n for statement in policy.get(\"Statement\", []):\n account_numbers = []\n account_number = ''\n princ_aws = statement.get(\"Principal\", {}) \\\n .get(\"AWS\", \"error\")\n if princ_aws == \"*\":\n account_number = statement.get(\"Condition\", {}) \\\n .get(\"StringEquals\", {}) \\\n .get(\"AWS:SourceOwner\", None)\n if not account_number:\n tag = \"SNS Topic open to everyone\"\n notes = \"An SNS policy where { 'Principal': { 'AWS': '*' } } must also have\"\n notes += \" a {'Condition': {'StringEquals': { 'AWS:SourceOwner': '<ACCOUNT_NUMBER>' } } }\"\n 
notes += \" or it is open to the world. In this case, anyone is allowed to perform \"\n notes += \" this action(s): {}\".format(statement.get(\"Action\"))\n self.add_issue(10, tag, snsitem, notes=notes)\n continue\n else:\n try:\n account_numbers.append(str(account_number))\n except ValueError:\n raise InvalidSourceOwner(account_number)\n else:\n if isinstance(princ_aws, list):\n for entry in princ_aws:\n account_numbers.append(str(re.search('arn:aws:iam::([0-9-]+):', entry).group(1)))\n else:\n try:\n account_numbers.append(str(re.search('arn:aws:iam::([0-9-]+):', princ_aws).group(1)))\n except:\n import json\n print json.dumps(snsitem.config, indent=4)\n raise InvalidARN(princ_aws)\n\n for account_number in account_numbers:\n account = Account.query.filter(Account.number == account_number).first()\n account_name = None\n if account is not None:\n account_name = account.name\n\n if not account_name:\n tag = \"Unknown Cross Account Access\"\n notes = \"from {} to {}\".format(account_number, snsitem.account)\n self.add_issue(10, tag, snsitem, notes=notes)\n elif account_name != snsitem.account:\n tag = \"Friendly Cross Account Access\"\n notes = \"from {} to {}\".format(account_name, snsitem.account)\n self.add_issue(0, tag, snsitem, notes=notes)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass", "def copy_active_kinesis_destinations(events: dict, context: dict) -> dict:\n if 'SourceTableName' not in events:\n raise KeyError('Requires SourceTableName')\n if 'TargetTableName' not in events:\n raise KeyError('Requires TargetTableName')\n\n ACTIVE_STATUSES = ['ACTIVE', 'ENABLING']\n source_table_name = events['SourceTableName']\n target_table_name = events['TargetTableName']\n kinesis_destinations = _describe_kinesis_destinations(table_name=source_table_name)\n destinations = [d['StreamArn'] for d in kinesis_destinations['KinesisDataStreamDestinations']\n if d['DestinationStatus'] in ACTIVE_STATUSES]\n\n for d in destinations:\n _enable_kinesis_destinations(table_name=target_table_name, kinesis_arn=d)\n\n return destinations", "def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n lambdautils.utils.send_to_kinesis_stream(search_events, \"dummy_stream\")\n boto3_client(\"kinesis\").put_records.call_count == 1", "def test_accepted(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"de\"})\n actions = list(actions)\n eq_(len(actions), 1)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n de 0002\")\n eq_(so.locale.code, \"de\")\n eq_(so.action_set.count(), 2)", "def action_intersection(s1, s2):\n isect = s1 & s2\n L1 = [ ( (a.oid, a.index_oid), a) for a in s1 ]\n L2 = [ ( (a.oid, a.index_oid), a) for a in s2 ]\n ds1 = dict(L1)\n ds2 = dict(L2)\n for k1, action1 in ds1.items():\n action2 = ds2.get(k1)\n if action2 is not None:\n # replace action in union with correct one or conflict\n isect.add(which_action(action1, action2))\n return isect", "def test_subscriber_access_for_two_vsg_services(self):", "def test_subscriber_access_if_vsg2_goes_down(self):", "def test_subscriber_access_if_vsg1_goes_down(self):", "def has_action2(self, feature):\n return feature in self._action2", "def available_actions(speaker, action, args, soco_function, use_local_speaker_list):\n print(\"Currently available playback actions: {}\".format(speaker.available_actions))\n return True", "def 
_validate_event(cls, event):\n event_key_diff = cls.required_event_keys().difference(set(event))\n if not event_key_diff:\n return\n\n missing_event_keys = ', '.join('\\'{}\\''.format(key) for key in event_key_diff)\n raise AppConfigError('App event is missing the following required '\n 'keys: {}'.format(missing_event_keys))" ]
[ "0.57531", "0.5480236", "0.5119307", "0.5114083", "0.50098765", "0.49987218", "0.4811319", "0.47456703", "0.4734747", "0.4714206", "0.4702901", "0.4696541", "0.46635452", "0.46622923", "0.4657708", "0.46406722", "0.46395832", "0.45904428", "0.45893326", "0.45853838", "0.4572667", "0.4558702", "0.45453376", "0.45447615", "0.4521195", "0.4483556", "0.44694838", "0.44670665", "0.44603643", "0.4452167" ]
0.55055636
1
Ensure that ses v1 and ses v2 actions are both present in the ses namespace
def test_services_with_multiple_pages_ses(self): # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html self.assertTrue("ses:PutIdentityPolicy" in self.all_actions) # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html self.assertTrue("ses:ListImportJobs" in self.all_actions) results = get_actions_for_service("ses") actions = [ "ses:CloneReceiptRuleSet", "ses:CreateConfigurationSetTrackingOptions", "ses:CreateReceiptFilter", "ses:CreateReceiptRule", "ses:CreateReceiptRuleSet", "ses:CreateTemplate", "ses:DeleteConfigurationSetTrackingOptions", "ses:DeleteIdentity", "ses:DeleteIdentityPolicy", "ses:DeleteReceiptFilter", "ses:DeleteReceiptRule", "ses:DeleteReceiptRuleSet", "ses:DeleteTemplate", "ses:DeleteVerifiedEmailAddress", "ses:DescribeActiveReceiptRuleSet", "ses:DescribeConfigurationSet", "ses:DescribeReceiptRule", "ses:DescribeReceiptRuleSet", "ses:GetAccountSendingEnabled", "ses:GetIdentityDkimAttributes", "ses:GetIdentityMailFromDomainAttributes", "ses:GetIdentityNotificationAttributes", "ses:GetIdentityPolicies", "ses:GetIdentityVerificationAttributes", "ses:GetSendQuota", "ses:GetSendStatistics", "ses:GetTemplate", "ses:ListIdentities", "ses:ListIdentityPolicies", "ses:ListReceiptFilters", "ses:ListReceiptRuleSets", "ses:ListTemplates", "ses:ListVerifiedEmailAddresses", "ses:PutIdentityPolicy", "ses:ReorderReceiptRuleSet", "ses:SendBounce", "ses:SendBulkTemplatedEmail", "ses:SendRawEmail", "ses:SendTemplatedEmail", "ses:SetActiveReceiptRuleSet", "ses:SetIdentityDkimEnabled", "ses:SetIdentityFeedbackForwardingEnabled", "ses:SetIdentityHeadersInNotificationsEnabled", "ses:SetIdentityMailFromDomain", "ses:SetIdentityNotificationTopic", "ses:SetReceiptRulePosition", "ses:TestRenderTemplate", "ses:UpdateAccountSendingEnabled", "ses:UpdateConfigurationSetReputationMetricsEnabled", "ses:UpdateConfigurationSetSendingEnabled", "ses:UpdateConfigurationSetTrackingOptions", "ses:UpdateReceiptRule", "ses:UpdateTemplate", "ses:VerifyDomainDkim", "ses:VerifyDomainIdentity", "ses:VerifyEmailAddress", "ses:VerifyEmailIdentity", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)", "def test_subscriber_access_for_two_vsg_services(self):", "def test_subscriber_access_if_vsg2_goes_down(self):", "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)", "def verifyActionCenterRts():\n pass", "def test_request_with_two_bundles(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n self.assertTrue(validate_request(request, self.policy))", "def test_subscriber_access_if_vsg1_goes_down(self):", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass", "def test_services_with_multiple_pages_greengrass(self):\n # Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html\n self.assertTrue(\"greengrass:CreateResourceDefinition\" in self.all_actions)\n # Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html\n self.assertTrue(\"greengrass:CreateComponentVersion\" in self.all_actions)\n results = get_actions_for_service(\"greengrass\")\n actions = [\n \"greengrass:AssociateRoleToGroup\",\n \"greengrass:CreateConnectorDefinition\",\n \"greengrass:CreateConnectorDefinitionVersion\",\n \"greengrass:CreateCoreDefinition\",\n \"greengrass:CreateCoreDefinitionVersion\",\n \"greengrass:CreateDeviceDefinition\",\n \"greengrass:CreateDeviceDefinitionVersion\",\n \"greengrass:CreateFunctionDefinition\",\n \"greengrass:CreateFunctionDefinitionVersion\",\n \"greengrass:CreateGroup\",\n \"greengrass:CreateGroupCertificateAuthority\",\n \"greengrass:CreateGroupVersion\",\n \"greengrass:CreateLoggerDefinition\",\n \"greengrass:CreateLoggerDefinitionVersion\",\n \"greengrass:CreateResourceDefinition\",\n \"greengrass:CreateResourceDefinitionVersion\",\n \"greengrass:CreateSoftwareUpdateJob\",\n \"greengrass:CreateSubscriptionDefinition\",\n \"greengrass:CreateSubscriptionDefinitionVersion\",\n \"greengrass:DeleteConnectorDefinition\",\n \"greengrass:DeleteCoreDefinition\",\n \"greengrass:DeleteDeviceDefinition\",\n \"greengrass:DeleteFunctionDefinition\",\n \"greengrass:DeleteGroup\",\n \"greengrass:DeleteLoggerDefinition\",\n \"greengrass:DeleteResourceDefinition\",\n \"greengrass:DeleteSubscriptionDefinition\",\n \"greengrass:DisassociateRoleFromGroup\",\n \"greengrass:Discover\",\n \"greengrass:GetAssociatedRole\",\n \"greengrass:GetBulkDeploymentStatus\",\n \"greengrass:GetConnectorDefinition\",\n 
\"greengrass:GetConnectorDefinitionVersion\",\n \"greengrass:GetCoreDefinition\",\n \"greengrass:GetCoreDefinitionVersion\",\n \"greengrass:GetDeploymentStatus\",\n \"greengrass:GetDeviceDefinition\",\n \"greengrass:GetDeviceDefinitionVersion\",\n \"greengrass:GetFunctionDefinition\",\n \"greengrass:GetFunctionDefinitionVersion\",\n \"greengrass:GetGroup\",\n \"greengrass:GetGroupCertificateAuthority\",\n \"greengrass:GetGroupCertificateConfiguration\",\n \"greengrass:GetGroupVersion\",\n \"greengrass:GetLoggerDefinition\",\n \"greengrass:GetLoggerDefinitionVersion\",\n \"greengrass:GetResourceDefinition\",\n \"greengrass:GetResourceDefinitionVersion\",\n \"greengrass:GetSubscriptionDefinition\",\n \"greengrass:GetSubscriptionDefinitionVersion\",\n \"greengrass:GetThingRuntimeConfiguration\",\n \"greengrass:ListBulkDeploymentDetailedReports\",\n \"greengrass:ListBulkDeployments\",\n \"greengrass:ListConnectorDefinitionVersions\",\n \"greengrass:ListConnectorDefinitions\",\n \"greengrass:ListCoreDefinitionVersions\",\n \"greengrass:ListCoreDefinitions\",\n \"greengrass:ListDeviceDefinitionVersions\",\n \"greengrass:ListDeviceDefinitions\",\n \"greengrass:ListFunctionDefinitionVersions\",\n \"greengrass:ListFunctionDefinitions\",\n \"greengrass:ListGroupCertificateAuthorities\",\n \"greengrass:ListGroupVersions\",\n \"greengrass:ListGroups\",\n \"greengrass:ListLoggerDefinitionVersions\",\n \"greengrass:ListLoggerDefinitions\",\n \"greengrass:ListResourceDefinitionVersions\",\n \"greengrass:ListResourceDefinitions\",\n \"greengrass:ListSubscriptionDefinitionVersions\",\n \"greengrass:ListSubscriptionDefinitions\",\n \"greengrass:ResetDeployments\",\n \"greengrass:StartBulkDeployment\",\n \"greengrass:StopBulkDeployment\",\n \"greengrass:UpdateConnectorDefinition\",\n \"greengrass:UpdateCoreDefinition\",\n \"greengrass:UpdateDeviceDefinition\",\n \"greengrass:UpdateFunctionDefinition\",\n \"greengrass:UpdateGroup\",\n \"greengrass:UpdateGroupCertificateConfiguration\",\n \"greengrass:UpdateLoggerDefinition\",\n \"greengrass:UpdateResourceDefinition\",\n \"greengrass:UpdateSubscriptionDefinition\",\n \"greengrass:UpdateThingRuntimeConfiguration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # if action not in results:\n # print(action)", "def check_script(vouts):\n for vout in [v for v in vouts[::-1] if v['hex'].startswith('6a')]:\n verb = BlockchainSpider.decode_op_return(vout['hex'])\n action = Spoolverb.from_verb(verb).action\n if action in Spoolverb.supported_actions:\n return verb\n raise Exception(\"Invalid ascribe transaction\")", "def test_get_snsname_arn_auth_exception_handling(self, aws_res_mock):\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_snsname_arn\n\n # create a mock SNS client that returns what we tell it to\n client = boto3.client('sns')\n stub = Stubber(client)\n stub.add_client_error('create_topic', service_error_code='AuthorizationError')\n stub.activate()\n\n\n # since firesim manager code doesn't take clients as method parameters\n # now we mock boto3.client to return our stubbed client\n with patch.object(boto3._get_default_session(), 'client', return_value=client) as mock_session:\n topic_arn = get_snsname_arn()\n\n stub.assert_no_pending_responses()\n topic_arn.should.be.none\n\n # TODO we could mock rootLogger.critical to capture it's calls and args and validate that we're seeing the correct \"nice\" message\n\n # make sure get_snsname_arn() 
actually called out to get a sns\n # client, otherwise we aren't testing what we think we are\n mock_session.assert_called_once_with('sns')\n\n aws_res_mock.assert_called_once()", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental 
Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def _setup_ses(self):\n print(\"\\n ** Setting up SES mocking\")\n ses = boto3.client('ses', region_name=\"us-east-1\")\n ses.verify_domain_identity(Domain='donatemates.com')\n #response = ses.verify_email_address(EmailAddress='[email protected]')", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_parse_request_type_2a(self):\n req_type, errors = self._exec_parse(test_source=MessageEventType.SESSION_INIT, session_secret=self.session_secret,\n check_for_auth=True)\n self.assertEqual(req_type, MessageEventType.SESSION_INIT)", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def test_accepted(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"de\"})\n actions = list(actions)\n eq_(len(actions), 1)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n de 0002\")\n eq_(so.locale.code, \"de\")\n eq_(so.action_set.count(), 2)", "def test_services_with_multiple_pages_aws_marketplace(self):\n # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace 
Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems\n # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html\n self.assertTrue(\"aws-marketplace:AcceptAgreementApprovalRequest\" in self.all_actions)\n # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html\n self.assertTrue(\"aws-marketplace:CancelChangeSet\" in self.all_actions)\n # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html\n self.assertTrue(\"aws-marketplace:GetEntitlements\" in self.all_actions)\n # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html\n self.assertTrue(\"aws-marketplace:DescribeBuilds\" in self.all_actions)\n # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html\n self.assertTrue(\"aws-marketplace:BatchMeterUsage\" in self.all_actions)\n # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html\n self.assertTrue(\"aws-marketplace:AssociateProductsWithPrivateMarketplace\" in self.all_actions)\n # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html\n self.assertTrue(\"aws-marketplace:DescribeProcurementSystemConfiguration\" in self.all_actions)\n\n results = get_actions_for_service(\"aws-marketplace\")\n actions = [\n \"aws-marketplace:AcceptAgreementApprovalRequest\",\n \"aws-marketplace:BatchMeterUsage\",\n \"aws-marketplace:CancelAgreementRequest\",\n \"aws-marketplace:CancelChangeSet\",\n \"aws-marketplace:CompleteTask\",\n \"aws-marketplace:DescribeAgreement\",\n \"aws-marketplace:DescribeBuilds\",\n \"aws-marketplace:DescribeChangeSet\",\n \"aws-marketplace:DescribeEntity\",\n \"aws-marketplace:DescribeProcurementSystemConfiguration\",\n \"aws-marketplace:DescribeTask\",\n \"aws-marketplace:GetAgreementApprovalRequest\",\n \"aws-marketplace:GetAgreementRequest\",\n \"aws-marketplace:GetAgreementTerms\",\n \"aws-marketplace:GetEntitlements\",\n \"aws-marketplace:ListAgreementApprovalRequests\",\n \"aws-marketplace:ListAgreementRequests\",\n \"aws-marketplace:ListBuilds\",\n \"aws-marketplace:ListChangeSets\",\n \"aws-marketplace:ListEntities\",\n \"aws-marketplace:ListTasks\",\n \"aws-marketplace:MeterUsage\",\n \"aws-marketplace:PutProcurementSystemConfiguration\",\n \"aws-marketplace:RegisterUsage\",\n \"aws-marketplace:RejectAgreementApprovalRequest\",\n \"aws-marketplace:ResolveCustomer\",\n \"aws-marketplace:SearchAgreements\",\n \"aws-marketplace:StartBuild\",\n \"aws-marketplace:StartChangeSet\",\n \"aws-marketplace:Subscribe\",\n \"aws-marketplace:Unsubscribe\",\n \"aws-marketplace:UpdateAgreementApprovalRequest\",\n \"aws-marketplace:UpdateTask\",\n \"aws-marketplace:ViewSubscriptions\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_create_pod_security_policy_self_subject_review_for_all_namespaces(self):\n pass", "def test_replace_namespaced_route_status(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def _ensure_unit(self, 
target_access_string: int) -> Squonk2AgentRv:\n if not self.__org_record:\n msg: str = 'The Squonk2Org record does not match' \\\n ' the configured SQUONK2_ORG_UUID.' \\\n ' You cannot change the SQUONK2_ORG_UUID once it has been used'\n return Squonk2AgentRv(success=False, msg=msg)\n\n # Now we check and create a Squonk2Unit...\n unit_name_truncated, unit_name_full = self._build_unit_name(target_access_string)\n sq2_unit: Optional[Squonk2Unit] = Squonk2Unit.objects.filter(name=unit_name_full).first()\n if not sq2_unit:\n _LOGGER.info('No existing Squonk2Unit for \"%s\"', target_access_string)\n # Get the list of Units from Squonk.\n sq2a_rv: Squonk2AgentRv = self._get_or_create_unit(unit_name_truncated, unit_name_full)\n if not sq2a_rv.success:\n _LOGGER.error('Failed to create Unit \"%s\" (%s)', target_access_string, sq2a_rv.msg)\n return Squonk2AgentRv(success=False, msg=sq2a_rv.msg)\n\n unit_uuid: str = sq2a_rv.msg\n sq2_unit = Squonk2Unit(uuid=unit_uuid,\n name=unit_name_full,\n organisation_id=self.__org_record.id)\n sq2_unit.save()\n _LOGGER.info('Created Squonk2Unit %s \"%s\" (for \"%s\")',\n unit_uuid,\n unit_name_full,\n target_access_string)\n else:\n _LOGGER.debug('Squonk2Unit %s \"%s\" already exists (for \"%s\") - nothing to do',\n sq2_unit.uuid,\n unit_name_full,\n target_access_string)\n\n return Squonk2AgentRv(success=True, msg=sq2_unit)", "def has_action2(self, feature):\n return feature in self._action2", "def test_must_be_associated(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n assoc.release()\n assert assoc.is_released\n assert not assoc.is_established\n with pytest.raises(RuntimeError):\n assoc.send_n_action(None, None, None, None)\n\n scp.shutdown()", "def test_cmd_cs_subscription_bad_action(self):\n bad_action = 'blahblah'\n\n result = self.runner.invoke(cli, ['subscription', bad_action])\n assert f\"invalid choice: {bad_action}\" in result.output\n assert result.exception", "def legal_actions(self):\n raise NotImplementedError", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. 
It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def test_action_register_methods(self) -> None:\n with self.assertWarns(RemovedInReviewBoard70Warning,\n self.deprecation_message):\n foo_action = FooAction()\n foo_action.register()\n\n self.assertEqual(actions_registry.get('action_id', 'foo-action'),\n foo_action)\n\n foo_action.unregister()\n\n self.assertIsNone(actions_registry.get('action_id', 'foo-action'))" ]
[ "0.5713234", "0.550883", "0.5373594", "0.53643465", "0.5290291", "0.5130985", "0.5121447", "0.48156258", "0.479505", "0.479243", "0.47852328", "0.47827813", "0.47478974", "0.4735454", "0.4725077", "0.47070414", "0.46997523", "0.4678125", "0.46708792", "0.4640849", "0.46401328", "0.46335325", "0.461904", "0.46109438", "0.46084777", "0.46025914", "0.45958033", "0.4581516", "0.4574944", "0.4572893" ]
0.62560695
0
Ensure that kafka actions are not overwritten in the IAM definition
def test_kafka_action_names_overlap_issue(self): # Kafka actions used to be in two pages but are now one. This verifies the current state. # results = get_actions_for_service("kafka") # print(results) actions = [ "kafka:BatchAssociateScramSecret", "kafka:BatchDisassociateScramSecret", "kafka:CreateClusterV2", "kafka:DeleteConfiguration", "kafka:DescribeClusterV2", "kafka:ListClustersV2", "kafka:ListConfigurationRevisions", "kafka:ListKafkaVersions", "kafka:ListScramSecrets", "kafka:RebootBroker", "kafka:UpdateBrokerType", "kafka:UpdateConfiguration", "kafka:UpdateConnectivity", "kafka:UpdateSecurity" ] for action in actions: self.assertTrue(action in self.all_actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def legal_actions(self):\n raise NotImplementedError", "def get_actions(self, request):\n actions = super().get_actions(request)\n if not settings.PUBLISHER_CODE:\n del actions['create_cwr']\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n 
\"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_get_actions_with_arn_type_and_access_level_case_2(self):\n desired_output = [\n 'ssm:DeleteParameter',\n 'ssm:DeleteParameters',\n 'ssm:LabelParameterVersion',\n 'ssm:PutParameter'\n]\n output = get_actions_with_arn_type_and_access_level(\n \"ssm\", \"parameter\", \"Write\"\n )\n for item in desired_output:\n self.assertTrue(item in output)", "def authorize(self, action, author_id=None):\n if action not in CHANGE_TYPES:\n return False\n return True", "def test_get_actions_with_arn_type_and_access_level_case_5(self):\n\n output = get_actions_with_arn_type_and_access_level(\n \"s3\", \"object\", \"List\"\n )\n self.assertTrue(\"s3:ListMultipartUploadParts\" in output)", "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)", "async def before_action(self, action, *args, **kwargs):\n return True", "async def before_action(self, action: str, *args, **kwargs) -> bool:\n return True", "def _enforce(self, req, action, target=None):\n if target is None:\n target = {}\n try:\n self.policy.enforce(req.context, action, target)\n except exception.Forbidden as e:\n 
LOG.debug(\"User not permitted to perform '%s' action\", action)\n raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def _enforce(self, req, action):\n try:\n self.policy.enforce(req.context, action, {})\n except exception.Forbidden:\n raise HTTPForbidden()", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass", "def clean_iam_access_keys(self, batch=False):\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(self.config.iamUserKeysRotation.ddb_table_name)\n\n retention_period = self.config.iamUserKeysRotation.remediation_retention_period\n\n jira = JiraReporting(self.config)\n slack = SlackNotification(self.config)\n\n for account_id, account_name in self.config.iamUserKeysRotation.remediation_accounts.items():\n logging.debug(\"* Account Name:\" + account_name + \" :::Account ID:::\" + account_id)\n issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyRotationIssue)\n for issue in issues:\n key_id = issue.issue_id\n username = issue.issue_details.username\n\n user_in_whitelist = self.config.iamUserKeysRotation.in_whitelist(account_id, username)\n key_in_whitelist = self.config.iamUserKeysRotation.in_whitelist(account_id, key_id)\n\n if user_in_whitelist or key_in_whitelist:\n logging.debug(f\"Skipping '{key_id} / {username}' (in whitelist)\")\n\n # Adding label with \"whitelisted\" to jira ticket.\n jira.add_label(\n ticket_id=issue.jira_details.ticket,\n label=IssueStatus.Whitelisted.value\n )\n continue\n\n if issue.timestamps.reported is None:\n logging.debug(f\"Skipping '{key_id} / {username}' (was not reported)\")\n continue\n\n if issue.timestamps.remediated is not None:\n logging.debug(f\"Skipping '{key_id} / {username}' (has been already remediated)\")\n continue\n\n updated_date = issue.timestamp_as_datetime\n no_of_days_issue_created = (self.config.now - updated_date).days\n\n if no_of_days_issue_created >= retention_period:\n try:\n if not batch and \\\n not confirm(f\"Do you want to remediate stale access key '{key_id} / {username}'\", False):\n continue\n\n account = Account(id=account_id,\n name=account_name,\n role_name=self.config.aws.role_name_reporting)\n if account.session is None:\n continue\n\n logging.debug(f\"Remediating stale access key '{key_id} / {username}'\")\n remediation_succeed = True\n try:\n IAMOperations.disable_access_key(account.client(\"iam\"), username, key_id)\n comment = (f\"Stale access key '{key_id} / {username}' issue \"\n f\"in '{account_name} / {account_id}' account \"\n f\"was remediated by hammer\")\n except Exception:\n remediation_succeed = False\n logging.exception(\"Failed to disable '{key_id} / {username}' stale access key\")\n comment = (f\"Failed to remediate stale access key '{key_id} / {username}' issue \"\n f\"in '{account_name} / {account_id}' account \"\n f\"due to some limitations. 
Please, check manually\")\n\n jira.remediate_issue(\n ticket_id=issue.jira_details.ticket,\n comment=comment,\n reassign=remediation_succeed,\n )\n slack.report_issue(\n msg=f\"{comment}\"\n f\"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}\",\n account_id=account_id,\n )\n IssueOperations.set_status_remediated(ddb_table, issue)\n except Exception:\n logging.exception(f\"Error occurred while disabling '{key_id} / {username}' \"\n f\"in '{account_name} / {account_id}'\")\n else:\n logging.debug(f\"Skipping '{key_id} / {username}' \"\n f\"({retention_period - no_of_days_issue_created} days before remediation)\")", "def test_get_actions_with_arn_type_and_access_level_case_4(self):\n desired_output = [\n 'secretsmanager:ListSecrets'\n ]\n output = get_actions_with_arn_type_and_access_level(\n \"secretsmanager\", \"*\", \"List\"\n )\n self.assertListEqual(desired_output, output)", "def actions() -> None:\n pass", "def actions():\n pass", "def _generate_actions(self) -> list:\n pass", "def _get_legal_actions(self):\n raise NotImplementedError", "def get_legal_actions(self):\n pass", "def robot_is_willing_default(requester, action, ctxt) :\n if action.get_actor() == \"compliant robot\" :\n raise ActionHandled()", "def pre_access_control_list_create(self, resource_dict):\n pass", "def prepare_actions(self, training_job_name):\n if self.actions is None:\n # user cannot manually specify action_json in rule_parameters for actions.\n self.rule_parameters.pop(\"action_json\", None)\n return\n\n self.actions.update_training_job_prefix_if_not_specified(training_job_name)\n action_params = {\"action_json\": self.actions.serialize()}\n self.rule_parameters.update(action_params)", "def test_allowlist_not_overwritten(self):\n handler = MyHandler()\n handler.name = \"RebuildImagesOnImageAdvisoryChange\"\n allowed = handler.allow_build(\n ArtifactType.IMAGE, advisory_state=\"SHIPPED_LIVE\")\n self.assertTrue(allowed)\n\n handler.name = \"foo\"\n allowed = handler.allow_build(\n ArtifactType.IMAGE, advisory_state=\"SHIPPED_LIVE\")\n self.assertFalse(allowed)", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. 
entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def actions(self):\n raise NotImplementedError", "def add_insufficient_data_action(self, action_arn=None):\r\n if not action_arn:\r\n return\r\n self.actions_enabled = 'true'\r\n self.insufficient_data_actions.append(action_arn)", "def update_execution_policies(\n target_role: iam.Role, project_name: str, project_id: str\n):\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n # \"sts:AssumeRole\"\n \"iam:PassRole\"\n ],\n resources=[\n f\"arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/cdk*\",\n ],\n )\n )\n\n policy = target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"sts:AssumeRole\",\n \"iam:PassRole\",\n ],\n resources=[\n target_role.role_arn,\n ],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"cloudformation:DescribeStackEvents\",\n \"cloudformation:GetTemplate\",\n \"cloudformation:CreateChangeSet\",\n \"cloudformation:DescribeChangeSet\",\n \"cloudformation:ExecuteChangeSet\",\n \"cloudformation:DeleteChangeSet\",\n \"cloudformation:DescribeStacks\",\n \"cloudformation:DeleteStack\",\n ],\n resources=[\n f\"arn:aws:cloudformation:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:stack/{project_name}*/*\",\n ],\n )\n )\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"cloudformation:DescribeStackEvents\",\n \"cloudformation:GetTemplate\",\n \"cloudformation:DescribeStacks\",\n ],\n resources=[\n f\"arn:aws:cloudformation:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:stack/CDKToolkit/*\",\n ],\n )\n )\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"ssm:GetParameter\",\n ],\n resources=[\n f\"arn:aws:ssm:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:parameter/cdk-bootstrap/*\",\n f\"arn:aws:ssm:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:parameter/sagemaker-{project_name}*\",\n ],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\"*\"],\n resources=[\"*\"],\n conditions={\n \"ForAnyValue:StringEquals\": {\n \"aws:CalledVia\": [\"cloudformation.amazonaws.com\"]\n }\n },\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"logs:CreateLogGroup\",\n \"logs:CreateLogStream\",\n \"logs:PutLogEvents\",\n ],\n resources=[\n f\"arn:aws:logs:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:log-group:/aws/codebuild/sagemaker-{project_id}*\",\n f\"arn:aws:logs:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:/aws/codebuild/sagemaker-{project_id}*:*\",\n ],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"codebuild:CreateReportGroup\",\n \"codebuild:CreateReport\",\n \"codebuild:UpdateReport\",\n \"codebuild:BatchPutTestCases\",\n \"codebuild:BatchPutCodeCoverages\",\n ],\n resources=[\n f\"arn:aws:codebuild:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:report-group/sagemaker-{project_id}*\",\n ],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"codepipeline:PutApprovalResult\",\n ],\n resources=[\n f\"arn:aws:codepipeline:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:sagemaker-{project_id}*\",\n ],\n )\n )\n\n 
target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"codebuild:BatchGetBuilds\",\n \"codebuild:StartBuild\",\n \"codebuild:StopBuild\",\n ],\n resources=[\n f\"arn:aws:codebuild:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:project/sagemaker-{project_id}*\",\n ],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"glue:SearchTables\",\n \"glue:BatchCreatePartition\",\n \"athena:StartQueryExecution\",\n \"glue:CreateTable\",\n \"glue:GetTables\",\n \"glue:GetTableVersions\",\n \"glue:GetPartitions\",\n \"glue:BatchDeletePartition\",\n \"glue:UpdateTable\",\n \"glue:DeleteTableVersion\",\n \"glue:BatchGetPartition\",\n \"glue:DeleteTable\",\n \"cloudformation:DescribeStacks\",\n \"glue:GetTable\",\n \"glue:GetDatabase\",\n \"glue:GetPartition\",\n \"glue:GetTableVersion\",\n \"glue:CreateDatabase\",\n \"glue:BatchDeleteTableVersion\",\n \"athena:GetQueryExecution\",\n \"glue:BatchDeleteTable\",\n \"glue:CreatePartition\",\n \"glue:DeletePartition\",\n \"glue:UpdatePartition\",\n ],\n resources=[\n \"arn:aws:glue:*:*:catalog\",\n \"arn:aws:glue:*:*:database/default\",\n \"arn:aws:glue:*:*:database/global_temp\",\n \"arn:aws:glue:*:*:database/sagemaker*\",\n \"arn:aws:glue:*:*:table/sagemaker*\",\n \"arn:aws:glue:*:*:tableVersion/sagemaker*\",\n f\"arn:aws:athena:*:{cdk.Aws.ACCOUNT_ID}:workgroup/*\",\n ],\n )\n )\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\"glue:StartJobRun\"],\n resources=[\n f\"arn:aws:glue:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:job/sagemaker-*\"\n ],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\"glue:GetJobRun\", \"glue:GetJobRuns\", \"glue:GetJobs\"],\n resources=[f\"*\"],\n )\n )\n\n target_role.add_to_principal_policy(\n iam.PolicyStatement(\n actions=[\n \"dynamodb:BatchGetItem\",\n \"dynamodb:GetRecords\",\n \"dynamodb:GetShardIterator\",\n \"dynamodb:Query\",\n \"dynamodb:GetItem\",\n \"dynamodb:Scan\",\n \"dynamodb:ConditionCheckItem\",\n \"dynamodb:DescribeTable\",\n ],\n resources=[\n f\"arn:aws:dynamodb:{cdk.Aws.REGION}:{cdk.Aws.ACCOUNT_ID}:table/sagemaker-{project_id}*\"\n ],\n )\n )\n\n return policy", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())" ]
[ "0.57733667", "0.5674129", "0.5623118", "0.55654687", "0.54859173", "0.5416407", "0.53971905", "0.5386766", "0.5313959", "0.5262459", "0.52369434", "0.5227309", "0.5218352", "0.5207143", "0.51959413", "0.51855755", "0.5174657", "0.51633334", "0.51326454", "0.51265115", "0.50608176", "0.5060752", "0.50577503", "0.50553393", "0.5053246", "0.50392556", "0.5028449", "0.50030196", "0.49959403", "0.4994682" ]
0.67827463
0
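A minimal, self-contained sketch of the membership check the kafka record above relies on: every expected action name must still appear in the full action list, so a renamed or dropped action is caught. The action names and the all_actions list below are hypothetical stand-ins, not output of the policy-querying helper the test itself uses.

def find_missing_actions(expected, all_actions):
    """Return the expected IAM action names that are absent from the full list."""
    known = set(all_actions)
    return [action for action in expected if action not in known]

if __name__ == "__main__":
    expected = ["kafka:CreateClusterV2", "kafka:DescribeClusterV2", "kafka:ListClustersV2"]
    all_actions = ["kafka:CreateClusterV2", "kafka:DescribeClusterV2"]  # hypothetical data
    print(find_missing_actions(expected, all_actions))  # ['kafka:ListClustersV2']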
1. Maintain a decreasing stack by scanning nums from left to right. 2. Then scan the nums from right to left and calculate the maxWidth between each ramp.
def maxWidthRamp(self, nums: list[int]) -> int: maxWidth = 0 descStack = [] # Generate decreasing stack. for i, num in enumerate(nums): if not descStack or nums[descStack[-1]] > num: descStack.append(i) # Check elements from right to left. for j in reversed(range(len(nums))): while descStack and nums[descStack[-1]] <= nums[j]: maxWidth = max(maxWidth, j - descStack.pop()) return maxWidth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peg_width_per_levels(base_width):\n limiter = 2\n decrementer = -2\n decrementing_width = int(base_width)\n peg_count_per_level = []\n while decrementing_width >= limiter:\n peg_count_per_level.append(int(decrementing_width))\n decrementing_width += decrementer\n return peg_count_per_level", "def fn(x):\n ans = rsm = ii = 0 \n for i in range(len(nums)): \n rsm += nums[i]\n while rsm > x: # sliding window \n rsm -= nums[ii]\n ii += 1\n ans += i - ii + 1\n return ans", "def maximumGap(self, nums: List[int]) -> int:\r\n n = len(nums)\r\n if n < 2: return 0 \r\n l, r = min(nums), max(nums)\r\n if r - l == 0: return 0 \r\n gap_instance = max(1, (r - l) // n)\r\n gapcnts = math.ceil((r - l + 1) / gap_instance)\r\n buckets = [[-1, -1] for _ in range(gapcnts)] \r\n calpos = lambda num: (num - l) // gap_instance\r\n\r\n for num in nums:\r\n pos = calpos(num)\r\n if num < buckets[pos][0] or buckets[pos][0] == -1:\r\n buckets[pos][0] = num \r\n if num > buckets[pos][1] or buckets[pos][1] == -1:\r\n buckets[pos][1] = num \r\n\r\n ans, pre = 0, l\r\n for small, large in buckets:\r\n if small == -1:\r\n continue \r\n else:\r\n ans = max(small - pre, ans)\r\n pre = large\r\n return ans", "def findPeakElement2(self, nums: List[int]) -> int:\n nums.insert(0, -float('inf'))\n nums.append(-float('inf'))\n l, r = 0, len(nums)\n\n while l < r:\n mid = l + (r - l) // 2\n if nums[mid] > nums[mid-1] and nums[mid] > nums[mid+1]:\n return mid - 1\n \n elif nums[mid] <= nums[mid-1] and nums[mid] <= nums[mid+1]:\n r = mid \n elif nums[mid-1] <= nums[mid] <= nums[mid+1]:\n l = mid\n elif nums[mid-1] >= nums[mid] >= nums[mid+1]:\n r = mid\n return l", "def get_next_width(current_width,width_array):\n active_width = float(current_width)/MaxWidth\n\n active_width_constant = width_array.index(get_width_constant(active_width,width_array))\n\n width_multiplier = width_array[(active_width_constant+1)%len(width_array)]\n\n return int((MaxWidth-(WinBorder*2))*width_multiplier)", "def findMaximumSubarraySlidingWindow(self, k, nums):\n window_start, window_sum, window_max= 0, 0, 0\n for i in range(len(nums)):\n window_sum += nums[i] #add the next element\n # slide the window, we don't need to slide if we have not hit the required window size of K\n if i >= k-1:\n window_max = max(window_sum, window_max) # calculate the maximum sum\n window_sum -= nums[window_start] #substract the element going out\n window_start += 1 #slide the window ahead\n return window_max", "def findMaxLength(self, nums):\n dict1 = dict()\n count = 0\n maxlen = 0\n for i in range(len(nums)):\n if nums[i] == 1:\n count = count + 1\n else:\n count = count - 1\n\n if count == 0:\n maxlen = max(maxlen, i + 1)\n if count not in dict1:\n dict1[count] = i\n else:\n maxlen = max(maxlen, i - (dict1.get(count)))\n return maxlen", "def min_width(blocks):\r\n assert(len(blocks) > 0)\r\n return sum(blocks) + len(blocks) - 1", "def find_rising_flank(arr, method='Size'):\n arr = arr.copy()\n #arr[arr<arr.max()*0.01] = 0\n prev_val = -np.inf\n start_index = None\n len_ctr = 0\n pairs = []\n for index, val in enumerate(arr):\n if val > prev_val:\n if start_index is None:\n start_index = index - 1\n start_val = val\n len_ctr += 1\n else:\n if start_index is not None:\n if method == 'Length':\n pairs.append((len_ctr, start_index, index))\n elif method == 'Size':\n pairs.append((prev_val-start_val, start_index, index))\n start_index = None\n start_val = None\n len_ctr = 0\n prev_val = val\n #import pdb\n #pdb.set_trace()\n end_longest_streak = sorted(pairs)[-1][-1]\n 
return end_longest_streak", "def calculate_min_max_tiles(self):", "def splitArray(self, nums: List[int], m: int) -> int:\n l = max(nums)\n r = sum(nums)\n ans = r\n\n while l <= r:\n mid = (l + r) // 2\n range_sum = 0\n range_sum_count = 1\n for i in range(len(nums)):\n if (range_sum + nums[i] > mid):\n range_sum = nums[i]\n range_sum_count += 1\n else:\n range_sum += nums[i]\n if range_sum_count <= m:\n ans = min(ans, mid)\n r = mid - 1\n else:\n l = mid + 1\n return ans", "def build_max_heap(A):\r\n i = int((len(A)-2)//2)\r\n while i >= 0:\r\n max_heapify(A, i)\r\n i -= 1\r\n return A", "def robSingle_2(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end + 1])\n curMax = 0\n preMax = 0\n for num in nums[start:end + 1]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def calc_slice_size(scale_w):\n total_res = SLICE_THRE\n in_w = int((total_res - 1 - scale_w) // (1 + scale_w))\n out_w = int((in_w + 1) * scale_w) + 2\n return in_w, out_w", "def fix(xs):\n\n if xs >= 0:\n res = np.floor(xs)\n else:\n res = np.ceil(xs)\n return res", "def fix(xs):\n\n # res = [np.floor(e) if e >= 0 else np.ceil(e) for e in xs]\n if xs >= 0:\n res = np.floor(xs)\n else:\n res = np.ceil(xs)\n return res", "def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n max_heap = []\n item_dict = {}\n result = []\n\n for i in range(len(nums)):\n item = [-nums[i], False]\n heapq.heappush(max_heap, item)\n\n if nums[i] not in item_dict:\n item_dict[nums[i]] = [item]\n else:\n item_dict[nums[i]].append(item)\n\n if i - k >= 0:\n # \"remove\" element from heap\n item_to_remove = nums[i - k]\n\n in_heap_item = item_dict[item_to_remove].pop()\n in_heap_item[1] = True # mark as removed\n\n while max_heap[0][1] is True:\n heapq.heappop(max_heap)\n\n result.append(-max_heap[0][0])\n if i == k - 1:\n # handle the first window\n result.append(-max_heap[0][0])\n\n return result", "def four():\r\n \r\n i = 999\r\n j = i\r\n largest = 0\r\n \r\n while i > 0:\r\n while j > 0:\r\n number = str(i * j)\r\n forward = str(number)\r\n reverse = \"\"\r\n for char in number:\r\n reverse = char + reverse\r\n if forward == reverse:\r\n if largest < i * j:\r\n largest = i * j\r\n break\r\n else:\r\n j = j - 1\r\n i = i - 1\r\n j = i\r\n return largest", "def maxSumOfThreeSubarrays(self, nums: List[int], k: int) -> List[int]:\n\n n = len(nums)\n if n < 3 * k or k == 0:\n return 0\n\n prefix_sum = [0]\n for num in nums:\n prefix_sum.append(prefix_sum[-1] + num)\n\n left = [0] * n\n left_i = [0] * n\n right = [0] * (n + 1) # add one to right (for case of k == 1)\n right_i = [0] * (n + 1)\n\n for i in range(k - 1, n):\n window = prefix_sum[i + 1] - prefix_sum[i + 1 - k]\n if window > left[i - 1]: # > cause we prefex left start\n left[i] = window\n left_i[i] = i - (k - 1)\n else:\n left[i] = left[i - 1]\n left_i[i] = left_i[i - 1]\n\n for i in reversed(range(n - k + 1)):\n window = prefix_sum[i + k] - prefix_sum[i]\n if window >= right[i + 1]: # >= cause we prefex left start\n right[i] = window\n right_i[i] = i\n else:\n right[i] = right[i + 1]\n right_i[i] = right_i[i + 1]\n\n max_sum = 0\n a, b, c = 0, 0, 0\n for i in range(k, n - 2 * k + 1):\n curr_sum = prefix_sum[i + k] - prefix_sum[i] + left[i - 1] + right[i + k]\n if curr_sum > max_sum:\n max_sum = curr_sum\n a, b, c = left_i[i - 1], i, right_i[i + k]\n\n return [a, b, c]", "def GetScaleBlocks(width):\n\n rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if 
nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def headandtail(zigma, numls):\n numls.sort()\n nummin, nummax = 0, 0\n for i in xrange(1, zigma+1):\n nummin += numls[i-1]\n nummax += numls[-i]\n print nummin + nummax", "def find_max_with_count(A):\n\n def frmax(lo, hi):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1] incl. count\"\"\"\n if lo == hi: return (0, A[lo])\n\n mid = (lo+hi)//2\n ctleft,left = frmax(lo, mid)\n ctright,right = frmax(mid+1, hi)\n return (1+ctleft+ctright, max(left, right))\n\n return frmax(0, len(A)-1)", "def build_max_heap(a):\r\n for i in range(math.floor((len(a) - 1)/2), -1, -1):\r\n max_heapify(a, i)", "def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))", "def flattenFrames(stack):\n \n maxHeight=0\n frameList=[]\n \n \n print('\\n')\n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting shifts {:.2f}% done'.format(100.0*((i+1)/len(stack))),end='', flush=True)\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def count_max_acc(alon, curr_max, count, pos):\n if pos == len(alon):\n return count\n curr_num = alon[pos]\n if curr_num > curr_max:\n curr_max = curr_num\n count = 0\n if curr_num == curr_max:\n count += 1\n return count_max_acc(alon, curr_max, count, pos+1)", "def calc_dim(s):\n s = s.detach().numpy()\n dim = 0\n # calculate how much 90% would be\n s_square = [i ** 2 for i in s]\n sum_square = sum(s_square)\n goal = .9 * sum_square\n # find 90%\n count = 0\n while count < goal:\n count += s_square[dim]\n dim += 1\n return dim # return this many dimensions", "def sub_division(width: float, minimum_division: float, stretch_factor: float) -> list:\n\n sum_x = 0\n next_ = minimum_division\n new_grid = []\n max_dx = 20/100\n x = width/2\n\n while sum_x < x:\n remaining = x - sum_x\n\n if next_ > max_dx:\n n = np.ceil(remaining/max_dx)\n\n if n == 0:\n new_grid.append(remaining)\n\n next_ = remaining/n\n\n for _ in range(0, int(n)):\n new_grid.append(next_)\n sum_x += next_\n\n remaining = x - sum_x\n\n if next_ < remaining:\n new_grid.append(next_)\n sum_x += next_\n else:\n 
remaining += new_grid[-1]\n new_grid[-1] = remaining/2\n new_grid.append(remaining/2)\n sum_x = x\n\n next_ = next_ * stretch_factor\n\n x1 = new_grid[::-1]\n x2 = new_grid+x1\n\n return x2", "def robSingle(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end])\n curMax = 0\n preMax = 0\n for num in nums[start:end]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]" ]
[ "0.6181188", "0.61154115", "0.61118263", "0.58728755", "0.5702233", "0.5701698", "0.5680999", "0.55414397", "0.553379", "0.5523938", "0.55195713", "0.5513929", "0.5513566", "0.550096", "0.54771763", "0.5460938", "0.5459851", "0.5444232", "0.5441669", "0.54264915", "0.54041374", "0.5401712", "0.5398094", "0.536509", "0.5361847", "0.5356479", "0.5354691", "0.5353158", "0.5332718", "0.5330513" ]
0.8285866
0
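A standalone sketch of the decreasing-stack approach described in the record above, lifted out of the class so it can be run directly; the two sample inputs and expected outputs are illustrative only.

def max_width_ramp(nums):
    stack = []  # indices forming a strictly decreasing prefix of nums
    for i, num in enumerate(nums):
        if not stack or nums[stack[-1]] > num:
            stack.append(i)
    best = 0
    for j in range(len(nums) - 1, -1, -1):  # scan right to left
        while stack and nums[stack[-1]] <= nums[j]:
            best = max(best, j - stack.pop())  # j is the widest ramp end for this start index
    return best

if __name__ == "__main__":
    print(max_width_ramp([6, 0, 8, 2, 1, 5]))               # 4 (indices 1 and 5)
    print(max_width_ramp([9, 8, 1, 0, 1, 9, 4, 0, 4, 1]))   # 7 (indices 2 and 9)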
Given an input (instance of the BenchInput tuple), constructs and validates a disjunctive ChaumPedersen proof, returning the time (in seconds) to do each operation.
def chaum_pedersen_bench(bi: BenchInput) -> Tuple[float, float]: (keypair, r, s) = bi ciphertext = get_optional(elgamal_encrypt(0, r, keypair.public_key)) start1 = timer() proof = make_disjunctive_chaum_pedersen_zero( ciphertext, r, keypair.public_key, ONE_MOD_Q, s ) end1 = timer() valid = proof.is_valid(ciphertext, keypair.public_key, ONE_MOD_Q) end2 = timer() if not valid: raise Exception("Wasn't expecting an invalid proof during a benchmark!") return end1 - start1, end2 - end1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def part2(input):\n ps = PlanetSystem(input)\n c = ps.total_cycle_time()\n return c", "def part_2():\n input_ = parse_input() + list(range(10, 1_000_001))\n cups = turn_input_into_cups(input_)\n cups = solve(cups, first_cup=cups[input_[0]], turns=10_000_000)\n\n return cups[1].next.number * cups[1].next.next.number", "def dpTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 50\n answer = dpAdvisor(subjects, maxWork)\n end_time = time.time()\n printSubjects(answer)\n print 'Time taken: ', end_time - start_time\n return None", "def process():\r\n\t### INPUT ####\r\n\tpq = eat(str)\r\n\tp, q = map(int, pq.split('/'))\r\n\t\r\n\t### COMPUT ####\r\n\td = gcd(p, q)\r\n\tp, q = p//d, q//d\r\n\teprint(bin(q), bin(p))\r\n\tif len(bin(q).strip('0')) != 2:\r\n\t\treturn IMPOSSIBLE\r\n\t#length of p\r\n\tbinp = bin(p)\r\n\tbinq = bin(q)\r\n\treturn len(binq) - len(binp)\r\n\t\r\n\t### OUTPUT ####\r\n\treturn solve()", "def batchWallTime(cls, time, parsedCmd, numCpus):\n numTargets = 0\n for refList in parsedCmd.id.refList:\n numTargets += len(refList)\n return time*numTargets/float(numCpus)", "def part_2(puzzle_input: Tuple[Number] = p1) -> Number:\n for (noun, verb) in permutations(range(len(p1)), 2):\n # Create a fresh copy for each run\n program = list(p1)\n restore_program(memory_updates={1: noun, 2: verb}, memory=program)\n c = Computer(program)\n c.run_program()\n if c.read(0) == 19_690_720:\n return 100 * noun + verb\n raise ExecutionError(\"Could not satisfy requirement\")", "def _calcExecTime(self, migTask, dPrime):\n #print \"ae\", self\n # Let's start making U = 0.9999 (which probably causes deadline misses).\n # If we force U = 1, we won't be able to use La.\n if self.util() >= 0.9999:\n self._lastCost = 0.0\n return 0.0\n cPrime = (0.9999 - self.util())*migTask.period()\n\n # Temporarily add the slice\n tempSlice = WmSlice(-1, cPrime, dPrime, migTask)\n self._addSlice(tempSlice)\n\n L = self._L()\n min_d = self._minDeadline()\n\n #print \"L\", L\n #print self\n #print \"Calculating cost. dPrime\", dPrime\n\n # QPA\n t = self._lastDeadline(L)\n h = self._h(t)\n #print t\n while round(t,12) >= round(min_d,12): # We are checking demand only for the migratory task\n # We round the checking to 12 decimal places. Otherwise, it could make the algorithm repeat undefinedly, in\n # case new calculated cost is not 100% precise. We do the same when applying floor(). The other comparisons don't\n # need this correction, since they are not so critical.\n if round(h,12) > round(t,12):\n #print \"HIGH. t %.15f\" % t, \"h(t) %.15f\" % h, \". C was\", cPrime\n cPrime = (t - self._h_oth(t, tempSlice)) / floor(round((t + migTask.period() - dPrime)/migTask.period(), 12))\n #print \"New C is\", cPrime\n tempSlice._wcet = cPrime # Update slice cost to fix demand\n\n if cPrime <= 0.0: # Stop if the cost gets negative\n self._removeLastSlice()\n self._lastCost = 0.0\n return 0.0\n\n #print \"OK. t\", t, \"h(t)\",h, \"new t\",\n t = self._lastDeadline(t)\n #print t\n h = self._h(t)\n #print \"OK. 
t\", t, \"h(t)\",h\n\n #print self\n #print \"Final cost\", cPrime\n #if not self._qpa():\n # print self.tasks()\n #assert self._qpa()\n\n self._removeLastSlice()\n self._lastCost = cPrime\n return cPrime", "def prove_CM() -> Proof:\n # Optional Task 6.7f", "def calculateWaitingTime(self, inputs):\n CollisionCounter.CollisionCounter.getInstance().waitingTimeCalculated(self.time)\n timeUntilDepature = self.getAtt('departure_time', inputs) - self.time\n remainingLoadingTime = self.calculateLoadingTime(inputs)\n # calculates first maximum possible waiting time\n sampleTime = int((timeUntilDepature - remainingLoadingTime) / self.participants)\n\n if sampleTime >= 1:\n # result is big enough for a standard treatment\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(sampleTime + 1)\n elif sampleTime < 1:\n # reslut is too small, special treatment necessary\n upperLimit = (10 * (1 - (math.exp(sampleTime - 1)))) + 1\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(max((min(upperLimit,\n timeUntilDepature)) + 1, 1))\n # decides whether charging is allowed during waiting time\n if not self.stayedConnected:\n self.stayConnected = True\n self.stayedConnected = True\n else:\n self.stayConnected = False\n self.stayedConnected = False", "def part_a(puzzle_input):\r\n ec = ExperimentalCoprocessor(puzzle_input)\r\n try:\r\n while True:\r\n ec.step()\r\n except Exception:\r\n pass\r\n return str(ec.mul_counter)", "def timeThem(*args, **kwargs):\n\n funcs = []\n funcArgs = list(args[:])\n \n #filter arguments\n for arg in args:\n if callable(arg):\n funcs.append(arg)\n funcArgs.remove(arg)\n \n key = \"inNumber\"\n inNumber=10\n if key in kwargs:\n inNumber = kwargs[key]\n del kwargs[key]\n\n durations = []\n refTime = 0.0\n\n for func in funcs:\n retVal = func(*funcArgs, **kwargs)\n duration = timeit(partial(func, *funcArgs, **kwargs), number=inNumber)\n \n comparison = \"\"\n if refTime <= 0.0:\n refTime = duration\n else:\n comparison = \" ( *{:.2f})\".format(duration / refTime)\n \n print(\"{: <16} : {:.4f}\".format(func.__name__, duration) + comparison + \" returns '{}' ({})\".format(retVal, type(retVal).__name__))\n durations.append(duration)\n \n return durations", "def enter_data_for_time_calc():\n print(\"Pace & Distance -> Time\")\n print(\"=\" * 50)\n\n pace = input(\"Pace[min/km]: \")\n distance = float(input(\"Distance[km]: \"))\n\n calc_time(pace, distance)", "def benchmark(func, inputs):\n t0 = time.clock()\n results = [func(x) for x in inputs]\n t1 = time.clock()\n average_time = (t1 - t0) / len(inputs)\n return average_time, results", "def run_timings():\n\n running_times = []\n\n while recorded_time := input(f\"Enter your 10k time: \"):\n if not recorded_time:\n break\n running_times.append(float(recorded_time))\n average_pace = sum(running_times) / len(running_times)\n return average_pace", "def test_countdown_performance():\n profiler = cProfile.Profile()\n profiler.enable()\n countdown(\"0 0 0 0 0 0 1000\")\n profiler.disable()\n stats = profiler.getstats()\n tot_time = stats[0].totaltime\n assert tot_time < 3, \"Wow, your computer is really slow. 
Or is it my code?\"", "async def test(dut):\n\n dut._log.info(\"Running test...\")\n cocotb.fork(Clock(dut.clk, 1, units=\"ns\").start())\n dut.rst <= 0\n await Timer(0.2, units = \"ns\")\n dut.rst <= 1\n await Timer(1, units = \"ns\")\n dut.rst <= 0\n m = []\n map = {0: [0,0], 1:[0,1], 2:[1,1]}\n dut.en <= 1\n for i in range(350):\n m.append(randint(0,2))\n m.append(randint(0,2))\n num = map.get( m[2*i+1] ) + map.get( m[2*i] )\n dut.m_in <= int.from_bytes(bit_handle.arr_to_str(num), \"big\")\n await Timer(1, units = \"ns\")\n m.append(0)\n dut.m_in <= 0\n await Timer(1, units = \"ns\")\n dut.en <= 0\n await Timer(10, units = \"ns\")\n expect, m0_str = hrss(m)\n try:\n if dut.m1.value != expect:\n fail = 1\n report.write(\" + m1 = %X \\n + but i expect it = %X\\n\" %( int(dut.m1.value), expect ) )\n else:\n report.write(\"It is true that: + m1 = %X\\n\" %( int(dut.m1.value) ) )\n except:\n fail = 1\n report.write(\"Out is unidentified, but i expect it = %X\\n\" %( expect ) )\n\n m0 = binit(dut.m0.value, 9113)\n try:\n if m0 != m0_str:\n fail = 1\n report.write(\" + m0 = %s \\n + but i expect it = %s\\n\" %( m0, m0_str ) )\n else:\n report.write(\"It is true that: + m0 = %s\\n\" %( m0 ) )\n except:\n fail = 1\n report.write(\"Out is unidentified, but i expect it = %s\\n\" %( m0_str ) )\n\n dut._log.info(\"Running test...done\")", "async def my_test_dff(dut):\n\n #### Create and start clock with concurrent coroutine operation\n # Clock with 50% duty cycle and a period of 10ps\n cocotb.fork(Clock(dut.clk, 10, \"ps\").start())\n\n # Syncronize with the clock\n await RisingEdge(dut.clk)\n\n #### Generate transactions\n # In this case, all possible combinations in every consecutive order \n # 2 inputs (D and rstN) = 4 possible binary combinations (00, 01, 10, 11) => 2^4 = 16 possible combinations in every consecutive order\n # Declare the number of inputs\n num_of_inputs = 2\n # Create a list of permutations for those two inputs (list: [(0, 1), (1, 0)])\n transactions = list(permutations(range(num_of_inputs), 2))\n # Permutations do not account for repeat value combinations; so add those in to get (list: [(0, 1), (1, 0), [0, 0], [1, 1]]) the 4 possible binary combinations\n for i in range(num_of_inputs):\n transactions.append([i, i])\n # Create a list of permutations on top of the list of permutations to account for the \"in every consecutive order\" part\n transactions = list(permutations(transactions, 2))\n # Again, we must add in the missed repeat value combinations; there were 4 missed this time instead of the 2 above\n for i in range(num_of_inputs):\n transactions.append(([i, i], [i, i]))\n if i == 1:\n transactions.append(([i, 0], [i, 0]))\n transactions.append(([0, i], [0, i]))\n\n # Run the simulation with the transactions generated\n for i in range(len(transactions)):\n\n # Assign the stimulus to the DUT's ports\n dut.D <= transactions[i][0][0]\n dut.rstN <= transactions[i][0][1]\n \n # Simulate some small time (less than half the period) for the random integers to reach the DUT's input ports\n await Timer(1, \"ps\")\n #print(f\"The D input: {dut.D.value}\")\n #print(f\"The rstN input: {dut.rstN.value}\")\n \n # Detect the falling edge of clock \n await FallingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the falling edge (aka if reset is low)\n await Timer(1, \"ps\")\n #print(f\"The output after the falling edge: {dut.Q.value}\")\n\n # Detect the rising edge of clock\n await RisingEdge(dut.clk)\n\n # Simulate some 
small time (less than half the period) for the output to update accordingly after the rising edge (aka if \"D\" is different than \"Q\")\n await Timer(1, \"ps\")\n #print(f\"The output after the rising edge: {dut.Q.value}\")\n\n # Assert an error message and stop simulation if the output does not match the model's output\n assert dut.Q.value == my_dff_model(transactions[i][0][0], transactions[i][0][1]), f\"Failure: Transaction - {transactions[i][0]} failed!\"\n\n #### There is a double simulation per \"for\" loop because of how the transaction was built \n # Assign the stimulus to the DUT's ports\n dut.D <= transactions[i][1][0]\n dut.rstN <= transactions[i][1][1]\n\n #Simulate some small time (less than half the period) for the random integers to reach the DUT's input ports\n await Timer(1, \"ps\")\n #print(f\"The D input: {dut.D.value}\")\n #print(f\"The rstN input: {dut.rstN.value}\")\n\n # Detect the falling edge of clock\n await FallingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the falling edge (aka if reset is low)\n await Timer(1, \"ps\")\n #print(f\"The output after the falling edge: {dut.Q.value}\")\n\n # Detect the rising edge of clock\n await RisingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the rising edge (aka if \"D\" is different than \"Q\")\n await Timer(1, \"ps\")\n #print(f\"The output after the rising edge: {dut.Q.value}\")\n\n\n assert dut.Q.value == my_dff_model(transactions[i][1][0], transactions[i][1][1]), f\"Failure: Transaction - {transactions[i][1]} failed!\"", "def verify_implementation():\n import csv\n import time\n\n times = dict()\n max_run_times = 200\n for n in range(1, max_run_times):\n sentence = ''\n for _ in range(n):\n sentence += random.choice('01')\n sentence += 'ε'\n start = time.time()\n parse(SmallGrammar, sentence)\n end = time.time()\n times[n] = end - start\n print(n)\n with open('cyk_run_times.csv', 'w') as fout:\n writer = csv.writer(fout)\n for n in range(1, max_run_times):\n writer.writerow([n, times[n]])", "def solution(sequences):\n sorted_log = sort_log(sequences)\n guards = log(sorted_log)\n suspect = sleepy_guard(guards)\n table = guards[suspect].table()\n minute = find_minute(table)\n print(f\"sleepy guard {suspect} slept for {guards[suspect].has_slept()}\")\n print(f\"most at minute {minute}\")\n print(f\"answer: {suspect * minute}\")\n return suspect * minute", "def time(diners):\n if str(diners) in cache:\n return cache[str(diners)]\n if diners[0] <= 3:\n r = diners[0]\n cache[str(diners)] = r\n return r\n else:\n mintime = diners[0]\n for i in range(1, diners[0]//2+1):\n mintime = min(mintime, 1+time(sorted(diners[1:] + [diners[0]-i] + [i], key = lambda x: -x)))\n cache[str(diners)] = mintime\n return mintime\n # return min(diners[0], 1+time(sorted(diners[1:] + [diners[0]//2] + [diners[0]//2 + diners[0]%2], key = lambda x: -x)))\n # return min(\n # 1+time([max(0, x-1) for x in diners]),\n # 1+time(sorted(diners[1:] + [diners[0]//2] + [diners[0]//2 + diners[0]%2], key = lambda x: -x))\n # )", "def evaluate(self, time) -> float:\n ...", "def _get_timings_perinput(funcs, input_=None):\n\n global _TIMEOUT\n global _NUM_REPEATS\n\n timings_l = []\n\n from IPython import get_ipython\n if get_ipython() is None:\n iter_funcs = trange(len(funcs), desc='Loop functions', leave=False)\n else:\n iter_funcs = range(len(funcs))\n\n for j in iter_funcs:\n f = funcs[j]\n ii = 1\n process_next = True\n 
while process_next:\n for jj in 1, 2, 5:\n iter_rep = ii * jj\n if input_ is None:\n t = min(timeit.repeat(functools.partial(f), repeat=_NUM_REPEATS, number=iter_rep))\n else:\n t = min(timeit.repeat(functools.partial(f, *input_), repeat=_NUM_REPEATS, number=iter_rep))\n if t > _TIMEOUT:\n process_next = False\n break\n ii *= 10\n timings_l.append(t / iter_rep)\n return timings_l", "def test_find_parallel_duration():\n pt2_example = {\n \"C\": [],\n \"A\": [\"C\"],\n \"F\": [\"C\"],\n \"B\": [\"A\"],\n \"D\": [\"A\"],\n \"E\": [\"B\", \"D\", \"F\"],\n }\n assert find_parallel_duration(pt2_example, 2, 0) == 15", "def part1(input):\n sys = AmpSystem(input)\n return sys.max_thruster_signal([i for i in range(5)])", "def compare_cow_transport_algorithms():\n cow_set = load_cows(\"ps1_cow_data.txt\")\n \n def get_run_time(func):\n start = time.time()\n print(func(cow_set))\n end = time.time()\n return end-start\n \n greedy_time = get_run_time(greedy_cow_transport)\n brute_force_time = get_run_time(brute_force_cow_transport)\n \n print(\"---\"*20)\n print(\"greedy runtime: \",greedy_time)\n print(\"brute force runtime: \", brute_force_time)", "def bruteForceTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 8\n answer = bruteForceAdvisor(subjects, maxWork)\n end_time = time.time()\n printSubjects(answer)\n print 'Time taken: ', end_time - start_time\n return None", "def psych(output_str, input_str_1, input_1, input_str_2, input_2, pressure=29.92):\n\n dry_bulb = 0\n wet_bulb = 0\n dewpoint = 0\n rel_hum = 0\n hum_rat = 0\n spec_vol = 0\n enthalpy = 0\n pressure *= 0.491154\n\n if input_str_1 in ('db', 'DB', 'wb', 'WB', 'dp', 'DP', 'rh', 'RH', 'hr', 'HR', 'sv', 'SV', 'en', 'EN') and \\\n input_str_2 in ('db', 'DB', 'wb', 'WB', 'dp', 'DP', 'rh', 'RH', 'hr', 'HR', 'sv', 'SV', 'en', 'EN'):\n\n if input_str_1 in ('db', 'DB'):\n dry_bulb = input_1\n elif input_str_2 in ('db', 'DB'):\n dry_bulb = input_2\n\n if input_str_1 in ('wb', 'WB'):\n wet_bulb = input_1\n elif input_str_2 in ('wb', 'WB'):\n wet_bulb = input_2\n\n if input_str_1 in ('dp', 'DP'):\n dewpoint = input_1\n elif input_str_2 in ('dp', 'DP'):\n dewpoint = input_2\n\n if input_str_1 in ('rh', 'RH'):\n rel_hum = input_1 / 100\n elif input_str_2 in ('rh', 'RH'):\n rel_hum = input_2 / 100\n\n if input_str_1 in ('hr', 'HR'):\n hum_rat = input_1\n elif input_str_2 in ('hr', 'HR'):\n hum_rat = input_2\n\n if input_str_1 in ('sv', 'SV'):\n spec_vol = input_1\n elif input_str_2 in ('sv', 'SV'):\n spec_vol = input_2\n\n if input_str_1 in ('en', 'EN'):\n enthalpy = input_1\n elif input_str_2 in ('en', 'EN'):\n enthalpy = input_2\n else:\n return ValueError('Invalid input types')\n\n if hum_rat < 0:\n return ValueError('Humidity ratio less than 0')\n if rel_hum < 0 or rel_hum > 1:\n return ValueError('Relative humidity less than 0 or greater than 100')\n\n ############################################################################################\n\n if input_str_1 in ('db', 'DB') or input_str_2 in ('db', 'DB'):\n if output_str in ('db', 'DB'):\n return dry_bulb\n\n db_r = dry_bulb + 459.67\n\n if input_str_1 in ('wb', 'WB') or input_str_2 in ('wb', 'WB'):\n if output_str in ('wb', 'WB'):\n return wet_bulb\n\n db_r = dry_bulb + 459.67\n wb_r = wet_bulb + 459.67\n\n pres_wb_sat = sat_pres(wb_r)\n\n hr_wb_sat = 0.62198 * pres_wb_sat / (pressure - pres_wb_sat)\n\n hum_rat = (hr_wb_sat * (1093 - .556 * wet_bulb) - 0.24 * (dry_bulb - wet_bulb)) / \\\n (1093 + .444 * dry_bulb - wet_bulb)\n if output_str in ('hr', 
'HR'):\n return hum_rat\n\n pres_db_sat = sat_pres(db_r)\n\n hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n mu = hum_rat / hr_db_sat\n\n rel_hum = mu / (1 - (1 - mu) * (pres_db_sat / pressure))\n if rel_hum < 0 or rel_hum > 1:\n return -1 # ValueError('Calculated relative humidity less than 0')\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n pres_vapor = (pressure * hum_rat) / (0.62198 + hum_rat)\n\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('dp', 'DP') or input_str_2 in ('dp', 'DP'):\n if output_str in ('dp', 'DP'):\n return dewpoint\n\n dp_r = dewpoint + 459.67\n\n pres_vapor = sat_pres(dp_r)\n\n hum_rat = 0.62198 * pres_vapor / (pressure - pres_vapor)\n if hum_rat < 0:\n return -1 # ValueError('Calculated humidity ratio below 0')\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_db_sat = sat_pres(db_r)\n\n hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n mu = hum_rat / hr_db_sat\n\n rel_hum = mu / (1 - (1 - mu) * (pres_db_sat / pressure))\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n pres_db_sat = sat_pres(db_r)\n\n pres_vapor = pres_db_sat * rel_hum\n\n hum_rat = 0.62198 * pres_vapor / (pressure - pres_vapor)\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n # hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n # mu = hum_rat / hr_db_sat\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_db_sat = sat_pres(db_r)\n\n pres_vapor = hum_rat * pressure / (hum_rat + 0.62198)\n\n rel_hum = pres_vapor / pres_db_sat\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n # hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n # mu = hum_rat / hr_db_sat\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n 
if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n\n # pres_db_sat = sat_pres(db_r)\n\n hum_rat = (spec_vol * 28.9645 * (pressure * 144) / (1545.32 * db_r) - 1) / 1.6078\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_vapor = hum_rat * pressure / (hum_rat + 0.62198)\n\n rel_hum = pres_vapor / pressure\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n\n pres_db_sat = sat_pres(dry_bulb)\n\n hum_rat = (enthalpy - 0.24 * dry_bulb) / (1061 + 0.444 * dry_bulb)\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_vapor = hum_rat * pressure / (hum_rat + 0.62198)\n\n rel_hum = pres_vapor / pres_db_sat\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n # hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n # mu = hum_rat / hr_db_sat\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('wb', 'WB') or input_str_2 in ('wb', 'WB'):\n if output_str in ('wb', 'WB'):\n return wet_bulb\n\n wb_r = wet_bulb + 459.67\n\n if input_str_1 in ('dp', 'DP') or input_str_2 in ('dp', 'DP'):\n if output_str in ('dp', 'DP'):\n return dewpoint\n\n dp_r = dewpoint + 459.67\n\n pres_vapor = sat_pres(dp_r)\n\n hum_rat = 0.62198 * pres_vapor / (pressure - pres_vapor)\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_wb_sat = sat_pres(wb_r)\n\n hr_wb_sat = 0.62198 * pres_wb_sat / (pressure - pres_wb_sat)\n\n dry_bulb = ((1093 - 0.556 * wet_bulb) * hr_wb_sat + 0.24 * wet_bulb - (1093 - wet_bulb) * hum_rat) / \\\n (0.444 * hum_rat + 0.24)\n if output_str in ('db', 'DB'):\n return dry_bulb\n\n db_r = dry_bulb + 459.67\n\n pres_db_sat = sat_pres(dry_bulb)\n\n hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n mu = hum_rat / hr_db_sat\n\n rel_hum = mu / (1 - (1 - mu) * (pres_db_sat / pressure))\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('rh', 'RH') or input_str_2 
in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n return -1 # no enthalpy, wet bulb and enthalpy are too closely related to avoid problems\n elif input_str_1 in ('dp', 'DP') or input_str_2 in ('dp', 'DP'):\n if output_str in ('dp', 'DP'):\n return dewpoint\n\n dp_r = dewpoint + 459.67\n\n if input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n return -1 # no humidity ratio - it is the dew point more or less\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n if input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n elif input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n\n if input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy", "def part_1():\n input_ = parse_input()\n cups = turn_input_into_cups(input_)\n cups = solve(cups, first_cup=cups[input_[0]], turns=100)\n\n answer = []\n current_cup = cups[1].next\n while current_cup != cups[1]:\n answer.append(str(current_cup.number))\n current_cup = current_cup.next\n\n return \"\".join(answer)", "def compute_duty_factor():\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n print(np.sum(foot_l_contact)/len(foot_l_contact))\n print(np.sum(foot_r_contact)/len(foot_r_contact))\n\n return np.sum(foot_l_contact)/len(foot_l_contact)*0.5 + np.sum(foot_r_contact)/len(foot_r_contact)*0.5", "def run_inference(self, input):\n #TODO(142164990): Add support for io.BytesIO heavily used on Raspberry Pi.\n #TODO(142164990): Add benchmarks for all supported types to catch regressions.\n if isinstance(input, bytes):\n result = self._engine.RunInferenceBytes(input)\n elif _is_valid_ctypes_input(input):\n pointer, size = input\n result = self._engine.RunInferenceRaw(pointer.value, size)\n elif _libgst and isinstance(input, Gst.Buffer):\n with _gst_buffer_map(input) 
as (pointer, size):\n result = self._engine.RunInferenceRaw(pointer.value, size)\n else:\n result = self._engine.RunInference(input)\n latency = self._engine.get_inference_time()\n return (latency, result)" ]
[ "0.57208085", "0.54804933", "0.5131725", "0.50099516", "0.49980843", "0.49875277", "0.4948482", "0.49447292", "0.49406472", "0.4887707", "0.4879715", "0.48475006", "0.48428223", "0.4833638", "0.48149478", "0.48010787", "0.4800391", "0.4798947", "0.47890905", "0.47886792", "0.4788516", "0.47770703", "0.47671026", "0.47664678", "0.47606117", "0.47534144", "0.47014824", "0.46926782", "0.46814704", "0.46700004" ]
0.60869694
0
Test of the function that decides whether log rotation is needed
def test_need_to_rotate_log(self): self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time') self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time') self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size') self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs()\n self.assertEquals(self.conveyer.logfile, None)\n self.assertEquals(filename, \"testfile.dat.rotated\")", "def log(a):", "def log2(a):", "def test_log_con():\n c=14\n assert {'diff':EF.log(c).der, 'value': EF.log(c).val}=={'diff':0, 'value': math.log(c)}", "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def test_log():\n c=14\n def myfunc(x):\n f1=EF.log(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': 1/c, 'value': math.log(c)}\n\n assert res==expectAns", "def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) - np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik", "def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])", "def _logcheck(self, t, y):\n\t\t#print y\n\t\tif (t>0)&(y[0]>-2*self.phi0): self.r, self._y = numpy.r_[self.r, t], numpy.c_[self._y, y]\n\n\t\treturn 0", "def log_cust(x):\n if type(x) != str:\n if x < 0:\n return 0\n elif x == 0:\n return 0\n elif x > 0:\n return np.log(x)", "def log_inplace(a):", "def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))", "def test_logistic():\n r=np.random.normal(size=20)\n assert np.isclose( ilogistic(logistic(r)),r ).all()", "def add_log_if_improves_skew(feature, df) :\r\n featureData = df[feature] \r\n logged = np.log(featureData)\r\n if abs(logged.skew()) >= abs(featureData.skew()) :\r\n return False\r\n df[feature+\"_log\"] = logged\r\n return True", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 
*(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]", "def _loglike(self, y, f):\n raise NotImplementedError('Abstract base class only.')", "def test_function_log2(self):\r\n self.assertEquals(preview.latex_preview('log2(3)'), r'\\log_2(3)')", "def test_get_log(self):\n result = log_lib.get_log(True)\n self.assertTrue(callable(result))\n result(\"dummy-message\")\n\n result = log_lib.get_log(False)\n self.assertTrue(callable(result))\n result(\"dummy-message\")", "def ilog(x,delta):\n if(delta < x and x < 1.0 - delta):\n return np.log( -np.log(x) )\n elif(x < delta):\n return np.log( -np.log(delta) )\n else: \n return np.log( -np.log(1.0 - delta) )", "def test_transform(self):\n t = Linearize()\n assert t.transform(numpy.e) == numpy.log(numpy.e)\n t.transform(0)", "def ga_log(R):\n phiP, t_normal_n, t_perpendicular_n = extractRotorComponents(R)\n return phiP + t_normal_n + t_perpendicular_n", "def check( log = False):\n return True", "def HasRotated(logfile, hash):\n timestamp = utcnow()\n cursor.execute('''SELECT hash, date FROM rotate\n WHERE logfile = \"%s\"''' % (logfile,))\n result = cursor.fetchone()\n # If the database doesn't have an entry for our logfile then we need to\n # create one for it using the passed logfile hash and the current\n # timestamp.\n if not result:\n print \"New logfile, adding hash and date.\"\n cursor.execute('''INSERT INTO rotate (logfile, hash, date)\n VALUES (\"%s\", \"%s\", \"%s\")''' % (logfile, hash, timestamp))\n con.commit()\n return timestamp\n if result[0] == hash:\n # The current logfile hash matches the recorded one at last rotation,\n # we just return the old timestamp.\n return result[1]\n # If we get here, the logfile hash is different, indicating that rotation\n # has occured. We therefore set and return a new timestamp.\n print logfile, \"has rotated\"\n cursor.execute('''UPDATE rotate SET hash = \"%s\", date = \"%s\"\n WHERE logfile = \"%s\"''' % (hash, timestamp, logfile))\n con.commit()\n return timestamp", "def log2(x):\n raise NotImplementedError", "def log1p(x):\r\n # see decorator for function body\r", "def log2_inplace(a):", "def log_prob(self):", "def test_transform(self):\n\n # Known constants tests\n for i, direction in enumerate(OCIO.ColorSpaceDirection.__members__.values()):\n self.colorspace.setTransform(self.log_tr, direction)\n log_transform = self.colorspace.getTransform(direction)\n self.assertIsInstance(log_transform, OCIO.LogTransform)\n self.assertEquals(self.log_tr.getBase(), log_transform.getBase())", "def test_log():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.log(fwd.sin(x)+y**2)\n dfdx = lambda x, y: np.cos(x) / (np.sin(x)+y**2)\n dfdy = lambda x, y: 2*y / (np.sin(x)+y**2)\n d2fdxdy = lambda x, y: -2*y*np.cos(x) / (np.sin(x)+y**2)**2\n assert equals(f.evaluation_at({x: 1.5, y:2.5}), np.log(np.sin(1.5)+2.5**2))\n assert equals(f.derivative_at(x, {x: 1.5, y:2.5}), dfdx(1.5, 2.5))\n assert equals(f.derivative_at(y, {x: 1.5, y:2.5}), dfdy(1.5, 2.5))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}), d2fdxdy(1.5, 2.5))\n with pytest.raises(NotImplementedError):\n f.derivative_at(x, {x:1.0, y: 2.0}, order=3)", "def m(loglvl):\n global LOG_LEVEL\n return (loglvl & LOG_LEVEL) != 0x0" ]
[ "0.62220013", "0.61344224", "0.58783966", "0.58642936", "0.5808232", "0.5750104", "0.5715419", "0.5713662", "0.5678804", "0.56212765", "0.55951023", "0.5573171", "0.5567551", "0.5551365", "0.55413747", "0.5514498", "0.5495516", "0.5494864", "0.54912615", "0.5475333", "0.5475077", "0.5449099", "0.5437804", "0.5417328", "0.5414975", "0.54093736", "0.5400344", "0.53598154", "0.5358943", "0.53586483" ]
0.7284713
0
Tests of attempting rotation with compress in the configuration
def test_process_log_with_compress_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'gzip -9' }, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, [[sandbox, 'gzip', '-9', str(destfile)]]) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=destfile))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n # make sure we normally go outside the range\n self.assertGreater(np.sum(M1.out < mrate), 0)\n self.assertGreater(np.sum(M1.out > Mrate), 0)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertEqual(np.sum(M2.out < mrate), 0)\n self.assertEqual(np.sum(M2.out > Mrate), 0)", "def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))", "def test_auto_compression():\n with dask.config.set({\"test123\": \"auto\"}):\n try:\n import lz4 # noqa: F401\n\n assert get_compression_settings(\"test123\") == \"lz4\"\n return\n except ImportError:\n pass\n\n try:\n import snappy # noqa: F401\n\n assert get_compression_settings(\"test123\") == \"snappy\"\n except ImportError:\n assert get_compression_settings(\"test123\") is None", "def test_compression_tanh(self):\n tau = 48.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n mavg = 0.5*(mrate + Mrate)\n mdiff = 0.5*(Mrate - mrate)\n\n expected = mavg + mdiff*np.tanh((M1.out - mavg)/mdiff)\n\n self.assertTrue(np.allclose(M2.out, expected), msg=\n \"mean(abs(out - expected))={}\".format(np.mean(np.abs(M2.out - expected))))", "def test_compress(self):\n self.logger.info(\"STEP: Create the workspace directory to be compressed.\")\n workspace = Workspace(Mock)\n directory = Path.cwd().joinpath(\"workspace\")\n directory.mkdir()\n workspace.workspace = directory\n\n # Create a file to verify compression.\n directory.joinpath(\"file.txt\").touch()\n\n test_folder = Path.cwd().joinpath(\"testfolder\")\n test_folder.mkdir()\n self.items.append(test_folder)\n\n self.logger.info(\"STEP: Compress the directory.\")\n workspace.compress()\n\n self.logger.info(\n \"STEP: Verify that the directory was compressed using the gztar format.\"\n )\n self.items.append(test_folder)\n compressed_workspace = Path.cwd().joinpath(\"workspace.tar.gz\")\n unpack_archive(compressed_workspace, test_folder, format=\"gztar\")\n compressed_file = test_folder.joinpath(\"workspace/file.txt\")\n self.assertTrue(compressed_file.exists() and 
compressed_file.is_file())", "def test_compress_deterministic(self):\n\n class DeterministicGZipMiddleware(GZipMiddleware):\n max_random_bytes = 0\n\n r1 = DeterministicGZipMiddleware(self.get_response)(self.req)\n r2 = DeterministicGZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r1.content, r2.content)\n self.assertEqual(self.get_mtime(r1.content), 0)\n self.assertEqual(self.get_mtime(r2.content), 0)", "def test_backup_with_compress_flag(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backupset.backup_compressed = False\n self.backup_cluster()\n no_compression = self.get_database_file_info()\n self.log.info(\"\\nDelete old backup and do backup again with compress flag\")\n self.backup_create()\n self.backupset.backup_compressed = self.input.param(\"backup-compressed\", False)\n self.backup_cluster()\n with_compression = self.get_database_file_info()\n self.validate_backup_compressed_file(no_compression, with_compression)", "def _check_rotated_filename_candidates(self):\n # savelog(8)\n candidate = \"%s.0\" % self.filename\n if (exists(candidate) and exists(\"%s.1.gz\" % self.filename) and\n (stat(candidate).st_mtime > stat(\"%s.1.gz\" % self.filename).st_mtime)):\n return candidate\n\n # logrotate(8)\n # with delaycompress\n candidate = \"%s.1\" % self.filename\n if exists(candidate):\n return candidate\n\n # without delaycompress\n candidate = \"%s.1.gz\" % self.filename\n if exists(candidate):\n return candidate\n\n rotated_filename_patterns = (\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # for TimedRotatingFileHandler\n \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\",\n )\n for rotated_filename_pattern in rotated_filename_patterns:\n candidates = glob.glob(self.filename + rotated_filename_pattern)\n if candidates:\n candidates.sort()\n return candidates[-1] # return most recent\n\n # no match\n return None", "def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)", "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def perform_tests():\n print \"\\n****\\nTesting Doublecompress...\\n\"\n dc_pass = unit_doublecompress()\n if (dc_pass):\n result = 'PASS'\n else:\n result = 'FAIL'\n print \">>> \" + result\n\n return dc_pass", "def test_tamper_mutate_compress(logger):\n backup = copy.deepcopy(actions.tamper.ACTIVATED_PRIMITIVES)\n actions.tamper.ACTIVATED_PRIMITIVES = [\"compress\"]\n try:\n tamper = actions.tamper.TamperAction(None)\n assert tamper.parse(\"TCP:flags:corrupt\", logger)\n tamper._mutate_tamper_type()\n assert 
tamper.tamper_type == \"compress\"\n assert tamper.tamper_proto_str == \"DNS\"\n assert tamper.field == \"qd\"\n packet = layers.packet.Packet(IP()/TCP()/DNS()/DNSQR())\n packet2 = tamper.tamper(packet, logger)\n assert packet2 == packet\n finally:\n actions.tamper.ACTIVATED_PRIMITIVES = backup", "def test_op_no_compression(self):\n assert OP_NO_COMPRESSION == 0x20000", "def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')\n self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')", "def test_compress_cmd():\n # GIVEN a cli runner\n runner = CliRunner()\n # WHEN running the compress command with dry_run\n result = runner.invoke(compress, obj={})\n # THEN assert the command was succesful even without a valid api\n assert result.exit_code == 0", "def test_compression_level(self):\n test_compression_level = 8\n self.encoder._compression_level = test_compression_level", "def test_compress_spring(spring_tmp_path, first_tmp_file, second_tmp_file, spring_api):\n # GIVEN a spring api\n # GIVEN two existing fastq reads\n assert first_tmp_file.exists()\n assert second_tmp_file.exists()\n # GIVEN a spring path that does not exist\n assert not spring_tmp_path.exists()\n\n # WHEN compressing fastq files into the spring file\n res = spring_api.compress(first_tmp_file, second_tmp_file, spring_tmp_path)\n\n # THEN assert that process was succesful\n assert res is True\n # THEN assert that the spring compression exists\n assert spring_tmp_path.exists()", "def test_mcg_data_compression(\n self, mcg_obj, awscli_pod, bucket_factory, bucketclass_dict\n ):\n download_dir = \"/aws/compression/\"\n awscli_pod.exec_cmd_on_pod(\n command=craft_s3_command(\n f\"cp s3://{constants.TEST_FILES_BUCKET}/enwik8 {download_dir}\"\n ),\n out_yaml_format=False,\n )\n bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name\n full_object_path = f\"s3://{bucketname}\"\n sync_object_directory(awscli_pod, download_dir, full_object_path, mcg_obj)\n # For this test, enwik8 is used in conjunction with Snappy compression\n # utilized by NooBaa. 
Snappy consistently compresses 35MB of the file.\n mcg_obj.check_data_reduction(bucketname, 35 * 1024 * 1024)", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])", "def test_compress_fastq_real_with_integrity_fail(\n first_tmp_file, second_tmp_file, spring_tmp_path, real_base_context, mocker\n):\n # GIVEN the path to a existing two existing fastq files and a non existing spring\n runner = CliRunner()\n assert not spring_tmp_path.exists()\n assert first_tmp_file.exists()\n assert second_tmp_file.exists()\n\n dir_path = spring_tmp_path.parent\n assert nr_files(dir_path) == 2\n mocker.patch.object(compare_cmd, \"compare_elements\")\n compare_cmd.compare_elements.return_value = False\n # WHEN running the compress command with an intergrity check\n result = runner.invoke(\n fastq,\n [\n \"--first-read\",\n str(first_tmp_file),\n \"--second-read\",\n str(second_tmp_file),\n \"--spring-path\",\n str(spring_tmp_path),\n \"--check-integrity\",\n ],\n obj=real_base_context,\n )\n # THEN assert the command succedes\n assert result.exit_code == 1\n # THEN assert that the spring file was deleted\n assert not spring_tmp_path.exists()\n # THEN assert that only the original fastq files are left\n assert nr_files(dir_path) == 2", "def _should_compress(new_descriptor: Union[FileDescriptor, StreamDescriptor], ingestion_properties: IngestionProperties) -> bool:\n return not new_descriptor.is_compressed and ingestion_properties.format.compressible", "def check_compression(ctype, clevel, olevel):\n repository = Repository(archiver.repository_path, exclusive=True)\n with repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n state = None\n while True:\n ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)\n if not ids:\n break\n for id in ids:\n chunk = repository.get(id, read_data=True)\n meta, data = manifest.repo_objs.parse(id, chunk) # will also decompress according to metadata\n m_olevel = meta.get(\"olevel\", -1)\n m_psize = meta.get(\"psize\", -1)\n print(\n hexlify(id).decode(),\n meta[\"ctype\"],\n meta[\"clevel\"],\n meta[\"csize\"],\n meta[\"size\"],\n m_olevel,\n m_psize,\n )\n # this is not as easy as one thinks due to the DecidingCompressor choosing the smallest of\n # (desired compressed, lz4 compressed, not compressed).\n assert meta[\"ctype\"] in (ctype, LZ4.ID, CNONE.ID)\n assert meta[\"clevel\"] in (clevel, 255) # LZ4 and CNONE has level 255\n if olevel != -1: # we expect obfuscation\n assert \"psize\" in meta\n 
assert m_olevel == olevel\n else:\n assert \"psize\" not in meta\n assert \"olevel\" not in meta", "def test_compress_response(self):\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertEqual(r.get(\"Content-Length\"), str(len(r.content)))", "def test_compress_fastq_real_with_integrity(\n first_tmp_file, second_tmp_file, spring_tmp_path, real_base_context\n):\n # GIVEN the path to a existing two existing fastq files and a non existing spring\n runner = CliRunner()\n assert not spring_tmp_path.exists()\n assert first_tmp_file.exists()\n assert second_tmp_file.exists()\n\n dir_path = spring_tmp_path.parent\n assert nr_files(dir_path) == 2\n # WHEN running the compress command with an intergrity check\n result = runner.invoke(\n fastq,\n [\n \"--first-read\",\n str(first_tmp_file),\n \"--second-read\",\n str(second_tmp_file),\n \"--spring-path\",\n str(spring_tmp_path),\n \"--check-integrity\",\n ],\n obj=real_base_context,\n )\n # THEN assert the command succedes\n assert result.exit_code == 0\n # THEN assert that the spring file was created\n assert spring_tmp_path.exists()\n # THEN assert that the files created for integrity check was removed\n assert nr_files(dir_path) == 3", "def test_compress_fastq_dry_run_integrity(first_read, second_read):\n # GIVEN the path to a existing bam file and a cli runner\n runner = CliRunner()\n assert first_read.exists()\n assert second_read.exists()\n # WHEN running the compress command with dry_run\n result = runner.invoke(\n fastq,\n [\n \"--first-read\",\n str(first_read),\n \"--second-read\",\n str(second_read),\n \"--dry-run\",\n \"--check-integrity\",\n ],\n obj={},\n )\n # THEN assert the command was succesful even without a valid api\n assert result.exit_code == 0", "def check_zlib():\n\n try:\n import zlib\n zlib.compress('Compress this')\n return True\n except Exception as ex:\n LOG.error(str(ex))\n LOG.error('Failed to import zlib module.')\n return False", "def _optimise_rotation(self):\n logger.info(\n f\"Minimising dimer rotation up to \"\n f'δϕ = {self.phi_tol.to(\"degrees\"):.4f}º'\n )\n\n for i in range(self._ratio_rot_iters):\n\n result = self._rotate()\n\n if (\n result == _StepResult.skipped_rotation\n or abs(self._coords.phi) < self.phi_tol\n ):\n break\n\n logger.info(\n f\"Micro iteration: {i}.\"\n f' ϕ={self._coords.phi.to(\"degrees\"):.2f}º'\n )\n\n return None", "def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))", "def test_no_compress_compressed_response(self):\n self.resp[\"Content-Encoding\"] = \"deflate\"\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), 
\"deflate\")" ]
[ "0.63947666", "0.61248237", "0.6114381", "0.6083236", "0.6082104", "0.6062771", "0.59740895", "0.59541243", "0.5930159", "0.5895802", "0.57862955", "0.5755226", "0.5731432", "0.56462026", "0.56308395", "0.5578965", "0.5572358", "0.55638254", "0.5525263", "0.5516631", "0.5515557", "0.55060184", "0.5438584", "0.54046595", "0.53942204", "0.53855133", "0.5384133", "0.53684163", "0.53375804", "0.5310259" ]
0.62119424
1
Test get_spec_config on empty conf
def test_get_spec_config_empty(self): spec_conf = get_spec_config({}, '') self.assertEqual(spec_conf, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})", "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def test_get_empty_config():\n\n testutils.deploy_config_raw(\"\")\n\n with pytest.raises(prop.PropertyError):\n prop.get_prop('info', 'sdk')\n\n testutils.undeploy()\n\n return 0", "def get_config_spec(cls):\n return False", "def test_config_file_empty(get_empty_config, monkeypatch, get_root, conf_obj):\n path = os.path.join(get_root, 'res', 'missing.yml')\n\n with pytest.raises(FileNotFoundError):\n get_empty_config(conf_obj, path)", "def test_get_spec_config_match(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'default_foo': 'default_bar',\n 'foo': 'bar'\n },\n 'specific': [\n {'mask': ['filenomatch'], 'foo': 'bar_nomatch'},\n {'mask': ['filematch'], 'foo': 'match'},\n {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'}\n ]\n }, 'filematch')\n self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})", "def test_get_with_empty_value(self):\n self.assertEqual(self.config.get('none_types','other_value'),None)\n self.assertEqual(self.config.get('none_types','other_value','something'),'something')", "def test_config_class():\n assert config is not None", "def test_test_empty_config():\n\n testutils.deploy_config_raw(\"\")\n\n assert prop.test_prop('info', 'sdk') == 0\n\n testutils.undeploy()\n\n return 0", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def test_no_config_keyword(self):\n args = self.get_args()\n config = {\n \"site\": {\n \"username\": \"\",\n \"name\": \"\",\n \"ip_address\": \"\",\n \"password\": \"\",\n \"local\": \"\",\n \"use_https\": \"\"\n }\n }\n temp = sys.stdout\n fake_out = FakeStdio()\n sys.stdout = fake_out\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n sys.stdout = temp\n self.assertTrue(fake_out.verify_output(['%% Invalid configuration file', '\\n']))", "def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()", "def test_config_option_not_required_no_default():\n class Config(config.Config):\n a = config.option(int, help=\"\")\n\n c = config.structure({}, Config)\n assert c.a is None", "async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": 
[\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()", "def test_no_default(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(self._no_default))\n # ConcurrentWorkers is the first value that is checked\n self.assertEqual(str(cm.exception),\n \"Config must contain ConcurrentWorkers\")", "def test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']", "def test_config_device_restore_empty(get_config, write_config_fixture, monkeypatch):\n fname = 'will_be_empty.yml'\n # saving normal conf\n is_default = {'test': 'conf'}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', is_default)\n cfg = get_config(DeviceConfig, base_config, fname=fname)\n cfg.save()\n write_config_fixture('', fname)\n should_be_default = cfg.read()\n\n assert should_be_default == is_default, 'configs not matched'", "def test_required_config_none(self):\n base_config = BaseConfig()\n setattr(base_config, 'required_config', ['TEST_CONF'])\n setattr(base_config, 'TEST_CONF', None)\n\n self.assertRaises(Exception, base_config.check_required_config)", "def test_config_option_required_no_default():\n class Config(config.Config):\n a = config.option(int, required=True, help=\"\")\n\n with pytest.raises(config.ConfigError):\n config.structure({}, Config)\n\n with pytest.raises(config.ConfigError):\n config.structure({\"a\": None}, Config)\n\n c = config.structure({\"a\": 12}, Config)\n assert c.a == 12", "def test_config_option_not_required_default():\n class Config(config.Config):\n a = config.option(int, default=12, required=False, help=\"\")\n\n c = config.structure({\"a\": None}, Config)\n assert c.a is None", "def test_validate_config_empty_config(self):\n\n sample_config = {}\n\n expected_config = {\n 'hosts': [],\n 'syncs': [],\n 'recursive': False,\n 'tags': [],\n }\n\n result = syncme.validate_config(sample_config)\n self.assertTrue(result)\n self.assertDictEqual(sample_config, expected_config)", "def test_no_config(self):\n exit_code = self.run_beat()\n\n assert exit_code == 1\n assert self.log_contains(\"error loading config file\") is True", "def check_config(cfg):", "def test_config_option_required_default():\n class Config(config.Config):\n a = config.option(int, required=True, default=12, help=\"\")\n\n c = config.structure({}, Config)\n assert c.a == 12\n\n c = config.structure({\"a\": 23}, Config)\n assert c.a == 23\n\n with pytest.raises(config.ConfigError):\n config.structure({\"a\": None}, Config)", "def test_none_in_config(self, mocked_callable_loader, mocked_load_config):\n config_filename = 'aconfigfile'\n importer.Finder(config_filename)", "def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())", "def test_get_with_None_value(self):\n self.assertEqual(self.config.get('none_types','some_value'),None)\n 
self.assertEqual(self.config.get('none_types','some_value','something'),'something')", "def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])" ]
[ "0.76028246", "0.7311133", "0.72477543", "0.7058012", "0.69974715", "0.69667923", "0.68360406", "0.6766845", "0.67532086", "0.6709966", "0.6709966", "0.66910833", "0.66852343", "0.6642581", "0.6638022", "0.6632264", "0.66242176", "0.661909", "0.661713", "0.6613062", "0.6608504", "0.6528488", "0.6495036", "0.6484634", "0.64743805", "0.6469067", "0.6463956", "0.64562863", "0.64150214", "0.64059645" ]
0.84538144
0
Test get_spec_config on conf with defaults
def test_get_spec_config_defaults(self): spec_conf = get_spec_config({ 'defaults': { 'foo': 'bar' } }, '') self.assertEqual(spec_conf, {'foo': 'bar'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_spec_config_match(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'default_foo': 'default_bar',\n 'foo': 'bar'\n },\n 'specific': [\n {'mask': ['filenomatch'], 'foo': 'bar_nomatch'},\n {'mask': ['filematch'], 'foo': 'match'},\n {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'}\n ]\n }, 'filematch')\n self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})", "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def test_get_spec_config_empty(self):\n spec_conf = get_spec_config({}, '')\n self.assertEqual(spec_conf, {})", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']", "def test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "def test_get_config(default_config, tmp_path):\n abcconfig.write_config(default_config, configpath=tmp_path)\n config = abcconfig.get_config(configpath=tmp_path)\n assert config == default_config", "def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", updater[1][\"moreparam\"])", "def get_config_spec(cls):\n return False", "def test_config_spec(self):\n spec = self.ci.config_spec()\n self.assertIn('Label', spec)\n self.assertIsInstance(spec['Label'], 
lit_types.CategoryLabel)", "async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()", "def test_SpecConfig_class():\n res = SpecConfig(**SPEC_CONFIG)\n assert res.path_out == SPEC_CONFIG['path_out']", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def test_get(self):\n self.assertEqual(self.config.get('basic','greeting'),'hello')", "def test_config():\n args = Namespace(molecule=\"nucleotide\", verbose=False)\n config = core.Config.from_args(args)\n assert config.verbose is False\n assert config.molecule == 'nucleotide'\n assert config.extended_validation == 'none'\n\n args = Namespace(molecule=\"protein\", verbose=True)\n config = core.Config.from_args(args)\n assert config.verbose is True\n assert config.molecule == 'protein'", "def mock_config():\n real_configuration = pymod.config.config\n cfg = pymod.config.Configuration()\n basename = pymod.names.config_file_basename\n default_config_file = os.path.join(pymod.paths.etc_path, \"defaults\", basename)\n defaults = pymod.config.load_config(default_config_file)\n cfg.push_scope(\"defaults\", defaults)\n\n dirname = py.path.local(tempfile.mkdtemp())\n pymod.paths.user_config_path = dirname.strpath\n pymod.paths.user_cache_path = dirname.strpath\n\n pymod.config.config = cfg\n\n yield pymod.config.config\n\n pymod.config.config = real_configuration", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def test_config_opts(sc):\n assert sc.server_name is not None\n assert sc.deployment == Deployment.stg\n assert sc.admins is not None\n assert sc.command_handler is not None\n assert sc.command_handler_work_dir is not None\n assert sc.command_handler_pvc_env_var is not None\n assert sc.command_handler_image_reference is not None\n assert sc.command_handler_k8s_namespace is not None\n assert sc.fas_password is not None\n assert sc.testing_farm_secret is not None\n assert sc.github_requests_log_path is not None\n assert sc.webhook_secret is not None\n assert sc.validate_webhooks is not None\n assert sc.gitlab_token_secret is not None", "def test_config_option_required_default():\n class Config(config.Config):\n a = config.option(int, required=True, default=12, help=\"\")\n\n c = config.structure({}, Config)\n assert c.a == 12\n\n c = config.structure({\"a\": 23}, Config)\n assert c.a == 23\n\n with pytest.raises(config.ConfigError):\n config.structure({\"a\": None}, Config)", "def test_config_class():\n assert config is not None", "def 
test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })", "def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()", "def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)", "def ignor_test_load_default_config(self):\n config = AnnotatorConfig()\n assert config[\"config\"] == \"config.json\"", "def mock_config():\n from .. import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def test_loads_a_config_file(self):\n from test.resources import config\n self.assertIsInstance(config, type(sys))\n self.assertIsNotNone(config.example)\n self.assertEqual(config.example.config_option, 'config-value')", "def test_SpecConfig_class_minimal():\n res = SpecConfig(path=PATH_SPECS_2_YAML)\n assert res.path_out == PATH_SPECS_2_YAML_MODIFIED", "def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())" ]
[ "0.77164143", "0.7436098", "0.7076511", "0.7013907", "0.6881537", "0.6805723", "0.678276", "0.6729274", "0.6729274", "0.66931385", "0.66794413", "0.6565056", "0.6563163", "0.65287656", "0.6503387", "0.64884305", "0.64868605", "0.6484734", "0.6454509", "0.6409027", "0.63939387", "0.63753587", "0.6355658", "0.6340941", "0.63258415", "0.632423", "0.6320525", "0.6308311", "0.62858176", "0.6272889" ]
0.8287769
0
Test get_spec_config on matching conf
def test_get_spec_config_match(self): spec_conf = get_spec_config({ 'defaults': { 'default_foo': 'default_bar', 'foo': 'bar' }, 'specific': [ {'mask': ['filenomatch'], 'foo': 'bar_nomatch'}, {'mask': ['filematch'], 'foo': 'match'}, {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'} ] }, 'filematch') self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})", "def test_config_spec(self):\n spec = self.ci.config_spec()\n self.assertIn('Label', spec)\n self.assertIsInstance(spec['Label'], lit_types.CategoryLabel)", "def test_get_spec_config_empty(self):\n spec_conf = get_spec_config({}, '')\n self.assertEqual(spec_conf, {})", "def test_SpecConfig_class():\n res = SpecConfig(**SPEC_CONFIG)\n assert res.path_out == SPEC_CONFIG['path_out']", "def get_config_spec(cls):\n return False", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", updater[1][\"moreparam\"])", "def check_config(cfg):", "async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()", "def _get_configspec():\n files = sorted(pkg_resources.resource_listdir(__name__, \"\"))\n # NOTE:\n # Explicit convert the filter results to a list, since the returned\n # iterator can ONLY be used ONCE.\n specfiles = list(filter(lambda fn: 
fn.endswith(\".conf.spec\"), files))\n if os.environ.get(\"DEBUG_FG21SIM\"):\n print(\"DEBUG: Found config specifications: %s\" % \", \".join(specfiles),\n file=sys.stderr)\n # NOTE:\n # `resource_string()` returns the resource in *binary/bytes* string\n configspec = \"\\n\".join([\n pkg_resources.resource_string(__name__, fn).decode(\"utf-8\")\n for fn in specfiles\n ]).split(\"\\n\")\n return configspec", "def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)", "def test_get(self):\n self.assertEqual(self.config.get('basic','greeting'),'hello')", "def test_get_yaml_spec(self):\n pass", "def test_compliance_configuration(self, evidence):\n evidence_config = json.loads(evidence.content)\n if evidence_config != self.config.raw_config:\n evidence = json.dumps(evidence_config, indent=2).split('\\n')\n config = json.dumps(self.config.raw_config, indent=2).split('\\n')\n self.add_failures(\n 'Differences found',\n {\n 'Fetcher Configuration': evidence,\n 'Check Configuration': config\n }\n )", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "async def test_full_config(hass, mock_client):\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']", "def test_get_reg_ex_config(self):\n pass", "def test_SpecConfig_class_minimal():\n res = SpecConfig(path=PATH_SPECS_2_YAML)\n assert res.path_out == PATH_SPECS_2_YAML_MODIFIED", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 
'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "async def test_api_get_config(hass: HomeAssistant, mock_api_client: TestClient) -> None:\n resp = await mock_api_client.get(const.URL_API_CONFIG)\n result = await resp.json()\n if \"components\" in result:\n result[\"components\"] = set(result[\"components\"])\n if \"whitelist_external_dirs\" in result:\n result[\"whitelist_external_dirs\"] = set(result[\"whitelist_external_dirs\"])\n if \"allowlist_external_dirs\" in result:\n result[\"allowlist_external_dirs\"] = set(result[\"allowlist_external_dirs\"])\n if \"allowlist_external_urls\" in result:\n result[\"allowlist_external_urls\"] = set(result[\"allowlist_external_urls\"])\n\n assert hass.config.as_dict() == result", "def test_config_ok_config(self):\n test_data = (\"[gnupg]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"\\n\"\n \"[data]\\n\"\n \"\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\"\n \"\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data)\n config = Config(\"test_config.conf\")\n self.assertIn(\"gnupg\", config.config.sections())\n self.assertIn(\"amazon-s3\", config.config.sections())\n self.assertEqual(config.config.get(\n \"gnupg\", \"recipients\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"gnupg\", \"signer\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"access_key\"), \"ACCESSKEY\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"secret_access_key\"), \"SECRETACCESSKEY\")\n self.assertEqual(config.config.get(\n \"data\", \"bucket\"), \"DATABUCKET\")\n self.assertEqual(config.config.get(\n \"metadata\", \"bucket\"), \"METADATABUCKET\")\n os.remove(\"test_config.conf\")", "def test_get_feature_config(self):\n tools.eq_(\n self.old_manifest.get_feature_config(\"sub\").to_dict(),\n {\n \"url\": \"git://github.com/Toumorokoshi/sub.git\",\n \"formula\": \"sprinter.formula.git\",\n \"depends\": \"git\",\n \"branch\": \"yusuke\",\n \"rc\": \"temp=`pwd`; cd %(sub:root_dir)s/libexec && . sub-init2 && cd $tmp\",\n \"bc\": \"temp=`pwd`; cd %(sub:testvar)s/libexec && . 
sub-init2 && cd $tmp\",\n },\n )", "def test_get_configs_with_filter(self) -> None:\n config1 = self.integration.create_config(name='Config 1',\n enabled=True,\n save=True)\n self.integration.create_config(name='Config 2',\n enabled=True,\n save=True)\n\n # Add some configs that shouldn't be returned.\n integration2 = \\\n self.manager.register_integration_class(DummyIntegration2)\n self.integration.create_config(name='Config 3', save=True)\n integration2.create_config(name='Config 4', save=True)\n\n self.assertEqual(self.integration.get_configs(name='Config 1'),\n [config1])", "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })", "def test_config_class():\n assert config is not None" ]
[ "0.7582333", "0.67213416", "0.6621223", "0.66077214", "0.6536261", "0.65335846", "0.64342177", "0.6432759", "0.6411313", "0.6303535", "0.62789136", "0.6260329", "0.62516683", "0.6249136", "0.6229848", "0.6223742", "0.6223742", "0.62210363", "0.61939514", "0.617981", "0.61518675", "0.61431146", "0.6132865", "0.61304533", "0.61266756", "0.6122041", "0.6120953", "0.6092638", "0.6089856", "0.6081552" ]
0.8165602
0
Check that the given modifier name is a valid one. If not, raise an exception based on the violation.
def _isValidModifier(self, modifiers, modifierName): if Modifiers.ILLEGAL_MODIFIER_PATTER.search(modifierName): msg = ('Modifier named "{0}" in sheet {1} contains illegal characters. ' 'Supported characters are a to z, A to Z, 0 to 9 and underscore "_". ' 'Spaces are not allowed characters, use underscore instead. For example ' '"some_mod".' ).format(modifierName, MODIFIER_LIST_SHEET_NAME) raise errors.UnsupportedCharacter(MODIFIER_LIST_SHEET_NAME, msg) if modifierName in map(lambda mod: mod.name, modifiers): msg = ('Modifier named "{0}" already exists in the sheet {1}. ' 'Modifier names must be unique. To fix remove or rename ' 'duplicates.' ).format(modifierName, MODIFIER_LIST_SHEET_NAME) raise errors.DuplicateError(MODIFIER_LIST_SHEET_NAME, msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateName(name):\r\n if not name:\r\n raise IllegalName('Name can not be an empty string.')\r\n\r\n m = _NAME_RE.match(name)\r\n\r\n if m is None or m.group(0) != name:\r\n raise IllegalName('Name has to start with a letter followed by an '\r\n 'arbitrary number of alphanumeric characters or '\r\n 'underscores.')", "def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)", "def raiseNameError(text):\n pattern = re.compile(\"[a-zA-Z]\")\n if not pattern.match(text):\n raise Exception(\"Invalid Name Entered\")", "def name_valid(name):\n return name.isalpha()", "def _check_is_name_valid(self, name):\n if name in self.forbidden_names or name.endswith(\n self.forbidden_extensions) or self.__check_is_match_regex(name):\n return False\n return True", "def _validate_mod(self, mod: Modifier):\r\n return not mod.name in self.mods", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str([0-9]+|L)$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def invalid_name(name):\n if any(not item.isalpha() for item in str(name)):\n return True\n return False", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str[0-9]+$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def validate_name(name: str) -> None:\n\n # Disallow empty.\n if not name:\n raise CleanError('Feature set name cannot be empty.')\n\n # Require starting with a letter.\n if not name[0].isalpha():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - names must start with a letter.'\n )\n\n # Require only letters, numbers, and underscores.\n if not name.replace('_', '').isalnum():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only letters, numbers, and underscores are allowed.'\n )\n\n # Require all lowercase.\n if not name.islower():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only lowercase letters are allowed.'\n )\n\n # Disallow leading, trailing, or consecutive underscores.\n # (these will result in a '' in the split results which evals to False)\n if not all(name.split('_')):\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - leading, trailing, and consecutive underscores are'\n ' not allowed.'\n )", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def validateMemberName(n):\n try:\n if len(n) < 1:\n raise Exception('Name must be at least one byte in length')\n if len(n) > 255:\n raise Exception('Name exceeds maximum length of 255')\n if n[0].isdigit():\n raise Exception('Names may not begin with a digit')\n if mbr_re.search(n):\n raise Exception(\n 'Names contains a character outside the set [A-Za-z0-9_]')\n except Exception as e:\n raise MarshallingError(f'Invalid member name \"{n}\": {str(e)}')", "def _assert_valid_name(name, container):\n container.file.name_validation(container.directory, name)", "def validateNamePart(self, passed_name):\n ## Declaring a Flag to control a while loop\n name_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not name_ok:\n if passed_name.isalpha():\n name_ok = True\n return True\n\n else:\n print(\"You have entered an invalid character. 
Please try again.\")\n return False", "def is_valid(name):\n return bool(name)", "def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def _check_name(self):\n\t\tpass", "def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)", "def check_name(self, name):\n status, msg = utils.validate_name(name, \"36\", \"storageview name\")\n if not status:\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n else:\n LOG.info(msg)", "def _validate_content_name(content_name: str, performative: str) -> Tuple[bool, str]:\n # check content name's format\n if not _is_valid_regex(CONTENT_NAME_REGEX_PATTERN, content_name):\n return (\n False,\n \"Invalid name for content '{}' of performative '{}'. Content names must match the following regular expression: {} \".format(\n content_name, performative, CONTENT_NAME_REGEX_PATTERN\n ),\n )\n\n # check content name is not a reserved name\n if _is_reserved_name(content_name):\n return (\n False,\n \"Invalid name for content '{}' of performative '{}'. This name is reserved.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Content name '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )", "def name_error(name):\n\n if len(name) > MAX_NAME_LENGHT:\n raise InputError(description=\"Name cannot be more than 20 characters long\")", "def _validate_rule_target_name(name: str) -> None:\n if not name:\n raise common_exceptions.RuleTargetValidationError(\n \"A `name` field must be supplied.\"\n )", "def verify_name(name):\n try:\n if name.index(' '):\n return False\n except ValueError:\n return True", "def validate_team_name(name):\n if not re.match('^[A-Za-z0-9_]*$', name):\n print('INVALID NAME. LETTERS, NUMBERS AND UNDERSCORES ONLY')\n return False\n elif len(name) > 10:\n print('INVALID NAME. 10 CHARACTERS MAX')\n return False\n elif len(name) == 0:\n print('INVALID NAME. NOT LONG ENOUGH')\n else:\n return True", "def validate_interval_name(name):\n msg = 'invalid interval name \"{}\"'.format(name)\n if name[0] not in ['+', '-']:\n raise ValueError(msg)\n if name[1] not in ['d', 'm', 'P', 'M', 'A']:\n raise ValueError(msg)\n try:\n int(name[2:])\n except ValueError:\n raise ValueError(msg)", "def validate_custom_name(self, name):\n if not re.match( r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$', name):\n raise ValueError('Invalid name for node (%s)' % name)\n return", "def nameIsValid(self, name):\n self.notify.debug('nameIsValid')\n if (name in self.usedNames):\n return OTPLocalizer.ToonAlreadyExists % (name)\n\n problem = NameCheck.checkName(name, font=self.nameEntry.getFont())\n if problem:\n return problem\n\n # name has passed local checks\n return None", "def _validate_param(name, value):\n\n # First things first -- check that we have a legal parameter name.\n try:\n validator = _legal_params[name]\n except KeyError:\n raise ViewVCException(\"An illegal parameter name was provided.\", \"400 Bad Request\")\n\n # Is there a validator? Is it a regex or a function? Validate if\n # we can, returning without incident on valid input.\n if validator is None:\n return\n elif hasattr(validator, \"match\"):\n if validator.match(value):\n return\n else:\n if validator(value):\n return\n\n # If we get here, the input value isn't valid.\n raise ViewVCException(\n 'An illegal value was provided for the \"%s\" parameter.' 
% (name), \"400 Bad Request\"\n )", "def validated_name(cls, name):\n if (name[:5] == 'hive-'\n and name[5] in ['1', '2', '3']\n and re.match(r'^hive-[123]\\d{4,6}$', name)):\n return name\n return None" ]
[ "0.64032656", "0.63721097", "0.6336271", "0.62860745", "0.62444806", "0.6232126", "0.6111689", "0.6085224", "0.60028106", "0.59533495", "0.5930594", "0.59233236", "0.5870704", "0.58153677", "0.5802187", "0.5795572", "0.57821715", "0.5776781", "0.5740856", "0.5735548", "0.5728841", "0.5715866", "0.5700054", "0.5685522", "0.5649832", "0.56189436", "0.56132185", "0.560684", "0.5598782", "0.55804724" ]
0.78125316
0
Determines if a given datetime.datetime is aware.
def is_aware(value): return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_aware(value: datetime) -> bool:\n\n return value.utcoffset() is not None", "def test_make_datetime_aware(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # Calling make_datetime_aware() returns a timezone-aware datetime referring\n # to the moment from the naive_datetime_obj, in the appropriate time zone.\n naive_datetime_str = \"2018-01-01T20:00:00\"\n expected_datetime_obj = make_aware(\n datetime(year=2018, month=1, day=1, hour=20, minute=0, second=0),\n timezone=pytz.timezone(\"America/New_York\"),\n )\n assert make_datetime_aware(naive_datetime_str) == expected_datetime_obj\n\n # Calling make_datetime_aware() for non-datetime strings returns None.\n dt_str = \"\"\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None\n dt_str = None\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None", "def _is_timezone_aware(value):\n return value.utcoffset() is not None", "def valid_datetime(dt):\n if isinstance(dt.tzinfo, tzinfo) and not datetime_ambiguous(dt):\n return True\n return False", "def is_datetime(self) -> bool:\n return False", "def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes", "def test_freeze_with_timezone_aware_datetime_in_non_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None\n assert utc_now == datetime.datetime(1970, 1, 1, 4)", "def check_dt_consistency(date_dt):\n\n # https://en.wikipedia.org/wiki/Tz_database\n # https://www.iana.org/time-zones\n \n if date_dt.tzinfo is None:\n return True\n else:\n \n # This check is quite heavy but there is apparently no other way to do it.\n if date_dt.utcoffset() != dt_from_s(s_from_dt(date_dt), tz=date_dt.tzinfo).utcoffset():\n return False\n else:\n return True", "def make_aware(value: datetime, timezone=None, is_dst=None) -> datetime:\n\n if timezone is None:\n timezone = get_current_timezone()\n\n if hasattr(timezone, \"localize\"):\n # This method is available for pytz time zones.\n return timezone.localize(value, is_dst=is_dst)\n else:\n # Check that we won't overwrite the timezone of an aware datetime.\n if is_aware(value):\n raise ValueError(\"make_aware expects a naive datetime, got %s\" % value)\n # This may be wrong around DST changes!\n return value.replace(tzinfo=timezone)", "def correct_datetime(record_datetime):\n assert record_datetime.date() == datetime.now(timezone.utc).date()", "def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)", "def test_freeze_with_timezone_aware_datetime_in_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None", "def is_after(self, dt: datetime) -> bool:\n return self.target_time >= make_tz_aware(dt)", "def in_between_datetime(now, start, end):\n return start <= now <= end", "def isNaive(self, date):\n return not self.isAware(date)", "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64", "def is_date(dt):\n return isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)", "def is_naive(value: datetime) -> bool:\n\n return value.utcoffset() is None", "def datetime_has_tz(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None", "def in_datetime_interval(when, *, start=None, end=None):\n when = as_ref_datetime(when) # This is not allowed to be None, but could be str 
and we need datetimes to compare.\n start = start and as_ref_datetime(start)\n end = end and as_ref_datetime(end)\n return (not start or start <= when) and (not end or end >= when)", "async def datetime(self, aware=False) -> dt.datetime:\n if aware is True:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz)\n else:\n return await self.AD.sched.get_now_naive()", "def check_date(created_at, start, end):\n x = get_date(created_at)\n return x <= end and x >= start", "def test_enlighten_dtime(self):\n\n est = pytz.timezone(\"EST\")\n aware_dtime = datetime.datetime(\n year=1985, month=11, day=15,\n hour=6, minute=0,\n tzinfo=est)\n\n enlightened_dtime = enlighten_dtime(aware_dtime)\n # The tzinfo should be untouched.\n self.assertIs(aware_dtime.tzinfo, enlightened_dtime.tzinfo)\n\n # This is a naive object, but has UTC values for hour.\n utcnow = datetime.datetime.now()\n # No tzinfo was present, so that is replaced. hour should be the same.\n enlightened_utcnow = enlighten_dtime(utcnow)\n self.assertEqual(enlightened_utcnow.hour, utcnow.hour)\n self.assertIs(enlightened_utcnow.tzinfo, UTC_TZINFO)", "def office_is_open_on_datetime(iso_datetime):\n is_open = False\n d_time = datetime.fromisoformat(iso_datetime)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n if schedule:\n begin_time = datetime.combine(d_date, schedule['begin'])\n end_time = datetime.combine(d_date, schedule['end'])\n if begin_time <= d_time <= end_time:\n is_open = True\n\n return is_open", "def ensure_datetime(ob: AnyDatetime) -> datetime.datetime:\n if isinstance(ob, datetime.datetime):\n return ob\n date = cast(datetime.date, ob)\n time = cast(datetime.time, ob)\n if isinstance(ob, datetime.date):\n time = datetime.time()\n if isinstance(ob, datetime.time):\n date = datetime.date(1900, 1, 1)\n return datetime.datetime.combine(date, time)", "def make_tz_aware(local_dt):\n aware_dt = timezone('US/Eastern').localize(local_dt)\n return aware_dt", "def _is_date_in_range(self, date):\n date_obj = datetime.strptime(date.split('T')[0], '%Y-%m-%d')\n \"\"\"When running under delta feed mode, we need to consider only those vulns which were\n updated between the given offset date and today's date.\"\"\"\n return self.today > date_obj >= self.start_day", "def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "def validDateTime( dateTime ):\n try:\n datetime.strptime( dateTime, \"%Y-%m-%dT%H:%M:%S.%fZ\" )\n return True\n except ValueError:\n return False", "def adjust_icms_v1_datetime(dt_val: dt.datetime) -> dt.datetime:\n\n if timezone.is_aware(dt_val):\n raise ValueError(f\"Unable to adjust an aware datetime value: {dt_val}\")\n\n # ICMS V1 datetime values are created using this:\n # https://docs.oracle.com/database/121/SQLRF/functions207.htm#SQLRF06124\n # Therefore replace the naive datetime with the correct timezone\n aware_dt = dt_val.replace(tzinfo=UK_TZ)\n\n # Return a datetime that has been offset to UTC\n utc_dt = aware_dt.astimezone(dt.timezone.utc)\n\n return utc_dt" ]
[ "0.7423152", "0.6706246", "0.656491", "0.65267324", "0.6137775", "0.60896856", "0.6019291", "0.59568536", "0.57755363", "0.57708514", "0.5679119", "0.5660917", "0.56022364", "0.55941117", "0.5527686", "0.55162454", "0.54947275", "0.5432787", "0.5420078", "0.5379313", "0.5340649", "0.5322148", "0.5298153", "0.5293154", "0.52929807", "0.52778995", "0.52714974", "0.5259795", "0.5222237", "0.5187491" ]
0.6891435
1
Define a ZMQ connection and return a socket to work with
def connect_to_worker(): socket = context.socket(zmq.REQ) socket.connect("tcp://localhost:5555") return socket
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def meta_trader_connector():\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(SOCKET_LOCAL_HOST)\n return socket", "def socket(self):\n if not hasattr(self, \"_socket\"):\n # create a new one\n self._socket = self.context.socket(zmq.REQ)\n if hasattr(zmq, \"RECONNECT_IVL_MAX\"):\n self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)\n\n self._set_tcp_keepalive()\n if self.master.startswith(\"tcp://[\"):\n # Hint PF type if bracket enclosed IPv6 address\n if hasattr(zmq, \"IPV6\"):\n self._socket.setsockopt(zmq.IPV6, 1)\n elif hasattr(zmq, \"IPV4ONLY\"):\n self._socket.setsockopt(zmq.IPV4ONLY, 0)\n self._socket.linger = self.linger\n if self.id_:\n self._socket.setsockopt(zmq.IDENTITY, self.id_)\n self._socket.connect(self.master)\n return self._socket", "def get_socket():\n return socket.create_connection((HOST, PORT))", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def __init__(self, port=1071):\n\n context = zmq.Context()\n\n self.socket = context.socket(zmq.REP)\n self.socket.bind('tcp://*:' + str(port))\n\n self.socket.recv()", "def __init__(self, ip='127.0.0.1', port='50020'):\n self.ip = ip \n self.port = port\n self.ctx = zmq.Context()\n self.socket = zmq.Socket(self.ctx, zmq.REQ) # this is pub socket", "def setup(self):\n self.context = zmq.Context()\n self.sub_socket = self.context.socket(zmq.SUB)\n if self.filter:\n self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)\n self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))\n return self", "def _create_socket():\n sock = socket.socket()\n return sock", "def build_socket(self, paradigm, topic, url):\n\n socket = None\n if paradigm == \"sub\":\n socket = self.context.socket(zmq.SUB)\n socket.connect(url)\n socket.setsockopt_string(zmq.SUBSCRIBE, topic)\n elif paradigm == \"pub\":\n socket = self.context.socket(zmq.PUB)\n socket.bind(url)\n elif paradigm == \"req\":\n socket = self.context.socket(zmq.REQ)\n socket.connect(url)\n elif paradigm == \"rep\":\n socket == self.context.socket(zmq.REP)\n socket.bind(url)\n else:\n raise Exception(\"Please provide a valid paradigm\")\n\n return socket", "def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def getconnection(self):\n\n # If we were able to create the affix_tcpsocket, then we attempt to call\n # 
getconnection() on the affix tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. The reason for this is to ensure that even\n # if the affixstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['affix_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['affix_tcpsocket'].getconnection()\n return (rip, rport, AffixSocket(sockobj, self.affix_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()", "def __get_zmq_pub(self):\n print(\"Publishing to tcp://127.0.0.1:%d channel: tweets\" % self.port)\n context = zmq.Context()\n socket = context.socket(zmq.PUB)\n socket.bind(\"tcp://127.0.0.1:%d\" % self.port)\n return socket", "def create_connection(address):\n\n sock = socks.socksocket()\n sock.connect(address)\n return sock", "def _bind_zmq_sockets(config):\n workers_socket = context.socket(zmq.ROUTER)\n manager_socket = context.socket(zmq.DEALER)\n workers_port = config[\"zmq\"][\"ports\"][\"workers\"]\n workers_socket.bind(f\"tcp://*:{workers_port}\")\n logger.info(f\"worker socket bound to port {workers_port}\")\n manager_port = config[\"zmq\"][\"ports\"][\"manager\"]\n manager_socket.bind(f\"tcp://*:{manager_port}\")\n logger.info(f\"manager socket bound to port {manager_port}\")\n return workers_socket, manager_socket", "def getconnection(self):\n # If we were able to create the shim_tcpsocket, then we attempt to call\n # getconnection() on the shim tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. The reason for this is to ensure that even\n # if the shimstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['shim_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['shim_tcpsocket'].getconnection()\n return (rip, rport, ShimSocket(sockobj, self.shim_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()", "def __enter__(self):\n\n self.sock.connect(self.socket_path)\n return self.sock", "def __init__(self, factory, endpoint=None, identity=None):\n self.factory = factory\n self.endpoints = []\n self.identity = identity\n self.socket = Socket(factory.context, self.socketType)\n self.queue = deque()\n self.recv_parts = []\n self.read_scheduled = None\n\n self.fd = self.socket_get(constants.FD)\n self.socket_set(constants.LINGER, factory.lingerPeriod)\n\n if not ZMQ3:\n self.socket_set(\n constants.MCAST_LOOP, int(self.allowLoopbackMulticast))\n\n self.socket_set(constants.RATE, self.multicastRate)\n\n if not ZMQ3:\n self.socket_set(constants.HWM, self.highWaterMark)\n else:\n self.socket_set(constants.SNDHWM, self.highWaterMark)\n self.socket_set(constants.RCVHWM, self.highWaterMark)\n\n if ZMQ3 and self.tcpKeepalive:\n self.socket_set(\n constants.TCP_KEEPALIVE, self.tcpKeepalive)\n self.socket_set(\n constants.TCP_KEEPALIVE_CNT, self.tcpKeepaliveCount)\n self.socket_set(\n constants.TCP_KEEPALIVE_IDLE, self.tcpKeepaliveIdle)\n self.socket_set(\n constants.TCP_KEEPALIVE_INTVL, self.tcpKeepaliveInterval)\n\n if self.identity is not None:\n self.socket_set(constants.IDENTITY, self.identity)\n\n if endpoint:\n self.addEndpoints([endpoint])\n\n self.factory.connections.add(self)\n\n self.factory.reactor.addReader(self)\n self.doRead()", 
"def ConnectSocket(self) -> Socket:", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def __connect():\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect socket to server\n sock.connect((SERVER_IP, SERVER_PORT))\n\n # Return connected socket\n return sock", "def init_connect_mq(self):\n try:\n mq_username = Configs.mq_username\n mq_pwd = Configs.mq_pwd\n mq_ip_addr = Configs.mq_ip_addr\n mq_port_num = Configs.mq_port_num\n mq_vhost = Configs.mq_vhost\n\n mq_credentials = pika.PlainCredentials(mq_username, mq_pwd)\n mq_connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=mq_ip_addr, port=mq_port_num, virtual_host=mq_vhost,\n credentials=mq_credentials))\n # connect to mq channel\n self.mq_channel = mq_connection.channel()\n self.mq_channel.exchange_declare(exchange=Configs.mq_exchange_name, exchange_type='topic', durable='true')\n # self.mq_channel.queue_declare(queue='test', durable=False, arguments={'x-message-ttl': 10000})\n self.mq_conn_flag = True\n print(\" ************** MQ Connect Success ************** \")\n except Exception as e:\n print(e)", "def connect(self):\n if self._zerorpc:\n return\n try:\n self._zerorpc = _ZeroRPCClient(connect_to=self._address, timeout=self._timeout)\n self._zerorpc._events.setsockopt(zmq.LINGER, 0) # when we teardown, we want to discard all messages\n except:\n self._zerorpc = None\n raise", "def connect(self):\n self.socket.connect(f'tcp://{self.ip}:{self.port}')\n self.socket.send_string('PUB_PORT')\n self.pub_port = self.socket.recv_string()\n self.pub_socket = zmq.Socket(self.ctx, zmq.PUB)\n self.pub_socket.connect(f\"tcp://{self.ip}:{self.pub_port}\")", "def _create_socket_context(self):\n # Find upper bound on ACTIME from constants and set timeout to double\n # that\n timeout = int(2000 * self.p_constants[\"ACTIME_UPPER\"])\n\n context = zmq.Context() # Create Context\n socket = context.socket(zmq.REQ) # Create socket\n # Connect to dining philosophers\n socket.connect(self.p_constants[\"SERV_ADDR\"])\n socket.RCVTIMEO = timeout # Set timeout\n\n return context, socket", "def create_socket():\n sock = socket.socket()\n sock.bind(('0.0.0.0', 3000))\n print('Listening for connection...')\n sock.listen(1)\n conn, client_address = sock.accept()\n print('EV3 connected @ %s:%s\\n' % (client_address[0], client_address[1]))\n return conn", "def __init__(self, server_addr, server_port, local_port):\n\n if local_port is None:\n self.local_addr = ('localhost', 7700) \n else:\n self.local_addr = ('localhost', local_port)\n self.server_socket = (server_addr, server_port)\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connection.bind(self.local_addr)\n self.message_q = []\n self.failed = False\n \n try:\n self.connection.create_connect(server_port)\n\n except:\n sys.stderr.write('failed to connect to server \\n')\n self.failed = True\n self.connection.close()\n return None", "def __init__(self, socket, info=None): \r\n # If we are given a socket, assume it is setup\r\n if socket != None:\r\n # Is everything setup?\r\n self.connectionInit = True \r\n\r\n # Default incoming and outgoing buffer size expansion value\r\n # Defaults to 128 kilobytes\r\n self.defaultBufSize = 128*1024\r\n\r\n # This 
is the main socket\r\n self.socket = socket \r\n\r\n # This dictionary contains information about this socket\r\n # This just has some junk default values, and is filled in during init\r\n self.socketInfo = {\"localip\":\"127.0.0.1\",\"localport\":0,\"remoteip\":\"127.0.0.1\",\"remoteport\":0}\r\n\r\n # Locks, this is to make sure only one thread is reading or writing at any time\r\n self.readLock = getlock()\r\n self.writeLock = getlock()\r\n\r\n # Callback function that is passed a socket object\r\n # Maps a host (e.g. 127.0.0.1) to a dictionary of ports -> functions\r\n # So callBackFunctions[\"127.0.0.1\"][50] returns the user function for host 127.0.0.1 port 50\r\n self.callbackFunction = {}\r\n\r\n # This dictionary keeps track of sockets we are waiting to open, e.g. openconn has been called\r\n # but the partner multiplexer has not responded yet\r\n self.pendingSockets = {}\r\n\r\n # If we want a new client, what number should we request?\r\n self.nextReferenceID = 0\r\n\r\n # A dictionary that associates reference ID's to their MultiplexerSocket objects\r\n self.virtualSockets = {}\r\n self.virtualSocketsLock = getlock() \r\n \r\n # Inject or override socket info given to use\r\n if info is not None:\r\n for key, value in info.items():\r\n self.socketInfo[key] = value\r\n \r\n # Set error if one occurs in socketReader\r\n self.error = None\r\n \r\n # Callback function in case of fatal error\r\n self.errorDelegate = None\r\n \r\n # Launch event to handle the multiplexing\r\n # Wait a few seconds so that the user has a chance to set waitforconn\r\n settimer(MULTIPLEXER_START_DELAY, self._socketReader, ())\r\n \r\n else:\r\n raise ValueError, \"Must pass in a valid socket!\"", "def _init_socket_tcp(self, worker_id):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, self.port))\n if len(self.sockets) - 1 < worker_id:\n self.sockets.append(MessageSocket(sock))\n else:\n # socket was already initialized, MessageSocket implements a try:catch\n self.sockets[worker_id].close()\n self.sockets[worker_id] = MessageSocket(sock)", "def build_connection(\r\n self,\r\n socket,\r\n address = None,\r\n datagram = False,\r\n ssl = False\r\n ):\r\n\r\n return Connection(\r\n owner = self,\r\n socket = socket,\r\n address = address,\r\n datagram = datagram,\r\n ssl = ssl\r\n )" ]
[ "0.7175116", "0.70785046", "0.6988766", "0.690476", "0.67760694", "0.673751", "0.664417", "0.65480554", "0.64834565", "0.6372554", "0.62621725", "0.6188751", "0.6151901", "0.614989", "0.6100526", "0.60442936", "0.6018518", "0.60160136", "0.59882885", "0.5986249", "0.5965816", "0.5951423", "0.5935449", "0.59229046", "0.5856678", "0.58501065", "0.5824026", "0.5816859", "0.5808708", "0.57883763" ]
0.77242374
0
Used to handle a non-responding ZMQ server
def raise_timeout(*args, **kwargs): raise ZMQNotResponding('ZMQ server is not responding')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_zmq_exit():\n import zmq\n ctx = zmq.Context.instance()\n ctx.term()", "def test_recv_nomsg(self):\n flag, msg_recv = self.recv_instance.recv(timeout=self.sleeptime)\n assert(not flag)\n nt.assert_equal(msg_recv, self.recv_instance.eof_msg)", "def connectionLost(reason):", "def checkConnection(self,msg):\n if (len(msg) == 0):\n sleep(self.m_to/2)\n print >>sys.stderr, 'Closing due to possible server fault'\n self.close()", "def server_exit():\n return", "def test_keep_alive_cancelled(self):\n sleep(0.005) # Wait before a keep-alive message will be sent\n self.inverter.send(b\"\\x01\\x02\\x03\", b\"\") # Send something arbitrary\n self.sock.recv(4096) # Retrieve the sent message\n sleep(0.008) # Wait until just before the next keep-alive is supposed to happen\n # Check that no message was sent\n self.sock.setblocking(False)\n with self.assertRaises(BlockingIOError):\n self.sock.recv(4096)", "def connectionLost(self,reason):\n pass", "def recv(self):\n return None", "def time_server_not_responding(self):\n if not self.time_server_set:\n return False\n if self.am_leader:\n return False\n try:\n uid = self.global_time_server.get_id()\n except socket.error:\n self.global_time_server = None\n self.time_server_set = False\n print \"The time server is not responding.\"\n return True\n print \"The time server is responding!\"\n return False", "def broker_null(self, data):\n\n print(\"Heartbeat\")\n #TODO: Reset heartbeat timer or something like that", "def connectionLost(self, reason):\n print \"connection lost from\", self.addr\n reactor.stop()", "def connectionLost(self, reason):\n print \"lost connection to\", host, \"port\", port\n reactor.stop()", "def connection_lost(self, exc):\n logger.info('The server closed the connection')\n self.loop.stop()", "def start_server(self) -> None:\n with self.socket.bind(self.address):\n print(\"ZeroMQ Server listening at {}\".format(self.address))\n while True:\n payload_rx = self.socket.recv(flags=0)\n if payload_rx:\n self.decode_payload(payload_rx)\n self.socket.send_string(self.reply(), flags=0, copy=False)", "def main(_):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(CORENLP_ADDRESS)\n socket.send(\"stop\")\n message = socket.recv()\n print(\"Received reply [%s]\" % message)", "def __connection_lost(self):\n print(\"Error: connection lost.\")\n try:\n # Try and send a message back to the server to notify connection\n # lost\n self.client_socket.send(\"q\".encode())\n except:\n pass\n # Raise an error to finish\n raise Exception", "def peer_server_host(self):\n try:\n while True:\n while not self.peer_server_listener_queue.empty():\n with futures.ThreadPoolExecutor(max_workers=8) as executor:\n conn, addr = self.peer_server_listener_queue.get()\n data_received = json.loads(conn.recv(1024))\n\n if data_received['command'] == 'obtain_active':\n fut = executor.submit(\n self.peer_server_upload, conn, data_received)\n except Exception as e:\n print \"Peer Server Hosting Error, %s\" % e", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(3000)\r\n except socket.error as exc:\r\n print (f\"Caught exception socket.error: {exc}\")", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved 
reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def _shutdown(self):\n self.control_socket.send(zmqmessage.IPC_END)\n self.end_threads = True\n self.timeout = 1", "def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))", "def do_socket_logic():\n pass", "def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def ecute(self):\n msg = self.up_queue_recv_socket.recv()\n result, e = self.up_queue.get()\n if e is not None:\n raise e\n return result", "def keepAliveReceived(self):", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(2048)\r\n print(\"Response \", self.response)\r\n except socket.error as exc:\r\n print (\"Receive Thread caught exception socket.error : %s\" % exc)", "def test_solicitation_no_reply_resend(self):\n waittime = self.autoconflayer._solicitation_timeout * 4.0\n self.autoconflayer.start_process()\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure the forwarder solicitation was sent more than once\n solictiation = Interest(Name('/autoconfig/forwarders'))\n solictiation_count = len([1 for data in tolower if data == [bcfid, solictiation]])\n self.assertGreater(solictiation_count, 1)", "def slot_not_connected(self, addr):\n if self.next_connection(addr):\n self.connection_responded()\n else:\n print(addr, \": Reconnecting...\")", "def cmd_handler():\n context = zmq.Context()\n\n # socket to receive commands (a subscription to ELECTION_CODE channel)\n cmd_socket = context.socket(zmq.SUB)\n cmd_socket.connect (\"tcp://%s:5556\" % SERVER_HOST)\n topicfilter = \"politiche2013\"\n cmd_socket.setsockopt(zmq.SUBSCRIBE, topicfilter)\n\n # socket to send replies\n reply_sender = context.socket(zmq.PUSH)\n reply_sender.connect(\"tcp://%s:5557\" % SERVER_HOST)\n\n # main loop\n while True:\n print \"Aye sir, unit {0} ready for your commands ...\".format(computer_id)\n # wait for a command\n string = cmd_socket.recv()\n\n # action\n print \"Message received: '%s'\" % (string,)\n\n # send reply to server\n print \"Sending reply to server\"\n reply = { 'unit' : computer_id, 'status' : 'configured'}\n reply_sender.send_json(reply)" ]
[ "0.6562494", "0.62199134", "0.62131953", "0.61594874", "0.61404806", "0.6073438", "0.6002306", "0.59552497", "0.5933235", "0.59127086", "0.5870852", "0.58468467", "0.5773297", "0.5767091", "0.5763371", "0.57514524", "0.5717791", "0.57169217", "0.5679771", "0.56770015", "0.56764233", "0.56656206", "0.5659198", "0.56323355", "0.55985117", "0.559015", "0.5590063", "0.55804837", "0.5579394", "0.5575713" ]
0.71370596
0
This function creates a draft with the given email data. The user id should be either 'me', 'users/email.com', or 'users/{AAD_userId}'.
def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None): data = {} data['Subject'] = subject data['Body'] = {} data['Body']['ContentType'] = 'HTML' data['Body']['Content'] = body data['ToRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in addresses] data['ccRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in cc_addresses] if attachments_list is not None: data['Attachments'] = attachments_list params = json.dumps(data).encode('utf8') url = "{api_url}/{user_id}/messages".format(api_url=API_URL, user_id=user_id) headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(auth.access_token) } req = urllib.request.Request(url, params, headers) try: resp = urllib.request.urlopen(req) resp_data = json.load(resp) logging.getLogger(__name__).info("Draft created") return resp_data['id'] except urllib.error.HTTPError as err: raise AzureError(err)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_email(context, params):\n updated = {}\n for key in params:\n updated[camelcase_to_underscore(key)] = params[key]\n params = updated\n if not params.get('val') or params.get('is_deleted'):\n return None\n form_email = dict()\n if not params.get('label'):\n form_email['label'] = \"Office\"\n form_email['label'] = params.get('label')\n form_email['is_main'] = params.get('is_main', False)\n form_email['value'] = params.get('val')\n # form_email['edited_by'] = context.user\n form_email['user'] = params.get('person')\n return UserEmail.objects.create(**form_email)", "def create(self, user_data): #user_data is a dictionary\n\n\t\tif isEmailUsed(user_data[\"email\"]):\n\t\t\tuser_data[\"creation_status\"] = \"Email is already in use\";\n\t\t\treturn user_data;\n\n\t\tuser_data[\"password\"] = makeHash(user_data[\"password\"]);\n\t\tuser_data[\"date.creation\"] = getTimeStamp();\n\t\tuser_data[\"date.update\"] = user_data[\"date.creation\"];\n\t\tuser_data[\"status\"] = \"Pending email confirmation\";\n\t\tuser_data[\"field.utility\"] = makeHash(user_data[\"email\"] + user_data[\"date.update\"]);\n\t\tuser_data[\"creation_status\"] = \"Ok\";\n\n\t\tself.id = self.db.request(\"insert\", user_data);\n\n\t\tuser_data[\"id\"] = self.id;\n\n\t\treturn user_data;", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def create_draft(convo_ID, template_ID):\n # Get response template through helper function.\n # Make an API request to reply to a conversation with the content in that template\n response_template = get_canned_response(template_ID)\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/drafts\"\n payload = {\n \"body\": response_template[\"body\"],\n \"subject\": response_template[\"subject\"],\n \"author_id\": \"tea_188ud\", # [needs to change later on]\n \"channel_id\": \"cha_14tfp\", # [also will need to be changed for team based settings]\n }\n files = []\n headers = {\"Authorization\": BEARER_TOKEN}\n requests.request(\"POST\", url, headers=headers, json=payload, files=files)", "def _create_new_attende(name, email, gdpr, marketing):\n\n new_attendee = Attendee.objects.create(\n name=name,\n email=email,\n gdpr=gdpr,\n marketing=marketing,\n token=uuid.uuid1(),\n date_signed=datetime.date.today()\n )\n new_attendee.save()\n return new_attendee", "def send_mail_to_onboard_new_reviewers(user_id, category):\n\n email_subject = 'Invitation to review suggestions'\n\n email_body_template = (\n 'Hi %s,<br><br>'\n 'Thank you for actively contributing high-quality suggestions for '\n 'Oppia\\'s lessons in %s, and for helping to make these lessons better '\n 'for students around the world!<br><br>'\n 'In recognition of your contributions, we would like to invite you to '\n 'become one of Oppia\\'s reviewers. 
As a reviewer, you will be able to '\n 'review suggestions in %s, and contribute to helping ensure that any '\n 'edits made to lessons preserve the lessons\\' quality and are '\n 'beneficial for students.<br><br>'\n 'If you\\'d like to help out as a reviewer, please visit your '\n '<a href=\"https://www.oppia.org/creator_dashboard/\">dashboard</a>. '\n 'and set your review preferences accordingly. Note that, if you accept,'\n 'you will receive occasional emails inviting you to review incoming '\n 'suggestions by others.<br><br>'\n 'Again, thank you for your contributions to the Oppia community!<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n recipient_user_settings = user_services.get_user_settings(user_id)\n can_user_receive_email = user_services.get_email_preferences(\n user_id).can_receive_email_updates\n\n if can_user_receive_email:\n # Send email only if recipient wants to receive.\n email_body = email_body_template % (\n recipient_user_settings.username, category, category,\n EMAIL_FOOTER.value)\n _send_email(\n user_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_ONBOARD_REVIEWER,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "async def createdm(self, ctx, user: discord.User):\n try:\n dm_channel = await ex.get_dm_channel(user=user)\n if dm_channel is not None:\n ex.cache.mod_mail[user.id] = ctx.channel.id\n await ex.conn.execute(\"INSERT INTO general.modmail(userid, channelid) VALUES ($1, $2)\", user.id, ctx.channel.id)\n await dm_channel.send(f\"> {ctx.author.display_name} ({ctx.author.id}) has created a DM with you. All messages sent here will be sent to them.\")\n await ctx.send(f\"> A DM has been created with {user.id}. 
All messages you type in this channel will be sent to the user.\")\n else:\n await ctx.send(\"> I was not able to create a DM with that user.\")\n except Exception as e:\n await ctx.send(f\"ERROR - {e}\")\n log.console(e)", "def create_associated_email(sender, **kwargs):\n user = kwargs['instance']\n if kwargs['created']:\n email = AssociatedEmail(user=user, email=user.email, is_primary_email=True)\n if user.is_active:\n email.verification_date = timezone.now()\n email.is_verified = True\n email.save()", "def gmail_send_message():\n creds, _ = google.auth.default()\n\n try:\n service = build('gmail', 'v1', credentials=creds)\n message = MIMEText('This is automated draft mail')\n message['to'] = '[email protected]'\n message['from'] = '[email protected]'\n message['subject'] = 'Automated draft'\n # encoded message\n encoded_message = base64.urlsafe_b64encode(message.as_bytes()) \\\n .decode()\n\n create_message = {\n 'message': {\n\n 'raw': encoded_message\n }\n }\n # pylint: disable=E1101\n send_message = (service.users().messages().send\n (userId=\"me\", body=create_message).execute())\n print(F'Message Id: {send_message[\"id\"]}')\n except HttpError as error:\n print(F'An error occurred: {error}')\n send_message = None\n return send_message", "def test_update_user_endpoint_new_email(self):\n print(\"Generate a new email and check if email is not allocated\")\n email_id = Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"])\n kwargs = {'email_id': email_id, 'return_response_obj': True,\n 'url': self.test_args[\"relative_url_check_email\"]}\n response = self.test_check_email_endpoint(**kwargs)\n assert json.loads(response.text)[\"data\"][\"available\"] is True, \"Unable to generate a new email id\"\n\n print(\"Update email id\")\n response = self.test_update_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"", "def add_manualpost_email(request, submission_id=None, access_token=None):\n\n if request.method == 'POST':\n try:\n button_text = request.POST.get('submit', '')\n if button_text == 'Cancel':\n return redirect(\"submit/manual_post.html\")\n \n form = SubmissionEmailForm(request.POST)\n if form.is_valid():\n submission_pk = form.cleaned_data['submission_pk']\n message = form.cleaned_data['message']\n #in_reply_to = form.cleaned_data['in_reply_to']\n # create Message\n \n if form.cleaned_data['direction'] == 'incoming':\n msgtype = 'msgin'\n else:\n msgtype = 'msgout'\n \n submission, submission_email_event = (\n add_submission_email(request=request,\n remote_ip=request.META.get('REMOTE_ADDR', None),\n name = form.draft_name,\n rev=form.revision,\n submission_pk = submission_pk,\n message = message,\n by = request.user.person,\n msgtype = msgtype) )\n \n messages.success(request, 'Email added.')\n \n try:\n draft = Document.objects.get(name=submission.name)\n except Document.DoesNotExist:\n # Assume this is revision 00 - we'll do this later\n draft = None\n \n if (draft != None):\n e = AddedMessageEvent(type=\"added_message\", doc=draft)\n e.message = submission_email_event.submissionemailevent.message\n e.msgtype = submission_email_event.submissionemailevent.msgtype\n e.in_reply_to = submission_email_event.submissionemailevent.in_reply_to\n e.by = request.user.person\n e.desc = submission_email_event.desc\n e.time = submission_email_event.time\n e.save()\n \n return redirect(\"ietf.submit.views.manualpost\")\n except ValidationError as e:\n form = 
SubmissionEmailForm(request.POST)\n form._errors = {}\n form._errors[\"__all__\"] = form.error_class([\"There was a failure uploading your message. (%s)\" % e.message])\n else:\n initial = {\n }\n\n if (submission_id != None):\n submission = get_submission_or_404(submission_id, access_token)\n initial['name'] = \"{}-{}\".format(submission.name, submission.rev)\n initial['direction'] = 'incoming'\n initial['submission_pk'] = submission.pk\n else:\n initial['direction'] = 'incoming'\n \n form = SubmissionEmailForm(initial=initial)\n\n return render(request, 'submit/add_submit_email.html',dict(form=form))", "def create(self, data):\n # Make User\n username = data['email'].split(\"@\")[0]\n user = User.objects.create_user(**data, username=username, is_verified=False, is_client=True)\n Profile.objects.create(user=user)\n send_confirmation_email.delay(user_pk=user.pk)\n return user", "def create(self,request):\n try:\n print(request.data)\n user = models.UserProfile.objects.get(email=request.data['email'])\n current_site=get_current_site(request)\n email_subject='Reset Password'\n message=render_to_string('reset_password.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(user.id)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return Response(\n {\n \"status\":\"The Reset password email has been sent.\"\n }\n )\n except(TypeError, ValueError, KeyError, OverflowError, models.UserProfile.DoesNotExist):\n user = None\n return Response(\n {\n \"status\":\"No matching account found.\"\n }\n )", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def sample_user_dynamic_email(email):\n return get_user_model().objects.create_user(email=email,\n password=\"password123\",\n name=\"some name\")", "def email_user(user, template_path, from_address, context_dict):\n return email_list([user.email], template_path, from_address, context_dict)", "def test_create_email_account(self):\n first = 'create_email'\n last = 'account_test'\n user_id = first + last\n email_addr = first + last + '@' + self.email_dom\n user = SpokeUser(self.org_name)\n user.create(email_addr, first, last)\n \n org = '%s=%s' % (self.org_attr, self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {'objectClass': ['top', 'inetOrgPerson', self.user_class,\n self.imap_class, self.smtp_class],\n self.imap_enable: ['TRUE'],\n self.imap_mailbox: [user_id],\n self.imap_domain: [self.email_dom],\n self.imap_partition: [self.imap_partition_def],\n self.smtp_destination: [email_addr],\n self.smtp_enable: ['TRUE'],\n self.smtp_pri_address: [email_addr]\n }\n expected_result = [(dn, dn_info)] \n acc = 
SpokeEmailAccount(self.org_name, user_id)\n result = acc.create(email_addr)['data']\n self.assertEqual(result, expected_result)\n user.delete(first, last)", "def create_user_questionnaire_in_progress(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=2, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n list_advice_id = [1, 5, 10]\n self.add_advice_to_user_created(user_created, list_advice_id)\n\n return user_created", "def contact_user(request, pk=None):\n # another way of checking if user is logged-in\n if not request.user.is_authenticated:\n return redirect('login')\n else:\n if request.method == 'GET':\n # identifying the sender and recipient of the message\n sender = User.objects.get(email=request.user.email)\n data = {'recipient': get_object_or_404(User, pk=pk)}\n contact_profile_form = ContactProfileForm(initial=data)\n else:\n contact_profile_form = ContactProfileForm(request.POST, request.FILES)\n if contact_profile_form.is_valid():\n sender = User.objects.get(email=request.user.email)\n contactuserpost = contact_profile_form.save(commit=False)\n contactuserpost.sender = request.user\n messages.success(request, 'Your message has been successfully sent!')\n contactuserpost.save() \n return redirect(reverse('all_users'))\n else:\n contact_profile_form = ContactProfileForm()\n return render(request, 'contactuserpost.html', {'contact_profile_form': contact_profile_form})", "def _post(self, object='emailTemplate', path=None, params=None):\n if params is None:\n params = {}\n result = self.client.post(object=object, path=path, params=params)\n return result", "def save_object(self, data):\n return Email(**data)", "def createOtherUser(self, email):\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import FixedUserProvider\n properties = {'account': FixedUserProvider(value=email), 'status': 'valid'}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def create(self, data):\n data.pop('password_confirmation')\n try:\n availability = data.pop(\"availability\")\n babysitter = data.pop(\"user_bbs\")\n user = User.objects.create_user(**data, is_verified=False)\n if babysitter:\n bbs = Babysitter.objects.create(user_bbs=user, **babysitter)\n for shift in availability:\n Availability.objects.create(bbs=bbs, **shift)\n except KeyError:\n logging.info('This is a instance client')\n user = User.objects.create_user(**data, is_verified=False)\n logging.info(f'User created, whit pk {user.pk}')\n client = Client.objects.create(user_client=user)\n logging.info(f'User pk is already to pass {user.pk}')\n send_confirmation_email.delay(username=user.username, email=user.email )\n return user", "def create_user_emails_sheets_subscribers():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n users = list(User.objects.filter(is_active=True,\n role=\"BU\").values('email', 'profile_id'))\n\n # Check their consent status and update accordingly\n subscribers = []\n for user in users:\n if user['profile_id'] != None:\n profile = SubscriberProfile.objects.get(id=user['profile_id'])\n status = profile.consent_status\n if status == \"IMPLIED\" and 
profile.expired_at < date.today():\n profile.consent_status = \"EXPIRED\"\n profile.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n user.pop('profile_id')\n user.update({\"first_name\": profile.first_name,\n \"last_name\": profile.last_name, \"consent_status\": profile.consent_status})\n subscribers.append(user)\n\n # Get newsletter only users' email\n nlusers = list(NLUser.objects.all())\n\n # Check their consent status and update accordingly\n for nluser in nlusers:\n status = nluser.consent_status\n if status == \"IMPLIED\" and nluser.expired_at < date.today():\n nluser.consent_status = \"EXPIRED\"\n nluser.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n subscribers.append({\"email\": nluser.email, \"first_name\": nluser.first_name,\n \"last_name\": nluser.last_name, \"consent_status\": nluser.consent_status})\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'First name', 'Last name', 'Consent Status']]\n for subscriber in subscribers:\n values.append(list(subscriber.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 4\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 4\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error", "def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def _post(self, object_name='emailTemplate', path=None, params=None):\n if params is None:\n params = {}\n response = self.client.post(object_name=object_name, path=path, params=params)\n return response", "def post(self, request):\n\n try:\n eventoid = request.POST.get('id', '')\n correo = request.POST.get('correo', '')\n AsigStaff.objects.create(id_Evento = eventoid, email_staff = correo)\n print(\"Exito en la asignación de staff\")\n except:\n print(\"Error en la asignacion de staff\")\n\n \n return render(request, self.template, self.context)\n #return render(request, self.template, self.context)", "def create_user_emails_sheets_all():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n users = list(User.objects.filter(is_active=True,\n role=\"BU\").values('email', 'username', 'role', 'profile_id'))\n\n # Check their 
consent status and update accordingly\n subscribers = []\n for user in users:\n if user['profile_id'] != None:\n profile = SubscriberProfile.objects.get(id=user['profile_id'])\n status = profile.consent_status\n if status == \"IMPLIED\" and profile.expired_at < date.today():\n profile.consent_status = \"EXPIRED\"\n profile.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n user.pop('profile_id')\n subscribers.append(user)\n # Get newsletter only users' email\n nlusers = list(NLUser.objects.all())\n\n # Check their consent status and update accordingly\n for nluser in nlusers:\n status = nluser.consent_status\n if status == \"IMPLIED\" and nluser.expired_at < date.today():\n nluser.consent_status = \"EXPIRED\"\n nluser.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n subscribers.append({\"email\": nluser.email, \"username\": nluser.first_name,\n \"role\": \"NL\"})\n\n # Get all basic users' email\n restaurant_owners = list(\n User.objects.filter(is_active=True, role=\"RO\").values('email', 'username', 'role'))\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'Username', 'Role']]\n for subscriber in subscribers:\n values.append(list(subscriber.values()))\n for restaurant_owner in restaurant_owners:\n values.append(list(restaurant_owner.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 3\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 3\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error", "def create_user(headers, email, payload):\n\n # Add admin's email, NOT the user being added\n headers['From'] = email\n\n # Data is user info\n r = requests.post(base_url, headers=headers, data=json.dumps(payload))\n\n print 'User creation response code: ' + str(r.status_code)\n return r.json()['user']" ]
[ "0.6185358", "0.5889587", "0.56886303", "0.563527", "0.5423166", "0.5389278", "0.53863585", "0.5356391", "0.5334495", "0.5330233", "0.53246087", "0.53019273", "0.5286498", "0.5276959", "0.5267437", "0.5240132", "0.5237234", "0.5231543", "0.52308434", "0.5228887", "0.5216414", "0.5204619", "0.518094", "0.5180281", "0.5179719", "0.5177352", "0.5169962", "0.5169251", "0.5163908", "0.51629907" ]
0.73411775
0
iterator which goes through all the pages to find all the emails
def get_all_emails_it(auth, user_id, folder_id='AllItems', pages_limit=None, pages_size=50, **kwargs):
    i = 0
    args_dict = dict(kwargs, top=pages_size, skip=pages_size * i)
    curr_emails = get_emails(auth, user_id, folder_id, **args_dict)
    while len(curr_emails) != 0:
        yield curr_emails
        if pages_limit is not None and i >= pages_limit:
            break
        i += 1
        args_dict = dict(kwargs, top=pages_size, skip=pages_size * i)
        curr_emails = get_emails(auth, user_id, folder_id, **args_dict)
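A minimal usage sketch for the paging generator above (illustrative only, not part of the indexed record): auth and user_id are placeholders, and get_emails is the backend call the generator delegates to, defined elsewhere.
# Assumes get_all_emails_it plus a working get_emails/auth pair are importable.
all_emails = []
for page in get_all_emails_it(auth, user_id, folder_id='AllItems', pages_size=50):
    all_emails.extend(page)  # each yielded page is a list of emails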
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email_addresses(startdate, enddate, user, password):\n emails = []\n page = 1\n more_pages = True\n\n while more_pages:\n response = requests.get(\n 'https://restapi.surveygizmo.com/v2/survey/{survey}'\n '/surveyresponse?'\n 'filter[field][0]=datesubmitted'\n '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0'\n '&filter[operator][1]=<&filter[value][1]={end}+0:0:0'\n '&filter[field][1]=status&filter[operator][1]=='\n '&filter[value][1]=Complete'\n '&resultsperpage=500'\n '&page={page}'\n '&user:pass={user}:{password}'.format(\n survey=EMAIL_COLLECTION_SURVEY_ID, start=startdate,\n end=enddate, page=page, user=user, password=password))\n\n results = json.loads(response.content)\n total_pages = results['total_pages']\n more_pages = page < total_pages\n emails = emails + [r['[question(13)]'] for r in results['data']]\n\n return emails", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url", "def extract_emails_from_category(initial_url, first_page=int(1)):\r\n\tresult_emails = set() #we will return this\r\n\t#last page regex\r\n\tlp_regex = re.compile('[0-9]+/;')\r\n\t#Open URL\r\n\tsoup = bs4.BeautifulSoup(urlopen(initial_url), \"html5lib\")\r\n\t#extract the link to the last page. It is inside div.paging-bottom > ul > li with text \">>\"\r\n\tnavigation = soup.find_all(\"div\",id=\"paging-bottom\")\r\n\tif not navigation:\r\n\t\tprint(\"This page is weird. It has no navigation. Aborting\\n\")\r\n\t\treturn result_emails\r\n\r\n\ttxt_elem = navigation[0].ul.find_all(text=\">>\")[0]\r\n\t#link to last page\r\n\tlink = txt_elem.parent\r\n\t#Get its url.. smthg like /ourivesarias-joalharias/134/;jsessionid=67E1932531B84B3E77AAF47A29B263CE\r\n\turl = link['href']\r\n\t#Pick the number of the last page\r\n\tmatch = lp_regex.search(url)\r\n\tif match:\r\n\t\tlast_page = match.group()[0:-2]\r\n\t\tlast_page_i = int(last_page)\r\n\telse:\r\n\t\tprint(\"This category has no navigation to the last page\\n\")\r\n\t\tlast_page_i = first_page\r\n\t\t\r\n\t#Sanity Check\r\n\tif last_page_i < first_page:\r\n\t\tlast_page_i = first_page\r\n\t\t\r\n\tprint(\"Working on category %s\" % initial_url)\r\n\t#Now that we have the last page. Time to iterate on each one and get the emails\r\n\tfor page in xrange( first_page, last_page_i ):\r\n\t\tpage_url = initial_url + str(page) + '/' #This is fragile\r\n\t\tprint(\"Scanning page %d of %d (%s).\" % (page, last_page_i, page_url))\r\n\t\ttry:\r\n\t\t\temails = extract_emails_from_page(bs4.BeautifulSoup( unicode(urlopen(page_url).read(),'utf-8','ignore'), \"html5lib\"))\r\n\t\t\twrite_emails_to_set(emails, result_emails)\r\n\t\t\ttime.sleep(5)\r\n\t\texcept IOError:\r\n\t\t\tprint(\"Coult not fetch url %s. 
Skipped\\n\" % page_url)\r\n\treturn result_emails", "def extract_emails_from_page(soup):\r\n\temail_pattern = re.compile('([\\w\\-\\.+]+@(\\w[\\w\\-]+\\.)+[\\w\\-]+)')\r\n\ttry:\r\n\t\tpage_content = str(soup)\r\n\texcept:\r\n\t\tprint('Error parsing page. Skipped\\n')\r\n\t\treturn []\r\n\tmatches = email_pattern.findall(page_content)\r\n\tif matches:\r\n\t\treturn [ match[0] for match in matches ]\r\n\treturn []", "def __iter__(self):\n while self.has_next_page():\n response = self.get_next_page_response()\n for item in self.get_items_from_response(response):\n yield item", "def scrape_emails(webpage):\n emails = []\n html = requests.get(webpage)\n email_regex = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z]+')\n emails = email_regex.findall(html.text)\n return emails", "def get_emails(self):\n email_ids = self.get_email_ids()\n Email = get_email_class()\n return [email for email in Email.objects.filter(pk__in=email_ids)]", "def fetch_all(self):\n emails = []\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n try:\n res, data = self._mailconn.fetch(msg.decode('utf-8'), '(RFC822)')\n except Exception as error:\n self.close_mail_connection()\n print('No email to read: '+error)\n exit()\n \n msg = email.message_from_string((data[0][1]).decode('utf-8'))\n if not isinstance(msg, str):\n if self.is_sender_in_whitelist(msg['From']):\n emails.append(msg)\n\n return emails", "def getIdeaUrlsFromEmail():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='from:[email protected]')\n # print (lists)\n mes,mes_str = GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n # print (mes)\n j = 0\n urls = []\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('保存邮件附件……TODO?')\n elif contentType == 'text/html': #or contentType == 'text/plain' \n #保存正文 \n data = part.get_payload(decode=True) \n content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n # print (content)\n url,title = findIdeaUrlInHtml(content)\n urls.append((url,title))\n # print (url,title)\n # contentTxt = re.compile('<[^>|a]+>').sub('',content)\n # print (reg.sub('',content))\n # #end if \n\n return urls", "def _get_iter(self, url, params):\n for current_page_index in itertools.count():\n result_dict = self._get_page(url, params, current_page_index)\n for document in result_dict['entries']:\n yield document\n if not result_dict['isNextPageAvailable']:\n break", "def _all_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_token = None\n is_truncated = True\n while is_truncated:\n page = page_function(token=next_token, **kwargs)\n next_token = page.next_token\n is_truncated = page.is_truncated and next_token is not None\n for task in page.page_data:\n yield task", "def get_emails():\n\n # generate the gmail api service\n service = build_gmail_api_v1()\n\n # compute date for one year ago\n today = date.today()\n one_year_ago = today - timedelta(days=365.25)\n start = one_year_ago - timedelta(days=1)\n end = one_year_ago + timedelta(days=1)\n start_string = 
start.strftime(\"%Y/%m/%d\")\n end_string = end.strftime(\"%Y/%m/%d\")\n query_string = f'after:{start_string} before:{end_string}'\n\n # generate the gmail api request (get list of messages from one year ago)\n request = service.users().messages().list(userId='me', q=query_string)\n\n # try to get the api response\n try:\n response = request.execute()\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n return []\n\n # get list of message ids from the api response\n messages = list(response[\"messages\"])\n ids = [message[\"id\"] for message in messages]\n\n # store all emails in a list\n data_to_display = []\n\n # loop through each message id\n for id in ids:\n\n try:\n # store email data in a dict\n email = {}\n\n # get message data by querying gmail api using message id\n request = service.users().messages().get(userId='me', id=id)\n response = request.execute()\n\n # get date, subject, from, to, etc from message header\n headers = list(response[\"payload\"][\"headers\"])\n looking_for = [\"Date\", \"Subject\", \"From\", \"To\"]\n for header in headers:\n if header[\"name\"] in looking_for:\n email[header[\"name\"]] = header[\"value\"]\n\n # try to get message body (base64) from response\n # the json structure varies a lot so that is why there are no many try/except\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][1][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n base64_message = \"Ti9B\"\n\n # decode the email body\n email[\"body\"] = base64.urlsafe_b64decode(\n base64_message).decode('utf-8')\n\n # populate list with email\n data_to_display.append(email)\n\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n\n return data_to_display", "def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1", "def find_emails(url):\n\thtml = retrieve_html(url)\n\temail_set = find_emails_in_html(html)\n\n\tif len(email_set) > 0:\n\t\t# If there is a email, we stop at level 1.\n\t\treturn email_set\n\n\telse:\n\t\t# No email at level 1. Crawl level 2\n\t\tlogger.info('No email at level 1.. 
proceeding to crawl level 2')\n\n\t\tlink_set = find_links(url, html)\n\t\tfor link in link_set:\n\t\t\t# Crawl them right away!\n\t\t\t# Enqueue them too\n\t\t\thtml = retrieve_html(link)\n\t\t\tif html is None:\n\t\t\t\tcontinue\n\t\t\temail_set = find_emails_in_html(html)\n\t\t\tdb.enqueue(link, list(email_set))\n\n\t\t# We return an empty set\n\t\treturn set()", "def emails(self):\r\n return emails.Emails(self)", "def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])", "def get_email_addresses(survey, startdatetime, enddatetime):\n token = settings.SURVEYGIZMO_API_TOKEN\n secret = settings.SURVEYGIZMO_API_TOKEN_SECRET\n emails = []\n page = 1\n more_pages = True\n survey_id = SURVEYS[survey][\"email_collection_survey_id\"]\n dtfmt = \"%Y-%m-%d+%H:%M:%S\"\n\n # Can't do anything without credentials.\n if token is None or secret is None:\n return emails\n\n while more_pages:\n response = requests.get(\n \"https://restapi.surveygizmo.com/v2/survey/{survey}\"\n \"/surveyresponse?\"\n \"filter[field][0]=datesubmitted\"\n \"&filter[operator][0]=>=&filter[value][0]={start}\"\n \"filter[field][1]=datesubmitted\"\n \"&filter[operator][1]=<&filter[value][1]={end}\"\n \"&filter[field][2]=status&filter[operator][2]==\"\n \"&filter[value][2]=Complete\"\n \"&resultsperpage=500\"\n \"&page={page}\"\n \"&api_token={token}\"\n \"&api_token_secret={secret}\".format(\n survey=survey_id,\n start=startdatetime.strftime(dtfmt),\n end=enddatetime.strftime(dtfmt),\n page=page,\n token=token,\n secret=secret,\n ),\n timeout=300,\n )\n\n results = json.loads(response.content)\n total_pages = results.get(\"total_pages\", 1)\n more_pages = page < total_pages\n emails = emails + [r[\"[question(13)]\"] for r in results[\"data\"]]\n page += 1\n\n valid_emails = []\n for email in emails:\n try:\n validate_email(email)\n except ValidationError:\n pass\n else:\n valid_emails.append(email)\n\n return valid_emails", "def emails(self):\r\n url = api_base + 'emails/'\r\n return json.loads(self.load_url(url))", "def email_all():\n\tSubscribtion = session.query(email).all()\n\treturn subscribtion_object", "def __iter__(self):\n return self.paged()", "def iter_pages(self):\n for num in range(1, self.pages + 1):\n yield Page(num)", "def extract(self, response):\n # print response.url,\"extract response url\"\n sel = response.selector\n pages = []\n try:\n # print \"pages work\"\n pages = sel.xpath(\"//div[contains(@class,'fen_ye_nav')]//td/text()\").re(u\"共([\\d]{1,3})页\")\n # print pages\n except Exception, e:\n print e,\"error pages\"\n log.msg(e, level=log.ERROR)\n log.msg(response.url, level=log.ERROR)\n\n if len(pages) == 0:\n self.getUserName(response) #only one page\n else:\n for page in range(int(pages[0])+1)[1:]: #fro test\n url = response.url+\"_m0_p\"+str(page)\n yield Request(url, callback=self.getUserName,dont_filter=True)", "def get_group_of_emails(M):\n print \"Try to access group of emails\"\n data = search_email_advanced(M)\n if data is None:\n return\n # print \"Got data as \", data\n ids = data[0]\n id_list = ids.split()\n for id_num in id_list:\n rv, data = M.uid('fetch', id_num, \"(RFC822)\")\n if rv != \"OK\":\n print \"Error getting message\"\n return\n # get raw text of the whole email\n raw_email = data[0][1]\n content = email.message_from_string(raw_email)\n # print raw_email\n p = EmailParser()\n # print sender and receivers\n print \"To: \", content['To'], \"\\n\"\n print \"From: \", email.utils.parseaddr(content['From']), 
\"\\n\"\n print \"Date: \", content['Date'], \"\\n\"\n print \"Subject: \", p.parsestr(raw_email).get('Subject'), \\\n \"\\n\"\n result = parse_content(content)\n # print results\n printData(result)", "def find(self):\n self.paths.add(self.url)\n while len(self.visited_paths) < self.num_pages_limit and \\\n len(self.paths) > 0:\n self.find_emails_and_paths(path=self.paths.pop())", "def iter_feed(gd_client):\n feed = gd_client.GetContactsFeed()\n while feed:\n for entry in feed.entry:\n yield entry\n # Check whether there is another page and if yes\n next_link = feed.GetNextLink()\n feed = None\n if next_link:\n feed = gd_client.GetContactsFeed(uri=next_link.href)", "def get_pages(search_url):\n page_number = 1\n page = fetch_page(search_url.format(page_number))\n while (page_exists(page)) & (page_number <= 100):\n print (page_number, end=', ')\n yield page, page_number\n page_number += 1\n page = fetch_page(search_url.format(page_number))", "def iterate_on_items(pagecode):\n parser = etree.HTMLParser()\n \n tree = etree.parse(StringIO(pagecode), parser)\n\n # xpath = \"/html/body/div[3]/div[3]/div[3]/ul/li[83]/a/span/span[2]\"\n span_class = \"wb-itemlink-id\"\n request = tree.xpath('//span[@class=\"{}\"]'.format(span_class))\n for span in request:\n yield span.text", "def find_emails(site):\n regex=re.compile(r\"[\\w,\\.,\\_,\\%,\\+,\\-]+@[\\w,\\.]*\")\n emails=[]\n for a in site:\n emails.extend(regex.findall(str(a.decode('utf-8'))))\n all_emails.extend(emails)\n return set(emails)", "def __iter__(self):\n self.__iter_page = 1\n return self", "def _paginate(self) -> Iterable[List[str]]:\n req = self.html\n videos_lens = self._extractor(req)\n yield videos_lens # yielding doesn't mean that is the end\n\n # The above only returns 100 or fewer links\n # as Youtube loads 100 videos at a time\n # Simulating a browser request for the load more link\n load_more_url = self._find_load_more_url(req)\n\n while load_more_url: # there is an url found\n req = get(load_more_url)\n load_more = json.loads(req)\n try:\n html = load_more[\"content_html\"]\n except KeyError:\n return # if there is no content_html there is no chanch to find_load_more_url\n videos_lens = self._extractor(html)\n yield videos_lens\n\n load_more_url = self._find_load_more_url(\n load_more[\"load_more_widget_html\"],\n )\n\n return" ]
[ "0.6516295", "0.64715", "0.6371025", "0.6256334", "0.6235852", "0.61884665", "0.61240774", "0.6112633", "0.6023784", "0.60078543", "0.59824973", "0.59707236", "0.5946463", "0.5928901", "0.5901219", "0.58981884", "0.5887079", "0.5860919", "0.5854434", "0.58382696", "0.5836329", "0.5761337", "0.57366735", "0.572208", "0.57137436", "0.5700502", "0.5698886", "0.5681073", "0.5669405", "0.56630343" ]
0.71289283
0
Calculate the masked ratio.
def get_masked_ratio(mask):
    hist = mask.histogram()
    return hist[0] / np.prod(mask.size)
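A small usage sketch (illustrative only, not part of the indexed record), assuming a single-channel PIL Image where masked pixels are stored as value 0; the imports are added here for the example.
import numpy as np
from PIL import Image

mask = Image.new('L', (4, 4), 255)   # 16 pixels, all unmasked
for x in range(2):
    for y in range(2):
        mask.putpixel((x, y), 0)     # mask a 2x2 corner
print(get_masked_ratio(mask))        # hist[0] = 4 zero pixels, 4 / 16 = 0.25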
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maskedFraction(self):\n\n\t\tif not self._masked:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn self._masked_fraction", "def maskedFraction(self):\n\n\t\treturn self._masked_fraction", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def medicalMask(self) -> float:\n return self._coreEstimation.maskScore", "def masked_mre_cal(inputs, target, mask):\n return torch.sum(torch.abs(inputs - target) * mask) / (\n torch.sum(torch.abs(target * mask)) + 1e-9\n )", "def correct(self) -> float:\n return self._coreEstimation[DetailedMaskType.CorrectMask]", "def mask_density(mask):\n return get_number_of_unpruned_weights(mask).float() / get_number_of_weights(mask).float()", "def depolarization_ratio(self):\r\n if self._depol_ratio is not None:\r\n return round(self._depol_ratio,3)\r\n else:\r\n return self._depol_ratio", "def mask_percentage(self):\n return 100 - self.tissue_percentage", "def mask_rate(rate, error, maxsig):\n # initialise mask array with existing NaNs\n mask = ~isnan(error)\n # original Nan count\n orig = np.count_nonzero(mask)\n # determine where error is larger than the maximum sigma threshold\n mask[mask] &= error[mask] > maxsig\n # replace values with NaNs\n rate[mask] = nan\n error[mask] = nan\n # calculate percentage of masked pixels\n nummasked = int(np.count_nonzero(mask)/orig*100)\n log.info('Percentage of pixels masked = {}%'.format(nummasked))\n\n return rate, error", "def _ratio(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = sim / ref\n out.attrs[\"units\"] = \"\"\n return out", "def denominator(self):\n return 1", "def mapd(self) -> float:\n a = np.sum(np.abs(self.predicted - self.true))\n b = np.sum(np.abs(self.true))\n return float(a / b)", "def running_ratio(self) -> np.ndarray:\n result_array = self.result_array\n result = result_array.sum(axis=1) / result_array.sum()\n\n if isinstance(result, np.ndarray):\n result_out = result\n else:\n result_out = np.array(result)\n\n return result_out", "def sharpe_ratio(self, r_f):\n return (\n self.cumulative_returns().last('1D').iat[0] - r_f\n ) / self.cumulative_returns().std()", "def totaled_ratio_calculator(numerator, denominator):\n if denominator != 0:\n ratio = round(float(numerator) / denominator, 3)\n else:\n ratio = 0\n return ratio", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def mask_sparsity(mask: Mask):\n return 1 - mask_density(mask)", "def ratio(self):\n return float(self.max_width) / self.max_height", "def infected_ratio(self):\n if self.max_pop != 0:\n return int(self.infected_pop) / self.max_pop\n else:\n return 1", "def circle_mask(width, ratio):\n # taken from Paul's code\n mask = np.zeros((width, width), dtype=np.float32)\n center = width // 2\n radius = ratio * center\n y, x = np.ogrid[-center:width - center, -center:width - center]\n mask_check = x * x + y * y <= radius * radius\n mask[mask_check] = 1.0\n return mask", "def Mask(self) -> int:", "def distance_image(self):\n return exclusion_distance(self.mask)", "def fe_ratio(self):\n return self._fe_ratio", "def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / float(res)", "def rrint(self):\n if len(self.data.peaks):\n return (np.diff(self.data._masked) / self.data.fs).compressed()", "def f_a(self):\n return 
np.count_nonzero(self.label_mask) / float(self.label_mask.size)", "def ratio_calc(first_strandI, second_strandI):\n if first_strandI + second_strandI != 0:\n Ratio = first_strandI / float(first_strandI + second_strandI)\n return Ratio\n else:\n return np.nan", "def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask != 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge" ]
[ "0.7183577", "0.6947266", "0.6375616", "0.62425804", "0.6177991", "0.6146051", "0.6002309", "0.5985765", "0.59175396", "0.58450127", "0.5783589", "0.5759599", "0.57562935", "0.56993043", "0.56441855", "0.56413704", "0.5576878", "0.55284727", "0.54961735", "0.5492076", "0.5457063", "0.54535043", "0.5447613", "0.54475504", "0.54434675", "0.54419243", "0.5419726", "0.5415708", "0.54018736", "0.5400798" ]
0.78421235
0
Create a dictionary with domain architectures exclusive in a single pathogen type group.
def generateArchitectureDataStructure(db, collapse_pathogen_groups=False):
    # Calculate total numbers of species and strains for each pathogen group
    counts_species_pathogen_dict = defaultdict(lambda: defaultdict(int))
    for row in db.getNumSpeciesPathogen():
        counts_species_pathogen_dict[row['pathogen_type']]['num_species'] = row['num_species']
        counts_species_pathogen_dict[row['pathogen_type']]['num_strains'] = row['num_strains']
    architecture_pathogen_dict = defaultdict(list)
    arch_strains_species_dict = defaultdict(lambda: defaultdict(list))
    for row in db.getArchitecturePathogenTypeIterator():
        strains = row['species']
        species = str(strains).split(' (')[0]
        pathogen_type = row['pathogen_type']
        architecture_id = row['architecture']
        architecture_acc = row['architecture_acc']
        architecture_pathogen_dict[(architecture_id, architecture_acc)].append(pathogen_type)
        arch_strains_species_dict[(architecture_id, architecture_acc)]['species'].append(species)
        arch_strains_species_dict[(architecture_id, architecture_acc)]['strains'].append(strains)
    for architecture in architecture_pathogen_dict.keys():
        # If an architecture is only present in proteins of a certain pathogen_type,
        # it should have only 1 pathogen_type
        pathogen_groups_set = set(architecture_pathogen_dict[architecture])
        if not exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):
            architecture_pathogen_dict.pop(architecture)
            arch_strains_species_dict.pop(architecture)
        else:
            # Check if the architecture is present in all species and strains
            species_set = set(arch_strains_species_dict[architecture]['species'])
            strains_set = set(arch_strains_species_dict[architecture]['strains'])
            total_num_species, total_num_strains = get_number_ssp_stt_members(counts_species_pathogen_dict, pathogen_groups_set, collapse_pathogen_groups)
            arch_strains_species_dict[architecture]['total_num_species'] = total_num_species
            arch_strains_species_dict[architecture]['total_num_strains'] = total_num_strains
            if total_num_species == len(species_set):
                arch_strains_species_dict[architecture]['all_species']
            if total_num_strains == len(strains_set):
                arch_strains_species_dict[architecture]['all_strains']
    return architecture_pathogen_dict, arch_strains_species_dict
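An illustrative sketch of how the two returned mappings might be consumed (not part of the indexed record); db is a placeholder database handle, and the helpers exclusive_arch and get_number_ssp_stt_members are assumed to be defined elsewhere in the same codebase.
# List each exclusive architecture, its pathogen group(s), and whether it carries
# the 'all_species' marker key added by the function above.
arch_pathogen, arch_coverage = generateArchitectureDataStructure(db)
for (arch_id, arch_acc), pathogen_types in arch_pathogen.items():
    covers_all_species = 'all_species' in arch_coverage[(arch_id, arch_acc)]
    print(arch_acc, sorted(set(pathogen_types)), covers_all_species)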
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def environments_of(groups):\n types = {}\n for group in groups:\n for env in group.environments:\n et = env.environmentType\n envs = types.setdefault((et.id, et.name), set())\n envs.add((env.id, env.name))\n return types", "def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group", "def typeMapping(self):\n statemachines = self.package.getStateMachines()\n classes = {}\n for sm in statemachines:\n workflowId = sm.getCleanName()\n for klass in sm.getClasses():\n # We allow to bound a workflow to a <<stub>>\n if klass.isabstract:\n continue\n elif not self.atgenerator._isContentClass(klass) and \\\n not klass.hasStereoType(self.atgenerator.stub_stereotypes):\n continue\n name = klass.getTaggedValue('portal_type') or \\\n klass.getCleanName()\n classes.setdefault(name, []).append(workflowId)\n\n classNames = classes.keys()\n classNames.sort()\n result = []\n for id_ in classNames:\n item = {}\n item['id'] = id_ # portal type\n item['workflowIds'] = classes[id_]\n result.append(item)\n\n # no need to check use_workflow, it's already done by xmiparser.XMIModel.associateClassesToStateMachines,\n # so the sm.getClasses() already returns classes which uses use_workflow tgv.\n # if you uncomment thoses lines, you will have the bound-workflow twice\n #handle the use_workflow tgvs\n #for klass in self.package.getProduct().getClasses(recursive=True):\n # if klass.hasTaggedValue('use_workflow'):\n # result.append(dict(id=klass.getCleanName(),workflowId=klass.getTaggedValue('use_workflow')))\n # remember special case\n remembertypes = []\n self.atgenerator.getRememberTypes(remembertypes, self.package)\n for remembertype in remembertypes:\n existent = False\n for type in result:\n if type['id'] == remembertype['portal_type']:\n existent = True\n if existent:\n continue\n additionaltype = dict()\n additionaltype['id'] = remembertype['portal_type']\n additionaltype['workflowIds'] = [remembertype['workflow']]\n result.append(additionaltype)\n\n # take tgv on state machine itself into account\n for sm in statemachines:\n bindings = sm.getTaggedValue('bindings', '')\n bindings = [b.strip() for b in bindings.split(', ') if b.strip()]\n for binding in bindings:\n item = {}\n item['id'] = binding\n item['workflowIds'] = [sm.getCleanName()]\n result.append(item)\n\n return result", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], 
js.flatten()[idx]))))\n return out", "def build_doctype_map(self):\n\t\tself.doctype_map = {}\n\n\t\tactive_domains = frappe.get_active_domains()\n\t\tall_doctypes = frappe.get_all(\n\t\t\t\"DocType\",\n\t\t\tfields=[\n\t\t\t\t\"name\",\n\t\t\t\t\"in_create\",\n\t\t\t\t\"module\",\n\t\t\t\t\"istable\",\n\t\t\t\t\"issingle\",\n\t\t\t\t\"read_only\",\n\t\t\t\t\"restrict_to_domain\",\n\t\t\t],\n\t\t)\n\n\t\tfor dt in all_doctypes:\n\t\t\tif not dt.restrict_to_domain or (dt.restrict_to_domain in active_domains):\n\t\t\t\tself.doctype_map[dt[\"name\"]] = dt", "def get_info_dict(\n X: AnyStr,\n Y: AnyStr,\n ecod_df: pd.DataFrame = ecod_df,\n group_df: pd.DataFrame = group_df,\n) -> Dict:\n X = int(X)\n Y = int(Y)\n dom1, dom2 = get_proper_domains_id(X, Y)\n if dom1 is None:\n return None\n info_dict = {\"X\": X, \"Y\": Y}\n info_dict.update({\"domain1\": dom1, \"domain2\": dom2, \"swapFlag\": (X > Y)})\n return info_dict", "def build_network_definition(rsn_oms):\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"build_network_definition. rsn_oms class: %s\",\n rsn_oms.__class__.__name__)\n\n # platform types:\n platform_types = rsn_oms.config.get_platform_types()\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"got platform_types %s\", str(platform_types))\n\n # platform map:\n map = rsn_oms.config.get_platform_map()\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"got platform map %s\", str(map))\n\n # build topology:\n pnodes = NetworkUtil.create_node_network(map)\n dummy_root = pnodes['']\n root_pnode = pnodes[dummy_root.subplatforms.keys()[0]]\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"topology's root platform_id=%r\", root_pnode.platform_id)\n\n # now, populate the attributes and ports for the platforms\n\n def build_attributes_and_ports(pnode):\n \"\"\"\n Recursive routine to call set_attributes and set_ports on each pnode.\n \"\"\"\n set_attributes(pnode)\n set_ports(pnode)\n\n for sub_platform_id, sub_pnode in pnode.subplatforms.iteritems():\n build_attributes_and_ports(sub_pnode)\n\n def set_attributes(pnode):\n platform_id = pnode.platform_id\n attr_infos = rsn_oms.attr.get_platform_attributes(platform_id)\n if not isinstance(attr_infos, dict):\n raise PlatformDriverException(\n \"%r: get_platform_attributes returned: %s\" % (\n platform_id, attr_infos))\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: attr_infos: %s\", platform_id, attr_infos)\n\n if not platform_id in attr_infos:\n raise PlatformDriverException(\n \"%r: get_platform_attributes response does not \"\n \"include entry for platform_id: %s\" %(\n platform_id, attr_infos))\n\n ret_infos = attr_infos[platform_id]\n for attrName, attr_defn in ret_infos.iteritems():\n attr = AttrNode(attrName, attr_defn)\n pnode.add_attribute(attr)\n\n def set_ports(pnode):\n platform_id = pnode.platform_id\n port_infos = rsn_oms.port.get_platform_ports(platform_id)\n if not isinstance(port_infos, dict):\n raise PlatformDriverException(\n \"%r: get_platform_ports response is not a dict: %s\" % (\n platform_id, port_infos))\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: port_infos: %s\", platform_id, port_infos)\n\n if not platform_id in port_infos:\n raise PlatformDriverException(\n \"%r: get_platform_ports response does not include \"\n \"platform_id: %s\" % (platform_id, port_infos))\n\n ports = port_infos[platform_id]\n\n if not isinstance(ports, dict):\n raise PlatformDriverException(\n \"%r: get_platform_ports: entry for platform_id is \"\n \"not a dict: %s\" % (platform_id, ports))\n\n for port_id, 
dic in ports.iteritems():\n port = PortNode(port_id, dic['network'])\n port.set_state(dic['state'])\n pnode.add_port(port)\n\n # add connected instruments:\n instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)\n if not isinstance(instrs_res, dict):\n log.warn(\"%r: port_id=%r: get_connected_instruments \"\n \"response is not a dict: %s\" % (platform_id, port_id, instrs_res))\n continue\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: port_id=%r: get_connected_instruments \"\n \"returned: %s\" % (platform_id, port_id, instrs_res))\n\n if not platform_id in instrs_res:\n raise PlatformDriverException(\n \"%r: port_id=%r: get_connected_instruments response\"\n \"does not have entry for platform_id: %s\" % (\n platform_id, ports))\n\n if not port_id in instrs_res[platform_id]:\n raise PlatformDriverException(\n \"%r: port_id=%r: get_connected_instruments response \"\n \"for platform_id does not have entry for port_id: %s\" % (\n platform_id, port_id, instrs_res[platform_id]))\n\n instr = instrs_res[platform_id][port_id]\n for instrument_id, attrs in instr.iteritems():\n port.add_instrument(InstrumentNode(instrument_id, attrs))\n\n # call the recursive routine\n build_attributes_and_ports(root_pnode)\n\n # we got our whole network including platform attributes and ports.\n\n # and finally create and return NetworkDefinition:\n ndef = NetworkDefinition()\n ndef._platform_types = platform_types\n ndef._pnodes = pnodes\n ndef._dummy_root = dummy_root\n return ndef", "def get_domains(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n domains = {}\n\n # add all domain triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.domain, None)):\n if subject in property_to_id and object in entity_type_to_id:\n domains[property_to_id[subject]] = entity_type_to_id[object]\n\n return domains", "def _identify_media(self):\n\n mediapaths = {k: v['medium'] for k, v in self.labels.items() if v.get('medium') is not None}\n\n media_dict = {}\n for label, path in mediapaths.items():\n if path.lower() == 'air':\n media_dict[label] = Air()\n else:\n media_dict[label] = from_yaml(path)\n return media_dict", "def create_package_dict(self):\n dep_node = list()\n param_list = ['name', 'version', 'dir', 'description']\n inp_list = list()\n dep_node_list = list()\n pkg_dict = dict()\n for line in self.full_ed_lines:\n inp_list.append(line.text())\n dep_pkg = inp_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n msg, msg_type = dep.split('/')\n dep_node_list.append({'name': msg, 'type': msg_type})\n for param, value in zip(param_list, inp_list):\n pkg_dict[param] = value\n pkg_dict['maintainer'] = {'name': inp_list[4], 'email': inp_list[5]}\n pkg_dict['depend'] = dep_pkg\n pkg_dict['node'] = dict()\n pkg_dict['node']['name'] = inp_list[7]\n pkg_dict['node']['depend'] = dep_node_list\n pkg_dict['node']['subscribers'] = self.manager.wid.sub_list\n pkg_dict['node']['publishers'] = self.manager.wid.pub_list\n return pkg_dict", "def _get_entity_mappings(query_list: ProcessedQueryList) -> Dict:\n entity_labels = set()\n logger.info(\"Generating Entity Labels...\")\n for d, i, entities in zip(\n 
query_list.domains(), query_list.intents(), query_list.entities()\n ):\n if len(entities):\n for entity in entities:\n e = str(entity.entity.type)\n entity_labels.add(f\"{d}.{i}.B|{e}\")\n entity_labels.add(f\"{d}.{i}.I|{e}\")\n entity_labels.add(f\"{d}.{i}.S|{e}\")\n entity_labels.add(f\"{d}.{i}.E|{e}\")\n\n e = \"O|\"\n entity_labels.add(f\"{d}.{i}.{e}\")\n\n entity_labels = sorted(list(entity_labels))\n return dict(zip(entity_labels, range(len(entity_labels))))", "def metro_phil_to_basis_dict(metro):\n for o in metro.objects:\n if o.is_scope:\n #one of the subkeys of the root object will be the detector phil. it will be the only one not extracted.\n detector_phil = o.extract()\n break\n #metro = metro.extract() # not needed\n\n bd = {(detector_phil.serial,): basis(matrix.col(detector_phil.orientation),\n matrix.col(detector_phil.translation)*1000) }\n for p in detector_phil.panel:\n bd[(detector_phil.serial,p.serial)] = basis(matrix.col(p.orientation),\n matrix.col(p.translation)*1000)\n for s in p.sensor:\n bd[(detector_phil.serial,p.serial,s.serial)] = basis(matrix.col(s.orientation),\n matrix.col(s.translation)*1000)\n for a in s.asic:\n bd[(detector_phil.serial,p.serial,s.serial,a.serial)] = basis(matrix.col(a.orientation),\n matrix.col(a.translation)*1000)\n\n return bd", "def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['fc1_n_chan'] = self.fc1_n_chan\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))", "def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['conv1_size'] = self.conv1_size\n net_architecture['conv1_n_chan'] = self.conv1_n_chan\n net_architecture['conv1_n_pool'] = self.conv1_n_pool\n net_architecture['conv2_size'] = self.conv2_size\n net_architecture['conv2_n_chan'] = self.conv2_n_chan\n net_architecture['conv2_n_pool'] = self.conv2_n_pool\n net_architecture['fc1_n_chan'] = self.fc1_n_chan\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = 
self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))", "def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }", "def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))", "def detect_domains (nffg):\n return {infra.domain for infra in nffg.infras}", "def structure_to_dict(structure):\n from aiida.common.exceptions import InputValidationError\n\n for kind in structure.kinds:\n if kind.is_alloy():\n raise InputValidationError(\n \"Kind '{}' is an alloy. This is not allowed for CRYSTAL input structures.\"\n \"\".format(kind.name))\n if kind.has_vacancies():\n raise InputValidationError(\n \"Kind '{}' has vacancies. 
This is not allowed for CRYSTAL input structures.\"\n \"\".format(kind.name))\n\n kindname_symbol_map = {\n kind.name: kind.symbols[0]\n for kind in structure.kinds\n }\n kindname_id_map = {kind.name: i for i, kind in enumerate(structure.kinds)}\n id_kind_map = {i: kind for i, kind in enumerate(structure.kinds)}\n kind_names = [site.kind_name for site in structure.sites]\n symbols = [kindname_symbol_map[name] for name in kind_names]\n equivalent = [kindname_id_map[name] for name in kind_names]\n kinds = [id_kind_map[e] for e in equivalent]\n\n sdata = {\n \"lattice\": structure.cell,\n \"atomic_numbers\": [ATOMIC_SYMBOL2NUM[sym] for sym in symbols],\n \"ccoords\": [site.position for site in structure.sites],\n \"pbc\": structure.pbc,\n \"equivalent\": equivalent,\n \"kinds\": kinds,\n }\n\n return sdata", "def known_domain_data(known_uid, known_verbose_name, known_os_type):\n return {\n 'id': known_uid,\n 'verbose_name': known_verbose_name,\n 'os_type': known_os_type\n }", "def by_type(environments):\n types = {}\n for env in environments:\n et = env.environmentType\n options = types.setdefault(et.id, set())\n options.add(env.id)\n return types", "def _init_group_dicts(self):\n\n all_groups = set()\n\n for detection in config['detections'].values():\n if 'action' in detection and detection['action'] == 'buy':\n if 'groups' in detection:\n for group in detection['groups']:\n all_groups.add(group)\n\n for group in all_groups:\n self.trade_sizes[group] = config['trade_min_size']\n self.trade_proceeds[group] = {}\n\n self.trade_sizes['default'] = config['trade_min_size']\n self.trade_proceeds['default'] = {}", "def createDict( self ):\n d = {}\n devTup = ( 'endcap', 'comp', 'shutter','397intensity' )\n for dev in devTup:\n d[dev] = {'devChannels':{}}\n endcap = ( ( 1, 1 ), ( 2, 0 ) )\n comp = ( ( 1, 4 ), ( 2, 2 ), ( 'common', 3 ) )\n shutter = ( ( 1, 5 ), ( 2, 6 ), ( 3, 7 ) )\n intensity397 = (('397intensity',8),)\n chanTup = ( endcap, comp, shutter ,intensity397 )\n for dev, value in zip( devTup, chanTup ):\n for chanPair in value:\n d[dev]['devChannels'][chanPair[0]] = {'value':None, 'channel':chanPair[1]}\n ecRange = ( 0.0, 40.0 )\n compRange = ( -40.0, 40.0 )\n shutterRange = ( 0.0, 5.0 )\n intensity397Range = (0.0,2500.0)\n rangeTup = ( ecRange, compRange, shutterRange, intensity397Range )\n for dev, value in zip( devTup, rangeTup ): d[dev]['range'] = value\n self.dcDict = d", "def get_type_dag(graph: Graph, entity_type_to_id: Dict[str, int]) -> Dict[int, DAGNode]:\n # dictionary pointing from entity type id to the corresponding node in the entity type DAG\n entity_type_dag = {}\n\n # extract equivalence class relation\n equivalent_classes = {}\n for subject, predicate, object in graph.triples((None, OWL.equivalentClass, None)):\n equivalent_classes[subject] = object\n equivalent_classes[object] = subject\n\n # iterate over class hierarchy\n for subject, predicate, object in graph.triples((None, RDFS.subClassOf, None)):\n\n # is the subject is an entity type or equivalent to an entity type\n subject_is_entity_type = (subject in entity_type_to_id or\n (subject in equivalent_classes and equivalent_classes[subject] in entity_type_to_id))\n # is the object is an entity type or equivalent to an entity type\n object_is_entity_type = (object in entity_type_to_id or\n (object in equivalent_classes and equivalent_classes[object] in entity_type_to_id))\n\n # if the subject is an entity type or equivalent to an entity type AND the object is an entity type or\n # equivalent to an entity type\n if 
subject_is_entity_type and object_is_entity_type:\n # replace subject and object with their equivalent entity type if thhey are not an entity type themselves\n if subject not in entity_type_to_id:\n subject = equivalent_classes[subject]\n if object not in entity_type_to_id:\n object = equivalent_classes[object]\n\n subject_id = entity_type_to_id[subject]\n object_id = entity_type_to_id[object]\n # add subject and object and their relation to the DAG\n if subject_id != object_id:\n if object_id not in entity_type_dag:\n entity_type_dag[object_id] = DAGNode(object_id, object)\n if subject_id not in entity_type_dag:\n entity_type_dag[subject_id] = DAGNode(subject_id, subject)\n\n # add DAG node of object as parent to the subject DAG node\n entity_type_dag[subject_id].parents.append(entity_type_dag[object_id])\n # add DAG node of the subject as child to the object DAG node\n entity_type_dag[object_id].children.append(entity_type_dag[subject_id])\n\n return entity_type_dag", "def define_group_properties(self):\n\n # PropertyGroup\n self.propertygroup['debug']['x86'] = get_propertygroup(\n 'debug', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['debug']['x64'] = get_propertygroup(\n 'debug', 'x64', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x86'] = get_propertygroup(\n 'release', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x64'] = get_propertygroup(\n 'release', 'x64', ' and @Label=\"Configuration\"'\n )\n\n # ItemDefinitionGroup\n self.definitiongroups['debug']['x86'] = get_definitiongroup('debug', 'x86')\n self.definitiongroups['debug']['x64'] = get_definitiongroup('debug', 'x64')\n self.definitiongroups['release']['x86'] = get_definitiongroup('release', 'x86')\n self.definitiongroups['release']['x64'] = get_definitiongroup('release', 'x64')", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def bases(layout, mvClass=MultiVector, grades=None):\n\n dict = {}\n for i in range(layout.gaDims):\n grade = layout.gradeList[i]\n if grade != 0:\n if grades is not None and grade not in grades:\n continue\n v = np.zeros((layout.gaDims,), dtype=int)\n v[i] = 1\n dict[layout.names[i]] = mvClass(layout, v)\n return dict", "def _get_domain_mappings(domain_to_intents: Dict) -> Dict:\n domain2id = {}\n domains = list(domain_to_intents)\n for index, domain in enumerate(domains):\n domain2id[domain] = index\n return domain2id", "def format_domain(domain):\n domain.ns_converted = []\n for ns in domain.ns :\n if isinstance(ns, objects.DomainHostAttr) :\n ns_item = {\n 'hostname' : ns.hostname,\n 'ips' : []\n }\n\n for hostaddr in ns.hostAddr :\n ns_item['ips'].append(hostaddr.ip)\n else :\n ns_item = {\n 'hostname' : ns.name,\n 'ips' : [],\n 'hostobj' : 1\n }\n domain.ns_converted.append(ns_item)\n\n return domain" ]
[ "0.53147066", "0.5278428", "0.51913285", "0.5105832", "0.5089798", "0.5063621", "0.5043128", "0.5004288", "0.49817312", "0.4942777", "0.49404955", "0.49214888", "0.49084446", "0.48987442", "0.48879838", "0.4885036", "0.48838812", "0.48718145", "0.4864769", "0.4852137", "0.48391476", "0.48375362", "0.48369786", "0.48351386", "0.48169854", "0.48155892", "0.4813881", "0.48055103", "0.47993198", "0.47892964" ]
0.56843174
0
Boolean function to check if a given architecture is exclusive.
def exclusive_arch(pathogen_groups_set, collapse_pathogen_groups): if len(pathogen_groups_set) == 1: return True # Only check pathogen grouping when the flag is on if collapse_pathogen_groups: if len(pathogen_groups_set) > 2: return False if 0 in pathogen_groups_set and 1 in pathogen_groups_set: return True if 3 in pathogen_groups_set and 4 in pathogen_groups_set: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_logical(*args):\n return _ida_hexrays.is_logical(*args)", "def __bool__(self):\n return any(self.smask)", "def is_infrastructure (self):\n return sum([1 for i in self.infras]) != 0", "def is_exclusive(self):\n return self.exclusive", "def incompatible_architecture(self) -> bool:\n return pulumi.get(self, \"incompatible_architecture\")", "def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)", "def incompatible_architecture(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"incompatible_architecture\")", "def check_masked(self):\n if self._alternate == 'N': # If our alternate allele is masked, or an 'N'\n return True # Return True\n else: # Otherwise\n return False # Return False", "def osarch_is_amd64():\n return osarch_match(\"amd64\")", "def _check_state(enabled_states, paper_states):\n enabled_states = set(enabled_states)\n paper_states = set(paper_states)\n return bool(enabled_states.intersection(paper_states))", "def isShiftHeld():\n return False if pm.about(batch=True) else (pm.getModifiers() & 1) > 0", "def in_state(self, state: str) -> bool:\n if state.startswith('APP_SPECIFIC'):\n app_state = int(state[len('APP_SPECIFIC'):])\n value = (app_state << 3) | 0b00000111\n return self.value == value\n return super().in_state(state)", "def is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...", "def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True", "def g3(a, b): \n return not (a and b)", "def xor(a: bool, b: bool) -> bool:\n return (a and not b) or (not a and b)", "def _common_check(self, flag):\n has_perms = self.user.is_active and self.user.is_staff and (\n self.user.has_perm('blog.change_membership') or\n self.user.has_perm('blog.change_blog'))\n return has_perms or (self.role in ['O', 'A'] and\n not self.is_left() and\n not self.is_banned() and\n (flag or self.role == 'O'))", "def canUnlockAll(boxes):\n for key in range(1, len(boxes) - 1):\n res = False\n for index in range(len(boxes)):\n res = key in boxes[index] and key != index\n if res:\n break\n if res is False:\n return res\n return True", "def is_enabled(self):\n for arch in self.inputs:\n if arch.place.M < arch.weight:\n return False\n return True", "def ExclusiveAddressUse(self) -> bool:", "def ExclusiveAddressUse(self) -> bool:", "def ExclusiveAddressUse(self) -> bool:", "def ExclusiveAddressUse(self) -> bool:", "def is_non_exclusive(self, variable):\n non_exclusive = False\n for sub_effect in self._sub_effects:\n if sub_effect.get_variable() == variable:\n if not sub_effect.is_exclusive():\n non_exclusive = True\n elif len(sub_effect.get_value()) > 0 and not sub_effect.is_negated():\n return False\n return non_exclusive", "def is_commutative(*args):\n return _ida_hexrays.is_commutative(*args)", "def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.ON,\n ]:\n tango.Except.throw_exception(\n f\"Disable() is not allowed in current state {self.state_model.op_state}\",\n \"Failed to invoke Disable command on SdpMasterLeafNode.\",\n \"SdpMasterLeafNode.Disable() \",\n tango.ErrSeverity.ERR,\n )\n return True", "def osarch_match(op):\n arch = g_osarch\n while True:\n if op == arch:\n return True\n arch = platform_map_iterate(arch)\n if not arch:\n break\n return False", "def has_exclusive_attributes(self):\n return any(schema.is_exclusive for schema in itervalues(self.schema))", "def is_commutative(self):\n 
try:\n return self.universe().is_commutative()\n except Exception:\n # This is not the mathematically correct default, but agrees with\n # history -- we've always assumed factored things commute\n return True", "def is_negated(x) -> bool:\n return not (x & 1 == 0)" ]
[ "0.588355", "0.5876618", "0.58541375", "0.58166057", "0.5798117", "0.5712255", "0.5689939", "0.5669322", "0.5637539", "0.5548849", "0.5544105", "0.5517736", "0.54952157", "0.5487494", "0.5465993", "0.5461534", "0.54453385", "0.5438241", "0.5419825", "0.53650993", "0.53650993", "0.53650993", "0.53650993", "0.53577167", "0.5331902", "0.53083897", "0.5296869", "0.5295731", "0.52924204", "0.5283284" ]
0.59786093
0
Returns True if the employee has rejoined, otherwise False
def is_rejoinee(self): return len(self._start_date) > 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_employee():\n return _is_member('uw_employee')", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return (self.date_joined + expiration_date <= datetime.datetime.now())", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n\n return (self.user.date_joined + expiration_date <= datetime.datetime.now())", "def _compute_can_reset(self):\n\t\tuser = self.env.user\n\t\tgroup_hr_manager = self.env.ref ('hr_holidays.group_hr_holidays_manager')\n\t\tfor holiday in self:\n\t\t\tif group_hr_manager in user.groups_id or holiday.employee_id and holiday.employee_id.user_id == user:\n\t\t\t\tholiday.can_reset = True", "def already_booked(slots, attendees, user_name):\n already_joined = False\n for i in attendees:\n if i[\"email\"] == user_name+'@student.wethinkcode.co.za':\n already_joined = True\n\n if already_joined == True:\n return False\n else:\n return True", "def is_emperor(user_id: int, table_id: int) -> bool:\n table = Table.query.get(table_id)\n return table.emperor == user_id", "def is_manager(self) -> bool:\n return self.role in EmployeeRole.manager_roles()", "def activation_expired(self):\n return self.date_joined + timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) < timezone.now()", "def activation_key_expired(self):\r\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\r\n return self.activation_key == \"ALREADY_ACTIVATED\" or \\\r\n (self.user.date_joined + expiration_date <= datetime.datetime.now())", "def activation_key_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return self.activation_key == \"ALREADY_ACTIVATED\" or \\\n (self.user.date_joined + expiration_date <= datetime.datetime.now())", "def activation_key_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return self.activation_key == RegistrationProfile.ACTIVATED or \\\n (self.user.date_joined + expiration_date <= datetime.datetime.now())", "def isOn(self):\r\n return len(self.__agenda)>2", "def is_student_employee():\n return _is_member('uw_affiliation_student-employee')", "def is_joined_days_passed(self, days):\n return timezone.now() >= self.user.date_joined + timedelta(days=days)", "def has_happened(self):\n\n return self.end < timezone.now()", "def all_leave(self):\n return self.num_leaves == self.num_workers", "def activation_key_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return self.user.date_joined + expiration_date <= datetime.datetime.now()", "def is_retired(self):\n if str.__str__(self) in UID_dictionary:\n return bool(UID_dictionary[self][3])\n\n return False", "def activation_key_expired(self):\n exp_date = timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return self.user.date_joined + exp_date <= datetime.now()", "def can_reschedule(self) -> bool:\n return pulumi.get(self, \"can_reschedule\")", "def same_user_or_shiftleader(self, user):\n try:\n return (\n self.get_object().userid == user\n or user.is_superuser\n or user.userprofile.has_shift_leader_rights\n )\n except UserProfile.DoesNotExist:\n return False", "def has_orcid(self):\n try:\n if self.orcid:\n return True\n except Orcid.DoesNotExist:\n pass\n return False", "def is_examiner(self, user_obj):\n return self.examiners.filter(pk=user_obj.pk).count() > 0", "def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid", 
"def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def replied(self):\n return bool(self.replied_at is not None)", "def _check_employee(self):\n\n for record in self:\n\n if record.nik_number:\n # find duplicate nik\n employee_ids = self.search([('id', 'not in', self.ids), ('nik_number', '=', record.nik_number)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Employee Identity Number.\")\n raise ValidationError(error_msg)\n\n # check nik format. it required base_indonesia\n if not record._check_nik(record):\n error_msg = _(\"NIK did not match with Company Code.\")\n raise ValidationError(error_msg)\n\n if record.identification_id:\n employee_ids = self.search([('id', 'not in', self.ids), ('identification_id', '=', record.identification_id)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Identification Number.\")\n raise ValidationError(error_msg)\n\n return True", "def reports_editable(self):\n end_plus_time = self.datetime_end + datetime.timedelta(days=CCR_DELTA)\n return timezone.now() < end_plus_time", "def user_is_attendee(user):\n exists = check_attendee_exists(user, user)\n if exists[0]:\n return True\n return False", "def user_has_selected_nickname(self):\n if self.fresh is None:\n delta = self.created - self.modified\n # Simulate delta = abs(delta)\n if delta.days < 0:\n delta = -delta\n self.fresh = (delta.days == 0 and delta.seconds < 2)\n return not self.fresh" ]
[ "0.6680365", "0.587085", "0.58500487", "0.57760894", "0.5741819", "0.57230127", "0.56929135", "0.56781346", "0.56605685", "0.5577466", "0.55369276", "0.5530934", "0.55146056", "0.55014586", "0.54542553", "0.5392969", "0.5388207", "0.53847474", "0.5377452", "0.53713554", "0.5348058", "0.5341284", "0.532348", "0.5295116", "0.5291401", "0.52853847", "0.5240574", "0.52337444", "0.52328223", "0.5220771" ]
0.61320335
1
Process the exit of an employee
def process_employee_exit(self): if self.is_employee_serving(): self._end_date.append(datetime.now().isoformat()) print(f"Successfully processed exit for employee {self.name} on" \ f"{self._end_date[-1]}\nWe wish {self.name} for future endeavours") return raise RejoiningException("Employee not in service. Cannot process exit.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_exit(self, args):\n return -1", "def identify_result_exit(self, record):\n return [\"exit\"]", "def exit(self):\n pass", "def _PExit(self, m):\n pass", "def user_exit(cls):\n cls.exit_program(ErrorCodes.E_USER_EXIT)", "def do_exit(self, line): \n sys.exit(0)", "def do_exit(self,*args):\r\n return True", "def set_exit(self, exit_name):\r\n pass", "def do_exit(self, arg):\n return self._leave(arg)", "def on_exit(self):\n pass", "def exit(self):\n logger.debug(\"EXIT\")", "def __my_exit__(self, arg=0):\n self.services.error('Called sys.exit() from component code')\n raise Exception('Called sys.exit() from component code')", "def doExit(n, info):\n\tprint(info)\n\tsys.exit(n)", "def do_exit(self, args):\n sys.exit(1)", "def exit(self):\n self.exit_flag = True", "def __exit(self, *args):\n sys.exit(0)", "def exit(context):\n return _nfc.exit(context)", "def exit(self):\n return self.__exit", "def _exit(self, save_vars):\n raise NotImplementedError()", "def do_exit(self, args):\n return sys.exit(1)", "def exit(self): \n self.teo_exchange_intent = self.teo_wallet\n self.withdraw_intent = self.euro_wallet\n\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)\n\n if self.teo_wallet + self.euro_wallet == 0:\n print('Agent exited: ', self.__class__.__name__)\n self.model.schedule.remove(self)", "def on_exit(self, userdata):\n pass", "def exit(self) -> None:\n self.on_exit(None)", "def ConsoleExit(self, errorcode=200):\n pass", "def exit(self):\n print(\"\\n***************************** Exit Metafor *****************************\")", "def __exit__(self, *ex_info):\n if self.device:\n self._device_ctx.__exit__(*ex_info)\n\n stdout('')\n stdout('Finished {0} in {1:0.1f}s'.format(self.name, self.timer_elapsed('script')))", "def do_exit(self, arg):\n self.db.close_db()\n print(\" \\\\o_ Bye-bye...\")\n print(\" / \")\n print(\"<\\\\\")\n sys.exit()", "def program_exit(self, button_object):\n\t\tsys.exit(0)", "def __exit_handler(signum, frame):\n #print \"EH START\"\n with this.lock:\n exit_barrier = this.exit_barrier\n\n if exit_barrier is not None:\n # Meet up with the worker\n this.exit_barrier.wait()\n #print \"EH FIRST BARRIER\"\n # Wait for the worker to be done\n this.finish_barrier.wait()\n #print \"EH HANDLER FINISHED\"\n #print \"EH DONE\"\n sys.exit(0)", "def do_exit(self, _):\n return True" ]
[ "0.66456366", "0.66187525", "0.6506358", "0.64646137", "0.6454185", "0.6431671", "0.63921857", "0.63420844", "0.63289636", "0.6281149", "0.6274805", "0.6250398", "0.6227689", "0.6203884", "0.6194826", "0.6160062", "0.6157645", "0.61544615", "0.61414033", "0.6138231", "0.6130484", "0.61210597", "0.61201847", "0.6104874", "0.6077881", "0.6075954", "0.6054154", "0.6039468", "0.60306275", "0.6020049" ]
0.8065909
0
Takes a full media url from Bandwidth and extracts the media id
def get_media_id(media_url): split_url = media_url.split("/") #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png if split_url[-2] == "media": return split_url[-1] #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png else: #This is required for now due to the SDK parsing out the `/`s return "%2F".join(split_url[-3:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def media_id(self):\n try:\n return Html.toId(self.content)\n except:\n Mp3Error(1)", "def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id", "def media_content_id(self):\n return self._media_uri_final", "def media_content_id(self) -> str | None:\n # The lovelace app loops media to prevent timing out, don't show that\n if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:\n return None\n media_status = self._media_status()[0]\n return media_status.content_id if media_status else None", "def get_single_media(media_id):\n return query_single(media_id, Media, media_schema)", "def get_media_id_from_post(media_obj):\n if media_obj:\n media_id = media_obj.get('id')\n return media_id\n return", "def unique_id(self):\n if self._uuid != '':\n return \"linkplay_media_\" + self._uuid", "def get_video_id(url):\n\n if not url:\n return \"\"\n\n # If URL is embedded\n if \"embed\" in url:\n return url.split(\"/\")[-1]\n\n parse_result = urlparse(url)\n query = parse_qs(parse_result.query)\n return query[\"v\"][0]", "def media_content_id(self) -> str | None:\n if self._device.movie.handle:\n return self._device.movie.handle\n return None", "def get_media_filename(media_url):\n return media_url.split(\"/\")[-1]", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''", "def fix_moviedb(url):\n assert url\n\n # get id from the title\n # e.g.: https://www.themoviedb.org/movie/482936-la-quietud\n path = url.split('/')[-1]\n movie_id = int(path.split('-')[0])\n return url, movie_id", "def media_content_id(self):\n return self._table.active_track.id if self._table.active_track else None", "def extract_item_id(url):\n m = re.search('/([0-9]+)\\.htm', url)\n if m is not None:\n return m.group(1)\n else:\n return None", "def media_entry_id(self):\n return self.getattr('media_entry_id')", "def parse_link_to_id(self, playlist_link: str) -> str:\n split_1 = playlist_link.split('/')[4]\n split_2 = split_1.split('?')\n return split_2[0]", "def extract_media_v1(data):\n user = data[\"user\"]\n location = data.get(\"location\")\n if location:\n location = {\"pk\": int(location.get(\"pk\")), \"name\": location.get(\"name\")}\n video_url = \"\"\n if \"video_versions\" in data:\n # Select Best Quality by Resolutiuon\n video_url = sorted(\n data[\"video_versions\"], key=lambda o: o[\"height\"] * o[\"width\"]\n ).pop()[\"url\"]\n product_type = data.get(\"product_type\", \"\")\n if data[\"media_type\"] == 2 and not product_type:\n product_type = \"feed\"\n thumbnail_url = ''\n if 'image_versions2' in data:\n thumbnail_url = sorted(\n data[\"image_versions2\"][\"candidates\"],\n key=lambda o: o[\"height\"] * o[\"width\"],\n ).pop()[\"url\"]\n return {\n \"pk\": int(data[\"pk\"]),\n \"taken_at\": int(data[\"taken_at\"]),\n \"id\": data[\"id\"],\n \"media_type\": data[\"media_type\"],\n 
\"product_type\": product_type,\n \"code\": data[\"code\"],\n \"thumbnail_url\": thumbnail_url,\n \"location\": location,\n \"user\": extract_user_short(user),\n \"comment_count\": int(data.get(\"comment_count\") or 0),\n \"like_count\": int(data.get(\"like_count\") or 0), # the media just published has no like_count\n \"caption_text\": json_value(data, \"caption\", \"text\", default=\"\"),\n \"usertags\": [\n extract_usertag(usertag)\n for usertag in data.get(\"usertags\", {}).get(\"in\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_v1(edge)\n for edge in data.get('carousel_media', [])\n ]\n }", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def get_id(self, url):\n return url.split('/')[-1]", "def get_id(share_url):\n url = get_redirect_url(share_url)\n id_num = re.findall('(\\d*)\\?', url)[0]\n if id_num.isnumeric():\n return id_num\n else:\n print(\"Something wrong with id number\")", "def parse_image_id(image_ref):\n temp = image_ref.rsplit('/')\n #Return the last item, which is the image id\n return temp[len(temp) - 1]", "def get_lis_id(chamber, url):\n match = re.search(lis_id_patterns[chamber], url)\n if match.groups:\n return match.group(1)", "def get_id_regular_link(link = None):\n #Legacy compatibility\n choppedLink = legacy_check(link)\n # dont bother if we are none.\n if link == None:\n return link\n\n vid_url_params = choppedLink[3].split(\"&\")\n # Search the id in the list of elements of the url\n vid = search_video_id(vid_url_params)\n\n # And dont forget the links with hashtags #\n vid = vid.split(\"#\")[0]\n\n return vid # change this var names TODO", "def get_playlist_id_from_url(url):\n return match1(url, r'youku\\.com/playlist_show/id_([a-zA-Z0-9=]+)')", "def generate_media_source_id(domain: str, identifier: str) -> str:\n uri = f\"{URI_SCHEME}{domain or ''}\"\n if identifier:\n uri += f\"/{identifier}\"\n return uri", "def find_player_id(url):\r\n response = requests.get(url)\r\n result = PLAYER_ID_PATTERN.search(response.text)\r\n return result.group(1)", "def get_id_attribution(link = None):\n log.debug(\"attribution link: \" + repr(link))\n choppedLink = legacy_check(link)\n id = None\n try:\n # First try to get the relevant part, that is encoded\n step1 = choppedLink[3][choppedLink[3].find(\"watch\"):]\n # Then stplit the other encoded params\n step2 = step1[12:].split(\"%\")\n # and get the good part\n step3 = step2[0]\n id = step3 # choppedLink[3][choppedLink[3].find(\"watch\"):][12:].split(\"%\")[0]\n except Exception as e:\n raise e # dont care 'bout issues here. 
all will be NotImplementedError \n\n # If we havent found a match, then this is not implemented.\n if id == \"\":\n raise Exception(\"no recognised kind of link\")\n\n return id", "def parse_url_discl_id(cls, url):\n url_query = urlparse(url)[4]\n try:\n return parse_qs(url_query).get('Discl_id', None)[-1]\n except IndexError as e:\n print(e)\n return \"\"", "def find_id(href):\n ID = idRE.search(href)\n if ID:\n return ID.group(1)", "def get_id(html):\n\ttry:\n\t\tsong_id = re.findall('soundcloud://sounds:(.*?)\"', html)[0]\n\t\treturn song_id\n\texcept IndexError:\n\t\tprint(\"\\033[91m✘ Could not find song ID\\033[0m\")\n\t\tsys.exit()" ]
[ "0.65878916", "0.6518125", "0.6397448", "0.6382529", "0.61623603", "0.61505437", "0.6048567", "0.60049254", "0.60015476", "0.5999453", "0.5963927", "0.5951866", "0.59363306", "0.59259576", "0.59230775", "0.5900245", "0.5842724", "0.5811348", "0.57866263", "0.5779499", "0.57722807", "0.5754538", "0.575169", "0.5737208", "0.57291096", "0.5720975", "0.5688694", "0.56549746", "0.5641877", "0.5641138" ]
0.79839915
0
Takes a full media url from Bandwidth and extracts the filename
def get_media_filename(media_url): return media_url.split("/")[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_file_path(self, url):\n try:\n row = ET.fromstring(self._session.get(url, headers={\"Access-Token\":self._token}).text)[1][2][1]\n data = [row[1].text, row[1].text, row[2].text]\n if \" - S\" in data[0]:\n data[0] = data[0][0:data[1].rfind(\" - S\")]\n elif \" (\" in data[0]:\n data[0] = data[0][0:data[1].rfind(\" (\")]\n return data\n except Exception as e:\n exception_type = type(e).__name__\n print(\"Unable to get media name.\")\n print(exception_type)\n print(e)\n return None", "def get_content_name(self, content_url):\n endpoint = content_url.split('/')[-1]\n return re.match(r'(.+\\.(?:jpg|mp4))', endpoint).group(0)", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def get_media_id(media_url):\n split_url = media_url.split(\"/\")\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png\n if split_url[-2] == \"media\":\n return split_url[-1]\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png\n else:\n #This is required for now due to the SDK parsing out the `/`s\n return \"%2F\".join(split_url[-3:])", "def filename(self,imgurl):\n if imgurl.find('/'):\n return imgurl.rsplit('/', 1)[1]", "def _get_filename_from_url(self) -> Optional[str]:\n file_name_portion = None\n\n right_portion = self.url.rsplit(\"/\", 1)\n if len(right_portion) == 2:\n # split any potential query params - these start with \"?\"\"\n file_name_portion = right_portion[1].split(\"?\")[0].strip()\n\n if len(file_name_portion) == 0:\n file_name_portion = None\n\n return file_name_portion", "def file_path(self, request, response=None, info=None):\n url = request.url\n media_guid = hashlib.sha1(to_bytes(url)).hexdigest()\n media_ext = os.path.splitext(url)[1]\n if not media_ext.isalnum():\n media_ext = os.path.splitext(urlparse(url).path)[1]\n return \"full/%s%s\" % (media_guid, media_ext)", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def url_file_name(url):\r\n return url[url.rfind('/') + 1:]", "def _get_file_name(url: str) -> str:\n url = url.strip('/')\n result = findall(r'/(\\w+\\.\\w+)[?|$]', url)\n if result:\n return result[-1]\n return url.split('/')[-1]", "def get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename", "def get_filename(target_dir, filename_prefix):\n # this whole function is not the nicest thing, but isolating it makes\n # things clearer , a good refactoring would be to get\n # the info from the video_url or the current output, to avoid the\n # iteration from the current dir\n filenames = os.listdir(target_dir)\n subs_filename = filename_prefix\n for name in filenames: # Find the filename of the downloaded video\n if name.startswith(filename_prefix):\n (basename, ext) = os.path.splitext(name)\n return basename", "def get_filename(target_dir, filename_prefix):\n # This whole function is not the nicest thing, but isolating it makes\n # things clearer. 
A good refactoring would be to get the info from the\n # video_url or the current output, to avoid the iteration from the\n # current dir.\n filenames = os.listdir(target_dir)\n for name in filenames: # Find the filename of the downloaded video\n if name.startswith(filename_prefix):\n (basename, ext) = os.path.splitext(name)\n return basename\n return None", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def get_filename(link):\r\n return link[link.rfind(\"/\") + 1:]", "def get_filename_from_url(url: str) -> str:\n return os.path.basename(urllib.parse.urlparse(urllib.parse.unquote_plus(url)).path)", "def url_filename(url):\n return os.path.basename(urlparse.urlparse(url).path)", "def get_filename(url: str) ->str:\n if 'drive.google.com' in url:\n return _extract_google_drive_file_id(url)\n url, filename = os.path.split(url)\n return filename or os.path.basename(url)", "def extractParticular(link):\n webpage = openWebsite(link).read()\n nameIndexStart = webpage.index('<title>') + 7\n nameIndexStop = webpage[nameIndexStart:].index('</title>') + nameIndexStart - 1\n name = webpage[nameIndexStart : nameIndexStop].split('-')[0]\n name = \" \".join(name.split())\n name = re.sub('/', '', name)\n\n avatarName = RESTAURANTPATH + '{}.png'.format(\"\".join(name.split()).lower())\n captureImage(link, avatarName)\n\n return name, avatarName", "def get_urifilename(uri):\n up=urlparse.urlparse(uri)\n return split(up[2],\"/\")[-1]", "def parse_filename(url):\n # extract the URL path\n url_path = urlparse.urlparse(url).path\n filename = url_path.split('/')[-1]\n\n # make loose assumption the file name is for an HTML page\n if len(filename) < 1:\n filename = 'index.html'\n\n return filename", "def get_url_filename(url, headers=None, strip=[]):\n filename = get_url_disposition_filename(url, headers)\n if filename:\n return filename\n return get_url_straight_filename(url, strip=[])", "def get_track_filename(self, url = None):\n track_file = urllib.urlopen(url)\n headers = track_file.info()\n track_file.close()\n return wget.filename_from_headers(headers)", "def extract_filename(str):\n regex = r\"([0-9_-]+).jpg\"\n matches = re.search(regex, str)\n if matches:\n return matches.group(1)", "def parse_url(url):\n url_parts = url.split('/')\n webcam_name = url_parts[-3] + 'CAM' + url_parts[-2]\n file_ext = url[-5:-1]\n last_update = 0.\n return {\n 'url': url[:-1], # Skip end of line\n 'name': webcam_name,\n 'imgpath': os.path.join(WEBCAM_DIR, webcam_name, '%d' + file_ext),\n 'last_update': last_update\n }", "def fileId_from_url(url):\r\n raw_fileId = re.findall(\"~[0-z.]+/[0-9]+\", url)[0][1: ]\r\n return raw_fileId.replace('/', ':')", "def fileId_from_url(url):\r\n raw_fileId = re.findall(\"~[A-z.]+/[0-9]+\", url)[0][1:]\r\n return raw_fileId.replace('/', ':')", "def prepare_media(self, object):\n if object.media is not None:\n #return object.media.media_file.name\n return '/api/v1/media/{0}/'.format(object.media.id)\n else:\n return ''", "def isolate_path_filename(self, uri, api_base=None):\n # Did we get an api_base\n api_base = api_base if api_base else self.api_base\n\n # Look for the part after the api_base\n url_parse = uri.lower().rpartition(api_base)\n\n # Take everything to the right of the api_base\n file_component = url_parse[2]\n\n # Remove any URL ? 
parameters\n if '?' in file_component:\n file_component = file_component.rpartition('?')[0]\n\n #: Remove URL encoding\n file_component = unquote(file_component)\n\n #: Remove any spaces in the filename\n file_component = file_component.replace(' ','')\n\n return file_component", "def best_filename(link, response):\n content_type = response.info().get('content-type', '')\n filename = link.filename # fallback\n # Have a look at the Content-Disposition header for a better guess:\n content_disposition = response.info().get('content-disposition')\n if content_disposition:\n type, params = cgi.parse_header(content_disposition)\n # We use ``or`` here because we don't want to use an \"empty\" value\n # from the filename param:\n filename = params.get('filename') or filename\n ext = splitext(filename)[1]\n if not ext:\n ext = mimetypes.guess_extension(content_type)\n if ext:\n filename += ext\n if not ext and link.url != response.geturl():\n ext = splitext(response.geturl())[1]\n if ext:\n filename += ext\n return filename" ]
[ "0.7303174", "0.70052874", "0.68807364", "0.67521065", "0.67236215", "0.66535795", "0.661498", "0.6603966", "0.6586546", "0.6538387", "0.6509765", "0.64932483", "0.64535576", "0.6430052", "0.638914", "0.63559645", "0.6279012", "0.62202466", "0.6168988", "0.61595553", "0.6069832", "0.60655814", "0.60260266", "0.6017655", "0.6014971", "0.6010148", "0.6006867", "0.59884715", "0.5948225", "0.5939837" ]
0.7981755
0
Takes a list of media urls and downloads the media into the temporary storage
def download_media_from_bandwidth(media_urls): downloaded_media_files = [] for media_url in media_urls: media_id = get_media_id(media_url) filename = get_media_filename(media_url) with open(filename, "wb") as f: try: downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id) f.write(downloaded_media.body) except Exception as e: print(e) downloaded_media_files.append(filename) return downloaded_media_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def download(urls, dest_folder):\n pass", "def fetch_files_from_urls(urls, dir):\n makedir(dir)\n try:\n pool = []\n for url in urls:\n p = Process(target=download, args=(url, dir,))\n p.start()\n pool.append(p)\n for p in pool:\n p.join()\n except KeyboardInterrupt:\n print \"Shutdown requested...exiting\"\n # except Exception:\n # traceback.print_exc(file=sys.stdout)\n\n # print(\"removing temporary files from current directory\")\n map(os.remove, glob.glob(\"*.tmp\"))", "def download_songs(**kwargs):\n for url in kwargs[\"songs\"][\"urls\"]:\n log.debug(\"Downloading to %s\", url[\"save_path\"])\n reference_file = DOWNLOAD_LIST\n track_db = write_tracks(reference_file, kwargs[\"songs\"])\n os.rename(reference_file, kwargs[\"output_dir\"] + \"/\" + reference_file)\n reference_file = str(kwargs[\"output_dir\"]) + \"/\" + reference_file\n kwargs[\"reference_file\"] = reference_file\n kwargs[\"track_db\"] = track_db\n if kwargs[\"multi_core\"] > 1:\n multicore_find_and_download_songs(kwargs)\n else:\n find_and_download_songs(kwargs)\n os.remove(reference_file)", "def downloadMinio(url_list,list_d):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n name = \"-\".join(parser_arguments().classes)\n name = name.lower()\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n\n\n if r.status_code == 200:\n r.raw.decode_content = True\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n metadata = list_d[i]\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n path = os.getcwd()+'/'+filename # image path\n minioClient.fput_object(name,filename,path,'image/jpg',metadata)\n os.remove(filename)\n 
print(filename,'have been successfuly uploaded')\n print('Done!')", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)", "def url_media(self, csvlinks=\"\", csvset=\"\", urldir=\"\", medialog_file=\"\",\n directory=\"\", ignore_twitter_link=True, mediatype=\"vi\",\n site_sources=[], name_scraping=\"\", video_timelimit=1000,\n image_timelimit=60):\n\n if csvlinks == \"\":\n csvlinks = CSVLINKS\n if csvset == \"\":\n csvset = CSVSETURL\n if medialog_file == \"\":\n medialog_file = MEDIALOG\n if directory == \"\":\n directory = self.directory\n\n if urldir == \"\" and name_scraping == \"\":\n urldir = URLDIR\n name_scraping = urldir.lower()\n elif name_scraping == \"\":\n name_scraping = urldir.lower()\n elif urldir == \"\":\n urldir = name_scraping\n\n if urldir[-1] != \"/\":\n 
urldir = urldir + \"/\"\n if name_scraping[-1] == \"/\":\n name_scraping = name_scraping[:-1]\n\n mediatype = str(mediatype).lower()\n if mediatype not in (\"v\", \"i\", \"vi\", \"iv\"):\n mediatype = \"vi\"\n\n root_dir = os.getcwd()\n\n if directory != \"\":\n os.chdir(directory)\n directory = os.getcwd()\n else:\n directory = root_dir\n\n setUrls = CSVUtils.csv_to_dict(csvset, 1, 0)\n\n if urldir[-1] == '/':\n urldir = urldir[:-1]\n OSUtils.createDir(urldir)\n\n seq = \"\"\n\n # get next sequence number\n if os.path.isfile(medialog_file):\n seq = JSONUtils.read_keyval_json(\"next_\"+name_scraping+\"Seq\",\n medialog_file)\n\n # if the parameter does not exist, get the seq from the\n if seq == \"\":\n seq = max([int(d) for d in os.listdir(urldir)] + [0]) + 1\n\n try:\n seqdir = os.path.realpath(urldir + \"/\" + str(seq))\n\n # implemented in order to give a feedback about progresss %\n total_row = sum(1 for row in CSVUtils.csvGenerator(csvlinks))\n row_count = 0\n\n # iterate through each link\n for line in CSVUtils.csvGenerator(csvlinks):\n row_count += 1\n\n if \"https://twitter.com\" in line[0] and ignore_twitter_link:\n continue\n\n url = self.__expandURL(line[0])\n\n if len(site_sources) > 0:\n if len([site for site in site_sources if site in url]\n ) == 0:\n continue\n\n if url not in setUrls.keys():\n\n print('\\x1b[6;30;42m' + \"Starting Scrapping for Link \"\n + str(url) + \" (\" + str(seq) + \")\" + '\\x1b[0m')\n\n os.mkdir(seqdir)\n os.chdir(seqdir)\n\n if \"v\" in mediatype:\n try:\n # in order to avoid stalls in lives\n signal.signal(signal.SIGALRM,\n OSUtils.handler_timeout)\n signal.alarm(video_timelimit)\n\n youtube_dl.YoutubeDL({}).download([url])\n except KeyboardInterrupt:\n raise\n except Exception as e:\n print(e)\n finally:\n signal.alarm(0)\n\n if \"i\" in mediatype:\n for im in self.__urlImageGenerator(url):\n try:\n signal.signal(signal.SIGALRM,\n OSUtils.handler_timeout)\n signal.alarm(image_timelimit)\n\n if \"base64,\" in im:\n continue\n\n lo = Text.lastocc(im, \"/\")+1\n\n if lo < len(im)-1:\n output = im[Text.lastocc(im, \"/\")+1:]\n else:\n output = im[\n Text.lastocc(im[:-1], \"/\")+1:-1]\n\n if output == \"\" or len(output) > 80:\n output = random.randint(1, 10000000000000)\n\n self.__request_download(link=im,\n output=str(output))\n except requests.exceptions.ConnectionError as e:\n print(e)\n continue\n except requests.exceptions.InvalidSchema as e:\n print(e)\n continue\n except Exception as e:\n print(e)\n finally:\n signal.alarm(0)\n\n os.chdir(directory)\n\n setUrls[url] = seq\n\n CSVUtils.write_line_b_csv(csvfile=csvset, line=[seq, url])\n\n print('\\x1b[6;30;42m' + \"Scrap Finished for Link \"\n + str(url) + \" (\"\n + str(round(row_count*100/total_row, 4)) + \"%)\"\n + '\\x1b[0m')\n\n seq += 1\n seqdir = os.path.realpath(urldir + \"/\" + str(seq))\n\n os.chdir(root_dir)\n\n except KeyboardInterrupt:\n print(\"Stopping...\")\n\n JSONUtils.add_keyval_json(\"next_\"+name_scraping+\"Seq\", seq,\n medialog_file)\n\n os.chdir(root_dir)\n\n shutil.rmtree(seqdir)\n except Exception as e:\n JSONUtils.add_keyval_json(\"next_\"+name_scraping+\"Seq\", seq,\n medialog_file)\n\n os.chdir(root_dir)\n\n shutil.rmtree(seqdir)\n print(e)\n raise", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 
2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)", "def download(urls: List[str], num_threads: int = 40) -> List[str]:\n\n num_files = len(urls)\n start = perf_counter()\n\n print(\"Starting download of %s files . . .\" % num_files)\n\n results = multiprocess(urls, Downloader, num_threads=num_threads)\n\n dur = perf_counter() - start\n print(\"Completed download of %s files after %.3f seconds.\" % (num_files, dur))\n\n return results", "def get_media_files(tweets, today, hour, output_folder):\n media_file = \"\"\n tweet_id = \"\"\n create_picture_folder(output_folder)\n\n for tweet in tweets:\n if tweet.get('delete') != None:\n continue\n if not tweet['retweeted'] and 'RT @' not in tweet['text'] and not tweet['in_reply_to_status_id']:\n media = tweet.get('entities').get('media', [])\n if len(media) > 0:\n # media_files.append(media[0]['media_url'])\n media_file += media[0]['media_url']\n # tweet_ids.append(tweet['id'])\n tweet_id += tweet['id_str']\n return media_file, tweet_id", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_list(urls, outdir=None, workdir=None, threads=3):\n pool = ThreadPool(threads)\n download_lambda = lambda x: download(x, outfile=outdir, workdir=workdir)\n pool.map(download_lambda, urls)", "def download(self, output_dir=None, chunk_size=1024):\n def download_content(content_link, output_dir):\n \"\"\"Download the content of a media and save it in a existing\n directory.\n\n Args:\n content_link (str):\n output_dir (str):\n Returns:\n dict: local version of the media object\n \"\"\"\n if content_link is None: return None\n res = requests.get(content_link, stream=True)\n try:\n res.raise_for_status()\n except requests.exceptions.HTTPError:\n return None\n img_name, img_format = parse_image_url(res.url)\n filepath = '{}/{}.{}'.format(output_dir, img_name, img_format)\n\n with open(filepath, mode='wb') as image_file:\n for chunk in res.iter_content(chunk_size=chunk_size):\n image_file.write(chunk)\n\n return abspath(filepath)\n\n output_dir = output_dir or getcwd()\n\n media_links = dict(\n image=[],\n video=[]\n )\n if self['media'] and self['media']['image']:\n downloaded_images = [\n download_content(item, output_dir) for item in self['media']['image']\n ]\n media_links['image'].extend(list(filter(None, downloaded_images)))\n if self['media'] and self['media']['video']:\n downloaded_videos = [\n {\n 'url': download_content(item['url'], output_dir),\n 'thumbnail': download_content(item['thumbnail'], output_dir)\n } for item in self['media']['video']\n ]\n media_links['video'].extend(\n filter(lambda x: x['url'] and x['thumbnail'], downloaded_videos)\n )\n\n return media_links", "def download_files(file_uris):\n\n if os.path.exists(LOG_FILE):\n log_file = open(LOG_FILE, \"rU+\")\n downloaded_podcasts = 
strip_newlines(log_file)\n else:\n log_file = open(LOG_FILE,\"w\")\n downloaded_podcasts = []\n\n for uri in file_uris:\n # if the current file URI is not found in the log, it is a new file, and\n # is thus downloaded\n if uri not in downloaded_podcasts:\n # extract filename from the URI \n uri_split = re.split(\"/\", uri)\n filename = uri_split[len(uri_split) - 1]\n \n # download the file\n if OUTPUT:\n print \"downloading \" + uri\n urllib.urlretrieve(uri, DEST_DIR + os.sep + filename)\n log_file.write(uri + os.linesep)\n\n log_file.close()", "def download_files(urls, folder): \n\n if not urls: \n return None\n if not folder: \n return None\n \n folder_path = Path(folder)\n if not folder_path.exists():\n os.makedirs(folder_path)", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def download_files(self):", "def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_remote_files(output_dir, files):\n logging.debug(f\"Try to download files: {files}\")\n\n # Create list of remote and local files\n base_url = \"https://storage.googleapis.com/\"\n urls = [base_url+file for file in files]\n local_files = [output_dir + file.split(\"/\")[-1] for file in files]\n\n\n async def get(session, url, local_f):\n if os.path.isfile(local_f):\n logging.info(\"Raw file {} exists locally\".format(local_f))\n pass\n else:\n # Download file\n async with session.get(url=url) as response:\n if response.status == 200:\n resp = await response.read()\n with open(local_f, \"wb\") as outfile:\n outfile.write(resp)\n\n\n async def main(urls, local_files):\n conn = aiohttp.TCPConnector(limit=30)\n timeout = aiohttp.ClientTimeout(total=None, connect=None, sock_connect=30, sock_read=10)\n async with aiohttp.ClientSession(connector=conn, timeout=timeout) as session:\n _ = await asyncio.gather(*[get(session, urls[f], local_files[f]) for f in range(len(urls))])\n\n asyncio.run(main(urls, local_files))\n return local_files", "def download_attachments(output_path, urls):\r\n locations = []\r\n for url in urls:\r\n path = urlparse(url).path\r\n #teardown path and rebuild to negate any errors with\r\n #os.path.join and leading /'s\r\n path = path.split('/')\r\n filename = 
path.pop(-1)\r\n localpath = ''\r\n for item in path:\r\n localpath = os.path.join(localpath, item)\r\n full_path = os.path.join(output_path, localpath)\r\n if not os.path.exists(full_path):\r\n os.makedirs(full_path)\r\n print('downloading {}'.format(filename))\r\n try:\r\n urlretrieve(url, os.path.join(full_path, filename))\r\n locations.append(os.path.join(localpath, filename))\r\n except URLError as e:\r\n error = (\"No file could be downloaded from {}; Error {}\"\r\n .format(url, e))\r\n logger.warning(error)\r\n except IOError as e: #Python 2.7 throws an IOError rather Than URLError\r\n # For japanese, the error might look kind of like this:\r\n # e = IOError( 'socket error', socket.error(111, u'\\u63a5\\u7d9a\\u3092\\u62d2\\u5426\\u3055\\u308c\\u307e\\u3057\\u305f') )\r\n # and not be suitable to use in \"{}\".format(e) , raising UnicodeDecodeError\r\n # (This is at least the case on my Fedora running Python 2.7.5 \r\n # (default, Feb 19 2014, 13:47:28) [GCC 4.8.2 20131212 (Red Hat 4.8.2-7)] on linux2\r\n try:\r\n error = (\"No file could be downloaded from {}; Error {}\"\r\n .format(url, e))\r\n except UnicodeDecodeError:\r\n # For lack of a better log message because we could not decode e, let's use repr(e)\r\n error = (\"No file could be downloaded from {}; Error {}\"\r\n .format(url, repr(e)))\r\n logger.warning(error)\r\n return locations", "def get_media(api, num_tweets=25, profile=\"@hakeemangulu\", admin=False):\n # Store the media urls in a list\n media_files = []\n\n # Create cursor object for the timeline\n if admin:\n # If the admin is using the application, return his timeline\n tl = tweepy.Cursor(api.home_timeline).items(num_tweets)\n else:\n # If the admin is not using the application, return the specified\n # user's timeline\n tl = tweepy.Cursor(api.user_timeline, screen_name=profile).items(num_tweets)\n\n # Iterate through the timeline and extract images\n for status in tl:\n # Get all media from a tweet\n media = status.entities.get('media', [])\n # Add non-empty media to the set\n for image in media:\n # Only add the image if it is a photo or GIF (as opposed to a\n # video)\n if image['type'] == 'photo' or image['type'] == 'animated_gif':\n media_files.append(image['media_url'])\n return media_files", "def download_feed_item(feed_item, base_directory):\n join_path = partial(os.path.join, base_directory)\n\n base_filename = base_filename_for_feed_item(feed_item)\n\n json_filename = join_path(\"{}.json\".format(base_filename))\n\n if os.path.exists(json_filename):\n # Stop here, we already have this video.\n return\n\n content = highest_quality_content(\n download_info_for_feed_item(feed_item)\n )\n\n video_content = (\n content[0]\n if isinstance(content, tuple) else\n content\n )\n\n assert video_content.media_type.has_video\n\n video_filename = join_path(\"{}.{}\".format(\n base_filename, video_content.media_type.file_type\n ))\n\n if os.path.exists(video_filename):\n # Delete the video file if it's there already.\n os.remove(video_filename)\n\n if isinstance(content, tuple):\n # Download video and audio at the same time.\n que = Queue()\n exception_queue = Queue()\n\n def download_in_queue():\n try:\n download_to_file(*que.get())\n except Exception as ex:\n exception_queue.put(ex)\n\n # TODO: It would be nice to be able to terminate the other\n # thread here.\n\n if isinstance(ex, (KeyboardInterrupt, SystemExit)):\n # Re-raise interrupts so cleanup code works.\n raise ex\n finally:\n que.task_done()\n\n temp_video_filename = tempfile.mkstemp(prefix= 
base_filename)[1]\n temp_audio_filename = tempfile.mkstemp(prefix= base_filename)[1]\n\n try:\n que.put((content[0].url, temp_video_filename))\n que.put((content[1].url, temp_audio_filename))\n\n for i in range(2):\n Thread(target= download_in_queue).start()\n\n que.join()\n\n if not exception_queue.empty():\n raise exception_queue.get()\n\n # Now use ffmpeg to join the audio and video content together.\n subprocess.check_call((\n \"ffmpeg\",\n \"-i\", temp_video_filename,\n \"-i\", temp_audio_filename,\n \"-c\", \"copy\", os.path.abspath(video_filename)\n ))\n finally:\n # Clean up temporary files.\n os.remove(temp_video_filename)\n os.remove(temp_audio_filename)\n else:\n # Download one audio-video file.\n download_to_file(video_content.url, video_filename)\n\n # Now write the JSOn file with the metadata.\n with open(json_filename, \"w\") as out_file:\n json.dump({\n \"version\": JSON_FORMAT_VERSION,\n \"content\": (\n [content[0].to_json(), content[1].to_json()]\n if isinstance(content, tuple) else\n [content.to_json()]\n ),\n \"feed_item\": feed_item.to_json(),\n }, out_file)\n\n return (video_filename, json_filename)", "def content_media_urls(*paths):\n from mezzanine.conf import settings\n media_url = settings.CONTENT_MEDIA_URL.strip(\"/\")\n return [\"/%s/%s\" % (media_url, path) for path in paths]", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def downloadDatasets(datasets: Iterable) -> Generator[tuple, None, None]:\n\n for ds in datasets:\n with urllib.request.urlopen(ds) as response:\n\n with tempfile.NamedTemporaryFile(delete=False) as tmp_file:\n shutil.copyfileobj(response, tmp_file)\n\n yield (response.url, tmp_file.name)" ]
[ "0.66829646", "0.6647512", "0.6639991", "0.6638432", "0.66363764", "0.6452788", "0.6394921", "0.63525635", "0.6312523", "0.6243412", "0.6240311", "0.61974305", "0.6190884", "0.61289537", "0.6114882", "0.6058402", "0.6052626", "0.6050897", "0.59977293", "0.5993598", "0.5966962", "0.59464955", "0.5936169", "0.59230244", "0.5921858", "0.5857853", "0.57877237", "0.57627517", "0.5756762", "0.5733441" ]
0.75554264
0
Takes a list of media files and uploads them to Bandwidth. The media file names are used as the media ids
def upload_media_to_bandwidth(media_files): for filename in media_files: with open(filename, "rb") as f: file_content = f.read() try: ##Note: The filename is doubling as the media id## response = messaging_client.upload_media(MESSAGING_ACCOUNT_ID, filename, str(len(file_content)), body=file_content) except Exception as e: print(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)", "def upload(self, folder_list, files):\n current_folder_id = self.top_folder_id\n for fname in folder_list:\n current_folder_id = self._fetch_or_create_folder(fname, current_folder_id)\n for file in files:\n self._upload_detail(file, current_folder_id)", "def copy_media(items, dest):\n for file in items:\n filename = os.path.basename(file)\n copyfile(file, dest + '\\\\' + filename)", "def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files", "def media_file_upload(request, manifest_id):\n manifest = get_object_or_404(Manifest, id=manifest_id)\n\n manifest_files = MediaFile.objects.filter(manifest=manifest)\n total_files_count = manifest_files.count()\n files_needing_upload = manifest_files.filter(file='')\n files_needing_upload_count = files_needing_upload.count()\n\n file_to_upload = files_needing_upload.first()\n\n # If no files left to upload, mark the manifest complete and move on\n if files_needing_upload_count < 1:\n Manifest.objects.filter(id=manifest.id).update(all_media_present=True)\n return HttpResponseRedirect(reverse('manifest-view', args=(manifest.id,)))\n\n form = MediaFileForm(request.POST or None, request.FILES or None, instance=file_to_upload)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('file-upload', args=(manifest.id,))) # Refresh view\n\n return render(request, 'file_manager/file_upload.html', {\n 'form': form,\n 'upload_number': total_files_count - files_needing_upload_count + 1, # Which place in order of upload e.g. 
2 of 3\n 'total_files_count': manifest_files.count(),\n 'file_to_upload': file_to_upload,\n })", "def post_wave(cnct):\n files = []\n\n if request.mimetype == 'multipart/form-data':\n for _, file in request.files.items():\n files.append((file.filename, file))\n else:\n files.append(('%s.wav' % uuid4(), request.stream))\n\n response = []\n for (name, fp) in files:\n parser = WaveParser(fp)\n try:\n audio_file = db.AudioFile.FromWaveParser(name, parser)\n cnct.add(audio_file)\n except WaveException as err:\n raise HttpError(406, str(err)) from None\n except Exception as err:\n print(err)\n raise HttpError(500) from None\n\n response.append(audio_file.info)\n\n cnct.commit()\n return {'files': response}", "def move_media(items, dest):\n for file in items:\n filename = os.path.basename(file)\n os.rename(file, dest + '\\\\' + filename)", "def testMediaUpload(self):\n self._testUpload(DefaultStorage(), 'media')\n self._testUpload(StaticStorage(), 'static')", "def upload_all(all_file_names):\n with ThreadPool(processes=int(10)) as pool:\n return pool.map(upload_file, all_file_names)", "def upload_bulk_sms_file(batch_id, file_path):\n batch = Batch.objects.get(id=batch_id)\n batch.add_messages(read_messages_from_file(file_path))\n batch.status = Batch.PENDING\n batch.save()", "async def create_upload_files(background_tasks: BackgroundTasks, files: List[UploadFile] = File(...), db: Session = Depends(get_db)):\n background_tasks.add_task(process_wrist, files)\n return {\"status\": \"success\"}", "async def create_upload_files(files: List[UploadFile] = File(...)):\n\n if len(files) > 3:\n return {\" \": {\"mode\": \"File Limit Exceeded\"}}\n \n filename = \"_temp_files_one/myfilem.wav\"\n res_json = {}\n file_counter = 0\n for upload_file in files:\n \n with open(filename, \"wb\") as file_object:\n \n file_object.write(upload_file.file.read())\n \n res_json[upload_file.filename + str(file_counter)] = predict_many(filename)\n \n os.remove(filename)\n \n return res_json", "def add_files(self,count=None):\n message_buffer =[]\n if count is None:\n count = len(self.files)\n while count:\n count -= 1\n message_buffer.append((count,base64.b64encode(self.files.pop()),0)) # required to maintain compatibility with\n if len(message_buffer) > 9:\n self.queue.write_batch(message_buffer)\n message_buffer = []\n self.queue.write_batch(message_buffer)", "def upload(media, media_data, *, additional_owners=_ELIDE,\n media_category=_ELIDE):\n binding = {'media': media, 'media_data': media_data, 'additional_owners':\n additional_owners, 'media_category': media_category}\n url = 'https://upload.twitter.com/1.1/media/upload.json'\n return _TwitterRequest('POST',\n url,\n 'rest:media',\n 'post-media-upload',\n binding)", "def post_files(self, file_list):\n self.body = None # Disable general body to be sent\n f_list = map(lambda f: (f[0], (pycurl.FORM_FILE, f[1])), file_list)\n self.curl.setopt(pycurl.HTTPPOST, f_list)", "def process_meter_upload(self, configlist):\n switches = [str(t[0]) for t in self.get_switches()]\n for swconfig in configlist: # for each\n dpid = list(swconfig.keys())[0]\n\n if dpid not in switches:\n break\n\n for flow in swconfig[dpid]:\n flow['dpid'] = dpid\n flow['operation'] = 'add'\n result = self.process_meter_message(flow)\n return 'Meters added successfully!'", "def _add_files(self, index_key, media_key,\n new_list, fundamentals):\n _index=fundamentals.get(index_key, {})\n _media=fundamentals.get(media_key, {})\n for _file in new_list:\n _data=self._item_from_index(_file, 'data', _media)\n if 
not _data:\n self.log('Failed to write file %s due to no data'%_file)\n continue\n if self._item_from_index(_file, None, _index) is None:\n _origin=self._item_from_index(_file, 'origin', _media)\n if _origin=='ringers':\n _path=self.protocolclass.RT_PATH\n elif _origin=='sounds':\n _path=self.protocolclass.SND_PATH\n elif _origin=='images':\n _path=self.protocolclass.PIC_PATH\n else:\n selg.log('File %s has unknown origin, skip!'%_file)\n continue\n _file_name=_path+'/'+_file\n try:\n self.writefile(_file_name, _data)\n except:\n self.log('Failed to write file '+_file_name)\n if __debug__:\n raise", "def handle_inbound_media_mms(to, from_, media):\n downloaded_media_files = download_media_from_bandwidth(media)\n upload_media_to_bandwidth(downloaded_media_files)\n remove_files(downloaded_media_files)\n body = MessageRequest()\n body.application_id = MESSAGING_APPLICATION_ID\n body.to = [from_]\n body.mfrom = to\n body.text = \"Rebound!\"\n #Build the media URL by taking the media ids (that doubled as the file names) and appending them to\n #the bandwidth media base url\n body.media = [BANDWIDTH_MEDIA_BASE_ENDPOINT + media_file for media_file in downloaded_media_files]\n try:\n messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)\n except Exception as e:\n print(e)\n return None", "def upload_file(log_filename_list, index):\n initlog(\"begin to upload files to server!!!!!!!\") \n for filename in log_filename_list:\n ftp_server = '10.10.3.25'\n ftp_port = '21'\n remotepath = '.'\n \n ftp = FTP() \n ftp.set_debuglevel(2)\n ftp.connect(ftp_server, ftp_port)\n ftp.login('', '')\n ftp.cwd(remotepath)\n bufsize = 1024\n \n try:\n file_handler = open(filename, 'rb') \n ftp.storbinary('STOR %s' % (str(index) + '_' + os.path.basename(filename)), file_handler, bufsize)\n ftp.set_debuglevel(0)\n except Exception, e:\n initlog('failed to upload files; %s' % str(e))\n else:\n file_handler.close()\n finally:\n ftp.quit()", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def _upload_samples(self, samples):\n # Iterate over the full set of provided samples, uploading them in chunks.\n for offset in range(0, len(samples), self.upload_chunk_size):\n chunk = samples[offset:offset + self.upload_chunk_size]\n self.api.upload_samples(offset, chunk)", "def submitFiles(self):\n formData =__new__(FormData)();\n \"\"\"\n Iteate over any file sent over appending the files\n to the form data.\n \"\"\"\n i=0\n console.log(self.vue.files)\n while i < self.vue.files.length:\n file = self.vue.files[i];\n formData.append('files[' + i + ']', file);\n i+=1\n \"\"\"\n Make the request to the POST /file-drag-drop URL\n \"\"\"\n formData.append(\"type\",\"upload\")\n __pragma__ ('jsiter') \n fetch('/json/plugins/',\n {\n \"method\":\"POST\",\n \"body\":formData,\n })\\\n .then(lambda res:res.json())\\\n .then(self.uploaded)\\\n .catch(lambda e:console.log('FAILURE!!',e));\n __pragma__ ('nojsiter')", "def _multipart_upload(self, credentials, src_file_path, artifact_file_path):\n try:\n headers = self._extract_headers_from_credentials(credentials.headers)\n 
# try to create the file\n self._retryable_adls_function(\n func=put_adls_file_creation,\n artifact_file_path=artifact_file_path,\n sas_url=credentials.signed_uri,\n headers=headers,\n )\n # next try to append the file\n futures = {}\n file_size = os.path.getsize(src_file_path)\n num_chunks = _compute_num_chunks(src_file_path, _MULTIPART_UPLOAD_CHUNK_SIZE)\n use_single_part_upload = num_chunks == 1\n for index in range(num_chunks):\n start_byte = index * _MULTIPART_UPLOAD_CHUNK_SIZE\n future = self.chunk_thread_pool.submit(\n self._retryable_adls_function,\n func=patch_adls_file_upload,\n artifact_file_path=artifact_file_path,\n sas_url=credentials.signed_uri,\n local_file=src_file_path,\n start_byte=start_byte,\n size=_MULTIPART_UPLOAD_CHUNK_SIZE,\n position=start_byte,\n headers=headers,\n is_single=use_single_part_upload,\n )\n futures[future] = index\n\n _, errors = _complete_futures(futures, src_file_path)\n if errors:\n raise MlflowException(\n f\"Failed to upload at least one part of {artifact_file_path}. Errors: {errors}\"\n )\n\n # finally try to flush the file\n if not use_single_part_upload:\n self._retryable_adls_function(\n func=patch_adls_flush,\n artifact_file_path=artifact_file_path,\n sas_url=credentials.signed_uri,\n position=file_size,\n headers=headers,\n )\n except Exception as err:\n raise MlflowException(err)", "def test_upload_dir_contents_multiple_files(self):\n self._test_upload_dir_contents(filenames=['file1', 'file2'])", "def upload(self, sources=None):\n\n # Backwards compatible with < v1.4\n if self.path is None:\n self.path = self.name\n\n if self.name is None:\n raise ValueError(\"Cannot upload without a file name\")\n\n if self.ftype is None:\n raise ValueError(\"Cannot upload without a file type\")\n\n data = {}\n sources = ThreatQSource.make_source_list(sources)\n if sources:\n data['sources'] = [src.to_dict() for src in sources if src]\n\n fname = os.path.basename(self.name)\n new_filename = \"%i-%s\" % (\n random.randint(1, 100000),\n fname.replace('.', ''))\n\n content = self.content\n if not content:\n inf = open(self.path, 'rb')\n content = inf.read()\n inf.close()\n\n res = self.tq.post(\n '/api/attachments/upload',\n data={\n 'resumableIdentifier': new_filename,\n 'resumableRelativePath': fname,\n 'resumableTotalChunks': 1,\n 'resumableFilename': fname,\n },\n files={\n 'file': ('blob', content, 'application/octet-stream')\n })\n\n data['name'] = fname\n if self.title:\n data['title'] = self.title\n data['type'] = self.ftype\n data['malware_locked'] = self.locked\n\n res = self.tq.post('/api/attachments', data=data)\n\n r = res.get('data')\n if not r or 'id' not in r:\n raise exceptions.UploadFailedError(res)\n\n for t in self.tags:\n res = self.tq.post('/api/attachments/%i/tags' % r['id'], data={'name': t})\n\n self.fid = r['id']\n return self", "def extract(request):\n try:\n files = request.FILES.getlist('myFile')\n msg_data = []\n fs = FileSystemStorage()\n for file in files:\n name = file.name.replace(\" \", \"_\")\n if os.path.exists(settings.MEDIA_ROOT + \"\\\\\" + name):\n os.remove(settings.MEDIA_ROOT + \"\\\\\" + name)\n fs.save(settings.MEDIA_ROOT + \"\\\\\" + name, file)\n msg = extract_msg.Message(settings.MEDIA_ROOT + \"\\\\\" + name)\n msg.save_attachments(customPath=settings.MEDIA_ROOT + \"\\\\\")\n attachments = []\n for i in range(0, len(msg.attachments)):\n attachments.append({\n \"filename\": msg.attachments[i].shortFilename,\n \"filepath\": \"/media/\" + msg.attachments[i].shortFilename\n })\n msg_data.append({\n # 
\"mainProperties\": msg.mainProperties,\n # \"header\": msg.header,\n \"attachments\": attachments,\n \"filename\": file.name,\n \"filepath\": \"/media/\" + name,\n \"from\": msg.sender,\n \"to\": msg.to,\n \"cc\": msg.cc,\n \"subject\": msg.subject,\n \"date\": msg.date,\n \"body\": msg.body,\n })\n msg.close()\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"File Uploaded!\",\n \"data\": msg_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Erorr in file uploading!\",\n \"data\": msg_data\n }\n return Response(response)", "def upload(request):\n ids = ((1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),\n (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1),\n (18, 1), (19, 2), (20, 2), (21, 2), (22, 2), (23, 2), (24, 2), (25, 2),\n (26, 2), (27, 2), (28, 2), (29, 3), (30, 3), (31, 3), (32, 3), (33, 3),\n (34, 3), (35, 3), (36, 4), (37, 4), (38, 4), (39, 4), (40, 4), (41, 4),\n (42, 4), (43, 4), (44, 4), (45, 4), (46, 4), (47, 4), (48, 4), (49, 4),\n (50, 4), (51, 4), (52, 4), (53, 4), (54, 4), (55, 4), (56, 4), (57, 4),\n (58, 4), (59, 4), (60, 4), (61, 4), (62, 4), (63, 4), (64, 4), (81, 4),\n (97, 4), (98, 4), (65, 5), (66, 5), (67, 5), (68, 5), (69, 5), (70, 5),\n (71, 5), (72, 5), (73, 5), (74, 5), (75, 5), (76, 5), (77, 5), (78, 5),\n (79, 5), (80, 6), (81, 6), (82, 6), (83, 6), (84, 6), (85, 6), (86, 6),\n (87, 6), (88, 6), (89, 6), (90, 6), (91, 6), (92, 6), (93, 6), (94, 6),\n (95, 6), (96, 7), (97, 7), (98, 7), (99, 7), (100, 7), (101, 7));\n idx = 1\n products = Product.objects.all()\n for product in products:\n product.product_category.add(Category.objects.get(category_id=ids[idx][1]))\n idx += 1\n\n serializer = ProductSerializer(instance=products, context={'request': request})\n\n return Response(data=serializer.data)", "async def update_files_provided(conn):\n select_sql = \"\"\" select * from media_files where type = ?\"\"\"\n rows = select(conn, select_sql, (\"mapshot\",))\n for row in rows:\n print(texture_path + row[1])\n if any([os.path.isfile(pball_path + row[1] + x) for x in (\".png\", \".jpg\", \".tga\", \".pcx\", \".wal\")]):\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (1, row[0]))\n else:\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (0, row[0]))\n\n select_sql = \"\"\" select * from media_files where type = ?\"\"\"\n rows = select(conn, select_sql, (\"texture\",))\n for row in rows:\n print(texture_path + row[1])\n if any([os.path.isfile(texture_path + row[1] + x) for x in (\".png\", \".jpg\", \".tga\", \".pcx\", \".wal\")]):\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (1, row[0]))\n else:\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (0, row[0]))\n\n select_sql = \"\"\" select * from media_files where type = ?\"\"\"\n rows = select(conn, select_sql, (\"sky\",))\n for row in rows:\n print(texture_path + row[1])\n if any([os.path.isfile(env_path + row[1] + x) for x in (\".png\", \".jpg\", \".tga\", \".pcx\", \".wal\")]):\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (1, row[0]))\n else:\n select_sql = \"\"\"update media_files set provided=? 
where file_id=?\"\"\"\n select(conn, select_sql, (0, row[0]))\n\n select_sql = \"\"\" select * from media_files where type = ?\"\"\"\n rows = select(conn, select_sql, (\"requiredfile\",))\n for row in rows:\n if os.path.isfile(pball_path + row[1]):\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (1, row[0]))\n else:\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (0, row[0]))\n\n select_sql = \"\"\" select * from media_files where type = ?\"\"\"\n rows = select(conn, select_sql, (\"externalfile\",))\n for row in rows:\n if any([any([os.path.isfile(pball_path + row[1] + x) for x in (\".skm\", \".md2\")]),\n any([os.path.isfile(pball_path + row[1] + x) for x in\n (\".png\", \".jpg\", \".tga\", \".pcx\", \".wal\", \"\")]),\n os.path.isfile(pball_path + \"sound/\" + row[1])]):\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (1, row[0]))\n else:\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (0, row[0]))\n\n select_sql = \"\"\" select * from media_files where type = ?\"\"\"\n rows = select(conn, select_sql, (\"linkedfile\",))\n print(\"rows\", rows)\n for row in rows:\n if any([any([os.path.isfile(pball_path + row[1] + x) for x in (\".skp\", \"\")]), any(\n [os.path.isfile(pball_path + row[1].split(\".\")[0] + x) for x in\n (\".png\", \".jpg\", \".tga\", \".pcx\", \".wal\", \"\")])]):\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (1, row[0]))\n else:\n select_sql = \"\"\"update media_files set provided=? where file_id=?\"\"\"\n select(conn, select_sql, (0, row[0]))", "def _upload_dir_to_bucket(self, path, ext_path):\n for file in os.listdir(path):\n self._upload_to_bucket(path+'/'+file, ext_path+'/'+file)", "def add_media():\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n for row in csv_data:\n row = clean_csv_values(row)\n if not ping_node(config, row['node_id']):\n print(\"Node \" + row['node_id'] + \" not found or not \" +\n \"accessible, skipping adding media.\")\n continue\n\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n node_json_url = config['host'] + '/node/' + row['node_id'] + '?_format=json'\n node_uri = config['host'] + '/node/' + row['node_id']\n node_response = issue_request(config, 'GET', node_json_url)\n if node_response.status_code == 200:\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print(media_type.title() + \" media for \" + row['file'] + \" created and added to \" + node_uri)\n logging.info(\"%s media for %s created and added to %s.\", media_type.title(), row['file'], node_uri)" ]
[ "0.619183", "0.60925543", "0.6053126", "0.5934323", "0.59186196", "0.57231236", "0.5704533", "0.57031876", "0.57009745", "0.56800365", "0.5639666", "0.5630675", "0.56029576", "0.56010246", "0.55941606", "0.5494556", "0.54650325", "0.54639095", "0.5461023", "0.5386679", "0.53646606", "0.5339651", "0.53395617", "0.5324782", "0.5322305", "0.53209245", "0.53136164", "0.5310266", "0.5306981", "0.5303259" ]
0.8371117
0
Removes all of the given files
def remove_files(files): for file_name in files: os.remove(file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def remove_files(file_list):\n###############################################################################\n for fpath in file_list:\n if os.path.exists(fpath):\n os.remove(fpath)\n # End if\n # End for", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def remove_files(files):\n for file in files:\n if os.path.exists(file):\n if file.startswith(\"./\") or file.startswith(\".\\\\\"):\n file = file[2:]\n if os.path.isdir(file):\n rmtree(file)\n else:\n os.unlink(file)", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def cleanup(*args, **kwargs):\n for file in args:\n if exists(file):\n remove(file)\n for file in kwargs:\n if exists(file):\n remove(file)", "def delete_files(src_files):\n for i, src_file in enumerate(src_files):\n sys.stdout.write(str(i + 1) + ': ' + src_file + '\\n')\n subprocess.call(['rm', src_file])", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def clean():\n clean_files()", "def cleanFiles(a_file_list):\n for entry in a_file_list:\n cmd = 'sudo rm ' + entry\n os.system(cmd)", "def rm(*fns):\n for fn in fns:\n try:\n os.remove(fn)\n except FileNotFoundError:\n pass", "def _deleteFiles(self, fileList):\n import os\n import glob\n\n for ent in fileList:\n # for fil in glob.glob(os.path.join(self._outputDir_, ent)):\n for fil in glob.glob(ent):\n try:\n if os.path.exists(fil):\n os.remove(fil)\n except OSError as e:\n self._reporter.writeError(\"Failed to delete '\" + fil + \"' : \" + e.strerror)\n raise", "def delete_b_files(intermediate_files: List[File]) -> None:\n for f in intermediate_files:\n f.remove()", "def remove_frames(tmpdir, files):\n for fname in files: os.remove(os.path.join(tmpdir, fname))\n if not(tmpdir == None): os.rmdir(tmpdir)", "def clean_files(self):\n self.filenames.clear()", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def remove(self, filenames=None, missing_ok=False):\n if not filenames:\n filenames = self.filenames\n\n for f in filenames:\n try:\n self.path.joinpath(f).unlink()\n except FileNotFoundError as e:\n if missing_ok == False:\n raise", "def deleteAllFiles(self, flush=True): \n \n if flush: \n self.flush(False) \n \n for filePath in self.filePathDict.keys(): \n if self.outDir is None: \n fullPath = filePath \n else: \n 
fullPath = os.path.join(self.outDir,filePath) \n \n if os.path.exists(fullPath): \n os.remove(fullPath)", "def remove_files(filename=None):\n os.remove(filename)\n print(\"The file %s has been removed\" % filename)", "def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)", "def delete_files(pths):\n for f in pths:\n try:\n os.remove(f)\n except OSError:\n log.debug(\"Found and ignored Error when deleting file %s\" % f)\n pass\n log.debug(\"deleted %d files\" % len(pths))", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()", "def _delete_files(p4, files, repo_name=None):\n if repo_name:\n msgstr = _(\"Deleting {num_commits} commit objects for repo '{repo_name}'.\")\n else:\n msgstr = _(\"Deleting {num_commits} commit objects for all repos.\")\n total = 0\n bite_size = 1000\n while len(files):\n to_delete = files[:bite_size]\n files = files[bite_size:]\n result = p4.run(\"delete\", to_delete)\n count = sum([int('depotFile' in row and row['action'] == 'delete') for row in result])\n total += count\n if count:\n for d in to_delete:\n if os.path.isfile(d):\n os.remove(d)\n result = p4.run(\"submit\", \"-d\", msgstr.format(num_commits=count, repo_name=repo_name))\n return total" ]
[ "0.8326417", "0.8259835", "0.7764254", "0.7697654", "0.76045775", "0.76031864", "0.7492463", "0.7479637", "0.7377181", "0.73686516", "0.7330737", "0.7330138", "0.7317665", "0.7217544", "0.72154135", "0.720359", "0.719617", "0.71945643", "0.71943825", "0.71901155", "0.71693027", "0.71502703", "0.7121444", "0.7086343", "0.7078291", "0.7040704", "0.69513613", "0.69470316", "0.69141686", "0.69122446" ]
0.8486456
0
Takes information from a Bandwidth inbound message callback that includes media and responds with a text message containing the same media sent through Bandwidth's media resource.
def handle_inbound_media_mms(to, from_, media): downloaded_media_files = download_media_from_bandwidth(media) upload_media_to_bandwidth(downloaded_media_files) remove_files(downloaded_media_files) body = MessageRequest() body.application_id = MESSAGING_APPLICATION_ID body.to = [from_] body.mfrom = to body.text = "Rebound!" #Build the media URL by taking the media ids (that doubled as the file names) and appending them to #the bandwidth media base url body.media = [BANDWIDTH_MEDIA_BASE_ENDPOINT + media_file for media_file in downloaded_media_files] try: messaging_client.create_message(MESSAGING_ACCOUNT_ID, body) except Exception as e: print(e) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_inbound_message():\n data = json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sms_call_me(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n elif \"media\" in data[0][\"message\"]:\n handle_inbound_media_mms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"], data[0][\"message\"][\"media\"])\n else:\n handle_inbound_sms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n else:\n print(data)\n return \"\"", "def sms_reply():\n # Fetch the message\n media_msg = request.form.get('NumMedia')\n msg = request.form.get('Body').lower()\n resp = MessagingResponse()\n responded = False\n if '1' in media_msg:\n pic_url = request.form.get('MediaUrl0') # URL of the person's media\n # pprint(pic_url) # so you can see the URL that the picture generated \n resp.message(\"We have recieved your request for image analysis! Please wait for our response\")\n resp.message(pic_url)\n url = \"https://techclan-twitter.herokuapp.com/reverse_image?URL=\"\n url=url+pic_url\n resp.message('The image has been succesfully uploaded to our server!The Url of the image is :')\n response=requests.get(url)\n parsed=json.loads(response.text)\n s1=\"\"\n count=0\n for each in parsed:\n s1=s1+each+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message('The reverse image analysis of image reports are:')\n resp.message(s1)\n time.sleep(1)\n u='http://18.205.87.224/api/text?id='\n u=u+pic_url\n response=requests.get(u)\n parsed=json.loads(response.text)\n resp.message(parsed)\n responded==True\n elif '5' in msg:\n r = requests.get('https://coronavirus-19-api.herokuapp.com/countries/india')\n if r.status_code == 200:\n data = r.json()\n text = f'_Covid-19 Cases in India_ \\n..........................\\nConfirmed Cases : *{data[\"cases\"]}* \\n................\\nToday Cases : *{data[\"todayCases\"]}* \\n..............\\nDeaths : *{data[\"deaths\"]}* \\n..................................\\nRecovered : *{data[\"recovered\"]}* \\n\\n..................\\nTotal Tested : *{data[\"totalTests\"]}* \\n\\n Type 0 to return to main menu'\n else:\n text = 'I could not retrieve the results at this time, sorry.'\n resp.message(text)\n responded = True \n \n elif '1' in msg:\n \n resp.message(\"wait we will fetch your results soon!!\")\n url = \"http://18.234.107.157:5000/api/text?id=\"\n ms=str(msg)\n #a,b=ms.split(' ',1)\n url=url+ms\n response=requests.get(url)\n parsed=json.loads(response.text)\n agree=0\n disagree=0\n discuss=0\n ctr=0\n for each in parsed:\n if ctr>100:\n break\n ctr=ctr+1\n answ=each.get('Score',\"error\")\n if answ == \"agree\":\n agree=agree+1\n elif answ == \"disagree\":\n disagree=disagree+1\n if(agree>disagree):\n resp.message(\"This is *REAL* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n else:\n resp.message(\"This is *FAKE* according to our sources !! 
Our results are based on following sources..we cannot be 100% Sure.\")\n count=0\n s1=\"\"\n for each in parsed:\n s1=s1+each['link']+\"*Title :*\" +each['title']+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message(s1)\n responded==True\n #reporting\n elif '3' in msg:\n # resp.message(\"We have reported your content to our police database!!\")\n ms=str(msg)\n a,b=ms.split(' ',1)\n url='https://spreadsheetupdate1.herokuapp.com/spreed?id='\n url=url+ms\n r=requests.get(url)\n resp.message(\"We have reported your content to our police database!!\")\n responded==True\n\n\n\n \n #for news\n\n elif msg=='news' or msg=='4':\n \n url=\"\"\"https://newsapi.org/v2/top-headlines?sources=bbc-news,cnn,cnbc,abc-news,google-news-uk,independent&apiKey=3ff5909978da49b68997fd2a1e21fae8\"\"\"\n r = requests.get(url)\n #resp.message(\"stay\") \n if r.status_code == 200:\n resp.message(\"stay here with us! We are fetching news for you \")\n data = r.json()\n articles = data['articles'][:5]\n result = \"\"\n ctr=0 \n for article in articles:\n # if ctr>10:\n # break\n # ctr=ctr+1\n title = article['title']\n url = article['url']\n if 'Z' in article['publishedAt']:\n published_at = datetime.datetime.strptime(article['publishedAt'][:19], \"%Y-%m-%dT%H:%M:%S\")\n else:\n published_at = datetime.datetime.strptime(article['publishedAt'], \"%Y-%m-%dT%H:%M:%S%z\")\n \n result += \"\"\"*{}*\nRead more: {}\n_Published at {:02}/{:02}/{:02} {:02}:{:02}:{:02} UTC_\n\"\"\".format(\n title,\n url, \n published_at.day, \n published_at.month, \n published_at.year, \n published_at.hour, \n published_at.minute, \n published_at.second\n )+\"\\n ..................\\n\"\n\n else:\n result = 'I cannot fetch news at this time. Sorry!'\n\n resp.message(result)\n responded = True\t\n else:\n phone_no = request.form.get('From')\n reply = fetch_reply(msg, phone_no)\n\n resp = MessagingResponse()\n resp.message(reply)\n responded = True\n \n\n \t\n\n return str(resp)", "def process_response(self, req, resp, resource, req_succeeded):\n if req.method == \"POST\":\n log.info((thisFilename, inspect.currentframe().f_code.co_name, \"media\", str(resp.media)))", "def handle_media( environ ):\n # TODO: implement me\n return 200, [], _html.format(\n title = 'MEDIA',\n head = '',\n body = 'MEDIA'\n )", "def ret_message(incoming_msg):\n # Create a object to create a reply.\n response = Response()\n\n # Set the text of the reply.\n response.text = \"Here's a fun little meme.\"\n\n # Craft a URL for a file to attach to message\n u = \"https://sayingimages.com/wp-content/uploads/\"\n u = u + \"aaaaaalll-righty-then-alrighty-meme.jpg\"\n response.files = u\n return response", "def receive_message(self, message, data):\n\n self.logger.debug('Plex media receive function called.')\n if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:\n self.logger.debug('(PlexController) MESSAGE RECEIVED: ' + data)\n return True\n\n return False", "def sms_reply():\n # Start our TwiML response\n # if body.lower()==\"good\":\n message=\"Hi I'm IRIS, an Immediately Responsive Intelligent System\\nHow are you feeling today?\"\n user=request.form['Body']\n\n # message=\"Hi \"+ name+ \"\"\n # user=request.form['Body']\n\n if user==\"good\":\n message=\"Glad to hear it! I hope you continue to feel this way! Celebrate this feeling and hold onto what happened ot make you feel this way so that you can repeat it in the future!\"\n\n if user==\"sad\":\n message=\"I’m sorry to hear that. 
Here are some things I do to make me feel better: take a walk outside, listen to uplifting music, call or message a loved one, or watch or read something positive to take my mind off of what I’m feeling.\"\n\n if user==\"nervous\":\n message=\"It’s going to be ok! This feeling will not last forever.\"\n if user==\"lonely\":\n message=\"I’m here for you, and know that you are loved, supported, and important. The world would not be the same without you! For a loving quote respond\"\n\n if user==\"angry\":\n message=\"“Let me help you turn your anger into something positive. Here are some ways to burn off energy productively: take a long walk, remove yourself from the situation, paint of draw, listen to loud music, or take a break from what you are doing.\"\n\n if user==\"tired\":\n message=\"I understand what you are feeling well. I recommend taking a break to do an activity you enjoy, taking a nap, getting a coffee, doing 20 jumping jacks, listening to a pump-up playlist, or standing up to stretch for a bit.\"\n\n if user==\"average\":\n message=\"There are many things to look forward to!\"\n resp = MessagingResponse()\n\t # Add a message\n \n resp.message(message)\n\t # Add a picture message\n\t #msg.media(\"https://farm8.staticflickr.com/7090/6941316406_80b4d6d50e_z_d.jpg\")\n\n return str(resp)", "def incoming_sms():\n # Get the message body\n body = request.values.get('Body', None)\n\n # Start our TwiML response\n resp = MessagingResponse()\n\n # Determine the appropriate response/action for incoming message\n replyText = getReply(body)\n\n resp.message(replyText)\n\n return str(resp)", "def mms_reply():\n print('New MMS')\n client_number = request.form[\"From\"]\n MessageSid = request.form[\"MessageSid\"]\n video_url = request.form[\"MediaUrl0\"]\n MediaSid = video_url.split('/')[-1]\n media_content_type = request.form[\"MediaContentType0\"]\n\n file_path = os.path.join(app.config['INPUT_VIDEOS_PATH'], str(MessageSid))\n video_name = str(MessageSid) + '.mp4'\n\n # Download the video\n video_response = requests.get(video_url, stream=True)\n video_response.raise_for_status() # Throw an error for bad status codes\n with open(\"{}.mp4\".format(file_path), 'wb') as handle:\n for block in video_response.iter_content(1024):\n handle.write(block)\n\n # Call Scenescoop async \n async_scenescoop.apply_async(args=[file_path, video_name, client_number, MessageSid, MediaSid])\n \n # Send an empty response message\n resp = MessagingResponse()\n #resp.message(\"Got it, now wait...\")\n\n return str(resp)", "def process_message(self, msg, src):", "def handle_recording():\n\n recording_url = request.values.get(\"RecordingUrl\", None)\n\n resp = VoiceResponse()\n resp.say(\"Listen to your recorded message.\")\n resp.play(recording_url)\n resp.say(\"Goodbye.\")\n return str(resp)", "def process(self):\n received_message = SubscribeMessage(*self.message.value)\n allow, msg = customize.authorize_subscription(received_message.topic, self.connection)\n if allow:\n subscription_id = tornwamp_topic.topics.add_subscriber(\n received_message.topic,\n self.connection,\n )\n answer = SubscribedMessage(\n request_id=received_message.request_id,\n subscription_id=subscription_id\n )\n self.broadcast_messages = customize.get_subscribe_broadcast_messages(received_message, subscription_id, self.connection.id)\n else:\n answer = ErrorMessage(\n request_id=received_message.request_id,\n request_code=received_message.code,\n uri=\"tornwamp.subscribe.unauthorized\"\n )\n answer.error(msg)\n self.answer_message = answer", 
"def incoming_sms():\n txt = request.form['Body']\n\n # remove leading and trailing white space and make lowercase\n txt = txt.strip()\n txt = txt.lower()\n\n # handle random searches differently than breed searches\n if txt == 'random' or txt == 'dog':\n url = get_dogs.get_random_dog()\n else:\n url = get_dogs.request_breed(txt)\n \n resp = MessagingResponse()\n if url:\n resp.message(url)\n else:\n resp.message(\"Sorry! We couldn't find a dog matching that query. Please try \\\n a more general search term.\")\n return str(resp)", "def on_watch_message(self, bus, msg):\n msg_struct = msg.get_structure()\n if msg_struct:\n if msg_struct.get_name() == 'GstMessageTag':\n codec_name = ((msg_struct[\"taglist\"].nth_tag_name(0)))\n codec_value = msg_struct[\"taglist\"].get_string(codec_name)\n info_name = codec_name\n c_result, info_value = codec_value\n if c_result:\n self.info_handler(info_name, info_value)\n if codec_name == \"video-codec\":\n self.info_handler(codec_name, info_value)\n r_result, width, height = self.get_resolution()\n if r_result:\n info_name = \"resolution\"\n info_value = \"[{}x{}]\".format(width, height)\n self.info_handler(info_name, info_value)\n bus.remove_signal_watch()", "def handle_message(self, message):", "def handle(self, message):", "def onMessage(self, payload, isBinary):", "def callback(ch, method, properties, body):\r\n body = json.loads(body)\r\n print(f\"[x] Task in the queue {body}\")\r\n # Creating instance of AudioRecorder\r\n recorder = AudioRecorder(body)\r\n driver = recorder.prepare_browser(body['settings'])\r\n recorder.run(driver, body)", "def _respond_message(self, msg):\n self.set_status(200)\n self.set_header(\"Content-Type\", \"application/x-mplane+json\")\n self.write(mplane.model.unparse_json(msg))\n self.finish()", "def upload_media_to_bandwidth(media_files):\n for filename in media_files:\n with open(filename, \"rb\") as f:\n file_content = f.read()\n try:\n ##Note: The filename is doubling as the media id##\n response = messaging_client.upload_media(MESSAGING_ACCOUNT_ID, filename, str(len(file_content)), body=file_content)\n except Exception as e:\n print(e)", "def handle_recording():\n \n recording_url = request.values.get(\"RecordingUrl\", None)\n \n resp = twilio.twiml.Response()\n resp.say(\"Thanks for howling... 
take a listen to what you howled.\")\n resp.play(recording_url)\n resp.say(\"Goodbye.\")\n return str(resp)", "def received_message(self, m):\n self.receiver.handle_message(m)", "def receive_message(self, message):", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\t\tself.pipeline.set_state(gst.STATE_PAUSED)", "def receive(self, data):\n try:\n xml = ElementTree.XML(data)\n except:\n raise StandardError(\"API request malformed\")\n\n mms = mobile.models.IncomingMMS.objects.create(\n id=xml.find('id').text,\n country=xml.find('country').text,\n sender=xml.find('senderNumber').text,\n recipient=xml.find('targetNumber').text,\n subject=xml.find('mms/subject').text,\n source=data\n )\n\n for item in xml.findall('mms/item'):\n if item.find('base64').text == 'true':\n data = b64decode(item.find('content').text)\n else:\n data = item.find('content').text\n\n mms_file = mobile.models.MMSFile(\n mms=mms\n )\n\n # Extract content type from MIME data\n matches = re.search('([^;]*/[^;]*);', item.find('mimeType').text)\n if matches:\n mms_file.content_type = matches.group(1)\n\n # Save file\n mms_file.file.save(\n name=item.find('name').text,\n content=ContentFile(data)\n )\n\n mms_file.save()\n\n return mms", "def message_callback(self, message):\n pass", "def process(self, message: Message, **kwargs: Any) -> None:", "def incoming_sms():\n # Get the message the user sent our Twilio number\n body = request.values.get('Body', None)\n\n # Start our TwiML response\n resp = MessagingResponse()\n\n # Determine the right reply for this message\n if body[0:7] == 'Newhigh':\n num = body[7:]\n result = ''\n with open('currentData.txt') as f:\n for i in range(int(num)):\n result += str(i + 1)\n result += '. 
'\n result += f.readline()\n # resp.message(result.stdout.decode('utf-8'))\n resp.message(result)\n return str(resp)\n \n if body[0:6] == 'Newlow':\n num = body[6:]\n result = subprocess.run(['tail','-n',num,'currentData.txt'], stdout=subprocess.PIPE)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)\n if body[0:4] == 'High':\n num = body[4:]\n num = '-' + num\n result = subprocess.run(['head',str(num),'AllBuildings.txt'], stdout=subprocess.PIPE)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)\n if body[0:3] == 'Low':\n num = body[3:]\n result = subprocess.run(['tail','-n',num,'AllBuildings.txt'], stdout=subprocess.PIPE)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)\n if body == 'Hourly':\n result = subprocess.run(['cat','ByHour.txt'], stdout=subprocess.PIPE)\n elif body == 'Weekday':\n result = subprocess.run(['cat','DaysOfWeek.txt'], stdout=subprocess.PIPE)\n # resp.message(fortune)\n elif body == '10minute':\n result = subprocess.run(['cat','data/PerTenMinutes.txt'], stdout=subprocess.PIPE)\n else:\n resp.message(\"June 2018 - Feb 2019 Totals\\n\\nCommands:\\n(# is any number between 1 and 50)\\nNewhigh# - Highest Past 10 Minutes\\nNewlow# - Lowest Past 10 Minutes\\n\\nBelow are cumulative annual figs:\\nHigh# - Highest of Year\\nLow# - Lowest of Year\\n\\nCampuswide Figures:\\n10minute - Ten Minute Intervals\\nHourly - 1 Hour Intervals\\nWeekday - By Day of the Week\\n\")\n return str(resp)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)" ]
[ "0.6259964", "0.6026629", "0.586778", "0.5793742", "0.5789032", "0.5778347", "0.56937456", "0.56709975", "0.5563751", "0.55476165", "0.5544929", "0.5535647", "0.55325127", "0.55144805", "0.55017763", "0.5446944", "0.5444091", "0.5428671", "0.5415789", "0.54080504", "0.5401289", "0.5378489", "0.53578794", "0.53400666", "0.53400666", "0.53297096", "0.5293248", "0.52578837", "0.52457666", "0.5244298" ]
0.68463576
0
Takes information from a Bandwidth inbound message callback and initiates a call
def handle_inbound_sms_call_me(to, from_): handle_call_me(to, from_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_call(message, client):\n pass", "def _call(self, msg, cb, *args):\r\n if not self._status:\r\n raise InterfaceDisabledError('A disabled interface should not be '\r\n 'called.')\r\n\r\n if not callable(cb):\r\n raise TypeError('Callback has to be callable.')\r\n\r\n uid = uuid4().hex\r\n deferred = Deferred()\r\n deferred.addCallback(cb, *args)\r\n self._responses[uid] = deferred\r\n\r\n self._conn.sendMessage(self._iTag, self._clsName, msg, uid)", "def ProcessCallback(self, interface, info):\n pass", "def ProcessCallback(self, interface, info):\n pass", "def whenReadReady(self, channel, call):", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def onMessageBegin(self, isBinary):", "def call(self, procedure: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n receive_progress: bool = None,\n call_timeout: float = None,\n cancel_mode: aiowamp.CancelMode = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> aiowamp.CallABC:\n ...", "def call(self, msg, cb=None):\r\n self._call(msg, cb or self._cb)", "def on_incoming_call(self, call):\n\n try:\n current_time = time.time()\n remote_uri = hash_remote_uri(self.cfg, call.info().remote_uri)\n\n if not self.cfg['VoipIO']['reject_calls']:\n if self.voipio.black_list[get_user_from_uri(remote_uri)] < current_time:\n # answer the call\n self.voipio.call = call\n self.voipio.on_incoming_call(remote_uri)\n\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Incoming call from %s\" % remote_uri)\n\n call_cb = CallCallback(self.cfg, call, self.voipio)\n call.set_callback(call_cb)\n\n call.answer()\n else:\n # rejected the call since the caller is blacklisted\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Rejected call from blacklisted remote URI %s \" % remote_uri)\n wait_hours = (self.voipio.black_list[get_user_from_uri(remote_uri)] - current_time) / (60 * 60)\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Must wait for %d hours\" % wait_hours)\n # respond by \"Busy here\"\n call.answer(486)\n\n self.voipio.on_rejected_call_from_blacklisted_uri(remote_uri)\n else:\n # reject the call since all calls must be rejected\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Rejected call from %s\" % remote_uri)\n\n # respond by \"Busy here\"\n call.answer(486)\n # respond by \"Decline\"\n #call.answer(603)\n\n self.voipio.on_rejected_call(remote_uri)\n except:\n self.voipio.close_event.set()\n self.cfg['Logging']['system_logger'].exception('Uncaught exception in the AccountCallback class.')\n raise", "def on_call_update(self, event):\n # if plivo_app != 'true', check b leg Dial callback\n plivo_app_flag = event['variable_plivo_app'] == 'true'\n if not plivo_app_flag:\n # request Dial callbackUrl if needed\n aleg_uuid = event['Bridged-To']\n if not aleg_uuid:\n return\n bleg_uuid = event['Unique-ID']\n if not bleg_uuid:\n return\n disposition = event['variable_endpoint_disposition']\n if disposition != 'ANSWER':\n return\n ck_url = event['variable_plivo_dial_callback_url']\n if not ck_url:\n return\n ck_method = event['variable_plivo_dial_callback_method']\n if not ck_method:\n return\n params = {'DialBLegUUID': bleg_uuid,\n 'DialALegUUID': aleg_uuid,\n 'DialBLegStatus': 'answer',\n 'CallUUID': aleg_uuid\n }\n # add 
extra params\n extra_params = self.get_extra_fs_vars(event)\n if extra_params:\n params.update(extra_params)\n spawn_raw(self.send_to_url, ck_url, params, ck_method)\n return", "async def asterisk_init(request):\n\n try:\n phone = request.rel_url.query[\"phone\"]\n except KeyError:\n phone = None\n LOGGER.error(f\"No 'phone' parameter passed on: '{request.rel_url}'\")\n raise web.HTTPClientError(\n reason=ASTERISK_CALL_ERROR, body=None, text=None, content_type=None\n )\n try:\n message = request.rel_url.query[\"message\"]\n except KeyError:\n message = None\n LOGGER.error(f\"No 'message' parameter passed on: '{request.rel_url}'\")\n raise web.HTTPClientError(\n reason=ASTERISK_CALL_ERROR, body=None, text=None, content_type=None\n )\n\n # Prepare the URL to 'call' the Asterisk ARI\n asterisk_query_string = (\n f\"endpoint={ASTERISK_CHAN_TYPE}/{phone}&extension={ASTERISK_EXTENSION}\"\n + f\"&context={ASTERISK_CONTEXT}&callerId={ASTERISK_CALLERID}\"\n )\n asterisk_call_init = (\n f\"{ASTERISK_URL}/{ASTERISK_ARI_CHANNELS}?{asterisk_query_string}\"\n )\n # Place a call on the Asterisk system using HTTP Basic Auth on the PBX\n headers = await gen_headers(await gen_auth_string())\n\n try:\n session = ClientSession(timeout=CLIENT_TIMEOUT_TOTAL)\n call_resp = await session.post(\n url=asterisk_call_init, data=None, headers=headers\n )\n await session.close()\n if call_resp.status == 200:\n response_data = await call_resp.json()\n asterisk_chan = response_data[\"id\"]\n session = ClientSession(timeout=CLIENT_TIMEOUT_TOTAL)\n await session.post(\n url=CALL_REGISTER_URL\n + f\"/{CALL_REGISTER_APP_ROUTE_REGISTER_CALL}\"\n + f\"?phone={phone}&message={message}&asterisk_chan={asterisk_chan}\",\n data=None,\n headers=headers,\n )\n await session.close()\n else:\n LOGGER.error(\n f\"Asterisk server '{ASTERISK_URL}' response: {call_resp.status}. Unable to initialize the call.\"\n )\n\n except client_exceptions.ClientConnectorError as e:\n LOGGER.error(f\"Unable to connect to the Asterisk system: '{e}'\")\n raise web.HTTPClientError(\n reason=str(e), body=None, text=None, content_type=None\n )\n\n return web.json_response({\"status\": call_resp.status})", "def onMessage(self, payload, isBinary):", "def main(msg: func.ServiceBusMessage):\r\n\r\n # Extract the method into a dictionary\r\n msg_dict = json.loads(msg.get_body().decode(\"utf-8\"))\r\n\r\n logging.info(f\"Python ServiceBus queue trigger processed message: {msg_dict}\")\r\n\r\n # Enable a connection with the IoT Hub. 
The connectionstring for the IoT Hub\r\n # is preloaded in the Azure Functions configurations.\r\n connectino_string_iothub = os.getenv(\"connectionStringIotHub\")\r\n registry_manager = IoTHubRegistryManager(connectino_string_iothub)\r\n\r\n # Settings for the method that the IoT Device should run upon receiving the message.\r\n callback_method = \"start_fan\"\r\n callback_payload = {}\r\n device_method = CloudToDeviceMethod(\r\n method_name=callback_method, payload=callback_payload\r\n )\r\n\r\n # Sending the actual cloud-to-device message and invoke a function on the IoT device.\r\n device_id = msg_dict[\"IoTHub\"][\"ConnectionDeviceId\"]\r\n response = registry_manager.invoke_device_method(device_id, device_method)\r\n\r\n print(\"\")\r\n print(\"Device Method called\")\r\n print(\"Device Method name : {0}\".format(callback_method))\r\n print(\"Device Method payload : {0}\".format(callback_payload))\r\n print(\"\")\r\n print(\"Response status : {0}\".format(response.status))\r\n print(\"Response payload : {0}\".format(response.payload))", "def subscribe(receiver, catchup):", "def test_incoming_k(self):\n m_interface = Mock()\n m_interface.callback.return_value = True\n m_interface.read.return_value = ''\n upb = UPB(m_interface)\n upb.onCommand(address=(22,255), callback=m_interface.callback)\n m_interface.read.return_value = \"PU07141610FF3090\\x0DPU07151610FF308F\\x0D\"\n# time.sleep(4000)\n time.sleep(2)\n m_interface.callback.assert_called_with(address=(22,255), command='status', source=upb)\n m_interface.read.return_value = ''", "def call(self, callee: \"SIPPhoneTemplate\") -> None:", "def _initiate(self, call):\n if not self.gsm_call:\n raise Exception(\"No connectivity\")\n number = str(call.number)\n logger.info(\"initiate call to %s\", number)\n call_id = yield WaitDBus(self.gsm_call.Initiate, number, \"voice\")\n call_id = int(call_id)\n logger.info(\"call id : %d\", call_id)\n self.lines[call_id] = call\n # TODO: mabe not good idea to store this in the call itself,\n # beside, it makes pylint upset.\n call.__id = call_id", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def callback(ch, method, properties, body):\r\n body = json.loads(body)\r\n print(f\"[x] Task in the queue {body}\")\r\n # Creating instance of AudioRecorder\r\n recorder = AudioRecorder(body)\r\n driver = recorder.prepare_browser(body['settings'])\r\n recorder.run(driver, body)", "def incoming(self,message):\n #Convert to Dictionary, Whatever the input is\n if isinstance(message, str):\n message = json.loads(message)\n elif isinstance(message, bytes):\n message = self.deserialize(message)\n\n op = message.get(\"op\")\n if op == \"publish\":\n message[\"msg\"] = self.decompress(message[\"topic\"],message.get(\"msg\"))\n message[\"topic\"] = self.remap_topic(message[\"topic\"]) \n elif op == \"advertise\":\n message[\"topic\"] = self.remap_topic(message[\"topic\"])\n elif op == \"advertise_service\" or op == \"service_response\":\n message[\"service\"] = self.remap_service(message[\"service\"])\n\n\n message = json.dumps(message)\n #--------\n #replace JSON Null values in float32 types with infinity datatype (changed according to the error for LaserScan values)\n message = message.replace(\"null\", \"Infinity\")\n #--------\n self._protocol.incoming(message)", "def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_stdcall)\n super(self.__class__, self).call(prepare_cb, addr, *args)", "def call(self, addr, *args, 
**kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_stdcall)\n super(self.__class__, self).call(prepare_cb, addr, *args)", "def polling_call(self) -> global___Snippet.ClientCall:", "def place_call(self, number):\n call_params = urllib.urlencode({\n 'outgoingNumber' : number,\n 'forwardingNumber' : self.forwarding_number,\n 'subscriberNumber' : 'undefined',\n 'remember' : '0',\n 'phoneType' : self.phone_type,\n '_rnr_se': self.key\n })\n\n # Send the text, display status message \n self.response = self.opener.open(self.call_url, call_params).read()", "def ceilometer_callback(self, ch, method, properties, body):\n payload = json.loads(body)\n try:\n message_body = json.loads(payload['oslo.message'])\n samples = message_body['args']['data']\n #print \"--------------------------------------------------\"\n self.pool.spawn_n(self.zabbix_sender.consume_samples,samples)\n except Exception,e:\n log.warn(str(e))", "def handle_call(self):\n call_socket, address = self.call_socket.accept()\n print(\"connected call socket: {}\".format(call_socket))\n # gets name of user making the call:\n caller_name = self.receive_mes(call_socket)\n # gets from calling client user they want to call:\n receiver_name = self.receive_mes(call_socket)\n # gets receivers socket from dictionary\n if receiver_name not in self.client_dict:\n print(\"boi bye\")\n sys.exit(EXIT)\n receiver_sock = self.client_dict[receiver_name]\n mes = \"{} is calling you\".format(caller_name)\n self.send_mes(mes.encode(), receiver_sock)\n answer = self.receive_mes(receiver_sock)\n print(\"answer from {}: {}\".format(receiver_name, answer))\n if answer == \"Y\":\n self.send_mes(\"call\".encode(), call_socket)\n self.start_call()\n else:\n self.send_mes(\"no call\".encode(), call_socket)" ]
[ "0.63667405", "0.60182863", "0.59576505", "0.59576505", "0.59211445", "0.589763", "0.589763", "0.589763", "0.5851596", "0.5813852", "0.5791376", "0.57506865", "0.57401466", "0.5735596", "0.5733125", "0.57320815", "0.571448", "0.5710372", "0.570059", "0.5682062", "0.5653709", "0.5653709", "0.5653049", "0.56425714", "0.5633873", "0.5633873", "0.5620766", "0.5611832", "0.5583558", "0.55589837" ]
0.62250584
1
A method for showing how to handle Bandwidth messaging callbacks. For inbound SMS that contains the phrase "call me", a phone call is made and the user is asked to forward the call to another number. For inbound SMS that doesn't contain the phrase "call me", the response is an SMS with the date and time. For inbound MMS with a media attachment, the response is the same media attachment sent through Bandwidth's media resource. For all other events, the callback is logged to the console.
def handle_inbound_message(): data = json.loads(request.data) if data[0]["type"] == "message-received": if "call me" in data[0]["message"]["text"]: handle_inbound_sms_call_me(data[0]["message"]["to"][0], data[0]["message"]["from"]) elif "media" in data[0]["message"]: handle_inbound_media_mms(data[0]["message"]["to"][0], data[0]["message"]["from"], data[0]["message"]["media"]) else: handle_inbound_sms(data[0]["message"]["to"][0], data[0]["message"]["from"]) else: print(data) return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)", "def handle_inbound_media_mms(to, from_, media):\n downloaded_media_files = download_media_from_bandwidth(media)\n upload_media_to_bandwidth(downloaded_media_files)\n remove_files(downloaded_media_files)\n body = MessageRequest()\n body.application_id = MESSAGING_APPLICATION_ID\n body.to = [from_]\n body.mfrom = to\n body.text = \"Rebound!\"\n #Build the media URL by taking the media ids (that doubled as the file names) and appending them to\n #the bandwidth media base url\n body.media = [BANDWIDTH_MEDIA_BASE_ENDPOINT + media_file for media_file in downloaded_media_files]\n try:\n messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)\n except Exception as e:\n print(e)\n return None", "def incoming_sms():\n txt = request.form['Body']\n\n # remove leading and trailing white space and make lowercase\n txt = txt.strip()\n txt = txt.lower()\n\n # handle random searches differently than breed searches\n if txt == 'random' or txt == 'dog':\n url = get_dogs.get_random_dog()\n else:\n url = get_dogs.request_breed(txt)\n \n resp = MessagingResponse()\n if url:\n resp.message(url)\n else:\n resp.message(\"Sorry! We couldn't find a dog matching that query. Please try \\\n a more general search term.\")\n return str(resp)", "def handle_inbound_sms(to, from_):\n body = MessageRequest()\n body.application_id = MESSAGING_APPLICATION_ID\n body.to = [from_]\n body.mfrom = to\n body.text = \"The current date-time is: \" + str(time.time() * 1000) + \" milliseconds since the epoch\"\n try:\n messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)\n except Exception as e:\n print(e)\n return None", "def incoming_sms():\n number = request.values.get('From', None)\n body = request.values.get('Body', None)\n print(body)\n # Start our TwiML response\n resp = MessagingResponse()\n\n body = body.lower()\n body = body.strip()\n body_arr = body.split()\n class_name = \"\"\n name = \"\"\n if len(body_arr) == 4:\n first_name = body_arr[0]\n last_name = body_arr[1]\n name = first_name + \" \" + last_name\n class_name = body_arr[2] + body_arr[3]\n elif len(body_arr) == 6:\n first_name = body_arr[0]\n last_name = body_arr[1]\n name = first_name + \" \" + last_name\n class_name = body_arr[2] + body_arr[3] + body_arr[4] + body_arr[5]\n else:\n resp.message(\"Invalid: Enter your name, class, and session# separated by spaces as shown\\n(eg: Avi Patel grade1 session1, Ravi Rao PreK session1, Mira Singh kg session2, etc.):\")\n return str(resp)\n\n if classes.find_one({'class':class_name}):\n forward_message(class_name, number, name)\n resp.message(\"Your teachers have been notified\")\n\n else:\n resp.message(\"Invalid: Enter your name, class, and session# separated by spaces as shown\\n(eg: Avi Patel grade1 session1, Ravi Rao PreK session1, Mira Singh kg session2, etc.):\")\n\n return str(resp)", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def onMessage(self, payload, isBinary):", "def incoming_sms():\n # Get the message body\n body = request.values.get('Body', None)\n\n # Start our TwiML response\n resp = MessagingResponse()\n\n # Determine the appropriate response/action for incoming message\n replyText = getReply(body)\n\n resp.message(replyText)\n\n return str(resp)", "def sms_reply():\n # Start our TwiML response\n # if body.lower()==\"good\":\n message=\"Hi I'm IRIS, an Immediately Responsive Intelligent System\\nHow are you feeling today?\"\n user=request.form['Body']\n\n # 
message=\"Hi \"+ name+ \"\"\n # user=request.form['Body']\n\n if user==\"good\":\n message=\"Glad to hear it! I hope you continue to feel this way! Celebrate this feeling and hold onto what happened ot make you feel this way so that you can repeat it in the future!\"\n\n if user==\"sad\":\n message=\"I’m sorry to hear that. Here are some things I do to make me feel better: take a walk outside, listen to uplifting music, call or message a loved one, or watch or read something positive to take my mind off of what I’m feeling.\"\n\n if user==\"nervous\":\n message=\"It’s going to be ok! This feeling will not last forever.\"\n if user==\"lonely\":\n message=\"I’m here for you, and know that you are loved, supported, and important. The world would not be the same without you! For a loving quote respond\"\n\n if user==\"angry\":\n message=\"“Let me help you turn your anger into something positive. Here are some ways to burn off energy productively: take a long walk, remove yourself from the situation, paint of draw, listen to loud music, or take a break from what you are doing.\"\n\n if user==\"tired\":\n message=\"I understand what you are feeling well. I recommend taking a break to do an activity you enjoy, taking a nap, getting a coffee, doing 20 jumping jacks, listening to a pump-up playlist, or standing up to stretch for a bit.\"\n\n if user==\"average\":\n message=\"There are many things to look forward to!\"\n resp = MessagingResponse()\n\t # Add a message\n \n resp.message(message)\n\t # Add a picture message\n\t #msg.media(\"https://farm8.staticflickr.com/7090/6941316406_80b4d6d50e_z_d.jpg\")\n\n return str(resp)", "def message_callback(self, message):\n pass", "def bitfinex2_on_message(caller, msg):\n msg = json.loads(msg)\n if caller.subbed_count == 7:\n if msg[1] == \"te\":\n chnl = msg[0]\n body = msg[2]\n df = pd.DataFrame.from_records(\n data=[{\n \"tid\": int(body[0]),\n \"price\": float(body[3]),\n \"volume\": float(body[2]),\n \"datetime\": pd.to_datetime(body[1], unit='ms')\n }],\n index=\"datetime\"\n )\n # print (df)\n df.index = df.index.tz_localize(\"GMT0\")\n caller.write(chnl, df)\n\n return chnl, df\n\n if type(msg) is dict and \"event\" in msg and msg[\"event\"] == \"subscribed\":\n caller.config[\"channel_symbol\"][msg[\"chanId\"]] = \"bitfinex2\" + \":\" + bdic[msg[\"symbol\"]]\n caller.subbed_count += 1\n return\n\n\n chnl = msg[0]\n body = msg[2]\n df = pd.DataFrame.from_records(\n data=[{\n \"tid\": int(body[0]),\n \"price\": float(body[3]),\n \"volume\": float(body[2]),\n \"datetime\": pd.to_datetime(body[1], unit='ms')\n }],\n index=\"datetime\"\n )\n df.index = df.index.tz_convert(\"GMT0\")\n caller.write(chnl, df)\n\n return chnl, df", "async def on_call(message, client):\n pass", "def incoming_sms():\n # Get the message the user sent our Twilio number\n body = request.values.get('Body', None)\n\n # Start our TwiML response\n resp = MessagingResponse()\n\n # Determine the right reply for this message\n if body[0:7] == 'Newhigh':\n num = body[7:]\n result = ''\n with open('currentData.txt') as f:\n for i in range(int(num)):\n result += str(i + 1)\n result += '. 
'\n result += f.readline()\n # resp.message(result.stdout.decode('utf-8'))\n resp.message(result)\n return str(resp)\n \n if body[0:6] == 'Newlow':\n num = body[6:]\n result = subprocess.run(['tail','-n',num,'currentData.txt'], stdout=subprocess.PIPE)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)\n if body[0:4] == 'High':\n num = body[4:]\n num = '-' + num\n result = subprocess.run(['head',str(num),'AllBuildings.txt'], stdout=subprocess.PIPE)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)\n if body[0:3] == 'Low':\n num = body[3:]\n result = subprocess.run(['tail','-n',num,'AllBuildings.txt'], stdout=subprocess.PIPE)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)\n if body == 'Hourly':\n result = subprocess.run(['cat','ByHour.txt'], stdout=subprocess.PIPE)\n elif body == 'Weekday':\n result = subprocess.run(['cat','DaysOfWeek.txt'], stdout=subprocess.PIPE)\n # resp.message(fortune)\n elif body == '10minute':\n result = subprocess.run(['cat','data/PerTenMinutes.txt'], stdout=subprocess.PIPE)\n else:\n resp.message(\"June 2018 - Feb 2019 Totals\\n\\nCommands:\\n(# is any number between 1 and 50)\\nNewhigh# - Highest Past 10 Minutes\\nNewlow# - Lowest Past 10 Minutes\\n\\nBelow are cumulative annual figs:\\nHigh# - Highest of Year\\nLow# - Lowest of Year\\n\\nCampuswide Figures:\\n10minute - Ten Minute Intervals\\nHourly - 1 Hour Intervals\\nWeekday - By Day of the Week\\n\")\n return str(resp)\n resp.message(result.stdout.decode('utf-8'))\n return str(resp)", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def processMessage(self, *args, **kwargs):\r\n pass", "def on_message(data):\n pass", "def callback_message(self, message):\n return \"hi bro\"", "def on_bus_message(self, bus, message):\n pass", "def process(self, message: Message, **kwargs: Any) -> None:", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if \"in_reply_to_status_id\" in data:\n status = Status.parse(None, data)\n return self.on_status(status)\n if \"delete\" in data:\n delete = data[\"delete\"][\"status\"]\n return self.on_delete(delete[\"id\"], delete[\"user_id\"])\n if \"disconnect\" in data:\n return self.on_disconnect_message(data[\"disconnect\"])\n if \"limit\" in data:\n return self.on_limit(data[\"limit\"][\"track\"])\n if \"scrub_geo\" in data:\n return self.on_scrub_geo(data[\"scrub_geo\"])\n if \"status_withheld\" in data:\n return self.on_status_withheld(data[\"status_withheld\"])\n if \"user_withheld\" in data:\n return self.on_user_withheld(data[\"user_withheld\"])\n if \"warning\" in data:\n return self.on_warning(data[\"warning\"])\n\n log.error(\"Received unknown message type: %s\", raw_data)", "def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)", "def sms_reply():\n # Fetch the message\n media_msg = request.form.get('NumMedia')\n msg = request.form.get('Body').lower()\n resp = MessagingResponse()\n responded = False\n if '1' in media_msg:\n pic_url = request.form.get('MediaUrl0') # URL of the person's media\n # pprint(pic_url) # so you can see the URL that the picture generated \n resp.message(\"We have recieved your request for image analysis! 
Please wait for our response\")\n resp.message(pic_url)\n url = \"https://techclan-twitter.herokuapp.com/reverse_image?URL=\"\n url=url+pic_url\n resp.message('The image has been succesfully uploaded to our server!The Url of the image is :')\n response=requests.get(url)\n parsed=json.loads(response.text)\n s1=\"\"\n count=0\n for each in parsed:\n s1=s1+each+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message('The reverse image analysis of image reports are:')\n resp.message(s1)\n time.sleep(1)\n u='http://18.205.87.224/api/text?id='\n u=u+pic_url\n response=requests.get(u)\n parsed=json.loads(response.text)\n resp.message(parsed)\n responded==True\n elif '5' in msg:\n r = requests.get('https://coronavirus-19-api.herokuapp.com/countries/india')\n if r.status_code == 200:\n data = r.json()\n text = f'_Covid-19 Cases in India_ \\n..........................\\nConfirmed Cases : *{data[\"cases\"]}* \\n................\\nToday Cases : *{data[\"todayCases\"]}* \\n..............\\nDeaths : *{data[\"deaths\"]}* \\n..................................\\nRecovered : *{data[\"recovered\"]}* \\n\\n..................\\nTotal Tested : *{data[\"totalTests\"]}* \\n\\n Type 0 to return to main menu'\n else:\n text = 'I could not retrieve the results at this time, sorry.'\n resp.message(text)\n responded = True \n \n elif '1' in msg:\n \n resp.message(\"wait we will fetch your results soon!!\")\n url = \"http://18.234.107.157:5000/api/text?id=\"\n ms=str(msg)\n #a,b=ms.split(' ',1)\n url=url+ms\n response=requests.get(url)\n parsed=json.loads(response.text)\n agree=0\n disagree=0\n discuss=0\n ctr=0\n for each in parsed:\n if ctr>100:\n break\n ctr=ctr+1\n answ=each.get('Score',\"error\")\n if answ == \"agree\":\n agree=agree+1\n elif answ == \"disagree\":\n disagree=disagree+1\n if(agree>disagree):\n resp.message(\"This is *REAL* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n else:\n resp.message(\"This is *FAKE* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n count=0\n s1=\"\"\n for each in parsed:\n s1=s1+each['link']+\"*Title :*\" +each['title']+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message(s1)\n responded==True\n #reporting\n elif '3' in msg:\n # resp.message(\"We have reported your content to our police database!!\")\n ms=str(msg)\n a,b=ms.split(' ',1)\n url='https://spreadsheetupdate1.herokuapp.com/spreed?id='\n url=url+ms\n r=requests.get(url)\n resp.message(\"We have reported your content to our police database!!\")\n responded==True\n\n\n\n \n #for news\n\n elif msg=='news' or msg=='4':\n \n url=\"\"\"https://newsapi.org/v2/top-headlines?sources=bbc-news,cnn,cnbc,abc-news,google-news-uk,independent&apiKey=3ff5909978da49b68997fd2a1e21fae8\"\"\"\n r = requests.get(url)\n #resp.message(\"stay\") \n if r.status_code == 200:\n resp.message(\"stay here with us! 
We are fetching news for you \")\n data = r.json()\n articles = data['articles'][:5]\n result = \"\"\n ctr=0 \n for article in articles:\n # if ctr>10:\n # break\n # ctr=ctr+1\n title = article['title']\n url = article['url']\n if 'Z' in article['publishedAt']:\n published_at = datetime.datetime.strptime(article['publishedAt'][:19], \"%Y-%m-%dT%H:%M:%S\")\n else:\n published_at = datetime.datetime.strptime(article['publishedAt'], \"%Y-%m-%dT%H:%M:%S%z\")\n \n result += \"\"\"*{}*\nRead more: {}\n_Published at {:02}/{:02}/{:02} {:02}:{:02}:{:02} UTC_\n\"\"\".format(\n title,\n url, \n published_at.day, \n published_at.month, \n published_at.year, \n published_at.hour, \n published_at.minute, \n published_at.second\n )+\"\\n ..................\\n\"\n\n else:\n result = 'I cannot fetch news at this time. Sorry!'\n\n resp.message(result)\n responded = True\t\n else:\n phone_no = request.form.get('From')\n reply = fetch_reply(msg, phone_no)\n\n resp = MessagingResponse()\n resp.message(reply)\n responded = True\n \n\n \t\n\n return str(resp)", "def onMessageBegin(self, isBinary):", "def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def sms_notification(request, source):\n\n # call provider factory based on slug\n source = source.lower()\n try:\n provider = get_service_provider(slug=source)\n except Exception, e:\n log.critical(e)\n raise Http404()\n\n # do a simple IP check\n ip = request.META['REMOTE_ADDR']\n\n if not provider.is_ip_allowed(ip):\n log.warn(\"Illegal call from %s\" % ip)\n raise Http404()\n\n log.info(\"Got request notification from %s\" % source)\n\n # extract message data\n try:\n msisdn, text, number = provider.get_primal_data(request.GET)\n log.debug(\"%s %s %s\" % (msisdn, text, number))\n except Exception, e:\n return HttpResponse(provider.handle_notification_error(e, request))\n\n log.debug(\"%s Request input: msisdn:%s, text:%s, number:%s\" % \\\n (source, msisdn, text, number))\n\n # collect purchase data, send success signal and say thanks to your\n # notification service\n la = provider.get_large_account(la_number = number, text = text)\n provider.dispatch_purchase(la = la, msisdn = msisdn, text = text)\n return HttpResponse(provider.NOTIFICATION_REPLY)", "def handle_message(self, message):", "def incoming(self, msg):\n hdr = msg.header\n\n # Signals:\n if hdr.message_type is MessageType.signal:\n key = (hdr.fields.get(HeaderFields.path, None),\n hdr.fields.get(HeaderFields.interface, None),\n hdr.fields.get(HeaderFields.member, None)\n )\n cb = self.signal_callbacks.get(key, None)\n if cb is not None:\n cb(msg.body)\n return\n\n # Method returns & errors\n reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)\n reply_handle = self.awaiting_reply.pop(reply_serial, None)\n if reply_handle is not None:\n if hdr.message_type is MessageType.method_return:\n reply_handle.set_result(msg.body)\n return\n elif hdr.message_type is MessageType.error:\n reply_handle.set_exception(DBusErrorResponse(msg))\n return\n\n if self.on_unhandled:\n self.on_unhandled(msg)", "def handle(self, message):", "def handleMessage(msg):" ]
[ "0.6835022", "0.60718656", "0.59854895", "0.59607", "0.5945775", "0.5932107", "0.5932107", "0.5917332", "0.5839464", "0.5817131", "0.5794533", "0.574719", "0.5722084", "0.5713493", "0.57134485", "0.5699287", "0.5698061", "0.56944233", "0.5692819", "0.5692426", "0.56885827", "0.5686601", "0.5684375", "0.56739146", "0.56652766", "0.56640935", "0.5655968", "0.5651607", "0.56513226", "0.5589333" ]
0.6912686
0
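For the inbound-message record above, a small sketch of how such a callback handler is typically mounted as a Flask route and smoke-tested locally. The payload shape (a JSON list whose items carry "type" and "message" with "from"/"to"/"text") is copied from the fields the record reads; the route path and phone numbers are illustrative assumptions.

# Sketch: expose an inbound-message webhook with Flask and exercise it with the test client.
import json

from flask import Flask, request

app = Flask(__name__)


@app.route("/callbacks/inbound", methods=["POST"])
def inbound_callback():
    events = json.loads(request.data)
    for event in events:
        if event.get("type") == "message-received":
            text = event["message"].get("text", "")
            print(f"inbound SMS from {event['message']['from']}: {text}")
        else:
            # Other event types are just logged, as the record does.
            print(event)
    return ""


if __name__ == "__main__":
    payload = [{"type": "message-received",
                "message": {"from": "+15550001111", "to": ["+15550002222"], "text": "hi"}}]
    with app.test_client() as client:
        client.post("/callbacks/inbound", data=json.dumps(payload))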
Formats |record| with color.
def format(self, record): msg = super(ColoredFormatter, self).format(record) color = self._COLOR_MAPPING.get(record.levelname) if self._use_colors and color: msg = '%s%s%s' % (color, msg, self._RESET) return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format(self, record):\n\n\t\t# Use copy.copy - c.f. https://stackoverflow.com/a/7961390\n\t\tcolored_record = copy.copy(record)\n\n\t\tcolor = None\n\t\ttry:\n\t\t\tcolor = record.color\n\t\texcept AttributeError as e:\n\t\t\tpass\n\n\t\tif color is not None:\n\t\t\tif color is None or not color or color == \"none\":\n\t\t\t\tpass\n\t\t\telif color == \"white\":\n\t\t\t\twhite = \"\\033[37m\"\n\t\t\t\tclear = \"\\033[0;0m\"\n\t\t\t\tcolored_record.msg = \"{0:s}{1:s}{2:s}\".format(\n\t\t\t\t\twhite,\n\t\t\t\t\tcolored_record.msg,\n\t\t\t\t\tclear,\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\traise WCMIError(\"error: ConsoleFilter: unrecognized color `{0:s}'.\".format(str(color)))\n\n\t\treturn super().format(colored_record)", "def colored_formatter(record):\n\n colours = {\n \"info\": (\"blue\", \"normal\"),\n \"debug\": (\"magenta\", \"normal\"),\n \"warning\": (\"yellow\", \"normal\"),\n \"print\": (\"green\", \"normal\"),\n \"error\": (\"red\", \"bold\"),\n }\n\n levelname = record.levelname.lower()\n\n if levelname == \"error\":\n return\n\n if levelname.lower() in colours:\n levelname_color = colours[levelname][0]\n header = color_text(\"[{}]: \".format(levelname.upper()), levelname_color)\n\n message = record.getMessage()\n\n if levelname == \"warning\":\n warning_category_groups = re.match(r\"^\\w*?(.+?Warning) (.*)\", message)\n if warning_category_groups is not None:\n warning_category, warning_text = warning_category_groups.groups()\n\n warning_category_colour = color_text(\n \"({})\".format(warning_category), \"cyan\"\n )\n message = \"{} {}\".format(\n color_text(warning_text, \"\"), warning_category_colour\n )\n\n sys.__stdout__.write(\"{}{}\\n\".format(header, message))\n sys.__stdout__.flush()\n\n return", "def format(self, record):\n\n level_colors = {\n 'DEBUG': strc('DEBUG', 'yellow', 'bold'),\n 'INFO': strc('INFO', 'blue', 'bold'),\n 'WARNING': strc('WARNING', 'yellow', 'bold'),\n 'ERROR': strc('ERROR', 'red', 'bold'),\n 'CRITICAL': strc('CRITICAL', 'red', 'bold')}\n\n if record.levelname in level_colors.keys():\n record.levelname = level_colors[record.levelname]\n record.name = strc(record.name, 'black', 'bold')\n\n return logging.Formatter.format(self, record)", "def update_format(self, record):\n prefix = \"\\u001b[\"\n color = f\"{prefix}{self.color_map[record.levelno]}m\"\n bold = f\"{prefix}1m\"\n gray = f\"{prefix}1m{prefix}30m\"\n reset = f\"{prefix}0m\"\n self._style._fmt = (\n f\"%(asctime)s\"\n f\" {gray}│{reset} {color}%(levelname)-8s{reset} {gray}│{reset} \"\n )\n if hasattr(record, \"function\"):\n self._style._fmt += (\n f\"{gray}%(indent)s{reset}\"\n f\"{bold}%(function)s{reset}{gray}:{reset}\"\n \" %(message)s\"\n )\n else:\n self._style._fmt += \"%(indent)s%(message)s\"", "def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n return BaseFormatter(log_fmt).format(record)", "def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s", "def format(self, record):\n msg = logging.Formatter.format(self, record)\n label, color = self.label(record)\n if self.strip:\n return \"{:10s}{}\".format(label, 
sub(\"\\033\\\\[[0-9]+m\", \"\", msg, 0))\n else:\n return \"\\033[1;{}m{:10s}\\033[0m{}\".format(color, label, msg)", "def _log_format_onecolor(record):\n\n return LEVEL_COLORS.get(record.levelname)", "def format(self, record):\n message = super(ConsoleFormatter, self).format(record)\n color_code = self.color(self.log_colors, record.levelname)\n if hasattr(record, 'ctx'):\n metadata = record.ctx.invocation_metadata()\n for item in metadata:\n if item.key == 'author_name':\n setattr(record, 'user', item.value)\n elif item.key == 'correlation_id':\n setattr(record, 'correlationId', item.value)\n\n for key, value in record.__dict__.items():\n #this allows to have numeric keys\n if (key not in RESERVED_ATTR_HASH\n and not (hasattr(key, \"startswith\")\n and key.startswith('_'))):\n message = append(color_code=color_code, message=message, key=key, value=value)\n return message", "def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result", "def get_color(self, record):\n if record.level >= logbook.ERROR:\n return 'red'\n elif record.level >= logbook.NOTICE:\n return 'yellow'\n elif record.level >= logbook.INFO:\n return 'green'\n elif record.level >= logbook.DEBUG:\n return 'darkblue'\n return 'lightgray'", "def format(self, record):\n\n scrubbed = record[\"message\"]\n # scrubs any messages that match the message pattern\n if isinstance(scrubbed, dict):\n scrubbed = json.dumps(scrubbed)\n for search, replace in self.scrub_patterns.items():\n scrubbed = re.sub(search, replace, scrubbed)\n record[\"extra\"][\"scrubbed\"] = scrubbed\n\n if not record[\"extra\"].get(\"device\") or record[\"extra\"].get(\"device\") is None:\n record[\"extra\"][\"device\"] = \"\"\n else:\n record[\"extra\"][\"device\"] = f\"{record['extra']['device']} - \"\n return self.fmt", "def format(self, record):\n mappings = {\n 'asctime': create_timestamp,\n 'message': lambda r: r.msg,\n }\n\n formatters = self.parse()\n\n log_record = {}\n for formatter in formatters:\n try:\n log_record[formatter] = mappings[formatter](record)\n except KeyError:\n log_record[formatter] = record.__dict__[formatter]\n\n return json.dumps(log_record)", "def get_color(self, record):\n if record.level == CRITICAL:\n return Fore.RED + Style.DIM\n elif record.level == ERROR:\n return Fore.RED + Style.BRIGHT\n elif record.level == WARNING:\n return Fore.YELLOW + Style.DIM\n elif record.level == NOTICE:\n return Fore.CYAN + Style.BRIGHT\n elif record.level == DEBUG:\n return Fore.GREEN + Style.BRIGHT\n return Fore.WHITE", "def format(self, 
record):\n extra = {\n \"message\": record.getMessage(),\n \"time\": self.formatTime(record, self.datefmt),\n \"msecs\": record.msecs,\n \"name\": record.name,\n \"level\": record.levelname,\n }\n\n keys = filter(self.filterer, record.__dict__)\n extra.update({k: record.__dict__[k] for k in keys})\n return str(CustomEncoder().encode(extra))", "def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def color_domain_record_cells(val):\n if isinstance(val, int):\n color = \"yellow\" if val < 3 else None\n elif isinstance(val, float):\n color = \"yellow\" if val > 4.30891 or val < 2.72120 else None\n else:\n color = None\n return f\"background-color: {color}\"", "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields, self.REDACTION,\n super().format(record), self.SEPARATOR)", "def format(self, record: LogRecord) -> str:\n json_record: Dict = self.json_record(record.getMessage(), record)\n mutated_record: Dict = self.mutate_json_record(json_record)\n mutated_record = mutated_record if mutated_record is not None else json_record\n\n return self.to_json(mutated_record)", "def format(self, record):\n # Standard document\n document = {\n 'created_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),\n 'level': record.levelname,\n 'thread': record.thread,\n 'threadName': record.threadName,\n 'message': record.getMessage(),\n 'loggerName': record.name,\n 'fileName': record.pathname,\n 'module': record.module,\n 'method': record.funcName,\n 'lineNumber': record.lineno,\n 'hostname': socket.getfqdn(socket.gethostname()),\n 'ip': socket.gethostbyname(socket.gethostname())\n }\n # Standard document decorated with exception info\n if record.exc_info is not None:\n document.update({\n 'exception': {\n 'message': str(record.exc_info[1]),\n 'code': 0,\n 'stackTrace': self.formatException(record.exc_info)\n }\n })\n # Standard document decorated with extra contextual information\n if len(self.DEFAULT_PROPERTIES) != len(record.__dict__):\n contextual_extra = set(record.__dict__).difference(set(self.DEFAULT_PROPERTIES))\n if contextual_extra:\n for key in contextual_extra:\n document[key] = record.__dict__[key]\n return document", "def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s", "def formatter(record):\n\n lines = record[\"message\"].splitlines()\n prefix = (\n \"{time:YY-MM-DD HH:mm:ss.S} | {level.name:<8} | \"\n + \"{file}.{function}:{line} - \".format(**record)\n )\n indented = (\n lines[0] + \"\\n\" + \"\\n\".join(\" \" * len(prefix) + line for line in lines[1:])\n )\n record[\"message\"] = indented.strip()\n return (\n \"<g>{time:YY-MM-DD HH:mm:ss.S}</> | <lvl>{level.name:<8}</> | \"\n + \"<e>{file}.{function}:{line}</> - <lvl>{message}\\n</>{exception}\"\n )", "def 
format(self, record):\n return '[{}] {}'.format(QBShFormatter.LEVEL_DICT[record.levelname], record.getMessage())", "def emit(self, record):\n # Need to make a actual copy of the record\n # to prevent altering the message for other loggers\n myrecord = copy.copy(record)\n levelno = myrecord.levelno\n if levelno >= 50: # CRITICAL / FATAL\n front = '\\033[30;41m' # black/red\n elif levelno >= 40: # ERROR\n front = '\\033[30;41m' # black/red\n elif levelno >= 30: # WARNING\n front = '\\033[30;43m' # black/yellow\n elif levelno >= 20: # INFO\n front = '\\033[30;42m' # black/green\n elif levelno >= 10: # DEBUG\n front = '\\033[30;46m' # black/cyan\n else: # NOTSET and anything else\n front = '\\033[0m' # normal\n\n myrecord.levelname = '%s%s\\033[0m' % (front, myrecord.levelname)\n logging.StreamHandler.emit(self, myrecord)", "def format(self, record):\n data = record.__dict__.copy()\n\n # if record.args:\n # msg = record.msg % record.args\n # else:\n # msg = record.msg\n\n data.update(\n username=getpass.getuser(),\n time=datetime.now(),\n host=gethostname(),\n #args=tuple(unicode(arg) for arg in record.args)\n args=record.args\n )\n if 'exc_info' in data and data['exc_info']:\n data['exc_info'] = self.formatException(data['exc_info'])\n return data", "def get_formatted_record(self, record_format: str = None) -> str:\n if record_format:\n return record_format.format_map(defaultdict(str, **self.dict_values))\n raise RecordFormatError(\"Format string must be set\")", "def default_format(data, color):\n if color:\n out = '\\n\\n\\nTitle: {0}\\nDate: {1}\\nLink: {2}\\n\\nImages links: {3}'.format(\n colored(data['title'], 'green'),\n data['pubDate'],\n colored(data['link'], 'blue'),\n colored(data['media'], 'blue'),)\n else:\n out = '\\n\\n\\nTitle: {0}\\nDate: {1}\\nLink: {2}\\n\\nImages links: {3}'.format(\n data['title'],\n data['pubDate'],\n data['link'],\n data['media'],)\n return out", "def format(self, record):\n data = dict()\n\n data[\"category\"] = record.name\n data[\"timestamp\"] = datetime.datetime.utcnow()\\\n .replace(tzinfo=utc)\\\n .strftime('%Y-%m-%dT%H:%M:%SZ')\n data[\"level\"] = record.levelname\n data[\"message\"] = record.msg\n data[\"threadName\"] = record.threadName\n data[\"hostname\"] = self.hostname\n \n return data", "def format(self, record):\n message = {\n \"time\": datetime.utcfromtimestamp(record.created).isoformat(),\n \"level\": record.levelname,\n \"name\": record.name,\n \"message\": record.getMessage(),\n \"process\": record.process,\n \"thread\": record.threadName,\n \"hostname\": self.hostname,\n \"filename\": record.filename,\n \"function\": record.funcName,\n \"lineNo\": record.lineno,\n }\n\n if record.exc_info:\n message[\n \"exception\"\n ] = f\"{record.exc_info[0].__name__}: {record.exc_info[1]}\"\n message[\"traceback\"] = traceback.format_exc()\n\n return json.dumps(message, ensure_ascii=False)", "def transform(self, src_record):\n src_record.colon = ':'\n src_record.space = ' '\n src_record.sep = ' - '\n src_record.prefix = ''\n return \\\n (self.color_levelname \\\n (skip_repeat_line1 \\\n (src_record)))" ]
[ "0.8311548", "0.7502058", "0.74881405", "0.7452882", "0.7222441", "0.7179237", "0.7129312", "0.70246935", "0.6647282", "0.6643303", "0.6622566", "0.6554629", "0.6488578", "0.64701414", "0.64188516", "0.6313016", "0.6285745", "0.61440796", "0.59593135", "0.59322673", "0.5924928", "0.5848499", "0.5826485", "0.5812966", "0.58063215", "0.5797361", "0.57933277", "0.57707405", "0.57416284", "0.5711762" ]
0.76215637
1
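A short usage sketch for the colored-log-record idea in the record above: a level-to-ANSI-color formatter attached to a StreamHandler. This is not the record's exact class (its constructor and color table are not shown); the color codes and mapping below are illustrative assumptions.

# Sketch: wrap the formatted message in an ANSI color chosen by log level.
import logging


class AnsiColorFormatter(logging.Formatter):
    _RESET = "\033[0m"
    _COLORS = {"WARNING": "\033[33m", "ERROR": "\033[31m", "CRITICAL": "\033[41m"}

    def format(self, record):
        msg = super().format(record)
        color = self._COLORS.get(record.levelname)
        return f"{color}{msg}{self._RESET}" if color else msg


handler = logging.StreamHandler()
handler.setFormatter(AnsiColorFormatter("%(levelname)s: %(message)s"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.error("colored on ANSI-capable terminals")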
Always symlink |path| to a relativized |target|.
def symlink(target, path): unlink(path) path = os.path.realpath(path) target = os.path.relpath(os.path.realpath(target), os.path.dirname(path)) logging.info('Symlinking %s -> %s', path, target) os.symlink(target, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symlink(path, v=False):\r\n if not os.path.exists(path):\r\n err(path + ' : no such file or directory')\r\n elif not os.path.isdir(path):\r\n err(path + ' : not a directory')\r\n else:\r\n theme_name = os.path.basename(os.path.normpath(path))\r\n theme_path = os.path.join(_THEMES_PATH, theme_name)\r\n if os.path.exists(theme_path):\r\n err(path + ' : already exists')\r\n else:\r\n if v:\r\n print(\"Linking `{p}' to `{t}' ...\".format(p=path, t=theme_path))\r\n try:\r\n os.symlink(path, theme_path)\r\n except Exception as e:\r\n err(\"Cannot link `{p}' to `{t}':\\n{e}\".format(p=path, t=theme_path, e=str(e)))", "def force_symlink(target_path, link_location):\n\n pardir = os.path.dirname(link_location)\n if not os.path.exists(pardir):\n os.makedirs(pardir)\n\n if os.path.lexists(link_location):\n assert os.path.islink(link_location), \\\n \"The path {} exists but is not a symlink\".format(link_location)\n if os.readlink(link_location) != target_path:\n os.remove(link_location)\n os.symlink(target_path, link_location)\n else:\n os.symlink(target_path, link_location)", "def symlink(source, target):\n source, target = map(os.path.expanduser, (source, target))\n print(\"Will symlink %s to %s\" % (source, target))\n\n if os.path.exists(target):\n if os.path.islink(target) and os.path.realpath(target) == source:\n logging.info(\"%s exists\" % target)\n return\n\n backup = target + \".old\"\n\n if os.path.exists(backup):\n raise Exception(\"Can't backup to %s: file already exists.\" % backup)\n\n shutil.move(target, backup)\n\n else:\n os.symlink(source, target)\n logging.info(\"%s symlinked to %s\" % (source, target))", "def symlink(source, target, use_sudo=True):\n\n # Some older versions of Fabric do not have the is_link method \n try:\n from fabric.contrib.files import is_link\n is_a_link = is_link(target)\n except ImportError:\n with settings(hide(\"everything\"), warn_only=True):\n if run(\"test -L \"+target).failed:\n\t is_a_link = False\n else:\n is_a_link = True\n\n if not is_a_link:\n cmd = \"ln -s \"+source+\" \"+target\n if use_sudo:\n sudo(cmd)\n else:\n run(cmd)", "def force_symlink(target, name):\n makedirs(os.path.dirname(name))\n try:\n os.symlink(target, name)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(name)\n os.symlink(target, name)", "def symlink(origin, target):\n # Skip anything in the home directory if the user is admin\n if user_is_admin() and not args.root and check_contain_home_dir(target):\n print(highlight_colour(\"'%s'\") % str(target) +\n warning_colour(\" is inside of home folder. 
Skipping...\"))\n raise StopTraversing(\"Skipping.\")\n\n # Check for a broken symlink, if true: prompt for replacement.\n # This is done to avoid having any broken symlinks lingering.\n if is_broken_symlink(target):\n if args.yes or prompt(origin, target, \"remove\"):\n target.unlink()\n else:\n return\n\n if args.replace:\n replace_symlink(origin, target)\n elif args.remove:\n remove_symlink(origin, target)\n else:\n create_symlink(origin, target)", "def ln(src, dst):\n os.symlink(src, dst)", "def attempt_symlink_to(path: str, to_path: str) -> None:\n try:\n Path(path).symlink_to(Path(to_path))\n except OSError:\n pytest.skip(\"could not create symbolic link\")", "def symlink_target(pth):\n\n if os.path.islink(pth):\n return os.readlink(pth)\n return pth", "def absolute_symlink(\n source_path: str,\n dest_path: str\n):\n os.symlink(os.path.abspath(source_path), dest_path)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def link(target, link_name):\n src = os.path.abspath(target)\n dst = os.path.abspath(link_name)\n os.symlink(src, dst)", "def symlink(self, src, dst):\n return os.symlink(src, dst)", "def create_symbolic_link(file, target):\n try:\n os.symlink(file, target)\n except NotImplementedError:\n logger.critical(\"Symbolic links not supported on this platform\")\n raise\n except OSError:\n logger.critical(\"Not sufficient permissions\")\n raise", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: \"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) )\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )", "def link(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n full_destination_path = os.path.join(\n os.path.expandvars(self.path_destination), self.name\n )\n\n try:\n if self.sudo:\n spawn.process(\n f'ln -sfv \"{full_source_path}\" \"{full_destination_path}\"',\n sudo=True,\n )\n else:\n os.symlink(full_source_path, full_destination_path)\n except FileExistsError:\n message.error(\n \"Can't symlink, file already exists at destination. 
Attempting fix.\"\n )\n os.remove(full_destination_path)\n message.info(f\"Removed: '{full_destination_path}'\")\n os.symlink(full_source_path, full_destination_path)\n finally:\n message.info(\n f\"Symlink created: '{full_source_path}' <--> '{full_destination_path}'\"\n )\n else:\n message.error(\n f\"'{self.name}' has no source from which to create a link from.\"\n )", "def update_link(self):\n try:\n relpath = os.path.relpath(self.path, os.path.dirname(self.link_path))\n os.symlink(relpath, self.link_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.unlink(self.link_path)\n os.symlink(self.path, self.link_path)", "def _symlink(conf, devname, label, remove=False):\n return\n\n linkpath = conf.get('symlink')\n if linkpath:\n linkpath = expanduser(linkpath)\n if lexists(linkpath):\n os.unlink(linkpath)\n if not remove:\n # TODO: handle path errors\n os.symlink(get_mount_target(devname, label), linkpath)", "def _symlink(source, link_name):\n flags = 0\n\n if source is not None and os.path.isdir(source):\n flags = 1\n\n CreateSymbolicLinkW(link_name, source, flags)", "def force_symlink(src, dst):\n try:\n os.unlink(dst)\n os.symlink(src, dst)\n except OSError:\n os.symlink(src, dst)", "def create_symlink(src, dest):\n sudo('ln -s {} {}'.format(src, dest))", "def fix_link(hook, target_link):\n if os.path.exists(hook):\n os.unlink(hook)\n os.symlink(target_link, hook)", "def relink(path, Arg = (None, True, False)):\n if not os.path.islink(path): return\n\n exps = Arg[0]\n debuginfo = Arg[1]\n v = Arg[2]\n\n path = os.path.normpath(path)\n s = os.readlink(path)\n snorm = os.path.normpath(s)\n p = os.path.join(PROJ_SRC, path)\n hatpath = os.path.join(PROJ_HAT, path)\n\n if snorm.startswith(PROJ_SRC + os.sep):\n srcpath = snorm[len(PROJ_SRC + os.sep):]\n\n pathl = path.split(os.sep)\n srcpathl = srcpath.split(os.sep)\n head = commonhead(pathl, srcpathl)\n\n if len(pathl) > len(head) + 1 or \\\n len(pathl) == len(head) + 1 and len(srcpathl) > len(head):\n # pathl: o o o a b # pathl: o o o a\n # srcpathl: o o o c d e # srcpathl: o o o c d e\n # head: o o o or # head: o o o\n # --------------------- # ---------------------\n # src: ../c/d/e # src: c/d/e\n srcl = [os.pardir for i in xrange(len(pathl) - 1 - len(head))] + srcpathl[len(head):]\n src = os.path.join(*srcl)\n elif len(pathl) == len(head) + 1 and len(srcpathl) == len(head):\n # pathl: o o o a\n # srcpathl: o o o\n # head: o o o\n # ---------------------\n # src: .\n src = os.curdir\n if v: print >> sys.stderr, 'detected symlink to current directory', `hatpath`, '->', `src`\n elif len(pathl) == len(head):\n src = os.path.join(*srcpathl[len(head) - 1:])\n if len(srcpathl) == len(head):\n # pathl: o o a\n # srcpathl: o o a\n # ---------------------\n # src: a\n if v: print >> sys.stderr, 'detected symlink to itself', `hatpath`, '->', `src`\n else:\n # pathl: o o a\n # srcpathl: o o a c\n # ---------------------\n # src: a/c\n if v: print >> sys.stderr, 'detected too many levels of symlinks', `hatpath`, '->', `src`\n else:\n print >> sys.stderr, 'detected UNFORESEEN', `path`, '->', `srcpath`\n return\n\n _srcpath = os.path.normpath(os.path.join(os.path.dirname(path), src))\n assert srcpath == _srcpath, '%s:\\n%s not equal to %s' % (path, `srcpath`, `_srcpath`)\n\n os.remove(path)\n if os.path.isfile(srcpath) or os.path.isdir(srcpath):\n try:\n os.symlink(src, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot symlink %s -> %s: %s' % (`hatpath`, `src`, str(why))\n else:\n if v: print 'symlinked', `hatpath`, 
'->', `src`\n else:\n if os.path.isfile(s):\n print >> sys.stderr, 'missing:', hatpath, '->', src\n try:\n shutil.copy2(s, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot copy %s -> %s: %s' % (`s`, `hatpath`, str(why))\n else:\n if v: print >> sys.stderr, 'copied', `s`, '->', `hatpath`\n elif os.path.isdir(s):\n print >> sys.stderr, 'missing:', hatpath, '->', src\n try:\n os.makedirs(srcpath)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot create directory %s: %s' % (`os.path.join(PROJ_HAT, srcpath)`, str(why))\n else:\n if v: print >> sys.stderr, 'created directory', `os.path.join(PROJ_HAT, srcpath)`\n try:\n os.symlink(src, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot symlink %s -> %s: %s' % (`hatpath`, `src`, str(why))\n else:\n if v: print 'symlinked', `hatpath`, '->', `src`\n else:\n print >> sys.stderr, 'dangling:', p, '->', s\n if v: print >> sys.stderr, 'removed', `hatpath`\n# elif os.path.normpath(os.path.join(os.path.dirname(p), s)).startswith(PROJ_SRC + os.sep):\n else:\n srcpath = os.path.normpath(os.path.join(os.path.dirname(p), s))\n# os.path.normpath(os.path.join(os.path.dirname(p), s)).startswith(PROJ_SRC + os.sep):\n if srcpath.startswith(PROJ_SRC + os.sep):\n if os.path.isfile(path) or os.path.isdir(path):\n if v: print 'relative:', hatpath, '->', s\n else:\n if os.path.isfile(p) or os.path.isdir(p):\n print >> sys.stderr, 'missing:', hatpath, '->', s\n else:\n print >> sys.stderr, 'dangling:', p, '->', s\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`\n else:\n if os.path.isfile(p) or os.path.isdir(p):\n if exps:\n dst = exps.destination(srcpath)\n if dst:\n os.remove(path)\n if not dst[1] or debuginfo:\n # if not dst[1] or DEBUGINFO == 'yes' or MODE == 'dbg':\n upl = [os.pardir for i in xrange(len(hatpath.split(os.sep)) - 1)]\n src = os.path.join(os.path.join(*upl), dst[0])\n try:\n os.symlink(src, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot symlink %s -> %s: %s' % (`hatpath`, `src`, str(why))\n else:\n if v: print 'symlinked', `hatpath`, '->', `src`\n else:\n print 'debuginfo:', hatpath, '->', s\n if v: print 'removed', `hatpath`\n else:\n print >> sys.stderr, 'not_exported:', srcpath\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`, '->', `s`\n else:\n print >> sys.stderr, 'external:', hatpath, '->', s\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`\n else:\n print >> sys.stderr, 'dangling:', p, '->', s\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`", "def symlink_p(src, dst):\n try:\n os.symlink(src, dst)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.islink(dst):\n if os.path.realpath(dst) == os.path.realpath(src):\n pass\n else:\n print('%s is a link already pointing to %s' % (dst, os.path.realpath(dst)), file=sys.stderr)\n else:\n raise", "def symlink(self, name, source, linkname):\n self._assert_absolute_path_or_placeholder(source)\n self._assert_absolute_path_or_placeholder(linkname)\n self._run(name, ['symlink', source, linkname])\n self.m.path.mock_copy_paths(source, linkname)", "def symlink_force(target: str, link_name: str):\n\n # os.replace() may fail if files are on different filesystems\n link_dir = os.path.dirname(link_name)\n\n while True:\n temp_link_name = tempfile.mktemp(dir=link_dir)\n try:\n os.symlink(target, temp_link_name)\n break\n except FileExistsError:\n pass\n try:\n os.replace(temp_link_name, link_name)\n except OSError: # e.g. 
permission denied\n os.remove(temp_link_name)\n raise", "def symlink_force(source, link_name):\n try:\n os.symlink(source, link_name)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(link_name)\n os.symlink(source, link_name)", "def _follow_symlinks(filepath):\n filepath = os.path.abspath(filepath)\n while os.path.islink(filepath):\n filepath = os.path.normpath(\n os.path.join(os.path.dirname(filepath), os.readlink(filepath)))\n return filepath" ]
[ "0.7404254", "0.7402173", "0.7312787", "0.72121716", "0.7204278", "0.71373194", "0.71331364", "0.71290386", "0.71224445", "0.70757365", "0.69786334", "0.69786334", "0.69290304", "0.68298745", "0.6800475", "0.6755648", "0.67269856", "0.67166317", "0.6690445", "0.66393733", "0.65924454", "0.6582094", "0.65082896", "0.6503687", "0.6480789", "0.6456912", "0.6438272", "0.6430119", "0.64076376", "0.6402103" ]
0.8573706
0
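An illustrative, POSIX-style sketch of the relativized-symlink pattern from the record above: the link stored on disk points at its target via a relative path, so the link and target can be relocated together without breaking. The directory and file names are hypothetical; on Windows, creating symlinks may additionally require elevated privileges or developer mode.

# Sketch: create (or replace) a symlink whose stored target is relative to the link's directory.
import os


def relative_symlink(target: str, link_path: str) -> None:
    link_path = os.path.realpath(link_path)
    rel_target = os.path.relpath(os.path.realpath(target), os.path.dirname(link_path))
    if os.path.lexists(link_path):
        os.unlink(link_path)  # replace an existing link or file, as the record does
    os.symlink(rel_target, link_path)


if __name__ == "__main__":
    os.makedirs("build/out", exist_ok=True)
    open("build/out/tool", "w").close()
    relative_symlink("build/out/tool", "build/tool")
    print(os.readlink("build/tool"))  # prints the relative target: "out/tool"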
Return sha256 hex digest of |path|.
def sha256(path: Union[Path, str]) -> str: # The file shouldn't be too big to load into memory, so be lazy. with open(path, 'rb') as fp: data = fp.read() m = hashlib.sha256() m.update(data) return m.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_hash(path: Path) -> str:\n m = hashlib.sha256()\n m.update(path.read_bytes())\n return m.hexdigest()", "def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()", "def hash_of_file(path):\n with open(path, 'rb') as archive:\n sha = sha256()\n while True:\n data = archive.read(2 ** 20)\n if not data:\n break\n sha.update(data)\n return encoded_hash(sha)", "def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()", "def GetFileSha256(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha256=True)['sha256'])", "def _get_file_sha256_hash(file_path):\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(file_path, \"rb\") as f:\n while True:\n buffer = f.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()", "def compute_digest(path):\n hash = hashlib.sha512()\n for part in DiskCrawler.partial_reader(path, 4 * 1024 * 1024):\n hash.update(part)\n return hash.digest()", "def sha256(self):\n return sha256file(self.abspath)", "def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()", "def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def file_digest(path, algo=hashlib.md5):\n checksum = algo()\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n checksum.update(chunk)\n return checksum.hexdigest()", "def hash_file_sha256(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha256, binary=binary, buffer_size=buffer_size)", "def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()", "def sha256(f: IO[str]) -> str:\n pos = f.tell()\n f.seek(0)\n digest = hashlib.sha256(f.read().encode()).hexdigest()\n f.seek(pos)\n\n return digest", "def hashFile(path: str) -> str:\n\tif not os.path.exists(path):\n\t\traise FileNotFoundError\n\n\thasher = hashlib.sha1()\n\tblock_sz = 8192\n\twith open(path, 'rb') as f:\n\t\tbuf = f.read(block_sz)\n\t\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = f.read(block_sz)\n\treturn str(hasher.hexdigest())", "def hash_file(path, digest=None):\r\n digest = digest or hashlib.sha1()\r\n with open(path, 'rb') as fd:\r\n s = fd.read(8192)\r\n while s:\r\n digest.update(s)\r\n s = fd.read(8192)\r\n return digest.hexdigest()", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)", "def symlink_hash(path):\n hasher = sha1()\n data = path_to_bytes(os.readlink(path))\n hasher.update(('blob %u\\0' % len(data)).encode('ascii'))\n hasher.update(data)\n return hasher", "def file_hash(filepath: Path):\n hsh = hashlib.sha256()\n b = bytearray(128 * 1024)\n mv = memoryview(b)\n with Path(filepath).open(\"rb\", buffering=0) as f:\n for n in iter(lambda: f.readinto(mv), 0):\n hsh.update(mv[:n])\n return hsh.hexdigest()", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def get_checksum(file_path: str) -> str:\n\n # Open the file in binary mode\n with open(file_path, \"rb\") as file:\n # Create a SHA-256 hash object\n 
hash_object = hashlib.sha256()\n\n # Iterate over the file in chunks\n for chunk in iter(lambda: file.read(4096), b\"\"):\n # Feed the chunk to the hash object\n hash_object.update(chunk)\n\n # Obtain the checksum in hexadecimal format\n checksum = hash_object.hexdigest()\n\n return checksum", "def _Hash(content: bytes) -> str:\n return hashlib.sha256(content).hexdigest()", "def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def _asset_hash(path: str) -> str:\n full_path = THEME_PATH / \"static\" / path\n digest = hashlib.sha1(full_path.read_bytes()).hexdigest()\n\n return f\"_static/{path}?digest={digest}\"", "def hash_of(self, arcpath) -> str:\n return self._records[arcpath].hash", "def get_file_sha256(fname):\n with open(fname, 'rb') as afile:\n return base64.b64encode(get_file_hash(afile, hashlib.sha256()))", "def generate_sha256_hash(fpath, sig_key=None):\n return run(fpath, sig_key)", "def sha256(s: str) -> str:\n return hashlib.sha256(s.encode()).hexdigest()" ]
[ "0.84448147", "0.7994854", "0.7628011", "0.7436823", "0.73758745", "0.72930205", "0.72381604", "0.7233479", "0.72159195", "0.70954305", "0.70228964", "0.69657105", "0.695293", "0.6945403", "0.69420445", "0.6941568", "0.6924218", "0.68792206", "0.68724394", "0.68499297", "0.68373364", "0.6779685", "0.6776504", "0.6775331", "0.6764068", "0.6713469", "0.6618318", "0.66041803", "0.6601849", "0.6572953" ]
0.83290803
1
Unpack |archive| into |cwd|.
def unpack(archive: Union[Path, str], cwd: Optional[Path] = None, files: Optional[List[Union[Path, str]]] = ()): archive = Path(archive) if cwd is None: cwd = Path.cwd() if files: files = ['--'] + list(files) else: files = [] # Try to make symlink usage easier in Windows. extra_env = { 'MSYS': 'winsymlinks:nativestrict', } logging.info('Unpacking %s', archive.name) # We use relpath here to help out tar on platforms where it doesn't like # paths with colons in them (e.g. Windows). We have to construct the full # before running through relpath as relative archives will implicitly be # checked against os.getcwd rather than the explicit cwd. src = os.path.relpath(cwd / archive, cwd) run(['tar', '--no-same-owner', '-xf', src] + files, cwd=cwd, extra_env=extra_env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unpack_archive(self):\n with zipfile.ZipFile(self._archive_full_path, 'r') as zip_ref:\n zip_ref.extractall(self._storage_path)\n\n _logger.debug('Archive has been unpacked.')", "def unpack_dir(indir, outdir, bands=None, clouds=None):\r\n archives = glob.glob(indir + '*.tar.gz')\r\n count = len(archives)\r\n for idx, archive in enumerate(archives):\r\n # Determine the outpath directory name for the unpacked landsat archive\r\n unpackDir = outdir + os.path.splitext(os.path.split(\r\n os.path.splitext(archive)[0])[1])[0]\r\n\r\n # Check if the directory already exists and make it if it doesn't\r\n if not os.path.exists(unpackDir):\r\n os.makedirs(unpackDir)\r\n\r\n # Unpack the current archive.\r\n unpack_landsat(archive, unpackDir, bands=bands,clouds=clouds)\r\n\r\n # Let the user know how progress is going.\r\n print(archive + ' unpacked (' + str(idx + 1) + ' of ' + str(count) + ')')", "def unpack_archive(self, archive_name):\n archive = zipfile.ZipFile(\n os.path.join(\n self.current_path,\n os.path.split(self.exe_file)[0],\n archive_name\n )\n )\n\n self.extraction_path = os.getcwd()\n\n archive.extractall(self.extraction_path)\n\n self.rename_main_script()\n\n archive_pyc_files = []\n\n for path, dirs, files in os.walk(self.extraction_path):\n for f in files:\n archive_pyc_files.append(os.path.join(path, f))\n\n return archive_pyc_files", "def _unzip_archive(archive_path, target_directory, source_path=None, **_):\n\n # Create a temporary directory.\n # Create a zip archive object.\n # Extract the object.\n ctx.logger.debug('Unzipping {src} to {dst}.'.format(\n src=archive_path, dst=target_directory))\n\n src = unzip_archive(archive_path, skip_parent_directory=False)\n copy_directory(src, target_directory)\n remove_dir(src)\n return target_directory", "def unpack(backend_name, archive_id):\n backend = get_backend(backend_name)\n click.echo(f\"Retrieving archive {archive_id}\")\n backend.archive_retrieve(config.root_path, archive_id)", "def _unpack_archive(self, dir, filters):\n ext = os.path.splitext(self.path)[1]\n if ext in [\".zip\", \".xpi\"]:\n if filters:\n raise GbpError(\"Can only filter tar archives: %s\", (ext, self.path))\n self._unpack_zip(dir)\n else:\n self._unpack_tar(dir, filters)", "def restore(self, archive):\n logger.info(\"Restoring an old archive run from {}\".format(archive))\n if os.path.isabs(archive):\n restorefile = archive\n else:\n restorefile = os.path.join(self.containerpath, const.ARCHIVEDIR, archive)\n with ignored(OSError):\n shutil.rmtree(os.path.join(self.rundir))\n with tarfile.open(restorefile, \"r:gz\") as f:\n def is_within_directory(directory, target):\n \n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n \n prefix = os.path.commonprefix([abs_directory, abs_target])\n \n return prefix == abs_directory\n \n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n \n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n \n tar.extractall(path, members, numeric_owner=numeric_owner) \n \n \n safe_extract(f, self.rundir)\n self._refreshconfig()", "def extract_one(self, archive: Path, dest: Path):\n if dest.exists():\n shutil.rmtree(dest)\n\n dest.mkdir(parents=True)\n\n if self.should_use_libarchive_c:\n import libarchive\n\n old_cwd = os.getcwd()\n os.chdir(str(dest))\n try:\n libarchive.extract_file(str(archive))\n finally:\n os.chdir(old_cwd)\n return\n\n 
if archive.name.endswith(EXTENSION_ZIP):\n with zipfile.ZipFile(archive) as zf:\n zf.extractall(dest)\n elif archive.name.endswith(EXTENSION_TAR):\n mode = \"r:bz2\" if archive.name.endswith(\".bz2\") else \"r:gz\"\n with tarfile.open(archive, mode) as tf:\n self.safe_extract_all(tf, dest)\n else:\n raise ValueError(f\"Unrecognized archive format {archive.name}\")\n\n for path in [dest, *dest.rglob(\"*\")]:\n path.chmod(MOD_DIRECTORY if path.is_dir() else MOD_FILE)", "def unpackToProject(self,archive,project,progress=None):\n progress = progress or bolt.Progress()\n files = self.sortFiles([x[0] for x in self.fileSizeCrcs])\n if not files: return 0\n #--Clear Project\n destDir = dirs['installers'].join(project)\n if destDir.exists(): destDir.rmtree(safety='Installers')\n #--Extract\n progress(0,project.s+_(\"\\nExtracting files...\"))\n self.unpackToTemp(archive,files,SubProgress(progress,0,0.9))\n #--Move\n progress(0.9,project.s+_(\"\\nMoving files...\"))\n count = 0\n tempDir = self.tempDir\n for file in files:\n srcFull = tempDir.join(file)\n destFull = destDir.join(file)\n if srcFull.exists():\n srcFull.moveTo(destFull)\n count += 1\n self.clearTemp()\n return count", "def unpack_archive(\n filepath: types.PathLike, *, extract_dir: Optional[types.PathLike] = None\n) -> types.PathLike:\n filepath = utils.to_path(filepath).resolve()\n if not extract_dir:\n extract_dir = str(filepath.parent)\n filepath = str(filepath)\n os.makedirs(extract_dir, exist_ok=True)\n is_zipfile = zipfile.is_zipfile(filepath)\n is_tarfile = tarfile.is_tarfile(filepath)\n if not is_zipfile and not is_tarfile:\n LOGGER.debug(\"'%s' is not an archive\", filepath)\n return extract_dir\n else:\n LOGGER.info(\"extracting data from archive file '%s'\", filepath)\n shutil.unpack_archive(filepath, extract_dir=extract_dir, format=None)\n # we want to rename the unpacked directory to a consistent value\n # unfortunately, shutil doesn't pass this back to us\n # so, we get the root path of all the constituent members\n if is_zipfile:\n with zipfile.ZipFile(filepath, mode=\"r\") as zf:\n members = zf.namelist()\n else:\n with tarfile.open(filepath, mode=\"r\") as tf:\n members = tf.getnames()\n src_basename = os.path.commonpath(members)\n dest_basename = os.path.basename(filepath)\n if src_basename:\n while True:\n tmp, _ = os.path.splitext(dest_basename)\n if tmp == dest_basename:\n break\n else:\n dest_basename = tmp\n if src_basename != dest_basename:\n return shutil.move(\n os.path.join(extract_dir, src_basename),\n os.path.join(extract_dir, dest_basename),\n )\n else:\n return os.path.join(extract_dir, src_basename)\n else:\n return extract_dir", "def unzip_archive(archive):\n tmpdir = os.path.join(tempfile.gettempdir(),\n os.path.basename(archive))\n assert tmpdir != archive # That wouldn't work out\n\n if os.path.exists(tmpdir):\n # files are already extracted\n pass\n else:\n if tarfile.is_tarfile(archive):\n print 'Extracting tarfile ...'\n with tarfile.open(archive) as tf:\n tf.extractall(path=tmpdir)\n elif zipfile.is_zipfile(archive):\n print 'Extracting zipfile ...'\n with zipfile.ZipFile(archive) as zf:\n zf.extractall(path=tmpdir)\n else:\n raise ValueError('Unknown file type for %s' % os.path.basename(archive))\n return tmpdir", "def untar(archive):\n log.info('Unpacking archive \"%s\".' 
% archive)\n tar = module.params['tar']\n tar_extra_options = shlex.split(module.params['tar_extra_options'])\n if not tar:\n tar = module.get_bin_path('tar', required=True)\n if archive.endswith('.gz'):\n uncompress = 'z'\n elif archive.endswith('.bz2'):\n uncompress = 'j'\n else:\n raise ValueError('Unsupported compression type: %s' % archive)\n options = ''.join(['x', uncompress, 'f'])\n args = [tar, options] + tar_extra_options + [archive]\n rc, out, err = module.run_command(args)\n log.info('untar: rc=%d out=%s err=%s', rc, out, err)\n if rc != 0:\n raise ValueError('tar command failed: %d' % rc)", "def unpack(input_filename, extract_dir):\n if not is_archive_file(input_filename):\n raise AttributeError(\"Input_filename must be an archive (ex: .tar.gz, .zip)\")\n if zipfile.is_zipfile(input_filename):\n unzip(input_filename, extract_dir)\n else:\n untar(input_filename, extract_dir)", "def unpack(tarball, dst, verbose=False, match=None):\n print(\"extracting\", tarball)\n fname = os.path.basename(tarball).replace(\".tar.gz\", \"\")\n with contextlib.closing(tarfile.open(tarball)) as tar:\n for member in tar.getnames():\n if \"/\" not in member:\n continue\n name = member.replace(fname + \"/\", \"\", 1)\n if match is not None and not name.startswith(match):\n continue\n name = name[len(match) + 1:]\n\n dst_path = os.path.join(dst, name)\n if verbose:\n print(\" extracting\", member)\n tar.extract(member, dst)\n src_path = os.path.join(dst, member)\n if os.path.isdir(src_path) and os.path.exists(dst_path):\n continue\n shutil.move(src_path, dst_path)\n shutil.rmtree(os.path.join(dst, fname))", "def unzip_and_untar(item):\n print(\"Unpacking %s\" % item)\n\n f = tarfile.open(item, mode=\"r\")\n f.extractall(path=\"working\")\n f.close()", "def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))", "def _unpack_from_actor(pack_actor: ray.ActorID, target_dir: str) -> None:\n stream = io.BytesIO()\n for buffer in _iter_remote(pack_actor):\n stream.write(buffer)\n _unpack_dir(stream, target_dir=target_dir)", "async def unarchive_dir(\n archive_to_extract: Path,\n destination_folder: Path,\n *,\n max_workers: int = _MAX_UNARCHIVING_WORKER_COUNT,\n progress_bar: ProgressBarData | None = None,\n log_cb: Callable[[str], Awaitable[None]] | None = None,\n) -> set[Path]:\n if not progress_bar:\n progress_bar = ProgressBarData(steps=1)\n async with AsyncExitStack() as zip_stack:\n zip_file_handler = zip_stack.enter_context(\n zipfile.ZipFile( # pylint: disable=consider-using-with\n archive_to_extract,\n mode=\"r\",\n )\n )\n zip_stack.enter_context(logging_redirect_tqdm())\n process_pool = zip_stack.enter_context(\n non_blocking_process_pool_executor(max_workers=max_workers)\n )\n\n # running in process poll is not ideal for concurrency issues\n # to avoid race conditions all subdirectories where files will be extracted need to exist\n # creating them before the extraction is under way avoids the issue\n # the following avoids race conditions while unzippin in parallel\n _ensure_destination_subdirectories_exist(\n zip_file_handler=zip_file_handler,\n destination_folder=destination_folder,\n )\n\n futures: list[asyncio.Future] = [\n asyncio.get_event_loop().run_in_executor(\n process_pool,\n # ---------\n _zipfile_single_file_extract_worker,\n archive_to_extract,\n 
zip_entry,\n destination_folder,\n zip_entry.is_dir(),\n )\n for zip_entry in zip_file_handler.infolist()\n ]\n\n try:\n extracted_paths: list[Path] = []\n total_file_size = sum(\n zip_entry.file_size for zip_entry in zip_file_handler.infolist()\n )\n async with AsyncExitStack() as progress_stack:\n sub_prog = await progress_stack.enter_async_context(\n progress_bar.sub_progress(steps=total_file_size)\n )\n tqdm_progress = progress_stack.enter_context(\n tqdm.tqdm(\n desc=f\"decompressing {archive_to_extract} -> {destination_folder} [{len(futures)} file{'s' if len(futures) > 1 else ''}\"\n f\"/{_human_readable_size(archive_to_extract.stat().st_size)}]\\n\",\n total=total_file_size,\n **_TQDM_MULTI_FILES_OPTIONS,\n )\n )\n for future in asyncio.as_completed(futures):\n extracted_path = await future\n extracted_file_size = extracted_path.stat().st_size\n if tqdm_progress.update(extracted_file_size) and log_cb:\n with log_catch(log, reraise=False):\n await log_cb(f\"{tqdm_progress}\")\n await sub_prog.update(extracted_file_size)\n extracted_paths.append(extracted_path)\n\n except Exception as err:\n for f in futures:\n f.cancel()\n\n # wait until all tasks are cancelled\n await asyncio.wait(\n futures, timeout=2 * _MIN, return_when=asyncio.ALL_COMPLETED\n )\n\n # now we can cleanup\n if destination_folder.exists() and destination_folder.is_dir():\n await remove_directory(destination_folder, ignore_errors=True)\n\n raise ArchiveError(\n f\"Failed unarchiving {archive_to_extract} -> {destination_folder} due to {type(err)}.\"\n f\"Details: {err}\"\n ) from err\n\n # NOTE: extracted_paths includes all tree leafs, which might include files and empty folders\n return {\n p\n for p in extracted_paths\n if p.is_file() or (p.is_dir() and not any(p.glob(\"*\")))\n }", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C {path}\")", "def unpack(filepath, target_dir, rm_tar=False):\n print(\"Unpacking %s ...\" % filepath)\n tar = tarfile.open(filepath)\n tar.extractall(target_dir)\n tar.close()\n if rm_tar == True:\n os.remove(filepath)", "def _uncompress(fname, outdir, msg=msg):\n import os\n assert os.access(fname, os.R_OK), \"could not access [%s]\" % fname\n fname = os.path.abspath(os.path.realpath(fname))\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n orig_dir = os.getcwd()\n try:\n os.chdir(outdir)\n ext = os.path.splitext(fname)[1][1:] # drop the dot\n if ext in ('gz', 'bz2'):\n import tarfile\n f = tarfile.open(fname, 'r:%s'%ext)\n f.extractall()\n else:\n err = 'extension [%s] not 
handled (yet?)' % ext\n msg.error(err)\n raise ValueError(err)\n finally:\n os.chdir(orig_dir)", "def _copy_binaries_to_archive(archive: PyfmuArchive) -> PyfmuArchive:\n\n binaries_path = Resources.get().binaries_dir\n\n\n archive_binaries_path = archive.root / 'binaries'\n\n copytree(binaries_path,archive_binaries_path)\n\n # paths\n archive.binaries_dir = archive_binaries_path\n archive.wrapper_win64 = archive.binaries_dir / 'win64' / 'pyfmu.dll'\n archive.wrapper_linux64 = archive.binaries_dir / 'linux64' / 'pyfmu.so'\n\n return archive", "def unpackArchiveToFiles(source, target = None, filter = None):\n if target is None:\n target, _ = os.path.split(source)\n \n for fileRecord, fileData in unpackArchive(source, filter):\n path, offset, size, compressedSize, archiveFileIndex = fileRecord\n \n outPath = os.path.join(target, path)\n outHead, outTail = os.path.split(outPath)\n os.makedirs(outHead, exist_ok = True)\n outFile = open(outPath, \"wb\")\n outFile.write(fileData)\n outFile.close()", "def extract_source(source_archive, target):\r\n with tarfile.open(source_archive) as tar_file:\r\n safetar_extractall(tar_file, target)", "def extract(self, archive_path: str, extracted_path: str) -> None:\n if not os.listdir(archive_path):\n self.log.warning(\n \"No files found in directory: {}\".format(archive_path))\n return\n\n for root, _, archive_files in os.walk(archive_path):\n if not archive_files:\n continue\n\n extract_to = os.path.normpath(os.path.join(\n extracted_path,\n os.path.relpath(root, archive_path)\n ))\n if not os.path.isdir(extract_to):\n os.makedirs(extract_to)\n\n for zfile in archive_files:\n zfile = os.path.join(root, zfile)\n filename, ext = os.path.splitext(os.path.basename(zfile))\n # unzip (tree) each archive file in archive_path\n if ext in self.zip_ext:\n # double splitext for .tar.gz\n fname, ext = os.path.splitext(os.path.basename(filename))\n if ext == '.tar':\n filename = fname\n self.log.info(\"Extracting from: {}\".format(zfile))\n self.log.info(\" Extracting to: {}\".format(\n os.path.join(extract_to, filename)))\n unzip(\n zfile,\n extract_to,\n zip_ext=self.zip_ext,\n create_own_folder=True,\n tree=True\n )\n\n # move each non-archive file in archive_path\n else:\n dest = os.path.join(extract_to, os.path.basename(zfile))\n self.log.info(\"Copying from: {}\".format(zfile))\n self.log.info(\" Copying to: {}\".format(dest))\n shutil.copy(zfile, dest)", "def unzip(source_archive_path, target_path):\n assert zipfile.is_zipfile(source_archive_path), 'Not a valid ZIP archive'\n print('Decompressing archive {} into {}'.format(source_archive_path, target_path))\n with zipfile.ZipFile(source_archive_path) as zf:\n zf.extractall(target_path)\n print('Done')", "def _decompress_tarball(*, in_fileobj, out_fileobj):\n with tarfile.open(fileobj=in_fileobj, mode=\"r\") as it, tarfile.open(\n fileobj=out_fileobj, mode=\"w|\"\n ) as ot:\n for member in it.getmembers():\n extracted = it.extractfile(member)\n ot.addfile(member, extracted)", "def unpack(file_path, extraction_path, remove):\n print(file_path)\n Archive(file_path).extractall(extraction_path, auto_create_dir=True)\n # remove original compressed file???\n if remove is True:\n os.remove(file_path)", "def unpack_package(package, dest):\n members = []\n for member in package.getmembers():\n # this is the equivalent of `--strip-components 1` when using tar CLI\n split_res = member.path.split('/', 1)\n if len(split_res) == 1:\n continue\n stripped_path = split_res[1]\n if not stripped_path:\n continue\n # set the name to 
the stripped path to take effect when extracting\n member.name = stripped_path\n members.append(member)\n package.extractall(dest, members=members)" ]
[ "0.72888505", "0.6926044", "0.6809945", "0.6746972", "0.6636526", "0.65584207", "0.6507182", "0.6496216", "0.6464963", "0.6461252", "0.64201564", "0.63850856", "0.6347677", "0.63212836", "0.6308422", "0.6223524", "0.6171321", "0.61497027", "0.61496353", "0.614074", "0.6140402", "0.6132096", "0.6075669", "0.6067505", "0.60018367", "0.5995476", "0.59874576", "0.5977743", "0.59769696", "0.5966177" ]
0.7522544
0
Create an |archive| with |paths| in |cwd|. The output will use XZ compression.
def pack(archive: Union[Path, str], paths: List[Union[Path, str]], cwd: Optional[Path] = None, exclude: Optional[List[Union[Path, str]]] = ()): archive = Path(archive) if cwd is None: cwd = Path.cwd() if archive.suffix == '.xz': archive = archive.with_suffix('') # Make sure all the paths have sane permissions. def walk(path): if path.is_symlink(): return elif path.is_dir(): # All dirs should be 755. mode = path.stat().st_mode & 0o777 if mode != 0o755: path.chmod(0o755) for subpath in path.glob('*'): walk(subpath) elif path.is_file(): # All scripts should be 755 while other files should be 644. mode = path.stat().st_mode & 0o777 if mode in (0o755, 0o644): return if mode & 0o111: path.chmod(0o755) else: path.chmod(0o644) else: raise ValueError(f'{path}: unknown file type') logging.info('Forcing sane permissions on inputs') for path in paths: walk(cwd / path) logging.info('Creating %s tarball', archive.name) # We use relpath here to help out tar on platforms where it doesn't like # paths with colons in them (e.g. Windows). We have to construct the full # before running through relpath as relative archives will implicitly be # checked against os.getcwd rather than the explicit cwd. tar = os.path.relpath(cwd / archive, cwd) run(['tar', '--owner=0', '--group=0', '-cf', tar] + [f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd) logging.info('Compressing tarball') run(['xz', '-f', '-T0', '-9', tar], cwd=cwd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_archive_file(location, paths, environment=None, compression=None, archive_format=None):\n if archive_format == 'zip':\n archive = ZipTarWrapper(location.name, 'w', zipfile.ZIP_DEFLATED)\n else:\n write_type = \"w\"\n if compression:\n write_type = \"w|{0}\".format(compression)\n archive = tarfile.open(location.name, write_type)\n\n # Add all the things to the archive\n for path_spec in paths:\n path_spec.add_to_tar(archive, environment)\n\n # Finish the zip\n archive.close()\n\n return archive", "def _create_zip_file(self, dest, paths):\n with zipfile.ZipFile(dest, 'w') as zip_file:\n for path in paths:\n zip_file.write(path, os.path.basename(path))", "def zip_package(paths: List[Path], fp, compression=zipfile.ZIP_DEFLATED):\n\n with zipfile.ZipFile(\n file=fp, mode=\"w\", compression=compression, compresslevel=9\n ) as z:\n for path in paths:\n (local_path, zip_path) = path\n z.write(filename=str(path[0]), arcname=str(path[1]))", "def archive(self):\n logging.info(_('Creating compressed archive...'))\n\n report_file_ext = 'bz2'\n compressor = 'bzip2'\n caller = Caller({})\n try:\n caller.call('xz --version')\n report_file_ext = 'xz'\n compressor = 'xz'\n except Exception:\n logging.debug('xz compression not available')\n\n if not os.path.exists(self.conf[\"output\"]):\n os.makedirs(self.conf[\"output\"])\n\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s.tar.%s\" % (\n 'LogCollector',\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n if self.conf[\"ticket_number\"]:\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s-%s.tar.%s\" % (\n 'LogCollector',\n self.conf[\"ticket_number\"],\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n config = {\n 'report': os.path.splitext(self.conf['path'])[0],\n 'compressed_report': self.conf['path'],\n 'compressor': compressor,\n 'directory': self.conf[\"local_tmp_dir\"],\n 'rname': os.path.basename(self.conf['path']).split('.')[0],\n }\n caller.configuration = config\n shutil.move(\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n 'working'\n ),\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n config[\"rname\"]\n ),\n )\n caller.call(\"tar -cf '%(report)s' -C '%(directory)s' '%(rname)s'\")\n shutil.rmtree(self.conf[\"local_tmp_dir\"])\n caller.call(\"%(compressor)s -1 '%(report)s'\")\n os.chmod(self.conf[\"path\"], stat.S_IRUSR | stat.S_IWUSR)\n sha256_out = caller.call(\"sha256sum '%(compressed_report)s'\")\n checksum = sha256_out.split()[0]\n with open(\"%s.sha256\" % self.conf[\"path\"], 'w') as checksum_file:\n checksum_file.write(sha256_out)\n\n msg = ''\n if os.path.exists(self.conf[\"path\"]):\n archiveSize = float(os.path.getsize(self.conf[\"path\"])) / (1 << 20)\n\n size = '%.1fM' % archiveSize\n\n msg = _(\n 'Log files have been collected and placed in {path}\\n'\n 'The sha256 for this file is {checksum} and its size is {size}'\n ).format(\n path=self.conf[\"path\"],\n size=size,\n checksum=checksum,\n )\n\n if archiveSize >= 1000:\n msg += _(\n '\\nYou can use the following filters -c, -d, -H in the '\n 'next execution to limit the number of Datacenters,\\n'\n 'Clusters or Hosts that are collected in order to '\n 'reduce the archive size.'\n )\n return msg", "def create_zip(\n output_path,\n input_paths,\n ignore_dotfiles,\n ignore_windows_volume_folders,\n put_all_files_in_shared_root_dir,\n path_separator,\n):\n # Hash each file, add hashes to file_hash_dict, then add to zip\n file_hash_dict = {}\n total_file_count = 0\n with 
zipfile.ZipFile(output_path, \"w\", zipfile.ZIP_DEFLATED, allowZip64=True) as zip_handler:\n for path in input_paths:\n if len(input_paths) == 1:\n common_root_directory = os.path.dirname(path)\n else:\n common_root_directory = get_common_root_directory(input_paths, path_separator)\n if os.path.isdir(path):\n file_list, total_size = get_file_paths_and_size(\n [path], ignore_dotfiles, ignore_windows_volume_folders\n )\n printer(\n \"'{}' contains {} files ({}) for compression\".format(\n path, len(file_list), bytes_filesize_to_readable_str(total_size)\n ),\n \"info\",\n )\n total_file_count += len(file_list)\n directory_hash_dict = get_hash_dict(\n file_list,\n common_root_directory,\n put_all_files_in_shared_root_dir,\n )\n for hash_value, relative_paths in directory_hash_dict.items():\n if hash_value not in file_hash_dict:\n file_hash_dict[hash_value] = relative_paths\n else:\n file_hash_dict[hash_value].extend(relative_paths)\n add_files_to_zip(\n file_list,\n common_root_directory,\n zip_handler,\n put_all_files_in_shared_root_dir,\n )\n printer(\"'{}' contents added to zip successfully\".format(path), \"info\")\n else:\n total_file_count += 1\n individual_file_hash_dict = get_hash_dict(\n [path],\n common_root_directory,\n put_all_files_in_shared_root_dir,\n )\n for hash_value, relative_paths in individual_file_hash_dict.items():\n if hash_value not in file_hash_dict:\n file_hash_dict[hash_value] = relative_paths\n else:\n file_hash_dict[hash_value].extend(relative_paths)\n add_files_to_zip(\n [path],\n common_root_directory,\n zip_handler,\n put_all_files_in_shared_root_dir,\n )\n printer(\"'{}' added to zip successfully\".format(path), \"info\")\n return file_hash_dict, total_file_count", "def Zip(args):\n parser = argparse.ArgumentParser(description=Zip.__doc__)\n parser.add_argument(\n '-r', dest='recursive', action='store_true',\n default=False,\n help='recurse into directories')\n parser.add_argument(\n '-q', dest='quiet', action='store_true',\n default=False,\n help='quiet operation')\n parser.add_argument('zipfile')\n parser.add_argument('filenames', nargs='+')\n options = parser.parse_args(args)\n\n src_files = []\n for filename in options.filenames:\n globbed_src_args = glob.glob(filename)\n if not globbed_src_args:\n if not options.quiet:\n print('zip warning: name not matched: %s' % filename)\n\n for src_file in globbed_src_args:\n src_file = os.path.normpath(src_file)\n src_files.append(src_file)\n if options.recursive and os.path.isdir(src_file):\n for root, dirs, files in os.walk(src_file):\n for dirname in dirs:\n src_files.append(os.path.join(root, dirname))\n for filename in files:\n src_files.append(os.path.join(root, filename))\n\n # zip_data represents a list of the data to be written or appended to the\n # zip_stream. It is a list of tuples:\n # (OS file path, zip path/zip file info, and file data)\n # In all cases one of the |os path| or the |file data| will be None.\n # |os path| is None when there is no OS file to write to the archive (i.e.\n # the file data already existed in the archive). 
|file data| is None when the\n # file is new (never existed in the archive) or being updated.\n zip_data = []\n new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]\n zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])\n for i in range(len(src_files)))\n write_mode = 'a'\n if os.path.exists(options.zipfile):\n with zipfile.ZipFile(options.zipfile, 'r') as zip_stream:\n try:\n files_to_update = set(new_files_to_add).intersection(\n set(zip_stream.namelist()))\n if files_to_update:\n # As far as I can tell, there is no way to update a zip entry using\n # zipfile; the best you can do is rewrite the archive.\n # Iterate through the zipfile to maintain file order.\n write_mode = 'w'\n for zip_path in zip_stream.namelist():\n if zip_path in files_to_update:\n os_path = zip_path_to_os_path_dict[zip_path]\n zip_data.append((os_path, zip_path, None))\n new_files_to_add.remove(zip_path)\n else:\n file_bytes = zip_stream.read(zip_path)\n file_info = zip_stream.getinfo(zip_path)\n zip_data.append((None, file_info, file_bytes))\n except IOError:\n pass\n\n for zip_path in new_files_to_add:\n zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))\n\n if not zip_data:\n print('zip error: Nothing to do! (%s)' % options.zipfile)\n return 1\n\n with zipfile.ZipFile(options.zipfile, write_mode,\n zipfile.ZIP_DEFLATED) as zip_stream:\n for os_path, file_info_or_zip_path, file_bytes in zip_data:\n if isinstance(file_info_or_zip_path, zipfile.ZipInfo):\n zip_path = file_info_or_zip_path.filename\n else:\n zip_path = file_info_or_zip_path\n\n if os_path:\n st = os.stat(os_path)\n if stat.S_ISDIR(st.st_mode):\n # Python 2.6 on the buildbots doesn't support writing directories to\n # zip files. This was resolved in a later version of Python 2.6.\n # We'll work around it by writing an empty file with the correct\n # path. 
(This is basically what later versions do anyway.)\n zip_info = zipfile.ZipInfo()\n zip_info.filename = zip_path\n zip_info.date_time = time.localtime(st.st_mtime)[0:6]\n zip_info.compress_type = zip_stream.compression\n zip_info.flag_bits = 0x00\n zip_info.external_attr = (st[0] & 0xFFFF) << 16\n zip_info.CRC = 0\n zip_info.compress_size = 0\n zip_info.file_size = 0\n zip_stream.writestr(zip_info, '')\n else:\n zip_stream.write(os_path, zip_path)\n else:\n zip_stream.writestr(file_info_or_zip_path, file_bytes)\n\n if not options.quiet:\n if zip_path in new_files_to_add:\n operation = 'adding'\n else:\n operation = 'updating'\n zip_info = zip_stream.getinfo(zip_path)\n if (zip_info.compress_type == zipfile.ZIP_STORED or\n zip_info.file_size == 0):\n print(' %s: %s (stored 0%%)' % (operation, zip_path))\n elif zip_info.compress_type == zipfile.ZIP_DEFLATED:\n print(' %s: %s (deflated %d%%)' % (operation, zip_path,\n 100 - zip_info.compress_size * 100 / zip_info.file_size))\n\n return 0", "def create_archive(filelist):\n\t\n\n\ttmp = tempfile.NamedTemporaryFile()\n\t# with tempfile.SpooledTemporaryFile() as tmp:\n\twith zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as archive:\n\t\tarcname = './docs/'\n\t\tfor x in filelist:\n\t\t\tfilename = os.path.basename(x[1])\n\t\t\t_file = x[0]\n\t\t\t# make sure we're at the start...\n\t\t\t_file.seek(0)\n\t\t\tarchive.write(_file.name, arcname=os.path.join(arcname, filename))\n\n\t# Reset file pointer\n\ttmp.seek(0)\n\n\treturn tmp\n\n\t\t# Write file data to response\n\t\t# return HttpResponse(tmp.read(), content_type='application/x-zip-compressed')", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))", "def pack_zip(output_filename, sources):\n previous_dir = os.getcwd()\n if not isinstance(sources, (list, tuple)) and \\\n isinstance(sources, str):\n sources = [sources]\n zip_ds = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)\n for source in sources:\n os.chdir(os.path.dirname(source))\n if os.path.isdir(source):\n for root, dirs, files in os.walk(os.path.basename(source)):\n for file in files:\n zip_ds.write(os.path.join(root, file))\n else:\n zip_ds.write(os.path.basename(source))\n zip_ds.close()\n os.chdir(previous_dir)", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def add_files(self, *paths, **kw):\n write_p = self._pointer\n\n block_size = ffi.write_get_bytes_per_block(write_p)\n if block_size <= 0:\n block_size = 10240 # pragma: no cover\n\n with new_archive_entry() as entry_p:\n entry = ArchiveEntry(None, entry_p)\n for path in paths:\n with new_archive_read_disk(path, **kw) as read_p:\n while 1:\n r = read_next_header2(read_p, entry_p)\n if r == ARCHIVE_EOF:\n break\n entry.pathname = entry.pathname.lstrip('/')\n read_disk_descend(read_p)\n write_header(write_p, entry_p)\n if entry.isreg:\n with open(entry_sourcepath(entry_p), 'rb') as f:\n while 1:\n data = f.read(block_size)\n if not data:\n break\n write_data(write_p, data, len(data))\n write_finish_entry(write_p)\n entry_clear(entry_p)", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 
'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def archive(filepath,archive_dir='archive'):\n\n # Make sure we have a directory to archive to\n try:\n mkdir(archive_dir)\n except:\n print(\"Error making archive directory\")\n return\n\n try:\n (dir, filename) = os.path.split(filepath)\n outfile = os.path.join(dir,archive_dir,filename)+'.gz'\n with open(filename, 'rb') as f_in, gzip.open(outfile, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n print(\"Error archiving \",filepath)\n print(e)\n else:\n try:\n os.remove(filepath)\n except:\n print(\"Error removing \",filepath)", "def create_zip_from_files(files: List[Path]) -> Any:\n temp = tempfile.NamedTemporaryFile()\n with zipfile.ZipFile(temp, 'w') as handle:\n for f in files:\n filename = f.name\n handle.write(f, arcname=filename)\n temp.flush()\n return temp", "def archive(project, filename, pack_envs=False):\n return archiver._archive_project(project, filename, pack_envs)", "def writepy(self, paths=[]):\n from vyperlogix import misc\n for top in paths if (misc.isList(paths)) else [paths]:\n try:\n for root, dirs, files in os.walk(top):\n if (self.rx.search(root) == None):\n print '='*80\n print 'files=%s' % files\n py_files = [os.path.join(root,f) for f in files if f.endswith('.py' if not self.isSourceless else '.pyo')]\n print '-'*80\n print 'py_files=%s' % py_files\n util.byte_compile(py_files,optimize=2,force=1)\n for f in py_files:\n print 'ZIP Adding (%s) to (%s)' % (f,self.filename)\n f_base = f.replace('.pyo','.pyc').replace(top,'')\n _f_base = f_base.split(os.sep)[-1]\n self.write(f,f_base)\n print '='*80\n except Exception as details:\n print 'Error in ZIP processing. 
(%s)' % (str(details))", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def make_archive(fname_archive: str, \n sim_epoch: rebound.Simulation, \n object_names: List[str],\n epoch: datetime, dt0: datetime, dt1: datetime, \n time_step: int, save_step: int = 1,\n save_elements: bool = False,\n progbar: bool = False) -> rebound.SimulationArchive:\n try:\n # First try to load the named archive\n sa = rebound.SimulationArchive(filename=fname_archive)\n except:\n # If the archive is not on disk, save it to disk\n print(f'Generating archive {fname_archive}\\n'\n f'from {dt0} to {dt1}, time_step={time_step}, save_step={save_step}...')\n make_archive_impl(fname_archive=fname_archive, sim_epoch=sim_epoch, object_names=object_names,\n epoch=epoch, dt0=dt0, dt1=dt1, \n time_step=time_step, save_step=save_step, \n save_elements=save_elements, progbar=progbar)\n # Load the new archive into memory\n sa = rebound.SimulationArchive(filename=fname_archive)\n return sa", "def _zip_archive(extracted_source, exclude_files=None, **_):\n ctx.logger.debug(\"Zipping source {source}\".format(source=extracted_source))\n exclude_files = exclude_files or []\n ctx.logger.debug('Excluding files {l}'.format(l=exclude_files))\n with tempfile.NamedTemporaryFile(suffix=\".zip\",\n delete=False) as updated_zip:\n updated_zip.close()\n with zipfile.ZipFile(updated_zip.name,\n mode='w',\n compression=zipfile.ZIP_DEFLATED) as output_file:\n for dir_name, subdirs, filenames in os.walk(extracted_source):\n # Make sure that the files that we don't want\n # to include (e.g. 
plugins directory) will not be archived.\n exclude_dirs(dir_name, subdirs, exclude_files)\n for filename in filenames:\n # Extra layer of validation on the excluded files.\n if not exclude_file(dir_name, filename, exclude_files):\n # Create the path as we want to archive it to the\n # archivee.\n file_to_add = os.path.join(dir_name, filename)\n # The name of the file in the archive.\n if file_storage_breaker(file_to_add):\n continue\n arc_name = file_to_add[len(extracted_source)+1:]\n output_file.write(file_to_add, arcname=arc_name)\n archive_file_path = updated_zip.name\n return archive_file_path", "def main():\n run_time_str = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n log = _prepare_logging()\n Args = collections.namedtuple(\n \"Args\",\n (\n \"input_paths\",\n \"output_path\",\n \"root_directory\",\n \"ignore_dotfiles\",\n \"ignore_windows_volume_folders\",\n ),\n )\n # If we are running from Mac Automator, take file paths from sys.argv\n if check_running_from_automator():\n # Example sys.argv for two files selected: ['-c', '/absolute/path/1.txt',\n # '/absolute/path/to/2.txt']\n args = Args(\n input_paths=sys.argv[1:],\n output_path=None,\n root_directory=False,\n ignore_dotfiles=False,\n ignore_windows_volume_folders=False,\n )\n # Otherwise, use argparse and allow for some additional options\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_paths\", nargs=\"+\", help=\"Items to compress\")\n parser.add_argument(\"-o\", \"--output_path\", \"--output\", help=\"Filename for zip\")\n parser.add_argument(\n \"-d\",\n \"--root-directory\",\n action=\"store_true\",\n help=\"Place all files in zip within a shared parent folder\",\n )\n parser.add_argument(\n \"--ignore-dotfiles\",\n action=\"store_true\",\n help=\"Ignore files and folders beginning with '.' 
(typically these are hidden folders)\",\n )\n parser.add_argument(\n \"--ignore-windows-volume-folders\",\n action=\"store_true\",\n help=(\n \"Ignore folders named 'System Volume Information' and '$RECYCLE.BIN' (typically\"\n \" these contain hidden system information)\"\n ),\n )\n\n parsed_args = parser.parse_args()\n args = Args(**vars(parsed_args))\n\n # Check passed arguments and return if issues\n if get_missing_sources(args.input_paths):\n printer(\n \"Path(s) {} not found\".format(get_list_as_str(get_missing_sources(args.input_paths))),\n \"error\",\n True,\n )\n return\n\n # Set path separator based on OS\n if platform.system() == \"Windows\":\n path_separator = \"\\\\\"\n else:\n path_separator = \"/\"\n\n # Convert input paths into absolute paths\n input_paths = [os.path.abspath(path) for path in args.input_paths]\n\n # Set output path\n if args.output_path is not None:\n output_path = args.output_path\n output_directory = os.path.dirname(output_path)\n else:\n if check_running_from_automator():\n # Last item in the list of arguments will be the last item clicked in Finder\n output_directory = os.path.dirname(input_paths[-1])\n else:\n output_directory = \".\"\n if len(input_paths) == 1:\n output_filename = os.path.basename(\"{}.zip\".format(input_paths[0]))\n else:\n output_filename = \"{}_archive.zip\".format(run_time_str)\n output_path = get_safe_file_path(os.path.join(output_directory, output_filename))\n printer(\"Zip file will be created at path '{}'\".format(output_path), \"info\")\n\n # Create zipfile and get file_hash_dict info for subsequent verification\n try:\n file_hash_dict, total_file_count = create_zip(\n output_path,\n input_paths,\n args.ignore_dotfiles,\n args.ignore_windows_volume_folders,\n args.root_directory,\n path_separator,\n )\n except:\n # Log the exception to a file, so we can view later if running from Automator\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n error_log_handler = logging.FileHandler(error_log_file_path)\n error_log_handler.setLevel(logging.ERROR)\n error_log_handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n )\n log.addHandler(error_log_handler)\n log.exception(\"Exception occurred during creation of zip file '%s':\", output_path)\n printer(\n \"Error occurred - see '{}'\".format(os.path.abspath(error_log_file_path)), \"error\", True\n )\n if os.path.isfile(output_path):\n os.remove(output_path)\n return\n printer(\"'{}' finalised - will now be verified\".format(output_path), \"info\")\n\n # Get hashes of files within finalised zip\n zip_hash_dict = {}\n with zipfile.ZipFile(output_path, \"r\") as zip_handler:\n zip_file_listing = zip_handler.namelist()\n zip_file_count = 0\n for file_within_zip in zip_file_listing:\n # Todo: confirm no 'file_info.is_dir()' type check needed here - don't believe so, as\n # only files with paths are being added, rather than directories as separate archive\n # items\n zip_file_count += 1\n hash_value = hash_file_in_zip(zip_handler, file_within_zip)\n if hash_value not in zip_hash_dict:\n zip_hash_dict[hash_value] = []\n zip_hash_dict[hash_value].append(file_within_zip)\n\n # Verify that hashes from source files match those for compressed files within newly-created zip\n if file_hash_dict == zip_hash_dict and total_file_count == zip_file_count:\n printer(\"Verification complete; no discrepancies identified\", \"info\")\n printer(\"'{}' created successfully\".format(output_path), \"info\", True)\n 
else:\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n with open(error_log_file_path, \"w\") as error_log_file_handler:\n for hash_value, file_paths in file_hash_dict.items():\n if hash_value not in zip_hash_dict:\n error_log_file_handler.write(\n \"Hash '{}' not present in zip file (with expected files {})\\n\".format(\n hash_value, get_list_as_str(file_paths)\n )\n )\n elif sorted(file_paths) != sorted(zip_hash_dict[hash_value]):\n error_log_file_handler.write(\n \"Files for hash '{}' do not match between source and zip ({} in source - {}\"\n \" in zip)\\n\".format(hash_value, file_paths, zip_hash_dict[hash_value])\n )\n printer(\n \"'{}' failed verification - see error log at '{}'\".format(\n output_path, os.path.abspath(error_log_file_path)\n ),\n \"error\",\n True,\n )\n os.remove(output_path) # Delete the zip that failed verification", "def compress_skim_dir(directory, output=\"zarr\"):\n\n if output not in (\"zarr\", \"zarr.zip\"):\n raise NotImplementedError(output)\n\n if output == \"zarr\":\n if not os.path.exists(directory+\".zarr\"):\n os.makedirs(directory+\".zarr\")\n elif output == \"zarr.zip\":\n if os.path.exists(directory+\".zarr.zip\"):\n raise FileExistsError(directory+\".zarr.zip\")\n\n master = {}\n for f in os.walk(directory):\n for fi in f[2]:\n if \".emx\" in fi:\n arr = np.fromfile(fi, dtype='f4')\n side = int(np.sqrt(arr.size))\n arr = arr.reshape(side, side)\n tazrange = pd.RangeIndex(1, side+1)\n master[fi.replace(\".emx\", \"\")] = xr.DataArray(\n arr,\n dims=['otaz', 'dtaz'],\n coords={'otaz': tazrange, 'dtaz': tazrange}\n )\n\n master = sh.Dataset(master)\n\n if output == \"zarr\":\n master.to_zarr(directory+\".zarr\", mode='a')\n elif output == \"zarr.zip\":\n with zarr.ZipStore(directory+\".zarr.zip\", mode='w') as store:\n master.to_zarr(store)\n return master", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def archive(self, files, name):\n self.log.debug(\"Putting files into archive: %s\" % \"\\n\".join(files))\n tar_name = \"%s%s\" % (name, self.extension)\n if os.path.exists(tar_name):\n raise RuntimeError (\"Tried to create an archive that already exists: %s\" % tar_name) \n else:\n self.log.info(\"Creating a new archive %s\" % tar_name)\n tar = tarfile.open(tar_name, 'w:gz');\n for name in files:\n tar.add(name)\n print '%s'% (name)\n tar.close()\n return tar_name", "def _archive_project(name, buff, files=None, repo=None, branch='master',\n ignore_deleted=False):\n if repo is None:\n repo = Repoman.open_repo(name)\n now = datetime.now().timetuple()[:6]\n archive = zipfile.ZipFile(buff, \"w\", zipfile.ZIP_DEFLATED)\n files_list = files if files is not None else \\\n repo.list_files_for_branch(branch)\n all_files = files_list if files is None else \\\n repo.list_files_for_branch(branch)\n\n template_paths = defaultdict(list)\n for file_path in all_files:\n split_file_path = file_path.split('/')\n if len(split_file_path) > 2:\n template_paths[split_file_path[1]].append(file_path)\n extractors = json.loads(repo.file_contents_for_branch('extractors.json',\n branch) or '{}')\n\n seen_files = set()\n spiders = set()\n for file_path in files_list:\n if file_path.startswith('spiders'):\n try:\n parts = file_path.split(\"/\")\n if len(parts) >= 2:\n spider_name = parts[1]\n if spider_name.endswith('.json'):\n spider_name = spider_name[:-5]\n if spider_name not in spiders:\n # Load spider if necessary\n if len(parts) > 2:\n file_path = 'spiders/' + spider_name + '.json'\n 
file_contents = repo.file_contents_for_branch(\n file_path, branch)\n as_json = json.loads(file_contents)\n templates = []\n # Load all spider templates\n spider_templates = template_paths.get(spider_name, [])\n for template_path in spider_templates:\n seen_files.add(template_path)\n existing = {}\n # Ignore deleted templates\n try:\n templ_contents = repo.file_contents_for_branch(\n template_path, branch)\n except (TypeError, ValueError):\n continue\n json_template = json.loads(templ_contents)\n # Validate extractors\n template_extractors = json_template.get(\n 'extractors', {})\n for field, eids in template_extractors.items():\n existing[field] = [eid for eid in eids\n if eid in extractors]\n json_template['extractors'] = existing\n spider_name = parts[1]\n templates.append(json_template)\n spiders.add(spider_name)\n as_json.pop('template_names', None)\n as_json['templates'] = templates\n _add_to_archive(archive, file_path,\n json.dumps(as_json), now)\n except TypeError:\n if ignore_deleted:\n continue\n # Handle Deleted Spiders\n file_contents = repo.file_contents_for_branch(file_path,\n 'master')\n file_info = {'deleted': True}\n if file_contents:\n as_json = json.loads(file_contents)\n _add_to_archive(archive, file_path, json.dumps(file_info), now)\n else:\n file_contents = repo.file_contents_for_branch(file_path, branch)\n _add_to_archive(archive, file_path, file_contents, now)\n seen_files.add(file_path)\n\n # Add empty placeholders for missing files required by dash\n for file_path in {'extractors.json', 'items.json'} - seen_files:\n _add_to_archive(archive, file_path, '{}', now)\n archive.close()", "def _archive(self, name, contents, isolate_content):\n # Shared code for all test_isolated_* test cases.\n root = os.path.join(self.tmpdir, name)\n # Refuse reusing the same task name twice, it makes the whole test suite\n # more manageable.\n self.assertFalse(os.path.isdir(root), root)\n os.mkdir(root)\n isolate_path = os.path.join(root, 'i.isolate')\n with open(isolate_path, 'wb') as f:\n f.write(isolate_content)\n for relpath, content in contents.items():\n p = os.path.join(root, relpath)\n d = os.path.dirname(p)\n if not os.path.isdir(d):\n os.makedirs(d)\n with open(p, 'wb') as f:\n f.write(content)\n return self.client.isolate(isolate_path)", "def create_zip_file():\n shutil.make_archive(os.path.join(DIST_DIR, \"build\"), \"zip\", BUILD_DIR)", "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def unpack(archive: Union[Path, str],\n cwd: Optional[Path] = None,\n files: Optional[List[Union[Path, str]]] = ()):\n archive = Path(archive)\n if cwd is None:\n cwd = Path.cwd()\n if files:\n files = ['--'] + list(files)\n else:\n files = []\n\n # Try to make symlink usage easier in Windows.\n extra_env = {\n 'MSYS': 'winsymlinks:nativestrict',\n }\n\n logging.info('Unpacking %s', archive.name)\n # We use relpath here to help out tar on platforms where it doesn't like\n # paths with colons in them (e.g. Windows). 
We have to construct the full\n # before running through relpath as relative archives will implicitly be\n # checked against os.getcwd rather than the explicit cwd.\n src = os.path.relpath(cwd / archive, cwd)\n run(['tar', '--no-same-owner', '-xf', src] + files, cwd=cwd,\n extra_env=extra_env)", "def open(self, *args, **kwargs):\n return ZipFileArchiver(*args,**kwargs)", "def generate_test_dataset_archive(filepath, dataset):\n\n # 'file:///some/path' to '/some/path'\n if filepath[:7] == 'file://':\n filepath = filepath[7:]\n\n # Check if the dataset exists.\n # When not been generate it.\n if not os.path.isfile(filepath):\n\n print(\"Generating\", filepath)\n data = get_test_dataset(dataset)\n \n ensure_dir(os.path.dirname(filepath))\n idxgz.save(filepath, data)" ]
[ "0.6538356", "0.6479174", "0.64458525", "0.6059645", "0.6056643", "0.5963025", "0.596087", "0.5931735", "0.5828145", "0.5716116", "0.57026917", "0.56945693", "0.56655836", "0.5658968", "0.5611987", "0.5595439", "0.5587479", "0.5581423", "0.55702585", "0.5558787", "0.5543491", "0.5520394", "0.5510049", "0.5508748", "0.5494774", "0.54929924", "0.54857963", "0.54503274", "0.5448339", "0.54331535" ]
0.720668
0
Fetch |uri| and write the results to |output| (or return BytesIO).
def fetch_data(uri: str, output=None, verbose: bool = False, b64: bool = False): # This is the timeout used on each blocking operation, not the entire # life of the connection. So it's used for initial urlopen and for each # read attempt (which may be partial reads). 5 minutes should be fine. TIMEOUT = 5 * 60 if output is None: output = io.BytesIO() try: with urllib.request.urlopen(uri, timeout=TIMEOUT) as infp: mb = 0 length = infp.length while True: data = infp.read(1024 * 1024) if not data: break # Show a simple progress bar if the user is interactive. if verbose: mb += 1 print('~%i MiB downloaded' % (mb,), end='') if length: percent = mb * 1024 * 1024 * 100 / length print(' (%.2f%%)' % (percent,), end='') print('\r', end='', flush=True) if b64: data = base64.b64decode(data) output.write(data) except urllib.error.HTTPError as e: logging.error('%s: %s', uri, e) sys.exit(1) return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch(uri, output, b64=False):\n output = os.path.abspath(output)\n distdir, name = os.path.split(output)\n if os.path.exists(output):\n logging.info('Using existing download: %s', name)\n return\n\n logging.info('Downloading %s to %s', uri, output)\n os.makedirs(distdir, exist_ok=True)\n\n # Use kokoro build cache or Gentoo distdir if available.\n for envvar in ('KOKORO_GFILE_DIR', 'DISTDIR'):\n cache_dir = os.getenv(envvar)\n if cache_dir:\n cache_file = os.path.join(cache_dir, name)\n if os.path.exists(cache_file):\n logging.info(' Cache hit via %s', envvar)\n symlink(cache_file, output)\n return\n\n # Don't be verbose if running on CI systems.\n verbose = os.isatty(sys.stdout.fileno())\n\n # We use urllib rather than wget or curl to avoid external utils & libs.\n # This seems to be good enough for our needs.\n tmpfile = output + '.tmp'\n for _ in range(0, 5):\n try:\n with open(tmpfile, 'wb') as outfp:\n fetch_data(uri, outfp, verbose=verbose, b64=b64)\n break\n except ConnectionError as e:\n time.sleep(1)\n logging.warning('Download failed; retrying: %s', e)\n else:\n logging.error('Unabled to download; giving up')\n unlink(tmpfile)\n sys.exit(1)\n\n # Clear the progress bar.\n if verbose:\n print(' ' * 80, end='\\r')\n\n os.rename(tmpfile, output)", "def fetch_file(self, location, output=None):\n\n self.log.debug(\"Fetching '%s' file...\" % location)\n\n if not output:\n output = tempfile.mktemp(\"-dogen\")\n \n self.log.debug(\"File will be saved as '%s'...\" % output)\n\n with open(output, 'wb') as f:\n f.write(requests.get(location, verify=self.ssl_verify).content)\n\n return output", "def download(self, source_uri, output, **kwargs):\n raise NotImplementedError(\"Subclass needs to implement this method\")", "def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()", "def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)", "def do_GET(self):\n self.send_head()\n f = io.BytesIO()\n f.write(self.output.encode())\n f.seek(0)\n shutil.copyfileobj(f, self.wfile)\n f.close()", "def url_retrieve(url, output_file):\n r = requests.get(url, allow_redirects=True)\n if r.status_code != 200:\n raise ConnectionError(f\"Could not download {url}\\nError code: {r.status_code}\")\n\n output_file.write_bytes(r.content)", "def get_output(self, download_dir, output=None, overwrite=False, callback=None, block=4096):\n if output:\n name = output.get('name', \"\")\n download = self._get_intermediate_output(output,\n download_dir,\n overwrite,\n callback=callback,\n block=block)\n\n elif self.output_url and self.output_filename:\n name = self.output_filename\n download = self._get_final_output(download_dir, overwrite,\n callback=callback, block=block)\n\n else:\n raise FileDownloadException(\n \"Job has no reference to an output file, \"\n \"please update to check if the output is ready\")\n\n if download.success:\n return os.path.join(download_dir, name)\n\n else:\n raise download.result", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} 
download\")", "def __call__(self, url, output_file, pooch):\n kwargs = self.kwargs.copy()\n kwargs.setdefault(\"stream\", True)\n ispath = not hasattr(output_file, \"write\")\n if ispath:\n output_file = open(output_file, \"w+b\")\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n content = response.iter_content(chunk_size=self.chunk_size)\n if self.progressbar:\n total = int(response.headers.get(\"content-length\", 0))\n # Need to use ascii characters on Windows because there isn't\n # always full unicode support\n # (see https://github.com/tqdm/tqdm/issues/454)\n use_ascii = bool(sys.platform == \"win32\")\n progress = tqdm(\n total=total,\n ncols=79,\n ascii=use_ascii,\n unit=\"B\",\n unit_scale=True,\n leave=True,\n )\n for chunk in content:\n if chunk:\n output_file.write(chunk)\n output_file.flush()\n if self.progressbar:\n # Use the chunk size here because chunk may be much\n # larger if the data are decompressed by requests after\n # reading (happens with text files).\n progress.update(self.chunk_size)\n # Make sure the progress bar gets filled even if the actual number\n # is chunks is smaller than expected. This happens when streaming\n # text files that are compressed by the server when sending (gzip).\n # Binary files don't experience this.\n if self.progressbar:\n progress.reset()\n progress.update(total)\n progress.close()\n finally:\n if ispath:\n output_file.close()", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname", "def _download(self, url, output_dir, dataset, chunk_size=1024):\n r = self.session.get(url, stream=True, allow_redirects=True)\n if not r.ok:\n r = self.session.get(r.url, stream=True, allow_redirects=True, auth=(self._username, self._password))\n file_size = int(r.headers['Content-Length'])\n\n with tqdm(total=file_size, unit_scale=True, unit='B', unit_divisor=1024) as pbar:\n ### GET FILE NAME ###\n if \"Content-Disposition\" in r.headers.keys():\n local_filename = re.findall(\"filename=(.+)\", r.headers[\"Content-Disposition\"])[0]\n else:\n local_filename = url.split(\"/\")[-3]\n local_filename = self.api.lookup(dataset, local_filename)[0]\n local_filename = local_filename + util.convert_to_extension(r.headers['content-type'])\n print(\"*** FNAME\", local_filename)\n\n local_filename = os.path.join(output_dir, local_filename)\n\n ### WRITE FILE ###\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk:\n f.write(chunk)\n pbar.update(chunk_size)\n return local_filename", "def get_url(self,url,output=None):\n parsed_url = urlparse(url)\n hostname = parsed_url[1]\n \n #Make the command\n cmd = \"wget %s -O -\" % url\n (ssh_input,ssh_output,ssh_err) = self.execute_command(cmd)\n \n if(output==None):\n p = urlparse(url)[2]\n filename = os.path.split(p)[1] \n output = filename\n # See if it's ok.\n err = sio.StringIO()\n dat = ssh_err.read(BLOCKSIZE)\n while(dat):\n err.write(dat)\n dat = ssh_err.read(BLOCKSIZE)\n \n err_out = err.getvalue()\n print >> sys.stderr, err_out\n err1 = re.compile(r\"failed\") # Failed to resolve hostname\n err2 = re.compile(r\"404 Not Found\") # File not found\n \n if(err1.search(err_out)):\n raise SSHError(\"ERROR: Failed to retrieve file! 
Hostname unknown\")\n elif(err2.search(err_out)):\n raise SSHError(\"ERROR: Failed to retrieve file. File not found\")\n # If it didn't fail, read the file.\n \n if(output==\"-\"):\n f = sys.stdout\n else:\n f = open(output,\"w+b\")\n dat = ssh_output.read(BLOCKSIZE)\n while(dat):\n f.write(dat)\n dat = ssh_output.read(BLOCKSIZE)", "def _download_file(url: str, output_path: str):\n\n def write_to_file(response: requests.Response, output_path: str) -> int:\n \"\"\"Write the response content to the given file.\n\n :param response: Response to be written to the output file.\n :param output_path: Path to the output file.\n :returns: Number of bytes read from the response content.\n \"\"\"\n read_bytes = 0\n with open(output_path, \"wb\") as output_file:\n # Use the same chunk size of `urlretrieve`\n for chunk in response.iter_content(chunk_size=1024 * 8):\n read_bytes += len(chunk)\n output_file.write(chunk)\n if read_bytes > FETCHER_MAXIMUM_FILE_SIZE:\n break\n return read_bytes\n\n try:\n with requests.get(\n url, stream=True, timeout=FETCHER_REQUEST_TIMEOUT\n ) as response:\n response.raise_for_status()\n\n content_length = int(response.headers.get(\"Content-Length\", 0))\n if content_length > FETCHER_MAXIMUM_FILE_SIZE:\n raise REANAFetcherError(\"Maximum file size exceeded\")\n\n read_bytes = write_to_file(response, output_path)\n\n if read_bytes > FETCHER_MAXIMUM_FILE_SIZE:\n os.remove(output_path)\n raise REANAFetcherError(\"Maximum file size exceeded\")\n except HTTPError as e:\n error = f\"Cannot fetch the workflow specification: {e.response.reason} ({response.status_code})\"\n if response.status_code == 404:\n error = \"Cannot find the given workflow specification\"\n raise REANAFetcherError(error)\n except Timeout:\n raise REANAFetcherError(\n \"Timed-out while fetching the workflow specification\"\n )\n except RequestException:\n raise REANAFetcherError(\n \"Something went wrong while fetching the workflow specification\"\n )", "def __fetch_output_task(\n self, task, download_dir, overwrite, changed_only, **extra_args):\n return task.fetch_output(\n download_dir, overwrite, changed_only, **extra_args)", "def get_output(self, output, download_dir, overwrite=False, callback=None, block=4096):\n download = self._get_file(output, download_dir, overwrite, callback=callback, block=block)\n if download.success:\n return os.path.join(download_dir, output.get('name', ''))\n else:\n raise download.result", "def download(url, output, encoding, insrs, format_name):\n\n folder = download_data(url, encoding)\n joined_file = join_files(folder)\n transform(joined_file, output, insrs, format_name)\n\n shutil.rmtree(folder)\n os.remove(joined_file)\n\n if not os.path.isfile(output):\n raise Error(\"Output file not created, the whole process failed\")\n else:\n logging.info(\"File %s successfuly created\" % output)", "def save(self, url, output):\n\n shutil.copy2(self.get(url), output)", "def url_fetch(self, url):\n user_agent = random.choice(self.conf.user_agents)\n if self.isCompress == True:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n else:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n raw_data = ''\n try:\n conn = httplib.HTTPConnection(self.proxy, timeout=3.0)\n conn.request('GET', url, None, headers)\n response = conn.getresponse()\n raw_data = response.read()\n except Exception as err:\n self.logger.error('connect error[%s]' % err)\n return '999', 'Request failed', ''\n 
finally:\n conn.close()\n \n content = ''\n if self.isCompress == True:\n if response.status == 200:\n try:\n stream = StringIO.StringIO(raw_data)\n decompressor = gzip.GzipFile(fileobj=stream)\n content = decompressor.read()\n except:\n self.logger.error('status[%s] len_raw_data[%d]' % (response.status, len(raw_data)))\n return '998', 'content err', ''\n else:\n if response.status == 200:\n content = raw_data \n\n return response.status, response.reason, content", "def fetch(self, url: furl) -> str:\n try:\n contents = self._download(url)\n except requests.ConnectionError as err:\n logger.exception(f\"Request failed with {err}\")\n click.secho(\n f\"The URL {url} could not be downloaded. Either your network is unreachable or the URL is broken.\"\n f\" Check the URL, fix your connection, or use \"\n f\" {OptionEnum.OFFLINE.as_flake8_flag()} / {OptionEnum.OFFLINE.as_envvar()}=1\",\n fg=\"red\",\n err=True,\n )\n return \"\"\n return contents", "def download(self, outputfile: str, outputformat: str):\n pass", "def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)", "def read_and_save(res):\n fname = os.path.split(urlsplit(res.url).path)[-1]\n fpath = os.path.join(cfg.OUTPUT_DIR, fname)\n with open(fpath, 'wb') as f:\n for chunk in res.iter_content(cfg.CHUNK):\n f.write(chunk)", "def download_from_url(url, output_path):\n\n print('Pulling data from {} to {}'.format(url, output_path))\n wget.download(url, output_path)\n print('done')", "def fetch(self, url, body=None, headers=None):\r\n if body:\r\n # method = 'POST'\r\n # undo the URL encoding of the POST arguments\r\n data = parse_qs(body)\r\n response = self.client.post(url, data)\r\n else:\r\n # method = 'GET'\r\n data = {}\r\n if headers and 'Accept' in headers:\r\n data['CONTENT_TYPE'] = headers['Accept']\r\n response = self.client.get(url, data)\r\n\r\n # Translate the test client response to the fetcher's HTTP response abstraction\r\n content = response.content\r\n final_url = url\r\n response_headers = {}\r\n if 'Content-Type' in response:\r\n response_headers['content-type'] = response['Content-Type']\r\n if 'X-XRDS-Location' in response:\r\n response_headers['x-xrds-location'] = response['X-XRDS-Location']\r\n status = response.status_code\r\n\r\n return HTTPResponse(\r\n body=content,\r\n final_url=final_url,\r\n headers=response_headers,\r\n status=status,\r\n )", "def _get_file(self, output, download_dir, overwrite, callback=None, block=4096):\n if output.get('type') == 'TaskPreview':\n size = None\n\n else:\n output_props = self._api.props_output_file(url=output.get('link'))\n\n if output_props.success:\n size = output_props.result\n\n else:\n raise output_props.result\n\n return self._api.get_output_file(download_dir,\n size,\n overwrite,\n fname=output.get('name'),\n url=output.get('link'),\n callback=callback,\n block=block)", "def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content", "def download_http(self, url):\n\n # Set things up.\n # ==============\n\n out = None\n headers = {}\n if (url.username is not None) and (url.password is not None):\n tmp = base64.b64encode(':'.join([url.username, url.password]))\n headers['Authorization'] = \"Basic %s\" % tmp\n\n\n # Toe the waters.\n # ===============\n # We start with an HTTP HEAD request 
to check the status.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"HEAD\", url.path, '', headers)\n r = conn.getresponse()\n conn.close()\n if self.verbose:\n print >> sys.stderr, url, r.status, ''\n\n\n # Bail.\n # =====\n # Short-cut when we just care whether it's a package.\n\n if url.path.endswith('/'):\n out = r.status == 200\n\n\n elif r.status == 200:\n\n # Wade in.\n # ========\n # If the status is positive we check to see if we've already\n # downloaded the latest copy.\n\n etag = r.getheader('etag', '')\n lm = r.getheader('last-modified', '')\n key = sha.new(str(url) + etag + lm).hexdigest()\n\n if not self.cachedir:\n raise ValueError(\"netimp.importer.cachedir not set\")\n if not os.path.isdir(self.cachedir):\n raise IOError( \"netimp.importer.cachedir not found \"\n + \"(%s)\" % self.cachedir\n )\n\n path = join(self.cachedir, key)\n if os.path.isfile(path):\n out = open(path, 'rb')\n else:\n\n # Dive in!\n # ========\n # We don't have this module locally yet: download it for real.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"GET\", url.path, '', headers)\n r = conn.getresponse()\n if r.status == 200: # just in case!\n fp = open(path, 'w+b')\n fp.write(r.read())\n fp.flush()\n fp.close()\n out = open(path, 'rb')\n conn.close()\n\n return out", "def fetch(url, filename):\n with open(filename, 'wb') as handle:\n response = requests.get(url, stream=True)\n\n if not response.ok:\n logger.error('Download failed')\n return False\n\n for block in response.iter_content(1024):\n if not block:\n break\n\n handle.write(block)\n\n logger.info(' -> Rewriting URIs')\n q = re.compile(r'http://data.ub.uio.no/realfagstermer/([0-9]+)')\n with open(filename, 'r') as infile:\n with open(filename + '.tmp', 'w') as outfile:\n outfile.write(q.sub('http://data.ub.uio.no/realfagstermer/c\\\\1', infile.read()))\n os.unlink(filename)\n os.rename(filename + '.tmp', filename)\n\n return True" ]
[ "0.6911675", "0.6535504", "0.6211069", "0.60043406", "0.59758717", "0.58903176", "0.5829563", "0.5752656", "0.57295847", "0.5723706", "0.57197624", "0.5693564", "0.5693242", "0.56680185", "0.5658385", "0.56308764", "0.5616187", "0.5565478", "0.5563205", "0.5561096", "0.55498993", "0.54965216", "0.54814184", "0.5476896", "0.54705817", "0.54317045", "0.5420145", "0.5413119", "0.5394613", "0.5384457" ]
0.7404381
0
Download our copies of node & npm to our tree and updates env ($PATH).
def node_and_npm_setup(): # We have to update modules first as it'll nuke the dir node lives under. node.modules_update() node.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_requirements():\n with cd(REMOTE_REPO_DIR):\n cmd = ['npm install']\n # cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))", "def InstallNodeDependencies():\n logging.info('entering ...')\n # Install the project dependencies specified in package.json into\n # node_modules.\n logging.info('installing AMP Validator engine dependencies ...')\n subprocess.check_call(\n ['npm', 'install', '--userconfig', '../.npmrc'],\n stdout=(open(os.devnull, 'wb') if os.environ.get('CI') else sys.stdout))\n logging.info('installing AMP Validator nodejs dependencies ...')\n subprocess.check_call(['npm', 'install', '--userconfig', '../../../.npmrc'],\n cwd='js/nodejs',\n stdout=(open(os.devnull, 'wb')\n if os.environ.get('CI') else sys.stdout))\n logging.info('... done')", "def nodejs(self):\n self.summarize_operation(\"Installing Nodejs\")\n process = Popen(shlex.split(\"curl --silent --location https://deb.nodesource.com/setup_5.x \"), stdout=subprocess.PIPE)\n process_stdout = Popen(shlex.split(\"sudo -E bash -\"), stdin=process.stdout)\n process_stdout.communicate()[0]\n self.install_package(\"nodejs\")\n self.npm_install_globally(\"npm@latest\")", "def install_frontend_deps():\n\n with lcd(FRONTENDDIR):\n cmd = '%(npm)s install' % {'npm': get_npm()}\n local(cmd)\n cmd = '%(bower)s install' % {'bower': get_bower()}\n local(cmd)", "def update_npm():\n path = os.path.join(settings.PROJECT_PATH, 'rnacentral', 'portal', 'static')\n with env.cd(path):\n env.run('npm update --loglevel info')", "def node_prereqs_installation():\n\n # NPM installs hang sporadically. Log the installation process so that we\n # determine if any packages are chronic offenders.\n shard_str = os.getenv('SHARD', None)\n if shard_str:\n npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.{shard_str}.log'\n else:\n npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log'\n npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with\n npm_command = 'npm install --verbose'.split()\n\n # The implementation of Paver's `sh` function returns before the forked\n # actually returns. Using a Popen object so that we can ensure that\n # the forked process has returned\n proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with\n retcode = proc.wait()\n if retcode == 1:\n # Error handling around a race condition that produces \"cb() never called\" error. This\n # evinces itself as `cb_error_text` and it ought to disappear when we upgrade\n # npm to 3 or higher. TODO: clean this up when we do that.\n print(\"npm install error detected. Retrying...\")\n proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with\n retcode = proc.wait()\n if retcode == 1:\n raise Exception(f\"npm install failed: See {npm_log_file_path}\")\n print(\"Successfully installed NPM packages. 
Log found at {}\".format(\n npm_log_file_path\n ))", "def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))", "def NPMInstall(infra_root):\n cipd_npm = os.path.join(\n infra_root, 'cipd', 'lib', 'node_modules', 'npm', 'bin', 'npm-cli.js')\n return RunNode(infra_root, [cipd_npm, 'install'])", "def do_base_setup(run_as_user, branch, base_path, dist_path):\n #change time to UTC\n runcmd(\"ln -sf /usr/share/zoneinfo/UTC /etc/localtime\")\n\n #install some necessary base deps\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install git-core software-properties-common python-software-properties build-essential ssl-cert ntp runit\")\n \n #install node-js\n #node-gyp building has ...issues out of the box on Ubuntu... use Chris Lea's nodejs build instead, which is newer\n runcmd(\"apt-get -y remove nodejs npm gyp\")\n runcmd(\"add-apt-repository -y ppa:chris-lea/node.js\")\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install nodejs\") #includes npm\n gypdir = None\n try:\n import gyp\n gypdir = os.path.dirname(gyp.__file__)\n except:\n pass\n else:\n runcmd(\"mv %s %s_bkup\" % (gypdir, gypdir))\n #^ fix for https://github.com/TooTallNate/node-gyp/issues/363\n\n #Create xcp user, under which the files will be stored, and who will own the files, etc\n try:\n pwd.getpwnam(USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/false --group %s\" % USERNAME)\n \n #Create xcpd user (to run counterpartyd, counterblockd, insight, bitcoind, nginx) if not already made\n try:\n pwd.getpwnam(DAEMON_USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % DAEMON_USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/false --ingroup nogroup --home %s %s\" % (USER_HOMEDIR, DAEMON_USERNAME))\n \n #add the run_as_user to the xcp group\n runcmd(\"adduser %s %s\" % (run_as_user, USERNAME))\n \n #Check out counterpartyd-build repo under this user's home dir and use that for the build\n git_repo_clone(\"counterpartyd_build\", \"https://github.com/CounterpartyXCP/counterpartyd_build.git\",\n os.path.join(USER_HOMEDIR, \"counterpartyd_build\"), branch, for_user=run_as_user)\n\n #enhance fd limits for the xcpd user\n runcmd(\"cp -af %s/linux/other/xcpd_security_limits.conf /etc/security/limits.d/\" % dist_path)", "def update_go_deps(self):\n self.go_version()\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run.with_retry(\n self.m.step,\n 'update go pkgs',\n UPDATE_GO_ATTEMPTS,\n cmd=[self.go_exe, 'get', '-u', '-t', '%s/...' 
% INFRA_GO_PKG])", "def deploy_node(app, deltas={}):\n\n virtualenv_path = join(ENV_ROOT, app)\n node_path = join(ENV_ROOT, app, \"node_modules\")\n node_modules_symlink = join(APP_ROOT, app, \"node_modules\")\n npm_prefix = abspath(join(node_path, \"..\"))\n env_file = join(APP_ROOT, app, 'ENV')\n deps = join(APP_ROOT, app, 'package.json')\n\n first_time = False\n if not exists(node_path):\n echo(\"-----> Creating node_modules for '{}'\".format(app), fg='green')\n makedirs(node_path)\n first_time = True\n\n env = {\n 'VIRTUAL_ENV': virtualenv_path,\n 'NODE_PATH': node_path,\n 'NPM_CONFIG_PREFIX': npm_prefix,\n \"PATH\": ':'.join([join(virtualenv_path, \"bin\"), join(node_path, \".bin\"), environ['PATH']])\n }\n if exists(env_file):\n env.update(parse_settings(env_file, env))\n\n # include node binaries on our path\n environ[\"PATH\"] = env[\"PATH\"]\n\n version = env.get(\"NODE_VERSION\")\n node_binary = join(virtualenv_path, \"bin\", \"node\")\n installed = check_output(\"{} -v\".format(node_binary), cwd=join(APP_ROOT, app), env=env, shell=True).decode(\"utf8\").rstrip(\n \"\\n\") if exists(node_binary) else \"\"\n\n if version and check_requirements(['nodeenv']):\n if not installed.endswith(version):\n started = glob(join(UWSGI_ENABLED, '{}*.ini'.format(app)))\n if installed and len(started):\n echo(\"Warning: Can't update node with app running. Stop the app & retry.\", fg='yellow')\n else:\n echo(\"-----> Installing node version '{NODE_VERSION:s}' using nodeenv\".format(**env), fg='green')\n call(\"nodeenv --prebuilt --node={NODE_VERSION:s} --clean-src --force {VIRTUAL_ENV:s}\".format(**env),\n cwd=virtualenv_path, env=env, shell=True)\n else:\n echo(\"-----> Node is installed at {}.\".format(version))\n\n if exists(deps) and check_requirements(['npm']):\n if first_time or getmtime(deps) > getmtime(node_path):\n copyfile(join(APP_ROOT, app, 'package.json'), join(ENV_ROOT, app, 'package.json'))\n if not exists(node_modules_symlink):\n symlink(node_path, node_modules_symlink)\n echo(\"-----> Running npm for '{}'\".format(app), fg='green')\n call('npm install --prefix {} --package-lock=false'.format(npm_prefix), cwd=join(APP_ROOT, app), env=env, shell=True)\n return spawn_app(app, deltas)", "def bootstrap(execute=dummy_execute):\n path = node(['-p',\n 'try { require.resolve(\"@prometheusresearch/react-scripts/bin/react-scripts.js\") } catch (e) {\"\"}'],\n quiet=True)\n if not path.strip():\n def bootstrap_yarn():\n url, md5_hash = download.parse_url(YARN_URL)\n yarn_data = download.download(url, md5_hash=md5_hash)\n yarn_path = os.path.join(sys.prefix, 'bin', 'yarn')\n with open(yarn_path, 'w') as f:\n f.write(yarn_data)\n yarn_stat = os.stat(yarn_path)\n os.chmod(yarn_path, yarn_stat.st_mode | stat.S_IEXEC)\n\n def bootstrap_npm():\n npm_path = find_executable('npm', 'npm')\n out, err = exe(npm_path, ['--version'])\n npm_version = out.strip()\n if npm_version[0] not in ('4', '3', '2'):\n npm(['install', '--global', '[email protected]'])\n npm(['install', '--global', 'npm@' + NPM_VERSION])\n\n def bootstrap_react_scripts():\n deps = [\n '@prometheusresearch/react-scripts@%s' % REACT_SCRIPTS_VERSION,\n '[email protected]', # this is required for yarn to function propely\n ]\n npm(['install', '--global'] + deps)\n\n execute(bootstrap_yarn, (), 'Installing yarn')\n execute(bootstrap_npm, (), 'Installing npm')\n execute(bootstrap_react_scripts, (), 'Installing react-scripts')", "def install_with_npm_fast_install(self, directory, silent=False):\n timer = Timer()\n program_name = 
'npm-fast-install'\n if not self.context.test('which', 'npm-fast-install'):\n program_name = os.path.join(directory, 'node_modules', '.bin', 'npm-fast-install')\n if not self.context.exists(program_name):\n logger.verbose(\"Installing npm-fast-install locally (because it's not globally installed) ..\")\n self.context.execute('npm', 'install', 'npm-fast-install', directory=directory, silent=silent)\n package_file = os.path.join(directory, 'package.json')\n original_contents = self.context.read_file(package_file)\n metadata = dict(dependencies={}, devDependencies={})\n metadata.update(json.loads(auto_decode(original_contents)))\n need_patch = metadata['devDependencies'] and not self.production\n try:\n # Temporarily change the contents of the package.json file?\n if need_patch:\n logger.debug(\"Temporarily patching %s ..\", package_file)\n patched_data = copy.deepcopy(metadata)\n patched_data['dependencies'].update(patched_data['devDependencies'])\n patched_data.pop('devDependencies')\n self.context.write_file(package_file, json.dumps(patched_data).encode('UTF-8'))\n # Run the npm-fast-install command.\n logger.info(\"Running command: %s\", quote(program_name))\n self.context.execute(program_name, directory=directory, silent=silent)\n finally:\n # Restore the original contents of the package.json file?\n if need_patch:\n logger.debug(\"Restoring original contents of %s ..\", package_file)\n self.context.write_file(package_file, original_contents)\n logger.verbose(\"Took %s to install with npm-fast-install.\", timer)", "def _compile_web_assets_npm(project_root_dir):\n clientdir = os.path.join(project_root_dir, 'client')\n modulesdir = os.path.join(clientdir, 'node_modules')\n if os.path.isdir(modulesdir):\n log(\"removing \" + str(modulesdir))\n try:\n rmtree(modulesdir)\n except OSError as exception:\n log(exception.strerror + \": \" + exception.filename)\n return 1\n log(\"installing node modules under \" + str(clientdir))\n cmd = 'npm i'\n cr = container_users.make_host_user_command_runner()\n cr.set_working_dir(clientdir)\n res = cr.run(cmd, stream_log=True)\n return res.get_exit_code()", "def test_npm_install_url_referenced_package(modules, npm, npm_version, states):\n ret = npm.installed(\n name=\"request/request#v2.88.2\",\n registry=\"https://registry.npmjs.org/\",\n )\n assert ret.result is True\n ret = npm.removed(\n name=\"git://github.com/request/request\",\n )\n assert ret.result is True", "def npm_install(self, package):\n self.summarize_operation(\"Installing \" + package)\n print subprocess.call(shlex.split(\"sudo npm install --save \" + package))", "def _set_environment_vars(self):\n os.environ[\"PATH\"] = os.path.join(self.source_folder, \"depot_tools\") + os.pathsep + os.environ[\"PATH\"]\n os.environ[\"DEPOT_TOOLS_PATH\"] = os.path.join(self.source_folder, \"depot_tools\")\n if tools.os_info.is_windows:\n os.environ[\"DEPOT_TOOLS_WIN_TOOLCHAIN\"] = \"0\"\n os.environ[\"GYP_MSVS_VERSION\"] = \"2017\" if str(self.settings.compiler.version) == \"15\" else \"2019\"", "def _install_system_requirements_linux(self):\n self.output.info(\"Calling v8/build/install-build-deps.sh\")\n os.environ[\"PATH\"] += os.pathsep + os.path.join(self.source_folder, \"depot_tools\")\n sh_script = self.source_folder + \"/v8/build/install-build-deps.sh\"\n self.run(\"chmod +x \" + sh_script)\n cmd = sh_script + \" --unsupported --no-arm --no-nacl --no-backwards-compatible --no-chromeos-fonts --no-prompt \"\n cmd = cmd + (\"--syms\" if str(self.settings.build_type) == \"Debug\" else \"--no-syms\")\n 
cmd = \"export DEBIAN_FRONTEND=noninteractive && \" + cmd\n self.run(cmd)", "def npm_install_globally(self, package):\n self.summarize_operation(\"Installing \" + package)\n print subprocess.call(shlex.split(\"sudo npm install -g \" + package))", "def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)", "def install_backend_deps():\n with lcd(BACKENDDIR):\n cmd = '%(pip)s install -r %(requirements_file)s' % {\n 'pip': get_pip(),\n 'requirements_file': requirements_file\n }\n local(cmd)\n # Install Pandoc\n local(\"sudo apt-get install pandoc\")\n # Install Pyandoc\n with lcd(HOMEDIR):\n if not os.path.isdir(os.path.join(HOMEDIR, 'pyandoc')):\n local(\"git clone [email protected]:kennethreitz/pyandoc.git\")\n with lcd(\"pyandoc\"):\n if not env.local:\n\t with prefix('. /home/ubuntu/virtualenvs/venv-system/bin/activate'):\n local(\"python setup.py install\")\n else:\n local(\"python setup.py install\")", "def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')", "def install_deps():\n click.echo(\"install_deps\")", "def install_js_deps():\n click.echo('-> Installing JavaScript dependencies for the Vue.js client...')\n subprocess.check_call(['npm',\n '--prefix={0}'.format(os.path.join(os.path.dirname(aliquis.__file__),\n 'aliquisjs')),\n 'install'])\n click.echo('-> JavaScript dependencies succesfully installed.')", "def YumInstall(vm):\n vm.Install('build_tools')\n vm.InstallEpelRepo()\n vm.InstallPackages(YUM_PACKAGES)", "def local_install(self):\n import subprocess\n\n print(\"Making local install\")\n from pathlib import Path\n\n root = Path(__file__).parent.parent\n\n def run(args, shell=False):\n print(\"---\", \" \".join(args))\n return subprocess.check_call(args, cwd=curdir, shell=shell)\n\n def get_version():\n import json\n\n p = Path(curdir / \"package.json\")\n contents = json.loads(p.read_text())\n return contents[\"version\"]\n\n print(\"--- installing RobotFramework Language Server\")\n curdir = root / \"robotframework-ls\"\n run(\"python -m dev vendor_robocorp_ls_core\".split())\n run(\"vsce package\".split(), shell=sys.platform == \"win32\")\n run(\n f\"code --install-extension robotframework-lsp-{get_version()}.vsix\".split(),\n shell=sys.platform == \"win32\",\n )\n run(\"python -m dev remove_vendor_robocorp_ls_core\".split())\n\n print(\"\\n--- installing Robocorp Code\")\n curdir = root / \"robocorp-code\"\n run(\"python -m dev vendor_robocorp_ls_core\".split())\n run(\"vsce package\".split(), shell=sys.platform == \"win32\")\n run(\n f\"code --install-extension robocorp-code-{get_version()}.vsix\".split(),\n shell=sys.platform == \"win32\",\n )\n run(\"python -m dev remove_vendor_robocorp_ls_core\".split())", "def install():\n\n if (Path.cwd() / \"src\" / \"environment.yml\").is_file():\n call([\"conda\", \"install\", \"--file\", \"src/environment.yml\", \"--yes\"])\n\n pip_command = [\"install\", \"-U\", \"-r\", \"src/requirements.txt\"]\n\n if os.name == \"posix\":\n python_call(\"pip\", pip_command)\n else:\n command = [sys.executable, \"-m\", \"pip\"] + pip_command\n subprocess.Popen(command, creationflags=subprocess.CREATE_NEW_CONSOLE)", "def install_for_spec(self):\n self.create_package_json()\n os.system('npm install json-refs')\n os.system('npm install json2yaml')\n os.system('npm install yamljs')\n os.system('npm install swagger-split') # package only required while splitting hence being installed here\n 
self.delete_package_json()", "def sub_install_packages():\n sudo('apt-get update') # Update repository links\n sudo('apt-get -y upgrade') # Upgrade the system\n package_str = ' '.join(INSTALL_PACKAGES)\n sudo('apt-get -y install ' + package_str) # Install the packages", "def _install():\n download_file='http://www.ipol.im/pub/art/2015/136/inpaint_8.tgz'\n tools.download_and_extract(download_file) \n this_file_path=os.path.dirname(__file__)\n subprocess.call(' mkdir build; cd build; cmake ..; make', shell=True,cwd=exec_folder)" ]
[ "0.6407233", "0.63381755", "0.6324544", "0.6251576", "0.6231938", "0.6057195", "0.5836138", "0.5828892", "0.5789863", "0.5718106", "0.56944567", "0.56539077", "0.5616942", "0.5609459", "0.5526702", "0.5480729", "0.5467356", "0.54643357", "0.5417278", "0.5364148", "0.53312916", "0.53000194", "0.5296201", "0.5286228", "0.5278628", "0.527807", "0.52618295", "0.52430105", "0.5237947", "0.5236408" ]
0.7617056
0
Load a module from the filesystem.
def load_module(name, path): loader = importlib.machinery.SourceFileLoader(name, path) module = types.ModuleType(loader.name) loader.exec_module(module) return module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_module(module_name: str, module_path: str) -> object:\n spec = module_util.spec_from_file_location(module_name, module_path)\n module = module_util.module_from_spec(spec)\n spec.loader.exec_module(module) # type: ignore\n return module", "def load_module(path):\n spec = spec_from_file_location(\"module.name\", path)\n module = module_from_spec(spec)\n try:\n spec.loader.exec_module(module)\n except Exception as err:\n # ToDo: Append functions found from spec.loader.get_code(\"module.name\")\n # To some hidden attribute of the module object to be returned.\n warn(f'Exception when loading module {path}: \\n{err}')\n return module", "def load_module(path: os.PathLike):\n path = Path(path)\n pwd = Path(os.getcwd())\n os.chdir(path.parent)\n try:\n mod = import_module(path.stem)\n except ModuleNotFoundError as err:\n raise err\n finally:\n os.chdir(pwd)\n return mod", "def _load_module(modulepath):\n\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod", "def LoadModule(filename):\n (name, ext) = os.path.splitext(filename)\n\n fh = open(filename, \"r\")\n try:\n return imp.load_module(name, fh, filename, (ext, \"r\", imp.PY_SOURCE))\n finally:\n fh.close()", "def loadModule(path, doReload=False):\n relPath = Files.relName(path)\n context = Context.getContext()\n parentMod = context.package\n if parentMod is not None:\n modName = \"%s.%s\" % (parentMod.__name__,\n relPath.replace(\"/\", \".\")[:-3])\n else:\n modName = \"%s\" % (relPath.replace(\"/\", \".\")[:-3])\n if not doReload and path in _loadedModules:\n return _loadedModules[path]\n\n ns = {}\n here = os.getcwd()\n subDir = os.path.dirname(path)\n if subDir:\n os.chdir(subDir)\n\n global _loading, _curScriptPackage\n try:\n try:\n try:\n _loading = os.path.basename(path)\n _curScriptPackage = parentMod\n mod = imp.load_source(modName, os.path.basename(path))\n except Unsupported as exc:\n return\n except Exception as exc:\n print(formatImportFailure(modName, exc))\n print(\"Hmm\", exc)\n raise\n except Unsupported:\n return\n finally:\n os.chdir(here)\n return mod", "def import_module_from_module_path(path):\n return SourceFileLoader('', path).load_module()", "def loadmodule(self, name):\n\n if name in self._modules:\n return self._modules[name]()\n\n raise Error(\"No such module: {0}\".format(name))", "def load(identifier, path):\r\n\tloader = importlib.machinery.SourceFileLoader(identifier, path)\r\n\thandle = loader.load_module(identifier)\r\n\treturn handle", "def load_module(module):\n try:\n return import_module(module)\n except ImportError:\n sys.stderr.write('Unable to load the module: %s.\\n' % module)\n exit(-1)", "def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n raise Exception(\"Couldn't find google drive folder!\")\n\n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] 
package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib", "def load_module(file_name):\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m", "def load_module(cls, bytes, options=None):\n\t\traise NotImplementedError(\"load_module must be implemented\")", "def load(path):\n pass", "def load_module (self, name):\n module = sys.modules.get (name)\n if module is not None:\n return module\n\n containment = self.containments.get (name)\n if containment is None:\n raise ImportError ('No such module: \\'{}\\''.format (name))\n source, filename, ispkg = containment\n\n module = imp.new_module (name)\n module.__loader__ = self\n module.__file__ = filename\n if ispkg:\n module.__path__ = [os.path.dirname (filename)]\n module.__package__ = name\n else:\n module.__package__ = name.rpartition ('.') [0]\n\n module.__initializing__ = True\n sys.modules [name] = module\n try:\n Exec (compile (source, module.__file__, 'exec'), module.__dict__)\n return module\n except Exception:\n sys.modules.pop (name, None)\n raise\n finally:\n module.__initializing__ = False", "def load(self):\n\n\t\tif self.module is None:\n\t\t\t# Cause the interpreter to load the module in local namespace ...\n\t\t\texec \"import \" + self.name\n\n\t\t\t# Store the module object ...\n\t\t\tobject.__setattr__(self, 'module', eval(self.name))", "def import_module(self, path):\n\n try:\n module = import_module(path)\n except ImportError:\n self.error('Failed to Load module: {0}'.format(path))\n return False\n else:\n self.out('Loaded module: {0}'.format(path))\n return module", "def loadmodule( conf ):\n try:\n #conf = routes[ route ]\n # try to load the module\n module_name = conf['module']['name']\n module_path = conf['module']['path']\n \n mod_name, file_ext = os.path.splitext( os.path.split( module_path )[ -1] )\n if file_ext.lower() == '.py':\n py_mod = imp.load_source( mod_name, module_path )\n elif file_ext.lower() == '.pyc':\n py_mod = imp.load_compiled( mod_name, module_path )\n else:\n raise Exception(\"Cannot handle module for route: \" + route )\n except Exception, e:\n import traceback\n traceback.print_exc( file=sys.stdout )\n # TODO log error + msg\n return py_mod", "def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n logger_lib = None\n print(\"Logger library not found in shared repo.\", flush = True)\n #raise Exception(\"Couldn't find google drive folder!\")\n else: \n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done 
loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib", "def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n logger_lib = None\n print(\"Logger library not found in shared repo.\", flush = True)\n #raise Exception(\"Couldn't find google drive folder!\")\n else: \n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib", "def load_module(module_file: Path):\n try:\n name = module_file.stem\n spec = importlib.util.spec_from_file_location(name, module_file)\n module = importlib.util.module_from_spec(spec)\n sys.modules[name] = module\n spec.loader.exec_module(module)\n return module\n except Exception as err:\n _LOGGER.exception(err)\n raise", "def load_module(name_or_path):\n if os.path.exists(name_or_path):\n path = name_or_path.rstrip(\"/\")\n modname = os.path.splitext(os.path.basename(path))[0]\n if os.path.isdir(path):\n path = os.path.join(path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(modname, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n mod = importlib.import_module(name_or_path)\n try:\n path = mod.__path__[0]\n except AttributeError:\n path = mod.__file__\n return mod, path", "def load_module(file_name):\n mod_name = file_module_name(file_name)\n spec = imputil.spec_from_file_location(mod_name, file_name)\n if spec is None:\n raise ImportError(f'cannot import from {file_name!r}')\n mod = imputil.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod", "def load_from_module_path(self, filename: str) -> None:\n # pylint: disable=import-outside-toplevel\n import importlib.util\n spec = importlib.util.spec_from_file_location(\"base_config\", filename)\n module = importlib.util.module_from_spec(spec)\n if spec.loader is not None:\n spec.loader.exec_module(module)\n else:\n raise Exception(\"Could not get module loader from spec\")\n self.load_from_module(module)", "def load_module(module_name, root_dir):\n module_filepath = os.path.join(root_dir, module_name)\n python_version = sys.version_info[:2]\n\n module = None\n if python_version <= (2, 7):\n import imp\n module = imp.load_source(module_name, module_filepath)\n else:\n import importlib\n loader = importlib.machinery.SourceFileLoader(module_name, module_filepath)\n if python_version <= (3, 4):\n module = loader.load_module()\n else:\n spec = importlib.util.spec_from_loader(loader.name, loader)\n module = importlib.util.module_from_spec(spec)\n loader.exec_module(module)\n\n return module", "def load_module(module_name):\n try:\n module = resolve_name(module_name)\n except 
ImportError:\n raise error.NotFound(msg=module_name)\n\n return module", "def load_mod_from_file(self, fpath):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tfpath = os.path.abspath(fpath)\n\t\tfile_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]\n\t\tif file_ext.lower() != '.py':\n\t\t\treturn\n\t\twith open(fpath) as f:\n\t\t\tcontent = f.read().splitlines()\n\t\tok = False\n\t\tfor line in content:\n\t\t\tif line.strip() == 'from shutit_module import ShutItModule':\n\t\t\t\tok = True\n\t\t\t\tbreak\n\t\tif not ok:\n\t\t\tself.log('Rejected file: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules.\n\t\t# TODO: this is quadratic complexity\n\t\texistingmodules = [\n\t\t\tm for m in self.shutit_modules\n\t\t\tif getattr(m, '__module_file', None) == fpath\n\t\t]\n\t\tif existingmodules:\n\t\t\tself.log('Module already seen: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Looks like it's ok to load this file\n\t\tself.log('Loading source for: ' + fpath,level=logging.DEBUG)\n\n\t\t# Add this directory to the python path iff not already there.\n\t\tdirectory = os.path.dirname(fpath)\n\t\tif directory not in sys.path:\n\t\t\tsys.path.append(os.path.dirname(fpath))\n\t\t# TODO: use bytearray to encode?\n\t\tmod_name = base64.b32encode(fpath.encode()).decode().replace('=', '')\n\t\tpymod = imp.load_source(mod_name, fpath)\n\n\t\t# Got the python module, now time to pull the shutit module(s) out of it.\n\t\ttargets = [\n\t\t\t('module', self.shutit_modules), ('conn_module', self.conn_modules)\n\t\t]\n\t\tself.build['source'] = {}\n\t\tfor attr, target in targets:\n\t\t\tmodulefunc = getattr(pymod, attr, None)\n\t\t\t# Old style or not a shutit module, nothing else to do\n\t\t\tif not callable(modulefunc):\n\t\t\t\treturn\n\t\t\tmodules = modulefunc()\n\t\t\tif not isinstance(modules, list):\n\t\t\t\tmodules = [modules]\n\t\t\tfor module in modules:\n\t\t\t\tsetattr(module, '__module_file', fpath)\n\t\t\t\tShutItModule.register(module.__class__)\n\t\t\t\ttarget.add(module)\n\t\t\t\tself.build['source'][fpath] = open(fpath).read()", "def load_from_path(cls, module_path: str) -> \"FilebaseApiModuleInfo\":\n module = try_load_module_dynamic_with_timestamp(module_path)\n if module is None:\n return None\n\n if not hasattr(module, \"__filebase_api_module_info\"):\n # thread blocking command\n module.__filebase_api_module_info = cls(module)\n\n return module.__filebase_api_module_info", "def import_module(module_name, path):\n file, path, description = imp.find_module(module_name, [path])\n # Close the .so file after load.\n with file:\n return imp.load_module(module_name, file, path, description)", "def load_datamodule(cls, path: Union[str, Path]):\n if isinstance(path, str):\n path = Path(path)\n if not path.exists():\n raise FileNotFoundError(f\"{path} does not exist.\")\n datamodule = joblib.load(path)\n return datamodule" ]
[ "0.7140824", "0.7115812", "0.7082071", "0.7029506", "0.7029197", "0.6992073", "0.6935706", "0.6864377", "0.6848782", "0.6841059", "0.68201846", "0.6806607", "0.67745715", "0.6732326", "0.6725582", "0.6686308", "0.6678542", "0.6671717", "0.6595986", "0.6595986", "0.6583503", "0.6569107", "0.6548343", "0.65345293", "0.64482105", "0.64325804", "0.64281946", "0.64126027", "0.64002603", "0.63949096" ]
0.74431866
0
Load & cache the program module.
def _module(self): if self._module_cache is None: self._module_cache = load_module(self._name, self._path) return self._module_cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\n\n\t\tif self.module is None:\n\t\t\t# Cause the interpreter to load the module in local namespace ...\n\t\t\texec \"import \" + self.name\n\n\t\t\t# Store the module object ...\n\t\t\tobject.__setattr__(self, 'module', eval(self.name))", "def load(self):\n \"\"\"Load a program into memory.\"\"\"\n\n if len(sys.argv) != 2:\n print(\"format: ls8.py [filename]\")\n sys.exit(1)\n\n program = sys.argv[1]\n address = 0\n\n # For now, we've just hardcoded a program:\n\n # program = [\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n\n #open file\n with open(program) as file:\n #read the lines\n for line in file:\n #parse out comments\n line = line.strip().split(\"#\")[0]\n #cast numbers from strings to ints\n val = line.strip()\n #ignore blank lines\n if line == \"\":\n continue\n\n value = int(val, 2)\n self.ram[address] = value\n address +=1", "def load(self):\n\n address = 0\n\n program = sys.argv[1]\n\n with open(program) as p:\n for instruction in p:\n if instruction[0] == '#':\n continue\n\n instruction = instruction.strip()\n temp = instruction.split()\n\n if len(temp) == 0:\n continue\n\n self.ram[address] = int(temp[0], 2)\n address += 1\n \n # print(\"======= PROGRAM =========\")\n # for i in self.ram[:35]:\n # print(i)", "def _load_program(self, kernel):\n return cl.Program(\n self.context, open('kernels/{0}'.format(kernel)).read()\n ).build()", "def load_program(self, program):\n for idx, val in enumerate(program):\n self.memory[idx] = val", "def load(self):\n\n address = 0\n\n if len(sys.argv) < 2:\n print('ERROR - Provide program address to load')\n return\n\n program_filename = sys.argv[1]\n\n program_text = open(program_filename).read()\n program_lines = program_text.split('\\n')\n program = []\n\n for line in program_lines:\n blocks = line.split()\n if len(blocks) > 0:\n if blocks[0] != '#':\n inst = blocks[0]\n program.append(int(inst, 2))\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1", "def reload_programs(self):\r\n print(\"Reloading programs:\")\r\n for name, program in self._programs.items():\r\n if getattr(program, 'program', None):\r\n print(\" - {}\".format(program.meta.label))\r\n program.program = resources.programs.load(program.meta)", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.update_module(module)", "def prog():\n global program\n return program", "def mod_load(self):\n raise NotImplementedError(\"Mod load isn't overriden\")", "def load(self, program):\n\n address = 0\n\n try:\n with open(program, 'r') as f:\n for line in f:\n # strip out comment, if any, and whitespace\n instruction = line.split('#')[0].strip()\n if instruction == '':\n continue\n self.ram[address] = int(instruction, base=2)\n address += 1\n\n except FileNotFoundError:\n print(f'File not found. 
path: {program}')\n sys.exit(2)", "def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')", "def exec_module(self, module):\n pass", "def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)", "def load(self, program):\n\n #print(f\"Program in memory {program}\")\n address = 0\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1", "def load():\n out = load_as_root_module()\n parser = create_parser(os.path.basename(sys.argv[0]))\n opts = parser.parse_args(sys.argv[1:])\n load_env(opts, out.opt)\n\n return out", "def load(self):\n\n self.commands = {\n # Usual text commands (e.g. \"/echo 123\")\n 'user': {},\n 'owner': {\n 'load': self.load,\n 'modprobe': self.modprobe,\n 'rmmod': self.rmmod\n },\n # Modules for bot's reaction to a different message types\n 'text': {},\n 'photo': {},\n 'audio': {},\n 'video': {},\n 'sticker': {},\n 'voice': {}\n }\n\n for file in os.listdir('modules'):\n if file.endswith('.py'):\n command_type, command = file.split('_', 1)\n self.modprobe(self, command[:-3])", "def load(self, program):\n address = 0\n\n with open(program) as lines:\n for line in lines:\n line = line.split('#')\n # print(line)\n try:\n value = int(line[0], 2)\n except ValueError:\n continue\n self.ram[address] = value\n address += 1", "def load(self):\n\n address = 0\n program = []\n\n if len(sys.argv) < 2:\n print(\"Please pass in a second file.\")\n sys.exit()\n\n file_name = sys.argv[1]\n try:\n with open(file_name) as file:\n for line in file:\n split_line = line.split('#')[0]\n command = split_line.strip()\n\n if command == '':\n continue\n\n program.append(int(command, 2))\n\n except FileNotFoundError:\n print(f'{sys.argv[0]}: {sys.argv[1]} file was not found')\n sys.exit()\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1", "def load(self, program):\n\n address = 0\n\n # For now, we've just hardcoded a program:\n\n # program = [\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n\n # for instruction in program:\n # self.ram[address] = instruction\n # address += 1\n\n try:\n with open(program) as program:\n for line in program:\n line_split = line.split('#')\n value = line_split[0].strip()\n \n if value == \"\":\n continue\n formatted_value = int(value, 2)\n \n self.ram[address] = formatted_value\n address += 1\n except FileNotFoundError:\n print(f\"{program} not found\")\n sys.exit(1)", "def exe():\n e = entry()\n if e:\n return load(e)", "def load(self):\n\n # Extract filename from command line\n try:\n filename = sys.argv[1]\n print(filename)\n except IndexError:\n print(\"Usage: python3 ls8.py <program_name>\")\n sys.exit(1)\n\n # Validate filetype and confirm file exists\n if filename[-4:] != '.ls8':\n print(\"You must supply a '.ls8' binary.\")\n sys.exit(2)\n try:\n f = open(filename)\n except FileNotFoundError:\n print(f\"File not found: {filename}\")\n sys.exit(2)\n\n # Read the contents of the file\n address = 0\n for line in f:\n try:\n opcode = line.split()[0]\n except IndexError:\n continue\n if opcode == '#':\n continue\n self.ram[address] = int(opcode, 2)\n address += 1\n f.close()\n\n # Double-check the file wasn't empty\n if address == 0:\n print(\"Error: Empty source file\")\n sys.exit(2)", "def get_programs(self):\n self.logger.info(\"Preparing 
programs...\")\n current_dir = Path()\n dir_path = current_dir / \"data\" / \"break_data\" / \"programs\"\n\n file_name = \"programs_\" + self.dataset_split + \".pkl\"\n if not (dir_path / file_name).is_file():\n self.create_matching_programs(dir_path, file_name)\n data = load_obj(dir_path, file_name)\n\n self.logger.info(\"Programs ready.\")\n return data", "def initialize_cache(inputs, outputs, programs, max_decode_len, config):\n target_shape = (programs.shape[0], max_decode_len)\n initial_variables = models.ProgramTransformer(config).init(\n jax.random.PRNGKey(0),\n jnp.ones(inputs.shape, config.dtype),\n jnp.ones(outputs.shape, config.dtype),\n jnp.ones(target_shape, config.dtype))\n return initial_variables['cache']", "def load(self):\n with self.__lock:\n self._d.update(self.backend.load())\n log.debug(\"load: {}\".format(self.backend.filename))", "def __put_module_in_sys_cache(module_name, module_obj):\n #try:\n #if hasattr(sys, 'stypy_module_cache'):\n sys.stypy_module_cache[module_name] = module_obj\n # else:\n # __preload_sys_module_cache()\n # sys.stypy_module_cache[module_name] = module_obj\n # except:\n # pass\n # finally:\n # return None", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module_manager.install_module(self.get_meta())", "def __preload_sys_module_cache():\n # Preload sys module\n sys.stypy_module_cache = {\n 'sys': __load_python_module_dynamically('sys', False)} # By default, add original sys module clone\n\n # Preload builtins module\n sys.stypy_module_cache['__builtin__'] = __load_python_module_dynamically('__builtin__', False)\n sys.stypy_module_cache['ctypes'] = __load_python_module_dynamically('ctypes', False)", "def reload(self):\n\n\t\tif self.module is None:\n\t\t\t# Do nothing, as the module will be imported on attribute access.\n\t\t\tpass\n\t\telse:\n\t\t\texec \"reload(\" + self.name + \")\"\n\t\t\t# The module object is still identical, only its code has been\n\t\t\t# replaced. Thus no eval(self.name) is necessary.", "def load(self):\n\n address = 0\n\n # For now, we've just hardcoded a program:\n\n if len(sys.argv) != 2:\n print(\"Usage: cpu.py filename\")\n sys.exit(1)\n \n filename = sys.argv[1]\n\n try:\n with open(filename) as f:\n for line in f:\n \n instruction = line.split(\"#\")[0].strip()\n \n if instruction == \"\":\n continue\n\n val = int(instruction, 2) \n\n self.ram_write(address, val)\n\n address += 1\n\n except FileNotFoundError:\n print(f\"File {filename} not found\")\n sys.exit(2)" ]
[ "0.64689153", "0.63654965", "0.62676257", "0.62425035", "0.62108284", "0.62063473", "0.61392486", "0.6114491", "0.602539", "0.60204077", "0.60017616", "0.59714663", "0.588076", "0.5877579", "0.58205396", "0.5767681", "0.5746382", "0.57439184", "0.57372934", "0.5737229", "0.5736307", "0.57307464", "0.571669", "0.5713525", "0.56753576", "0.5653245", "0.5648725", "0.5646026", "0.56441456", "0.5632725" ]
0.6589164
0
Set the packet length.
def _set_packet_len(self, packet_len): self._packet_len = packet_len
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def setLength(self, new_length):\n\n self.length = new_length", "def length(self, length):\n\n self._length = length", "def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...", "async def gpt2_set_length(self, ctx, *, arg=None):\n print('Command gpt2_set_length triggered')\n if arg:\n try:\n i = int(arg)\n assert (i > 0) and (i < 1024)\n except ValueError or AssertionError:\n ctx.send(\"ERROR: Argument must be a positive integer number\")\n self.update_config(length=arg)\n else:\n await ctx.send(\"ERROR: Argument required\")", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)", "def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length", "def change_tail_length(self, value):\n self.layer.tail_length = value", "def set_length(self, ak_spec: Union[str, BKT], val: float) -> None:\n ...", "def token_length(self, token_length):\n\n self._token_length = token_length", "def _set_maskLength(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"maskLength\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"maskLength must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"maskLength\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__maskLength = t\n if hasattr(self, '_set'):\n self._set()", "def length(self, length: Union[int, float]):\n self._length = length\n self._update_length()\n self.events.length()\n\n self.refresh()", "def change_length(self, value):\n self.layer.length = value\n self.lengthSpinBox.clearFocus()\n self.setFocus()", "def setLength(self, length):\n self.vector.norm = length", "def read_packetlen(self):\n packetlen = int(struct.unpack('!I', b\"\".join(self.__input))[0])\n self.__input = []\n self.set_terminator(packetlen)\n self.found_terminator = self.read_milter_data", "def setDataSize(self, head,payload,eop):\n self.dataSize = len(head)+len(payload)+len(eop)", "def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length", "def update_total_length(self):\n self.total_length = len(bytes(self))", "def setSplitLength(self, value):\n return self._set(splitLength=value)", "def length_changed(self, value):\n self.message.dlc = value\n self.validate_data_input(value)", "def set_part_length(self, seconds):\n self._part_length = seconds", "def sent_len(self) -> int:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def network_byte_length(self) -> int:", "def __set_size(self, size):\n if not isinstance(size, int):\n raise TypeError('The size should be an integer')\n if size < 64 or size > 1500: # It should be in the Standard Ethernet Payload range\n raise ValueError('The size should be in the range of Standard Ethernet frames [64,1500] bytes')\n self.__size = size", "def _setVals(self, cmd_length=0):\n self.cmd_length = cmd_length", "def length(self, value):\n raise TypeError(\"Cannot delete {class-name} length 
property.\")", "def _on_len_change(self, event=None):\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)", "def setsize(self, size):\n self.__size = size", "def _set_length(self, length):\n self.bottom.pos.y = self.top.pos.y + length", "def _update_length(self, field, tag_id, value):\n # pylint: disable=unused-argument\n if tag_id not in {8, 9, 10}:\n self._message_length += len(field) + 1\n if self._message_length >= self._max_length:\n raise FIXLengthTooLongError(\n f'message too long: {self._message_length}')" ]
[ "0.7967779", "0.7632073", "0.72674304", "0.7185319", "0.7084449", "0.70364594", "0.6882304", "0.66527474", "0.6593729", "0.6497038", "0.64803594", "0.6373639", "0.63581616", "0.63335747", "0.6284977", "0.6236442", "0.6221398", "0.6134403", "0.6132038", "0.6003637", "0.597266", "0.59429586", "0.59172124", "0.59083015", "0.58775926", "0.58236974", "0.58028436", "0.57977897", "0.5788708", "0.5780997" ]
0.8634613
0
Creates a XCP Ethernet frame
def create_message(self, packet): self._header.packet_len = len(bytes(packet)) frame_bytes = super(EthernetTransport, self).create_message(packet) # Update control counter for next frame self._header.update_control() return bytes(frame_bytes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_ieee_packet(self, data):\n\t\tpacket = Dot15d4FCS() / Dot15d4Data() / Raw(load=data)\n\n\t\tpacket.fcf_srcaddrmode = 2\n\t\tpacket.fcf_destaddrmode = 2\n\n\t\tpacket.fcf_panidcompress = True\n\t\tpacket.fcf_ackreq = True\n\t\tpacket.seqnum = self.seqnum\n\n\t\tpacket.dest_panid = self.link_config.dest_panid\n\n\t\tpacket.dest_addr = self.link_config.destination.get_short_address()\n\t\tpacket.src_addr = self.link_config.source.get_short_address()\n\n\t\treturn packet.build()", "def _create_packet(self, request):\n\n data_len = struct.pack('<Q', len(request))\n packet = b'ZBXD\\x01' + data_len + request\n\n def ord23(x):\n if not isinstance(x, int):\n return ord(x)\n else:\n return x\n\n logger.debug('Packet [str]: %s', packet)\n logger.debug('Packet [hex]: %s', ':'.join(hex(ord23(x))[2:] for x in packet))\n return packet", "def create_tcp_pkt(smac: bytes, dmac: bytes, sip: bytes, dip: bytes, ip_id: int, sp: int, dp: int,\n flags: int =dpkt.tcp.TH_SYN, payload: bytes = b\"\") -> dpkt.ethernet.Ethernet:\n tcp_pkt = dpkt.tcp.TCP(sport=sp, dport=dp, flags=flags)\n tcp_pkt.data = payload\n ip_pkt = dpkt.ip.IP(id=ip_id, p=6, src=sip, dst=dip)\n ip_pkt.data = tcp_pkt\n ip_pkt.len += len(ip_pkt.data)\n eth_pkt = dpkt.ethernet.Ethernet(src=smac, dst=dmac)\n eth_pkt.data = ip_pkt\n return eth_pkt", "def create(self):\n\t\t\n\t\tflagbyte = 0\n\t\tif self.synf: flagbyte += 1\n\t\tif self.ackf: flagbyte += 2\n\t\t\n\t\tself.header = struct.pack(\">IBIII\", self.connid, flagbyte, self.seqn, self.ackn, self.recv)\n\t\t\n\t\tself.data = self.header+self.payload", "def define_ethernet_header(self, src=None, dst=None, typeeth=None, tag=None):\n ether_header = Ether()\n if (dst == None):\n ether_header.dst = BCAST_MAC\n else:\n ether_header.dst = dst\n ether_header.src = src\n return ether_header", "def ethernet_frame(packet):\n dest_mac, src_mac, proto = struct.unpack('! 
6s 6s H', packet[:14])\n return get_mac_addr(dest_mac), get_mac_addr(src_mac), socket.htons(proto), packet[14:]", "def make_packet(message, host):\n\tRESOURCE = \"/\"\t\t\t\t# dummy resource\n\t\n\t# First line is the request\n\trequest = HTTPConstants.GET_REQUEST + \" \" + RESOURCE + \" \" + HTTPConstants.VERSION + HTTPConstants.CRLF\n\t\n\t# Next are the headers\n\theaders = \"Host: {0}\".format(host) + HTTPConstants.CRLF\n\t\n\t# Construct the head\n\thead = request + headers\n\t\n\t# Construct the body\n\tbody = message + HTTPConstants.CRLF\n\t\n\t# Assembly into a packet, where the head and body (message) are separated by a blank line (CRLF), and the EOM is\n\t# denoted by a blank line\n\treturn head + HTTPConstants.CRLF + body + HTTPConstants.CRLF", "def main():\n connection = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.htons(0x03))\n\n # Start the main loop.\n while True:\n # 65536 is the biggest buffer size that can be used.\n raw_data, addr = connection.recvfrom(65536)\n dest_mac, src_mac, eth_proto, data = ethernet_frame(raw_data)\n print('\\nEthernet Frame:')\n print('Destination: {}, Source: {}, Protocol: {}'.format(dest_mac, src_mac, eth_proto))", "def create_coa_packet(self, **args):\n return host.Host.create_coa_packet(self, secret=self.secret, **args)", "def create_frame(data, opcode):\r\n if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):\r\n data = data.encode(\"utf-8\")\r\n # mask must be set if send data from client\r\n return ABNF(1, 0, 0, 0, opcode, 1, data)", "def createPacket(id):\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n header = getHeaderData(0, id)\n\n data = 192 * 'Q'\n\n checksum = getChecksum(header + data)\n\n header = getHeaderData(socket.htons(checksum), id)\n\n return header + data", "def init_from_body(knxip_body: KNXIPBody):\n knxipframe = KNXIPFrame(knxip_body.xknx)\n knxipframe.header.service_type_ident = knxip_body.__class__.service_type\n knxipframe.body = knxip_body\n knxipframe.normalize()\n return knxipframe", "def fusion_api_create_ethernet_network(self, body, api=None, headers=None):\n return self.ethernet_network.create(body, api, headers)", "def build_frame(self, message):\r\n header = BytesIO()\r\n if 0x3 <= self.opcode <= 0x7 or 0xB <= self.opcode:\r\n raise WebSocketProtocolError('Opcode cannot be a reserved opcode')\r\n ## +-+-+-+-+-------+\r\n ## |F|R|R|R| opcode|\r\n ## |I|S|S|S| (4) |\r\n ## |N|V|V|V| |\r\n ## | |1|2|3| |\r\n ## +-+-+-+-+-------+\r\n header.write(i2b(((self.fin << 7)\r\n | (self.rsv1 << 6)\r\n | (self.rsv2 << 5)\r\n | (self.rsv3 << 4)\r\n | self.opcode)))\r\n ## +-+-------------+-------------------------------+\r\n ## |M| Payload len | Extended payload length |\r\n ## |A| (7) | (16/63) |\r\n ## |S| | (if payload len==126/127) |\r\n ## |K| | |\r\n ## +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +\r\n ## | Extended payload length continued, if payload len == 127 |\r\n ## + - - - - - - - - - - - - - - - +-------------------------------+\r\n if self.masking_key:\r\n mask_bit = 1 << 7\r\n else:\r\n mask_bit = 0\r\n length = self.payload_length \r\n if length < 126:\r\n header.write(i2b(mask_bit | length))\r\n elif length < (1 << 16):\r\n header.write(i2b(mask_bit | 126))\r\n header.write(pack('!H', length))\r\n elif length < (1 << 63):\r\n header.write(i2b(mask_bit | 127))\r\n header.write(pack('!Q', length))\r\n else:\r\n raise WebSocketProtocolError('Frame too large')\r\n ## + - - - - - - - - - - - - - - - +-------------------------------+\r\n ## | 
|Masking-key, if MASK set to 1 |\r\n ## +-------------------------------+-------------------------------+\r\n ## | Masking-key (continued) | Payload Data |\r\n ## +-------------------------------- - - - - - - - - - - - - - - - +\r\n ## : Payload Data continued ... :\r\n ## + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +\r\n ## | Payload Data continued ... |\r\n ## +---------------------------------------------------------------+\r\n if not self.masking_key:\r\n header.write(message)\r\n else:\r\n header.write(self.masking_key)\r\n header.write(self.mask(message))\r\n return header.getvalue()", "def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)", "def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, \"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif 
packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition", "def _send_knxipframe(self, knxipframe: KNXIPFrame) -> None:\n self.transport.send(knxipframe)", "def make_packet(self, type, data): \n return (\"{}\\x00{}\\x00{}\".format(type, data, self.ID)).encode()", "def _create_pgframe(nodes=None, edges=None):\n pass", "def new_packet():\n return rtmp_packet.RtmpPacket()", "def makeMessage( name, *structure ):\n return X12Message( name, *structure )", "def packetize(cls, source, raw_data):\n pkt = cls(source, raw_data)\n\n if pkt.type not in DGTL.descriptors.keys():\n raise Warning('Unsupported packet type! (%s)' % pkt.type)\n\n pkt.set_decoder(DGTL.descriptors[pkt.type][2])\n\n return pkt", "def create_frame_blob(self):\n # self.image_blob = cv2.dnn.blobFromImage(\n # cv2.resize(self.frame, (300, 300)), 1.0, (300, 300),\n # (104.0, 177.0, 123.0), swapRB=False, crop=False)\n self.image_blob = cv2.dnn.blobFromImage(cv2.resize(self.frame, (300, 300)),\n 0.007843, (300, 300), 127.5)", "def setUp(self):\n self.message = PhyPort()\n self.message.port_no = 1\n self.message.hw_addr = HWAddress('9a:da:11:8a:f4:0c')\n self.message.name = 's1-eth1'\n self.message.state = PortState.OFPPS_STP_LISTEN\n self.message.curr = (PortFeatures.OFPPF_10GB_FD |\n PortFeatures.OFPPF_COPPER)", "def __init__(\n self, env, source, destination, size_bytes, message_type,\n data=None):\n if not isinstance(size_bytes, int):\n raise FT4FTTSimException(\"Message size must be integer\")\n if not (ethernet.MIN_FRAME_SIZE_BYTES <= size_bytes <=\n ethernet.MAX_FRAME_SIZE_BYTES):\n raise FT4FTTSimException(\n \"Message size must be between {} and {}, but is {}\".format(\n ethernet.MIN_FRAME_SIZE_BYTES,\n ethernet.MAX_FRAME_SIZE_BYTES,\n size_bytes))\n self.env = env\n self._identifier = Message.next_identifier\n Message.next_identifier += 1\n # source of the message. Models the source MAC address.\n self.source = source\n # destination of the message. It models the destination MAC address. 
It\n # is a list to allow multicast addressing.\n self.destination = destination\n self.size_bytes = size_bytes\n self.message_type = message_type\n self.data = data\n self.name = \"({:03d}, {}, {}, {:d}, {}, {})\".format(\n self.identifier, self.source, self.destination, self.size_bytes,\n self.message_type, self.data)\n log.debug(\"{} created\".format(self))", "async def test_create_knxipframe_err(self):\n xknx = XKNX()\n udp_client = UDPClient(xknx, (\"192.168.1.1\", 0), (\"192.168.1.2\", 1234))\n request_response = RequestResponse(xknx, udp_client, DisconnectResponse)\n request_response.timeout_in_seconds = 0\n\n with self.assertRaises(NotImplementedError):\n await request_response.start()", "def genFrame(self):\n # generate frame-specific data\n frameData = self._genFrameData()\n\n # call parent function to create the complete frame (as bytearray)\n frame = self._genDigiMeshFrame(frameData)\n\n # OBS: never escape-sequence local msg\n return frame", "def setupPacket(self):\n return None", "def build_packets(self):\n from scapy.all import IP, TCP\n return IP()/TCP()", "def __init__(self, bytes = None):\n hrd = pcs.Field(\"hrd\", 16, default = 1)\n pro = pcs.Field(\"pro\", 16, default = 0x800)\n hln = pcs.Field(\"hln\", 8, default = 6)\n pln = pcs.Field(\"pln\", 8, default = 4)\n op = pcs.Field(\"op\", 16)\n sha = pcs.StringField(\"sha\", 48)\n spa = pcs.Field(\"spa\", 32)\n tha = pcs.StringField(\"tha\", 48)\n tpa = pcs.Field(\"tpa\", 32)\n \n pcs.Packet.__init__(self, [hrd, pro, hln, pln, op,\n sha, spa, tha, tpa], bytes = bytes)\n self.description = \"ARP\"\n self.data = None" ]
[ "0.58488214", "0.5700761", "0.56956196", "0.56287456", "0.56241447", "0.56202054", "0.54626226", "0.5419994", "0.5410478", "0.53642374", "0.53623325", "0.531552", "0.52997607", "0.526246", "0.5247345", "0.5247104", "0.5242202", "0.5209782", "0.5174515", "0.5075918", "0.50341254", "0.5015512", "0.49719715", "0.4962528", "0.49543566", "0.49539757", "0.49517596", "0.49474245", "0.49429342", "0.4928377" ]
0.6318154
0
Computes the pickup_features feature group. To restrict features to a time range, pass in ts_column, start_date, and/or end_date as kwargs.
def pickup_features_fn(df, ts_column, start_date, end_date): df = filter_df_by_ts( df, ts_column, start_date, end_date ) pickupzip_features = ( df.groupBy( "pickup_zip", window("tpep_pickup_datetime", "1 hour", "15 minutes") ) # 1 hour window, sliding every 15 minutes .agg( mean("fare_amount").alias("mean_fare_window_1h_pickup_zip"), count("*").alias("count_trips_window_1h_pickup_zip"), ) .select( col("pickup_zip").alias("zip"), unix_timestamp(col("window.end")).alias("ts").cast(IntegerType()), partition_id(to_timestamp(col("window.end"))).alias("yyyy_mm"), col("mean_fare_window_1h_pickup_zip").cast(FloatType()), col("count_trips_window_1h_pickup_zip").cast(IntegerType()), ) ) return pickupzip_features
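The document field above defines pickup_features_fn but does not show how it is called. The lines below are a minimal usage sketch, not part of the dataset row: they assume an active SparkSession bound to the name `spark`, a trip DataFrame that has a `tpep_pickup_datetime` column, and that the helpers the snippet relies on (filter_df_by_ts, partition_id, is_weekend) are defined elsewhere in the source notebook; the table name `nyc_taxi_trips` and the date bounds are illustrative assumptions, not values taken from the dataset.

from datetime import datetime

# Hypothetical source table of taxi trips; replace with your own trip data.
raw_df = spark.table("nyc_taxi_trips")

# Compute the hourly pickup-zip aggregates, restricted to an assumed time range.
pickup_features = pickup_features_fn(
    raw_df,
    ts_column="tpep_pickup_datetime",  # timestamp column used for the time-range filter
    start_date=datetime(2016, 1, 1),   # assumed lower bound
    end_date=datetime(2016, 1, 31),    # assumed upper bound
)
pickup_features.show(5)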
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dropoff_features_fn(df, ts_column, start_date, end_date):\n df = filter_df_by_ts(\n df, ts_column, start_date, end_date\n )\n dropoffzip_features = (\n df.groupBy(\"dropoff_zip\", window(\"tpep_dropoff_datetime\", \"30 minute\"))\n .agg(count(\"*\").alias(\"count_trips_window_30m_dropoff_zip\"))\n .select(\n col(\"dropoff_zip\").alias(\"zip\"),\n unix_timestamp(col(\"window.end\")).alias(\"ts\").cast(IntegerType()),\n partition_id(to_timestamp(col(\"window.end\"))).alias(\"yyyy_mm\"),\n col(\"count_trips_window_30m_dropoff_zip\").cast(IntegerType()),\n is_weekend(col(\"window.end\")).alias(\"dropoff_is_weekend\"),\n )\n )\n return dropoffzip_features", "def get_date_features(gt_ids=[], gt_masks=None, gt_shifts=None, first_year=None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # If lat, lon columns exist, pivot to wide format\n if 'lat' in gt.columns and 'lon' in gt.columns:\n if gt_shift == None:\n measurement_variable = get_measurement_variable(gt_id)\n else:\n measurement_variable = get_measurement_variable(gt_id)+'_shift'+str(gt_shift)\n gt = pd.pivot_table(gt, values=measurement_variable, index='start_date',\n columns=['lat', 'lon']).reset_index()\n gt = pd.DataFrame(gt.to_records())\n gt.drop(\"index\", axis=1, inplace=True)\n # Rename columns to start_date and precip_(27.0,261.0), etc.\n gt.rename(columns={gt.columns[0]: 'start_date'}, inplace=True)\n gt.rename(columns=lambda x: x.replace('(',\n measurement_variable +\n '_('), inplace=True)\n # Use outer merge to include union of start_date values across all features\n # combinations across all features\n df = df_merge(df, gt, on=\"start_date\")\n print \"Elapsed: {}s\".format(time.time() - t)\n\n return df", "def _create_ts_features(df, tscol):\r\n df = copy.deepcopy(df)\r\n dt_adds = []\r\n try:\r\n df[tscol+'_hour'] = df[tscol].dt.hour.fillna(0).astype(int)\r\n df[tscol+'_minute'] = df[tscol].dt.minute.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_hour')\r\n dt_adds.append(tscol+'_minute')\r\n except:\r\n print(' Error in creating hour-second derived features. 
Continuing...')\r\n try:\r\n df[tscol+'_dayofweek'] = df[tscol].dt.dayofweek.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofweek')\r\n if tscol+'_hour' in dt_adds:\r\n DAYS = dict(zip(range(7),['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']))\r\n df[tscol+'_dayofweek'] = df[tscol+'_dayofweek'].map(DAYS)\r\n df.loc[:,tscol+'_dayofweek_hour_cross'] = df[tscol+'_dayofweek'] +\" \"+ df[tscol+'_hour'].astype(str)\r\n dt_adds.append(tscol+'_dayofweek_hour_cross')\r\n df[tscol+'_quarter'] = df[tscol].dt.quarter.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_quarter')\r\n df[tscol+'_month'] = df[tscol].dt.month.fillna(0).astype(int)\r\n MONTHS = dict(zip(range(1,13),['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',\r\n 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']))\r\n df[tscol+'_month'] = df[tscol+'_month'].map(MONTHS)\r\n dt_adds.append(tscol+'_month')\r\n #### Add some features for months ########################################\r\n festives = ['Oct','Nov','Dec']\r\n name_col = tscol+\"_is_festive\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in festives else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n summer = ['Jun','Jul','Aug']\r\n name_col = tscol+\"_is_summer\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in summer else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n winter = ['Dec','Jan','Feb']\r\n name_col = tscol+\"_is_winter\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in winter else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n cold = ['Oct','Nov','Dec','Jan','Feb','Mar']\r\n name_col = tscol+\"_is_cold\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in cold else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n warm = ['Apr','May','Jun','Jul','Aug','Sep']\r\n name_col = tscol+\"_is_warm\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in warm else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n #########################################################################\r\n if tscol+'_dayofweek' in dt_adds:\r\n df.loc[:,tscol+'_month_dayofweek_cross'] = df[tscol+'_month'] +\" \"+ df[tscol+'_dayofweek']\r\n dt_adds.append(tscol+'_month_dayofweek_cross')\r\n df[tscol+'_year'] = df[tscol].dt.year.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_year')\r\n today = date.today()\r\n df[tscol+'_age_in_years'] = today.year - df[tscol].dt.year.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_age_in_years')\r\n df[tscol+'_dayofyear'] = df[tscol].dt.dayofyear.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofyear')\r\n df[tscol+'_dayofmonth'] = df[tscol].dt.day.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofmonth')\r\n df[tscol+'_weekofyear'] = df[tscol].dt.weekofyear.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_weekofyear')\r\n weekends = (df[tscol+'_dayofweek'] == 'Sat') | (df[tscol+'_dayofweek'] == 'Sun')\r\n df[tscol+'_typeofday'] = 'weekday'\r\n df.loc[weekends, tscol+'_typeofday'] = 'weekend'\r\n dt_adds.append(tscol+'_typeofday')\r\n if tscol+'_typeofday' in dt_adds:\r\n df.loc[:,tscol+'_month_typeofday_cross'] = df[tscol+'_month'] +\" \"+ df[tscol+'_typeofday']\r\n dt_adds.append(tscol+'_month_typeofday_cross')\r\n except:\r\n print(' Error in creating date time derived features. 
Continuing...')\r\n print(' created %d columns from time series %s column' %(len(dt_adds),tscol))\r\n return df, dt_adds", "def FE_start_end_date_time_features(smalldf, startTime, endTime, splitter_date_string=\"/\",splitter_hour_string=\":\"):\r\n smalldf = smalldf.copy()\r\n add_cols = []\r\n date_time_variable_flag = False\r\n if smalldf[startTime].dtype in ['datetime64[ns]','datetime16[ns]','datetime32[ns]']:\r\n print('%s variable is a date-time variable' %startTime)\r\n date_time_variable_flag = True\r\n if date_time_variable_flag:\r\n view_days = 'processing'+startTime+'_elapsed_days'\r\n smalldf[view_days] = (smalldf[endTime] - smalldf[startTime]).astype('timedelta64[s]')/(60*60*24)\r\n smalldf[view_days] = smalldf[view_days].astype(int)\r\n add_cols.append(view_days)\r\n view_time = 'processing'+startTime+'_elapsed_time'\r\n smalldf[view_time] = (smalldf[endTime] - smalldf[startTime]).astype('timedelta64[s]').values\r\n add_cols.append(view_time)\r\n else:\r\n start_date = 'processing'+startTime+'_start_date'\r\n smalldf[start_date] = smalldf[startTime].map(lambda x: x.split(\" \")[0])\r\n add_cols.append(start_date) \r\n try:\r\n start_time = 'processing'+startTime+'_start_time'\r\n smalldf[start_time] = smalldf[startTime].map(lambda x: x.split(\" \")[1])\r\n add_cols.append(start_time)\r\n except:\r\n ### there is no hour-minutes part of this date time stamp field. You can just skip it if it is not there\r\n pass\r\n end_date = 'processing'+endTime+'_end_date'\r\n smalldf[end_date] = smalldf[endTime].map(lambda x: x.split(\" \")[0])\r\n add_cols.append(end_date)\r\n try:\r\n end_time = 'processing'+endTime+'_end_time'\r\n smalldf[end_time] = smalldf[endTime].map(lambda x: x.split(\" \")[1])\r\n add_cols.append(end_time)\r\n except:\r\n ### there is no hour-minutes part of this date time stamp field. You can just skip it if it is not there\r\n pass\r\n view_days = 'processing'+startTime+'_elapsed_days'\r\n smalldf[view_days] = (pd.to_datetime(smalldf[end_date]) - pd.to_datetime(smalldf[start_date])).values.astype(int)\r\n add_cols.append(view_days)\r\n try:\r\n view_time = 'processing'+startTime+'_elapsed_time'\r\n smalldf[view_time] = (pd.to_datetime(smalldf[end_time]) - pd.to_datetime(smalldf[start_time])).astype('timedelta64[s]').values\r\n add_cols.append(view_time)\r\n except:\r\n ### In some date time fields this gives an error so skip it in that case\r\n pass\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n year = 'processing'+endTime+'_end_year'\r\n smalldf[year] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[0]).values\r\n add_cols.append(year)\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n month = 'processing'+endTime+'_end_month'\r\n smalldf[month] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[1]).values\r\n add_cols.append(month)\r\n try:\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n daynum = 'processing'+endTime+'_end_day_number'\r\n smalldf[daynum] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[2]).values\r\n add_cols.append(daynum)\r\n except:\r\n ### In some date time fields the day number is not there. 
If not, just skip it ####\r\n pass\r\n #### In some date time fields, the hour and minute is not there, so skip it in that case if it errors!\r\n try:\r\n start_hour = 'processing'+startTime+'_start_hour'\r\n smalldf[start_hour] = smalldf[start_time].map(lambda x: str(x).split(splitter_hour_string)[0]).values\r\n add_cols.append(start_hour)\r\n start_min = 'processing'+startTime+'_start_hour'\r\n smalldf[start_min] = smalldf[start_time].map(lambda x: str(x).split(splitter_hour_string)[1]).values\r\n add_cols.append(start_min)\r\n except:\r\n ### If it errors, skip it\r\n pass\r\n #### Check if there is a weekday and weekends in date time columns using endTime only\r\n weekday_num = 'processing'+endTime+'_end_weekday_number'\r\n smalldf[weekday_num] = pd.to_datetime(smalldf[end_date]).dt.weekday.values\r\n add_cols.append(weekday_num)\r\n weekend = 'processing'+endTime+'_end_weekend_flag'\r\n smalldf[weekend] = smalldf[weekday_num].map(lambda x: 1 if x in[5,6] else 0)\r\n add_cols.append(weekend)\r\n #### If everything works well, there should be 13 new columns added by module. All the best!\r\n print('%d columns added using start date=%s and end date=%s processing...' %(len(add_cols),startTime,endTime))\r\n return smalldf", "def create_feature_based_on_spent_by_timestamp(data):\n utils.save_log('{0} :: {1}'.format(\n create_feature_based_on_spent_by_timestamp.__module__,\n create_feature_based_on_spent_by_timestamp.__name__))\n\n data = data.withColumn('RatioValueSpentByWeekOfYear',\n (data['Value'] / data['TransactionWeekOfYear']))\n data = data.withColumn('RatioValueSpentByDayOfWeek',\n (data['Value'] / data['TransactionDayOfWeek']))\n data = data.withColumn('RatioValueSpentByDayOfYear',\n (data['Value'] / data['TransactionDayOfYear']))\n\n update_list_features(\"numerical\", ['RatioValueSpentByWeekOfYear',\n 'RatioValueSpentByDayOfWeek',\n 'RatioValueSpentByDayOfYear'])\n\n return data", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = 
energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def getFeature(df, start, end):\n\n return [df[start:end].mean(),\n df[start:end].std(),\n df[start:end].skew(),\n df[start:end].kurt(),\n df[start:end].quantile(0.25),\n df[start:end].quantile(0.75),\n df[start:end].quantile(0.90),\n df[start:end].quantile(0.15),\n df[start:end].median(),\n df[start:end].mad(),\n df[start:end].sem(),\n df[start:end].var(),\n df[start:end].autocorr(1),\n df[start:end].autocorr(2),\n df[start:end].autocorr(3),\n df[start:end].autocorr(4),\n df[start:end].autocorr(5),\n np.append(df[start:end].mode(), -1)[0]\n ]", "def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] = v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in 
self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features", "def compute_features_one_round(\n train_base_df,\n train_delta_df,\n test_df,\n df_config,\n feature_config_list,\n feature_map,\n filter_by_month,\n compute_load_ratio=False,\n):\n\n train_round_df = pd.concat([train_base_df, train_delta_df])\n max_train_timestamp = train_round_df[df_config[\"time_col_name\"]].max()\n max_test_timestamp = test_df[df_config[\"time_col_name\"]].max()\n train_test_diff = max_test_timestamp - max_train_timestamp\n max_horizon = ceil(train_test_diff.days * 24 + train_test_diff.seconds / 3600)\n train_features, feature_pipeline = compute_training_features(\n train_round_df, df_config, feature_config_list, feature_map, max_horizon,\n )\n\n test_features = compute_testing_features(test_df, feature_pipeline, feature_config_list, train_round_df)\n\n if compute_load_ratio:\n rolling_window_args = LOAD_RATIO_CONFIG[\"same_day_of_week_rolling_args\"]\n previous_years_lag_args = LOAD_RATIO_CONFIG[\"same_week_of_year_lag_args\"]\n same_week_day_hour_rolling_featurizer = SameDayOfWeekRollingWindowFeaturizer(\n df_config, input_col_names=df_config[\"target_col_name\"], max_horizon=max_horizon, **rolling_window_args\n )\n train_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(train_round_df)\n same_week_day_hour_rolling_featurizer.train_df = train_round_df\n test_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(test_df)\n\n time_col_name = df_config[\"time_col_name\"]\n ts_id_col_names = df_config[\"ts_id_col_names\"]\n keep_col_names = [time_col_name]\n if ts_id_col_names is not None:\n if isinstance(ts_id_col_names, list):\n keep_col_names = keep_col_names + ts_id_col_names\n else:\n keep_col_names.append(ts_id_col_names)\n lag_df_list = []\n start_week = rolling_window_args[\"start_week\"]\n end_week = start_week + rolling_window_args[\"agg_count\"]\n for i in range(start_week, end_week):\n col_old = df_config[\"target_col_name\"] + \"_\" + rolling_window_args[\"output_col_suffix\"] + \"_\" + str(i)\n col_new = col_old + \"_\" + previous_years_lag_args[\"output_col_suffix\"]\n col_ratio = \"recent_load_ratio_\" + str(i)\n\n same_week_day_hour_lag_featurizer = SameWeekOfYearLagFeaturizer(\n df_config,\n input_col_names=col_old,\n train_df=train_df_with_recent_load,\n max_horizon=max_horizon,\n **previous_years_lag_args\n )\n\n lag_df = same_week_day_hour_lag_featurizer.transform(test_df_with_recent_load)\n lag_df[col_ratio] = lag_df[col_old] / lag_df[col_new]\n lag_df_list.append(lag_df[keep_col_names + [col_ratio]].copy())\n\n test_features = reduce(\n lambda left, right: pd.merge(left, right, on=keep_col_names), [test_features] + lag_df_list,\n )\n\n if filter_by_month:\n test_month = test_features[\"month_of_year\"].values[0]\n train_features = train_features.loc[train_features[\"month_of_year\"] == test_month,].copy()\n\n train_features.dropna(inplace=True)\n\n return train_features, test_features", "def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and 
end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) & (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? {df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end", "def create_date_features(df = None, date = None):\n #TODO", "def 
get_lat_lon_date_features(gt_ids=[], gt_masks=None, gt_shifts=None,\n forecast_ids=[], forecast_masks=None, forecast_shifts=None,\n anom_ids=[], anom_masks=None, anom_shifts=None,\n first_year = None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n if not isinstance(forecast_masks, list):\n forecast_masks = itertools.repeat(forecast_masks)\n if not isinstance(forecast_shifts, list):\n forecast_shifts = itertools.repeat(forecast_shifts)\n if not isinstance(anom_masks, list):\n anom_masks = itertools.repeat(anom_masks)\n if not isinstance(anom_shifts, list):\n anom_shifts = itertools.repeat(anom_shifts)\n\n # Define canonical name for target start date column\n date_col = \"start_date\"\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, shift=gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n df = df_merge(df, gt)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # Add each forecast feature to dataframe\n for forecast_id, forecast_mask, forecast_shift in zip(forecast_ids,\n forecast_masks,\n forecast_shifts):\n print \"Getting {}_shift{}\".format(forecast_id, forecast_shift)\n t = time.time()\n # Load forecast with years >= first_year\n forecast = get_forecast(forecast_id, forecast_mask, shift=forecast_shift)\n # Rename target start date column to \"start_date\"\n fcst_date_col = get_target_start_date_col(forecast_id)\n forecast.rename(columns={fcst_date_col: date_col}, inplace=True)\n # Discard years prior to first_year\n forecast = year_slice(forecast, first_year = first_year)\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n df = df_merge(df, forecast)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # Add anomaly features and climatology last so that climatology\n # is produced for all previously added start dates\n for anom_id, anom_mask, anom_shift in zip(anom_ids, anom_masks, anom_shifts):\n print \"Getting {}_shift{} with anomalies\".format(anom_id, anom_shift)\n t = time.time()\n # Check if ground truth column already exists\n gt_col = get_measurement_variable(anom_id, shift=anom_shift)\n if df is None or gt_col not in df.columns:\n # Add masked ground truth data if absent\n gt = get_ground_truth(anom_id, anom_mask, shift=anom_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n df = df_merge(df, gt)\n\n # Load masked ground truth data climatology\n climatology = get_climatology(anom_id, anom_mask, anom_shift)\n # Merge climatology into dataset\n df = pd.merge(df, climatology[[gt_col]],\n left_on=['lat', 'lon', df[date_col].dt.month,\n df[date_col].dt.day],\n right_on=[climatology.lat, climatology.lon,\n climatology[date_col].dt.month,\n climatology[date_col].dt.day],\n how='left', suffixes=('', '_clim'))\n clim_col = gt_col+\"_clim\"\n # Compute ground-truth anomalies\n anom_col = gt_col+\"_anom\"\n df[anom_col] = df[gt_col] - df[clim_col]\n print \"Elapsed: 
{}s\".format(time.time() - t)\n\n return df", "def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n \"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : \"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n 
fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs", "def generate_features(df):\n return np.array([np.array(xi) for xi in pd.to_datetime(df).apply(lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()])])", "def featuretest(self, args):\n db_engine = create_engine(self.root.db_url)\n feature_config = yaml.load(args.feature_config_file)\n\n FeatureGenerator(db_engine, 'features_test').create_features_before_imputation(\n feature_aggregation_config=feature_config,\n feature_dates=[args.as_of_date]\n )\n logging.info('Features created for feature_config %s and date %s', feature_config, args.as_of_date)", "def new_features(df):\n print(\"Add new features ...\")\n # distinguish Spring, Fall and pregnant females (don't care about juvenilles/unknown)\n df[\"gender_plus\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_plus\"] = \"f_gra\"\n\n df[\"gender_seasons\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_seasons\"] = \"f_gra\"\n\n # add features\n df[\"Age_To_Weight\"] = df[\"Annuli\"] / df[\"Weight\"]\n\n # Calcuate Number of recaptures\n df_captures = df[[\"ID\", \"Date\"]].groupby(\"ID\").count()\n df_captures.columns = [\"recapture_count\"]\n df_captures.reset_index(inplace=True)\n df = pd.merge(df, df_captures, how=\"outer\", on=\"ID\")\n\n # recalculate annuli\n df_min = pd.pivot_table(\n df[df.Annuli > 0],\n values=[\"Date\", \"Annuli\"],\n index=[\"ID\"],\n aggfunc={\"Date\": min, \"Annuli\": min},\n )\n df_min.columns = [\"annuli_min\", \"date_min\"]\n df_min.reset_index(inplace=True)\n\n df = pd.merge(df, df_min, how=\"outer\", on=\"ID\")\n df[\"year\"] = df.Date.map(lambda x: x.year)\n df[\"year_min\"] = df.date_min.map(lambda x: x.year)\n df[\"Annuli_orig\"] = df.Annuli\n df.Annuli = df.year - df.year_min + df.annuli_min\n df.Annuli = np.nan_to_num(df.Annuli)\n df[\"Annuli\"] = pd.to_numeric(df[\"Annuli\"], downcast=\"integer\")\n\n # Annuli Buckets\n buckets = 5\n interval = int(df[\"Annuli\"].max() / buckets)\n buckets = [i for i in range(0, df[\"Annuli\"].max() + interval, interval)]\n labels = [\"'{0} - {1}'\".format(i, i + interval) for i in buckets]\n df[\"Annuli_Group\"] = pd.cut(\n df.Annuli, buckets, labels=labels[:-1], include_lowest=True\n )\n\n return df", "def create_features_using_groupby(training, entity, feature, avg=True, minimum=True, maximum=True):\n\n entity_col = 'offer_id' if entity == 'portfolio' else 'person'\n\n groupby = training.groupby(entity_col)[feature]\n\n features, col_name = [], []\n if avg:\n features.append(groupby.mean())\n col_name.append('avg_'+feature)\n if minimum:\n features.append(groupby.min())\n col_name.append('min_'+feature)\n if maximum:\n features.append(groupby.max())\n col_name.append('max_'+feature)\n\n feature_df = pd.concat(features, axis=1)\n feature_df.columns = [col + '_' + entity for col in col_name]\n\n return feature_df", "def at(self, time_slices):\n\n if self.base is not None:\n return self.base.at(time_slices)\n\n if isinstance(time_slices, TimeSlice):\n 
time_slices = [time_slices]\n\n # join the time slice values\n timed_data = pd.DataFrame(columns=self.data.columns)\n\n # make the new data\n for slice_t in time_slices:\n slice_index = (slice_t.time <= self.data.index) & (\n self.data.index < slice_t.time + slice_t.duration\n )\n timed_data.loc[slice_t.time] = self.aggregate(\n self.data[slice_index], axis=0\n )\n\n # return the new feature object\n return Feature(\n data=timed_data,\n aggregate=self.aggregate,\n base=self,\n time_slices=time_slices,\n )", "def postprocess_features(self, featurelist):\n \n ##: To overwrite the time of features that are in a clause\n for feature in featurelist:\n if feature.inClause() or self.is_in_clause(feature.getStartPos(), feature.getSentNum()):\n feature = self.assign_feature_time_with_references(feature, self.timeReferences, feature.getStartPos(), True)\n \n ##: To set time of features after death to none. Currently disabled.\n# deathDates = []\n# for feature in featurelist:\n# if 'Death' in [tg[1] for tg in feature.getTags()]:\n# dt = feature.getDateTime()\n# if dt and feature.getTlink().getTimexes()[0].getType()!='VIRTUAL': ##: only original date counts\n# deathDates.append(dt)\n# \n# if feature.getType()=='CAUSE_OF_DEATH':\n# feature.setTlink(None)\n# \n# if deathDates:\n# deathDate = min(deathDates)\n# for feature in featurelist: \n# dt = feature.getDateTime()\n# if dt and dt>deathDate:\n# feature.setTlink(None)\n \n ##: Remove time from features in the blockout range, \n ##: e.g., A 34 years old male with{ history of leg pain }who on ....\n for feature in featurelist:\n posStart = feature.getStartPos()\n posEnd = feature.getEndPos()\n for r in self.blockout_range:\n if (posStart>r[0] and posStart<r[1]) or (posEnd>r[0] and posEnd<r[1]):\n timex = feature.getTimex()\n if timex:\n tpos = timex.getStartPos()\n if tpos>=r[0] and tpos<=r[1]:\n continue\n \n feature.setTlink(None)\n \n return featurelist", "def features(self, mask=None, propnames=None):\n\t\t\n\t\t# See if we have a cached result\n\t\tif self._features:\n\t\t\treturn self._features\n\t\t\n\t\tresult = {'type': 'FeatureCollection', 'features':[]}\n\t\tfeatures = []\n\t\t\t\t\t\t\t\t\t\t\n\t\t# We can dealt with grid type collections first\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\t\t\t\n\t\t\t# Get center point latitudes and longitudes\n\t\t\tlatitudes = self.latitudes\n\t\t\tlongitudes = self.longitudes\n\t\t\tshape = latitudes.shape\n\t\t\t\n\t\t\t# How do we slice the data to get grid point values?\n\t\t\tindex = 0\n\t\t\tfor dim in self.variable.dimensions:\n\t\t\t\tprint dim, dim.length, len(self.times)\n\t\t\t\tif dim.length == shape[0]:\n\t\t\t\t\ty_index = index\n\t\t\t\tif dim.length == shape[1]:\n\t\t\t\t\tx_index = index\n\t\t\t\tif dim.length == len(self.times):\n\t\t\t\t\tt_index = index\n\t\t\t\tindex += 1\n\t\t\t\n\t\t\t\n\t\t\t# Create the initial slices with indices defaulting to 0\n\t\t\tslices = [0]*len(self.variable.dimensions)\n\t\t\tslices[t_index] = slice(0,len(self.times))\n\n\t\t\t\t\t\t\n\t\t\t# Create corner point latitude longitude arrays\n\t\t\tcorner_lats = numpy.zeros((shape[0]+1, shape[1]+1))\n\t\t\tcorner_lons = numpy.zeros((shape[0]+1, shape[1]+1))\n\t\t\t\t\t\t\n\t\t\t# Step through all the interior grid points\n\t\t\tfor y in range(1, shape[0]):\n\t\t\t\tfor x in range(1, shape[1]):\n\t\t\t\t\tcorner_lats[y,x] = (latitudes[y, x-1] + latitudes[y,x] + latitudes[y-1,x-1] + latitudes[y-1,x])/4\n\t\t\t\t\tcorner_lons[y,x] = (longitudes[y, x-1] + longitudes[y,x] + longitudes[y-1,x-1] + 
longitudes[y-1,x])/4\n\t\t\t\t\t\n\t\t\t# Left boundary\n\t\t\tx = 0\n\t\t\tfor y in range(1,shape[0]):\n\t\t\t\ttmp_lat = (latitudes[y,x] + latitudes[y-1,x])/2\n\t\t\t\ttmp_lon = (longitudes[y,x] + longitudes[y-1,x])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y,x+1] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y,x+1] - tmp_lon)\n\n\n\t\t\t# Right boundary\n\t\t\tx = shape[1]\n\t\t\tfor y in range(1,shape[0]):\n\t\t\t\ttmp_lat = (latitudes[y,x-1] + latitudes[y-1,x-1])/2\n\t\t\t\ttmp_lon = (longitudes[y,x-1] + longitudes[y-1,x-1])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y,x-1] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y,x-1] - tmp_lon)\n\n\n\t\t\t# Bottom boundary\n\t\t\ty = 0\n\t\t\tfor x in range(1,shape[1]):\n\t\t\t\ttmp_lat = (latitudes[y,x] + latitudes[y,x-1])/2\n\t\t\t\ttmp_lon = (longitudes[y,x] + longitudes[y,x-1])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y+1,x] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y+1,x] - tmp_lon)\n\n\t\t\t# Top boundary\n\t\t\ty = shape[0]\n\t\t\tfor x in range(1,shape[1]):\n\t\t\t\ttmp_lat = (latitudes[y-1,x] + latitudes[y-1,x-1])/2\n\t\t\t\ttmp_lon = (longitudes[y-1,x] + longitudes[y-1,x-1])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y-1,x] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y-1,x] - tmp_lon)\n\t\t\t\n\t\t\t# Corners\n\t\t\tcorner_lats[0,0] = latitudes[0,0] - (corner_lats[1,1] - latitudes[0,0])\n\t\t\tcorner_lats[0,shape[1]] = latitudes[0,shape[1]-1] - (corner_lats[1,shape[1]-1] - latitudes[0,shape[1]-1])\n\t\t\tcorner_lats[shape[0],0] = latitudes[shape[0]-1,0] + (latitudes[shape[0]-1,0] - corner_lats[shape[0]-1,1])\n\t\t\tcorner_lats[shape[0],shape[1]] = latitudes[shape[0]-1,shape[1]-1] + (latitudes[shape[0]-1,shape[1]-1] - corner_lats[shape[0]-1,shape[1]-1])\n\n\t\t\tcorner_lons[0,0] = longitudes[0,0] - (corner_lons[1,1] - longitudes[0,0])\n\t\t\tcorner_lons[0,shape[1]] = longitudes[0,shape[1]-1] + (longitudes[0,shape[1]-1] - corner_lons[1,shape[1]-1])\n\t\t\tcorner_lons[shape[0],0] = longitudes[shape[0]-1,0] - (corner_lons[shape[0]-1,1] - longitudes[shape[0]-1,0])\n\t\t\tcorner_lons[shape[0],shape[1]] = longitudes[shape[0]-1,shape[1]-1] + (longitudes[shape[0]-1,shape[1]-1] - corner_lons[shape[0]-1,shape[1]-1])\n\n\n#\t\t\tprint corner_lats\n\n\t\t\t# Now create all polygons\n\t\t\tfor y in range(0, shape[0]):\n\t\t\t\tfor x in range(0, shape[1]):\n\n\t\t\t\t\t# Configure the slices\n\t\t\t\t\tslices[x_index] = slice(x,x+1)\n\t\t\t\t\tslices[y_index] = slice(y,y+1)\n\n\t\t\t\t\t# Check if we are masking and if this point is masked\n\t\t\t\t\tmasked = False\n\n\t\t\t\t\tif mask:\n\t\t\t\t\t\tif mask[y, x] < 0.5:\n\t\t\t\t\t\t\tmasked = True\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tif not masked:\n\n\t\t\t\t\t\tvertices = []\n\t\t\t\t\t\tvertices.append([corner_lons[y, x], corner_lats[y,x]])\n\t\t\t\t\t\tvertices.append([corner_lons[y+1, x], corner_lats[y+1,x]])\n\t\t\t\t\t\tvertices.append([corner_lons[y+1, x+1], corner_lats[y+1,x+1]])\n\t\t\t\t\t\tvertices.append([corner_lons[y, x+1], corner_lats[y,x+1]])\n\t\t\t\t\t\tvertices.append([corner_lons[y, x], corner_lats[y,x]])\t\t\t\t\n\n\t\t\t\t\t\t# Create the basic feature\n\t\t\t\t\t\tfeature = {'type': 'Feature', 'properties':{'id':x + y * shape[1]}, 'geometry': {'type': 'Polygon', 'coordinates': [vertices]}}\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Now add the data\t\t\t\t\t\n\t\t\t\t\t\t#data = self.variable[slices].flatten()\n\t\t\t\t\t\t\n\t\t\t\t\t\t# If we have property names then extract 
data for each name\n\t\t\t\t\t\tif propnames:\n\t\t\t\t\t\t\tfor name in propnames:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tfeature['properties']['value'] = self.variable[slices].flatten()[1]\n\t#\t\t\t\t\t\t\tprint self.variable[slices]\n\t\t\t\t\t\t\t\t#feature['properties']['value'] = self.variable[slices].flatten()[propnames.index(name)]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# else just set property 'value' to the first value of the flattened data array\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\t\t#feature['properties']['value'] = float(self.variable[slices].flatten()[1])\n\t\t\t\t\t\t\n\t\t\t\t\t\t#print feature['properties']\n\t\t\t\t\t\t#, 'value':float(values[y,x])\n\t\t\t\t\t\tfeatures.append(feature)\n\t\t\t\t\t\n\t\t\tresult['features'] = features\n\t\t\t\t\t\t\n#\t\t\toutfile = open('test.json', 'w')\n#\t\t\toutfile.write(simplejson.dumps(result))\n#\t\t\toutfile.close()\n\t\t\t\n\t\t\n\t\t# Point type feature sets next\n\t\telif self.featuretype in ['Point', 'PointSeries']:\n\t\t\t\n\t\t\tresult = {'type': 'FeatureCollection', 'features':[]}\n\t\t\tfeatures = []\n\t\t\t\n\t\t\tlongitudes = self.longitudes\n\t\t\tlatitudes = self.latitudes\n\t\t\t\n\t\t\tcount = len(longitudes)\n\t\t\tfor fid in range(0,count):\n\t\t\t\tfeature = {'type':'Feature', 'properties':{'_id':fid}, 'geometry': {'type':'Point', 'coordinates': [float(longitudes[fid]), float(latitudes[fid])]}}\n\n\t\t\t\t# Add related variables to properties\n\t\t\t\tfor key in self.coordinates_mapping:\n\t\t\t\t\tif key in self.variable.group.variables and key not in ['latitude', 'longitude']:\n\t\t\t\t\t\tif self.coordinates_mapping[key]['map'] == self.coordinates_mapping['latitude']['map']:\n\t\t\t\t\t\t\tfeature['properties'][key] = self.variable.group.variables[key][fid]\n\t\t\t\t\t\t\t\n\t\t\t\tfeatures.append(feature)\n\t\t\t\t\n\t\t\tresult['features'] = features\n\n\t\t\t\n\t\telse:\n\t\t\treturn None\n\n\t\t# Cache result\n\t\tif not self._features:\n\t\t\tself._features = result\n\t\t\t\n\t\treturn result", "def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)", "def add_features(df):\n \n assert df.columns.str.contains(\"query|value|keyword|ranking|timestamp|geo\").all(), \"Add features failed. 
\\\n Missing one of [query, value, keyword, ranking, timestamp, geo]\"\n \n # feature engineering: totals and normalize\n grouped = df.groupby(['ranking']).value # group values by ranking\n df['value_total'] = grouped.transform('sum') # total sum \n df['value_normalized'] = (df.value-grouped.transform('min'))/(grouped.transform('max')-grouped.transform('min')) # normalize \n df['value_normalized_total'] = df.groupby(['ranking']).value_normalized.transform('sum') # total sum of normalized values \n df['date'] = pd.to_datetime(df.query_timestamp).dtd\n \n return df", "def samples_timesteps_features(dataframe, columns, start_date, timesteps=72, \n steps_ahead=24, window_days=100, train_percent=80.):\n \n def overlap_windows(dataset, timesteps, steps_ahead):\n \"\"\" Create overlaping window of time-series data\n \n Parameters\n ----------\n dataset: pd.DataFrame\n time-series pandas dataset\n timesteps: int\n number of time steps from the past for creating output arrays\n steps_ahead: int\n number of time steps into the future for making predictions\n \n Returns\n -------\n X, y: np.array\n input and output 3-d arrays of overlaping time windows\n \"\"\"\n X = []; y = []\n \n start = 0\n for i in range(len(dataset)):\n # Define the end of the input sequence\n in_end = start + timesteps\n out_end = in_end + steps_ahead\n # Ensure that there is enough data\n if out_end <= len(dataset):\n X.append(dataset[start:in_end, :])\n # First column holds load values\n y.append(dataset[in_end:out_end, 0])\n # Move along one time step\n start += 1\n \n # Convert list to np.array\n X = np.asarray(X)\n y = np.asarray(y)\n \n return X, y\n\n\n data = dataframe.copy()\n \n if window_days*24 > data.values.shape[0]:\n raise ValueError('Variable window_days has too large value: {}*24h = {} > {}, which is more than there is data!'.format(window_days, window_days*24, \n data.values.shape[0]))\n \n # Training period\n # ---------------\n train_percent = train_percent/100.\n st = pd.to_datetime(start_date) # start date\n et = st + dt.timedelta(days=int(train_percent*window_days)) # end date\n train = data.loc[st:et].values\n \n # Standardize and transform training data set\n mean_std_values = {}\n for i, column in enumerate(columns):\n # Calculate mean and standard deviation only\n # from the training data set values\n mu = train[:,i].mean() # axis=0\n sd = train[:,i].std()\n mean_std_values[column] = (mu, sd)\n # Standardize training data\n train[:,i] = (train[:,i] - mu)/sd\n \n # Create overlapping windows with training data\n X_train, y_train = overlap_windows(train, timesteps, steps_ahead)\n \n # Testing / Validation period\n # ---------------------------\n sv = et \n ev = sv + dt.timedelta(days=int((1-train_percent)*window_days)+1)\n test = data.loc[sv:ev].values\n \n # Transform testing/validation data set\n for i, column in enumerate(columns):\n # Use mean and standard deviation from the\n # training data set\n mu = mean_std_values[column][0]\n sd = mean_std_values[column][1]\n # Standardize test data\n test[:,i] = (test[:,i] - mu)/sd\n \n # Create overlaping windows with test data\n X_test, y_test = overlap_windows(test, timesteps, steps_ahead)\n \n return mean_std_values, X_train, y_train, X_test, y_test", "def select_features(self):\r\n \r\n features_list = list(self.feed_data.columns.values)\r\n features_list.remove(\"min_time\")\r\n thisrace = self.config.race_to_predict\r\n\r\n #if never ran race before, don't include these variables in feature\r\n #selection, they're just 0's anyway\r\n if 
self.config.first_time_running_race == True:\r\n unuseable_columns = [('min_time', thisrace),('std', thisrace),('num_races', thisrace),\r\n ('rainfall', thisrace),\r\n ('temp', thisrace),\r\n ('wind', thisrace),\r\n ('metersup', thisrace), \r\n 'sex_W']\r\n else:\r\n #drop this column...probs should have removed it earlier. \r\n unuseable_columns = ['sex_W']\r\n #print(features_list)\r\n for element in unuseable_columns:\r\n features_list.remove(element)\r\n data_with_all_feats = self.feed_data.drop(unuseable_columns,axis=1)\r\n colstodrop = features_list\r\n thiscols = []\r\n data_with_current_feats = data_with_all_feats.drop(features_list,axis=1)\r\n checkfit=100.0\r\n scores = []\r\n dropped_cols = []\r\n loopgain =True\r\n #mymod = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=10,\r\n # min_samples_split = 25, criterion='mse')\r\n thisloopfeatures_list = features_list\r\n curcols = data_with_current_feats.columns\r\n countgain=0\r\n #print(\"cc\",curcols)\r\n while loopgain == True:\r\n thisloopscore=100.0\r\n for fet in thisloopfeatures_list:\r\n data_with_current_feats[fet] = data_with_all_feats[fet]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=15,\r\n min_samples_split = 12, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n if ((thisloopscore - oobs) > 0.0):\r\n thisloopscore = oobs\r\n fetwinner = fet\r\n data_with_current_feats.drop(fet,axis=1,inplace=True)\r\n etrain.drop(fet,axis=1,inplace=True)\r\n\r\n data_with_current_feats[fetwinner] = data_with_all_feats[fetwinner]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n #print(fetwinner,predscore)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n #print(fetwinner,\"~\",oobs)\r\n thisloopfeatures_list.remove(fetwinner)\r\n if ((checkfit-oobs)>0.0001):\r\n checkfit = oobs\r\n curcols = data_with_current_feats.columns\r\n #print(curcols)\r\n else:\r\n break\r\n\r\n\r\n self.final_df = self.feed_data[data_with_current_feats.columns]\r\n self.Xtrain=self.final_df.sample(frac=0.8,random_state=200)\r\n self.Xtest=self.final_df.drop(self.Xtrain.index)#\r\n self.ytrain = self.Xtrain.pop('min_time')\r\n self.ytest = self.Xtest.pop('min_time')\r\n self.model= RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n self.model.fit(self.Xtrain,self.ytrain)\r\n #print(y)\r\n return", "def create_features(df,rsi_window = 14,macd_feat = [12,26,9]):\n df.dropna(inplace=True)\n ## day and month\n df['Date'] = pd.to_datetime(df['Date'])\n df['Month'] = df['Date'].dt.month\n df['dayowk'] = df['Date'].dt.dayofweek\n df = 
pd.get_dummies(data = df,columns = ['Month','dayowk'])\n \n ##Previos n-day pct_changes\n df['1day_pct'] = df['Adj Close'].pct_change()\n df['2day_pct'] = df['Adj Close'].pct_change(periods = 2)\n df['3day_pct'] = df['Adj Close'].pct_change(periods = 3)\n df['4day_pct'] = df['Adj Close'].pct_change(periods = 4)\n df['5day_pct'] = df['Adj Close'].pct_change(periods = 5)\n df['7day_pct'] = df['Adj Close'].pct_change(periods = 7)\n \n ##Cumulative sum of 1day_pct\n df['1day_pct_cs'] = df['Adj Close'].pct_change().cumsum()\n \n ##EWMA of 7, 50 and 200 days\n df['ewma_7'] = df['Adj Close'].ewm(span=7).mean()/df['Adj Close']\n df['ewma_50'] = df['Adj Close'].ewm(span=50).mean()/df['Adj Close']\n df['ewma_200'] = df['Adj Close'].ewm(span=200).mean()/df['Adj Close']\n ## Golden Cross vs Death Cross etc.\n #df['7g(50&200)'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n #df['7l(50&200)'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g50'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g200'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n \n ##RSI and MACD\n df = RSI(df,14)\n df = MACD_mod(df,nl=macd_feat[0],nh=macd_feat[1],nsig=macd_feat[2])\n \n df['day_var'] = (df['High'] - df['Low'])/df['Close']## Days variance\n df['open_close'] = (df['Open'] - df['Close'])/df['Close'] ## Days Open-Close\n df['high_close'] = (df['High'] - df['Close'])/df['Close'] ##Days High-Close\n df['open_prev_close'] = (df['Open'] - df['Close'].shift(1))/df['Close'] ## Days open - Previos Dyas Close\n \n ##Classification target\n df['target'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2) ## Target for classification\n #df['1_day_target'] = df['Adj Close'].shift(-1) - df['Adj Close'] ## Target for Regression\n #df['target2'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2)## Will the price go up intra-day\n \n ## IS the stock Overbought or Oversold based on RSI?\n df['RSI_overbought'] = df['RSI']>70\n df['RSI_oversold'] = df['RSI']<30\n \n \n #df.drop(['Open','High','Low','Close'],axis=1,inplace=True)\n# df = df.dropna()\n \n #df = df.reset_index(drop=True)\n \n ## Calculating how large the previos hot and cold streaks were\n f = 0\n df['prev_hot_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==1:\n f += 1\n if df['target'][i+1] ==0:\n df['prev_hot_streak'][i+1] = f\n f = 0\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_hot_streak'][i]==0:\n df['prev_hot_streak'][i]=df['prev_hot_streak'][i-1]\n \n \n df['prev_cold_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==0:\n f += 1\n if df['target'][i+1] ==1:\n df['prev_cold_streak'][i+1] = f\n f = 0\n\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_cold_streak'][i]==0:\n df['prev_cold_streak'][i] = df['prev_cold_streak'][i-1]\n \n ## Calculating current hot and cold streaks\n df['current_hot_streak'] = np.zeros(df.shape[0])\n df['current_cold_streak'] = np.zeros(df.shape[0])\n fhot=0\n fcold=0\n for i in range(df.shape[0]):\n if df['target'][i]==1:\n fhot += 1\n fcold = 0\n df['current_hot_streak'][i] = fhot\n elif df['target'][i]==0:\n fcold += 1\n fhot = 0\n df['current_cold_streak'][i] = fcold\n \n df['prev_hot_streak'] = df['prev_hot_streak'].shift(1)\n df['prev_cold_streak'] = df['prev_cold_streak'].shift(1)\n df['current_hot_streak'] = df['current_hot_streak'].shift(1)\n df['current_cold_streak'] = df['current_cold_streak'].shift(1)\n \n ## Combinations of previos 
streaks\n df['prev_current_hot'] = df['prev_hot_streak'] - df['current_hot_streak']\n df['prev_current_cold'] = df['prev_cold_streak'] - df['current_cold_streak']\n df['current_hot_prev_cold'] = df['current_hot_streak'] - df['prev_cold_streak']\n df['current_cold_prev_hot'] = df['current_cold_streak'] - df['prev_hot_streak']\n \n ##Calculating days since max\n current_max = df['Adj Close'][0]\n df['days_from_max'] = np.zeros(df.shape[0])\n df['pct_from_max'] = np.zeros(df.shape[0])\n #print('blah')\n for i in range(1,df.shape[0]):\n if df['Adj Close'][i] > current_max:\n current_max = df['Adj Close'][i]\n # print(current_max)\n else:\n df['days_from_max'][i] = df['days_from_max'][i-1]+1\n df['pct_from_max'][i] = (df['Adj Close'][i]-current_max)/current_max\n #print(df['days_from_max'][i])\n \n \n \n df.dropna(inplace=True)\n df = df.reset_index(drop=True)\n return df", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def add_features(df_in, rolling_win_size=15):\n cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']\n other_cols = []\n for i in df_in.columns:\n if i not in cols:\n other_cols.append(i)\n all_cols = cols + other_cols\n\n df_in = df_in[all_cols]\n\n sensor_cols = []\n for i in df_in.columns[5:]:\n sensor_cols.append(i)\n\n sensor_av_cols = [nm+'_av' for nm in sensor_cols]\n sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]\n\n df_out = pd.DataFrame()\n\n ws = rolling_win_size\n\n #calculate rolling stats for each engine id\n\n for m_id in pd.unique(df_in.Turbine_ID):\n\n # get a subset for each engine sensors\n df_engine = df_in[df_in['Turbine_ID'] == m_id]\n df_sub = df_engine[sensor_cols]\n\n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = sensor_av_cols\n\n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sensor_sd_cols\n\n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd], axis=1)\n\n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )\n return df_out", "def add_datepart(\n cls,\n df: pd.DataFrame,\n field_name: str,\n frequency: str,\n prefix: str = None,\n drop: bool = True,\n ) -> Tuple[pd.DataFrame, List[str]]:\n field = df[field_name]\n prefix = (re.sub(\"[Dd]ate$\", \"\", field_name) if prefix is None else prefix) + \"_\"\n attr = cls.time_features_from_frequency_str(frequency)\n added_features = []\n for n in attr:\n if n == \"Week\":\n continue\n df[prefix + n] = getattr(field.dt, n.lower())\n added_features.append(prefix + n)\n # Pandas removed `dt.week` in v1.1.10\n if \"Week\" in attr:\n week = field.dt.isocalendar().week if hasattr(field.dt, \"isocalendar\") else field.dt.week\n df.insert(3, prefix + \"Week\", week)\n added_features.append(prefix + \"Week\")\n # TODO Not adding Elapsed by default. 
Need to route it through config\n # mask = ~field.isna()\n # df[prefix + \"Elapsed\"] = np.where(\n # mask, field.values.astype(np.int64) // 10 ** 9, None\n # )\n # added_features.append(prefix + \"Elapsed\")\n if drop:\n df.drop(field_name, axis=1, inplace=True)\n\n # Removing features woth zero variations\n # for col in added_features:\n # if len(df[col].unique()) == 1:\n # df.drop(columns=col, inplace=True)\n # added_features.remove(col)\n return df, added_features", "def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features", "def compute_features(ctx, input_file, output_file):\n kwargs = {ctx.args[i][2:]: ctx.args[i+1].strip('\"') for i in range(0, len(ctx.args), 2)}\n output_file = os.path.abspath(output_file)\n click.echo(\"Init feature set computation\")\n executor = FeatureSetPreparer.build(verbose=True, violate=True, independent=True, session_file=None, 
location_mapping_file = None, orientation_fix_file=None, ws=12800, ss=12800, threshold=0.2, subwins=4, skip_post=True, **kwargs)\n click.echo(\"Compute feautures\")\n result = executor(input_file)\n if not os.path.exists(os.path.dirname(output_file)):\n click.echo(\"Create output folder if not exists\")\n os.makedirs(os.path.dirname(output_file))\n click.echo(\"Save feature set to: \" + output_file)\n result.to_csv(output_file, index=False, float_format='%.6f')\n click.echo(\"Saved\")" ]
[ "0.6709174", "0.59639823", "0.5956674", "0.57902575", "0.5713756", "0.5689175", "0.5615991", "0.5600183", "0.5599253", "0.5595833", "0.55765986", "0.55623573", "0.5488219", "0.5388274", "0.53792185", "0.5378838", "0.5333837", "0.53081375", "0.53008443", "0.5289871", "0.5253744", "0.5248897", "0.52153873", "0.5212669", "0.51900107", "0.5179637", "0.5173532", "0.5166036", "0.5159631", "0.5148492" ]
0.7756553
0
Computes the dropoff_features feature group. To restrict features to a time range, pass in ts_column, start_date, and/or end_date as kwargs.
def dropoff_features_fn(df, ts_column, start_date, end_date): df = filter_df_by_ts( df, ts_column, start_date, end_date ) dropoffzip_features = ( df.groupBy("dropoff_zip", window("tpep_dropoff_datetime", "30 minute")) .agg(count("*").alias("count_trips_window_30m_dropoff_zip")) .select( col("dropoff_zip").alias("zip"), unix_timestamp(col("window.end")).alias("ts").cast(IntegerType()), partition_id(to_timestamp(col("window.end"))).alias("yyyy_mm"), col("count_trips_window_30m_dropoff_zip").cast(IntegerType()), is_weekend(col("window.end")).alias("dropoff_is_weekend"), ) ) return dropoffzip_features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pickup_features_fn(df, ts_column, start_date, end_date):\n df = filter_df_by_ts(\n df, ts_column, start_date, end_date\n )\n pickupzip_features = (\n df.groupBy(\n \"pickup_zip\", window(\"tpep_pickup_datetime\", \"1 hour\", \"15 minutes\")\n ) # 1 hour window, sliding every 15 minutes\n .agg(\n mean(\"fare_amount\").alias(\"mean_fare_window_1h_pickup_zip\"),\n count(\"*\").alias(\"count_trips_window_1h_pickup_zip\"),\n )\n .select(\n col(\"pickup_zip\").alias(\"zip\"),\n unix_timestamp(col(\"window.end\")).alias(\"ts\").cast(IntegerType()),\n partition_id(to_timestamp(col(\"window.end\"))).alias(\"yyyy_mm\"),\n col(\"mean_fare_window_1h_pickup_zip\").cast(FloatType()),\n col(\"count_trips_window_1h_pickup_zip\").cast(IntegerType()),\n )\n )\n return pickupzip_features", "def get_date_features(gt_ids=[], gt_masks=None, gt_shifts=None, first_year=None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # If lat, lon columns exist, pivot to wide format\n if 'lat' in gt.columns and 'lon' in gt.columns:\n if gt_shift == None:\n measurement_variable = get_measurement_variable(gt_id)\n else:\n measurement_variable = get_measurement_variable(gt_id)+'_shift'+str(gt_shift)\n gt = pd.pivot_table(gt, values=measurement_variable, index='start_date',\n columns=['lat', 'lon']).reset_index()\n gt = pd.DataFrame(gt.to_records())\n gt.drop(\"index\", axis=1, inplace=True)\n # Rename columns to start_date and precip_(27.0,261.0), etc.\n gt.rename(columns={gt.columns[0]: 'start_date'}, inplace=True)\n gt.rename(columns=lambda x: x.replace('(',\n measurement_variable +\n '_('), inplace=True)\n # Use outer merge to include union of start_date values across all features\n # combinations across all features\n df = df_merge(df, gt, on=\"start_date\")\n print \"Elapsed: {}s\".format(time.time() - t)\n\n return df", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n 
energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def getFeature(df, start, end):\n\n return [df[start:end].mean(),\n df[start:end].std(),\n df[start:end].skew(),\n df[start:end].kurt(),\n df[start:end].quantile(0.25),\n df[start:end].quantile(0.75),\n df[start:end].quantile(0.90),\n df[start:end].quantile(0.15),\n df[start:end].median(),\n df[start:end].mad(),\n df[start:end].sem(),\n df[start:end].var(),\n df[start:end].autocorr(1),\n df[start:end].autocorr(2),\n df[start:end].autocorr(3),\n df[start:end].autocorr(4),\n df[start:end].autocorr(5),\n np.append(df[start:end].mode(), -1)[0]\n ]", "def compute_features_one_round(\n train_base_df,\n train_delta_df,\n test_df,\n df_config,\n feature_config_list,\n feature_map,\n filter_by_month,\n compute_load_ratio=False,\n):\n\n train_round_df = pd.concat([train_base_df, train_delta_df])\n max_train_timestamp = train_round_df[df_config[\"time_col_name\"]].max()\n max_test_timestamp = test_df[df_config[\"time_col_name\"]].max()\n train_test_diff = max_test_timestamp - max_train_timestamp\n max_horizon = ceil(train_test_diff.days * 24 + train_test_diff.seconds / 3600)\n train_features, feature_pipeline = compute_training_features(\n train_round_df, df_config, feature_config_list, feature_map, max_horizon,\n )\n\n test_features = compute_testing_features(test_df, feature_pipeline, feature_config_list, train_round_df)\n\n if compute_load_ratio:\n rolling_window_args = LOAD_RATIO_CONFIG[\"same_day_of_week_rolling_args\"]\n previous_years_lag_args = LOAD_RATIO_CONFIG[\"same_week_of_year_lag_args\"]\n same_week_day_hour_rolling_featurizer = SameDayOfWeekRollingWindowFeaturizer(\n df_config, input_col_names=df_config[\"target_col_name\"], max_horizon=max_horizon, **rolling_window_args\n )\n train_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(train_round_df)\n 
same_week_day_hour_rolling_featurizer.train_df = train_round_df\n test_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(test_df)\n\n time_col_name = df_config[\"time_col_name\"]\n ts_id_col_names = df_config[\"ts_id_col_names\"]\n keep_col_names = [time_col_name]\n if ts_id_col_names is not None:\n if isinstance(ts_id_col_names, list):\n keep_col_names = keep_col_names + ts_id_col_names\n else:\n keep_col_names.append(ts_id_col_names)\n lag_df_list = []\n start_week = rolling_window_args[\"start_week\"]\n end_week = start_week + rolling_window_args[\"agg_count\"]\n for i in range(start_week, end_week):\n col_old = df_config[\"target_col_name\"] + \"_\" + rolling_window_args[\"output_col_suffix\"] + \"_\" + str(i)\n col_new = col_old + \"_\" + previous_years_lag_args[\"output_col_suffix\"]\n col_ratio = \"recent_load_ratio_\" + str(i)\n\n same_week_day_hour_lag_featurizer = SameWeekOfYearLagFeaturizer(\n df_config,\n input_col_names=col_old,\n train_df=train_df_with_recent_load,\n max_horizon=max_horizon,\n **previous_years_lag_args\n )\n\n lag_df = same_week_day_hour_lag_featurizer.transform(test_df_with_recent_load)\n lag_df[col_ratio] = lag_df[col_old] / lag_df[col_new]\n lag_df_list.append(lag_df[keep_col_names + [col_ratio]].copy())\n\n test_features = reduce(\n lambda left, right: pd.merge(left, right, on=keep_col_names), [test_features] + lag_df_list,\n )\n\n if filter_by_month:\n test_month = test_features[\"month_of_year\"].values[0]\n train_features = train_features.loc[train_features[\"month_of_year\"] == test_month,].copy()\n\n train_features.dropna(inplace=True)\n\n return train_features, test_features", "def _create_ts_features(df, tscol):\r\n df = copy.deepcopy(df)\r\n dt_adds = []\r\n try:\r\n df[tscol+'_hour'] = df[tscol].dt.hour.fillna(0).astype(int)\r\n df[tscol+'_minute'] = df[tscol].dt.minute.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_hour')\r\n dt_adds.append(tscol+'_minute')\r\n except:\r\n print(' Error in creating hour-second derived features. 
Continuing...')\r\n try:\r\n df[tscol+'_dayofweek'] = df[tscol].dt.dayofweek.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofweek')\r\n if tscol+'_hour' in dt_adds:\r\n DAYS = dict(zip(range(7),['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']))\r\n df[tscol+'_dayofweek'] = df[tscol+'_dayofweek'].map(DAYS)\r\n df.loc[:,tscol+'_dayofweek_hour_cross'] = df[tscol+'_dayofweek'] +\" \"+ df[tscol+'_hour'].astype(str)\r\n dt_adds.append(tscol+'_dayofweek_hour_cross')\r\n df[tscol+'_quarter'] = df[tscol].dt.quarter.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_quarter')\r\n df[tscol+'_month'] = df[tscol].dt.month.fillna(0).astype(int)\r\n MONTHS = dict(zip(range(1,13),['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',\r\n 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']))\r\n df[tscol+'_month'] = df[tscol+'_month'].map(MONTHS)\r\n dt_adds.append(tscol+'_month')\r\n #### Add some features for months ########################################\r\n festives = ['Oct','Nov','Dec']\r\n name_col = tscol+\"_is_festive\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in festives else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n summer = ['Jun','Jul','Aug']\r\n name_col = tscol+\"_is_summer\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in summer else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n winter = ['Dec','Jan','Feb']\r\n name_col = tscol+\"_is_winter\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in winter else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n cold = ['Oct','Nov','Dec','Jan','Feb','Mar']\r\n name_col = tscol+\"_is_cold\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in cold else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n warm = ['Apr','May','Jun','Jul','Aug','Sep']\r\n name_col = tscol+\"_is_warm\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in warm else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n #########################################################################\r\n if tscol+'_dayofweek' in dt_adds:\r\n df.loc[:,tscol+'_month_dayofweek_cross'] = df[tscol+'_month'] +\" \"+ df[tscol+'_dayofweek']\r\n dt_adds.append(tscol+'_month_dayofweek_cross')\r\n df[tscol+'_year'] = df[tscol].dt.year.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_year')\r\n today = date.today()\r\n df[tscol+'_age_in_years'] = today.year - df[tscol].dt.year.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_age_in_years')\r\n df[tscol+'_dayofyear'] = df[tscol].dt.dayofyear.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofyear')\r\n df[tscol+'_dayofmonth'] = df[tscol].dt.day.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofmonth')\r\n df[tscol+'_weekofyear'] = df[tscol].dt.weekofyear.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_weekofyear')\r\n weekends = (df[tscol+'_dayofweek'] == 'Sat') | (df[tscol+'_dayofweek'] == 'Sun')\r\n df[tscol+'_typeofday'] = 'weekday'\r\n df.loc[weekends, tscol+'_typeofday'] = 'weekend'\r\n dt_adds.append(tscol+'_typeofday')\r\n if tscol+'_typeofday' in dt_adds:\r\n df.loc[:,tscol+'_month_typeofday_cross'] = df[tscol+'_month'] +\" \"+ df[tscol+'_typeofday']\r\n dt_adds.append(tscol+'_month_typeofday_cross')\r\n except:\r\n print(' Error in creating date time derived features. 
Continuing...')\r\n print(' created %d columns from time series %s column' %(len(dt_adds),tscol))\r\n return df, dt_adds", "def create_date_features(df = None, date = None):\n #TODO", "def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X", "def filter_dataframe(df, start_date_dt, end_date_dt):\n\n dff = df \n # df[\n # (df[\"timestamp\"].dt.date >= dt.date(start_date_dt.year, start_date_dt.month, start_date_dt.day))\n # & (df[\"timestamp\"].dt.date <= dt.date(end_date_dt.year, end_date_dt.month, end_date_dt.day))\n # ]\n # if (lat_min != -90) or (lat_max != 90):\n # dff = dff[\n # (dff[\"lat\"] >= lat_min)\n # & (dff[\"lat\"] <= lat_max)\n # ]\n # if (lon_min != -90) or (lon_max != 90):\n # dff = dff[\n # (dff[\"lon\"] >= lon_min)\n # & (dff[\"lon\"] <= lon_max)\n # ]\n\n return dff", "def create_features(df,rsi_window = 14,macd_feat = [12,26,9]):\n df.dropna(inplace=True)\n ## day and month\n df['Date'] = pd.to_datetime(df['Date'])\n df['Month'] = df['Date'].dt.month\n df['dayowk'] = df['Date'].dt.dayofweek\n df = pd.get_dummies(data = df,columns = ['Month','dayowk'])\n \n ##Previos n-day pct_changes\n df['1day_pct'] = df['Adj Close'].pct_change()\n df['2day_pct'] = df['Adj Close'].pct_change(periods = 2)\n df['3day_pct'] = df['Adj Close'].pct_change(periods = 3)\n df['4day_pct'] = df['Adj Close'].pct_change(periods = 4)\n df['5day_pct'] = df['Adj Close'].pct_change(periods = 5)\n df['7day_pct'] = df['Adj Close'].pct_change(periods = 7)\n \n ##Cumulative sum of 1day_pct\n df['1day_pct_cs'] = df['Adj Close'].pct_change().cumsum()\n \n ##EWMA of 7, 50 and 200 days\n df['ewma_7'] = df['Adj Close'].ewm(span=7).mean()/df['Adj Close']\n df['ewma_50'] = df['Adj Close'].ewm(span=50).mean()/df['Adj Close']\n df['ewma_200'] = df['Adj Close'].ewm(span=200).mean()/df['Adj Close']\n ## Golden Cross vs Death Cross etc.\n #df['7g(50&200)'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n #df['7l(50&200)'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g50'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g200'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n \n ##RSI and MACD\n df = RSI(df,14)\n df = MACD_mod(df,nl=macd_feat[0],nh=macd_feat[1],nsig=macd_feat[2])\n \n df['day_var'] = (df['High'] - df['Low'])/df['Close']## Days variance\n df['open_close'] = (df['Open'] - df['Close'])/df['Close'] ## Days Open-Close\n df['high_close'] = (df['High'] - df['Close'])/df['Close'] ##Days High-Close\n df['open_prev_close'] = (df['Open'] - df['Close'].shift(1))/df['Close'] ## Days open - Previos Dyas Close\n \n ##Classification target\n df['target'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2) ## Target for classification\n #df['1_day_target'] = df['Adj Close'].shift(-1) - df['Adj Close'] ## Target for Regression\n #df['target2'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2)## Will the price go up intra-day\n \n ## IS the stock Overbought or Oversold based on RSI?\n df['RSI_overbought'] = df['RSI']>70\n df['RSI_oversold'] = df['RSI']<30\n \n \n 
#df.drop(['Open','High','Low','Close'],axis=1,inplace=True)\n# df = df.dropna()\n \n #df = df.reset_index(drop=True)\n \n ## Calculating how large the previos hot and cold streaks were\n f = 0\n df['prev_hot_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==1:\n f += 1\n if df['target'][i+1] ==0:\n df['prev_hot_streak'][i+1] = f\n f = 0\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_hot_streak'][i]==0:\n df['prev_hot_streak'][i]=df['prev_hot_streak'][i-1]\n \n \n df['prev_cold_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==0:\n f += 1\n if df['target'][i+1] ==1:\n df['prev_cold_streak'][i+1] = f\n f = 0\n\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_cold_streak'][i]==0:\n df['prev_cold_streak'][i] = df['prev_cold_streak'][i-1]\n \n ## Calculating current hot and cold streaks\n df['current_hot_streak'] = np.zeros(df.shape[0])\n df['current_cold_streak'] = np.zeros(df.shape[0])\n fhot=0\n fcold=0\n for i in range(df.shape[0]):\n if df['target'][i]==1:\n fhot += 1\n fcold = 0\n df['current_hot_streak'][i] = fhot\n elif df['target'][i]==0:\n fcold += 1\n fhot = 0\n df['current_cold_streak'][i] = fcold\n \n df['prev_hot_streak'] = df['prev_hot_streak'].shift(1)\n df['prev_cold_streak'] = df['prev_cold_streak'].shift(1)\n df['current_hot_streak'] = df['current_hot_streak'].shift(1)\n df['current_cold_streak'] = df['current_cold_streak'].shift(1)\n \n ## Combinations of previos streaks\n df['prev_current_hot'] = df['prev_hot_streak'] - df['current_hot_streak']\n df['prev_current_cold'] = df['prev_cold_streak'] - df['current_cold_streak']\n df['current_hot_prev_cold'] = df['current_hot_streak'] - df['prev_cold_streak']\n df['current_cold_prev_hot'] = df['current_cold_streak'] - df['prev_hot_streak']\n \n ##Calculating days since max\n current_max = df['Adj Close'][0]\n df['days_from_max'] = np.zeros(df.shape[0])\n df['pct_from_max'] = np.zeros(df.shape[0])\n #print('blah')\n for i in range(1,df.shape[0]):\n if df['Adj Close'][i] > current_max:\n current_max = df['Adj Close'][i]\n # print(current_max)\n else:\n df['days_from_max'][i] = df['days_from_max'][i-1]+1\n df['pct_from_max'][i] = (df['Adj Close'][i]-current_max)/current_max\n #print(df['days_from_max'][i])\n \n \n \n df.dropna(inplace=True)\n df = df.reset_index(drop=True)\n return df", "def postprocess_features(self, featurelist):\n \n ##: To overwrite the time of features that are in a clause\n for feature in featurelist:\n if feature.inClause() or self.is_in_clause(feature.getStartPos(), feature.getSentNum()):\n feature = self.assign_feature_time_with_references(feature, self.timeReferences, feature.getStartPos(), True)\n \n ##: To set time of features after death to none. 
Currently disabled.\n# deathDates = []\n# for feature in featurelist:\n# if 'Death' in [tg[1] for tg in feature.getTags()]:\n# dt = feature.getDateTime()\n# if dt and feature.getTlink().getTimexes()[0].getType()!='VIRTUAL': ##: only original date counts\n# deathDates.append(dt)\n# \n# if feature.getType()=='CAUSE_OF_DEATH':\n# feature.setTlink(None)\n# \n# if deathDates:\n# deathDate = min(deathDates)\n# for feature in featurelist: \n# dt = feature.getDateTime()\n# if dt and dt>deathDate:\n# feature.setTlink(None)\n \n ##: Remove time from features in the blockout range, \n ##: e.g., A 34 years old male with{ history of leg pain }who on ....\n for feature in featurelist:\n posStart = feature.getStartPos()\n posEnd = feature.getEndPos()\n for r in self.blockout_range:\n if (posStart>r[0] and posStart<r[1]) or (posEnd>r[0] and posEnd<r[1]):\n timex = feature.getTimex()\n if timex:\n tpos = timex.getStartPos()\n if tpos>=r[0] and tpos<=r[1]:\n continue\n \n feature.setTlink(None)\n \n return featurelist", "def samples_timesteps_features(dataframe, columns, start_date, timesteps=72, \n steps_ahead=24, window_days=100, train_percent=80.):\n \n def overlap_windows(dataset, timesteps, steps_ahead):\n \"\"\" Create overlaping window of time-series data\n \n Parameters\n ----------\n dataset: pd.DataFrame\n time-series pandas dataset\n timesteps: int\n number of time steps from the past for creating output arrays\n steps_ahead: int\n number of time steps into the future for making predictions\n \n Returns\n -------\n X, y: np.array\n input and output 3-d arrays of overlaping time windows\n \"\"\"\n X = []; y = []\n \n start = 0\n for i in range(len(dataset)):\n # Define the end of the input sequence\n in_end = start + timesteps\n out_end = in_end + steps_ahead\n # Ensure that there is enough data\n if out_end <= len(dataset):\n X.append(dataset[start:in_end, :])\n # First column holds load values\n y.append(dataset[in_end:out_end, 0])\n # Move along one time step\n start += 1\n \n # Convert list to np.array\n X = np.asarray(X)\n y = np.asarray(y)\n \n return X, y\n\n\n data = dataframe.copy()\n \n if window_days*24 > data.values.shape[0]:\n raise ValueError('Variable window_days has too large value: {}*24h = {} > {}, which is more than there is data!'.format(window_days, window_days*24, \n data.values.shape[0]))\n \n # Training period\n # ---------------\n train_percent = train_percent/100.\n st = pd.to_datetime(start_date) # start date\n et = st + dt.timedelta(days=int(train_percent*window_days)) # end date\n train = data.loc[st:et].values\n \n # Standardize and transform training data set\n mean_std_values = {}\n for i, column in enumerate(columns):\n # Calculate mean and standard deviation only\n # from the training data set values\n mu = train[:,i].mean() # axis=0\n sd = train[:,i].std()\n mean_std_values[column] = (mu, sd)\n # Standardize training data\n train[:,i] = (train[:,i] - mu)/sd\n \n # Create overlapping windows with training data\n X_train, y_train = overlap_windows(train, timesteps, steps_ahead)\n \n # Testing / Validation period\n # ---------------------------\n sv = et \n ev = sv + dt.timedelta(days=int((1-train_percent)*window_days)+1)\n test = data.loc[sv:ev].values\n \n # Transform testing/validation data set\n for i, column in enumerate(columns):\n # Use mean and standard deviation from the\n # training data set\n mu = mean_std_values[column][0]\n sd = mean_std_values[column][1]\n # Standardize test data\n test[:,i] = (test[:,i] - mu)/sd\n \n # Create overlaping windows with 
test data\n X_test, y_test = overlap_windows(test, timesteps, steps_ahead)\n \n return mean_std_values, X_train, y_train, X_test, y_test", "def FE_start_end_date_time_features(smalldf, startTime, endTime, splitter_date_string=\"/\",splitter_hour_string=\":\"):\r\n smalldf = smalldf.copy()\r\n add_cols = []\r\n date_time_variable_flag = False\r\n if smalldf[startTime].dtype in ['datetime64[ns]','datetime16[ns]','datetime32[ns]']:\r\n print('%s variable is a date-time variable' %startTime)\r\n date_time_variable_flag = True\r\n if date_time_variable_flag:\r\n view_days = 'processing'+startTime+'_elapsed_days'\r\n smalldf[view_days] = (smalldf[endTime] - smalldf[startTime]).astype('timedelta64[s]')/(60*60*24)\r\n smalldf[view_days] = smalldf[view_days].astype(int)\r\n add_cols.append(view_days)\r\n view_time = 'processing'+startTime+'_elapsed_time'\r\n smalldf[view_time] = (smalldf[endTime] - smalldf[startTime]).astype('timedelta64[s]').values\r\n add_cols.append(view_time)\r\n else:\r\n start_date = 'processing'+startTime+'_start_date'\r\n smalldf[start_date] = smalldf[startTime].map(lambda x: x.split(\" \")[0])\r\n add_cols.append(start_date) \r\n try:\r\n start_time = 'processing'+startTime+'_start_time'\r\n smalldf[start_time] = smalldf[startTime].map(lambda x: x.split(\" \")[1])\r\n add_cols.append(start_time)\r\n except:\r\n ### there is no hour-minutes part of this date time stamp field. You can just skip it if it is not there\r\n pass\r\n end_date = 'processing'+endTime+'_end_date'\r\n smalldf[end_date] = smalldf[endTime].map(lambda x: x.split(\" \")[0])\r\n add_cols.append(end_date)\r\n try:\r\n end_time = 'processing'+endTime+'_end_time'\r\n smalldf[end_time] = smalldf[endTime].map(lambda x: x.split(\" \")[1])\r\n add_cols.append(end_time)\r\n except:\r\n ### there is no hour-minutes part of this date time stamp field. You can just skip it if it is not there\r\n pass\r\n view_days = 'processing'+startTime+'_elapsed_days'\r\n smalldf[view_days] = (pd.to_datetime(smalldf[end_date]) - pd.to_datetime(smalldf[start_date])).values.astype(int)\r\n add_cols.append(view_days)\r\n try:\r\n view_time = 'processing'+startTime+'_elapsed_time'\r\n smalldf[view_time] = (pd.to_datetime(smalldf[end_time]) - pd.to_datetime(smalldf[start_time])).astype('timedelta64[s]').values\r\n add_cols.append(view_time)\r\n except:\r\n ### In some date time fields this gives an error so skip it in that case\r\n pass\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n year = 'processing'+endTime+'_end_year'\r\n smalldf[year] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[0]).values\r\n add_cols.append(year)\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n month = 'processing'+endTime+'_end_month'\r\n smalldf[month] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[1]).values\r\n add_cols.append(month)\r\n try:\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n daynum = 'processing'+endTime+'_end_day_number'\r\n smalldf[daynum] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[2]).values\r\n add_cols.append(daynum)\r\n except:\r\n ### In some date time fields the day number is not there. 
If not, just skip it ####\r\n pass\r\n #### In some date time fields, the hour and minute is not there, so skip it in that case if it errors!\r\n try:\r\n start_hour = 'processing'+startTime+'_start_hour'\r\n smalldf[start_hour] = smalldf[start_time].map(lambda x: str(x).split(splitter_hour_string)[0]).values\r\n add_cols.append(start_hour)\r\n start_min = 'processing'+startTime+'_start_hour'\r\n smalldf[start_min] = smalldf[start_time].map(lambda x: str(x).split(splitter_hour_string)[1]).values\r\n add_cols.append(start_min)\r\n except:\r\n ### If it errors, skip it\r\n pass\r\n #### Check if there is a weekday and weekends in date time columns using endTime only\r\n weekday_num = 'processing'+endTime+'_end_weekday_number'\r\n smalldf[weekday_num] = pd.to_datetime(smalldf[end_date]).dt.weekday.values\r\n add_cols.append(weekday_num)\r\n weekend = 'processing'+endTime+'_end_weekend_flag'\r\n smalldf[weekend] = smalldf[weekday_num].map(lambda x: 1 if x in[5,6] else 0)\r\n add_cols.append(weekend)\r\n #### If everything works well, there should be 13 new columns added by module. All the best!\r\n print('%d columns added using start date=%s and end date=%s processing...' %(len(add_cols),startTime,endTime))\r\n return smalldf", "def add_datepart(\n cls,\n df: pd.DataFrame,\n field_name: str,\n frequency: str,\n prefix: str = None,\n drop: bool = True,\n ) -> Tuple[pd.DataFrame, List[str]]:\n field = df[field_name]\n prefix = (re.sub(\"[Dd]ate$\", \"\", field_name) if prefix is None else prefix) + \"_\"\n attr = cls.time_features_from_frequency_str(frequency)\n added_features = []\n for n in attr:\n if n == \"Week\":\n continue\n df[prefix + n] = getattr(field.dt, n.lower())\n added_features.append(prefix + n)\n # Pandas removed `dt.week` in v1.1.10\n if \"Week\" in attr:\n week = field.dt.isocalendar().week if hasattr(field.dt, \"isocalendar\") else field.dt.week\n df.insert(3, prefix + \"Week\", week)\n added_features.append(prefix + \"Week\")\n # TODO Not adding Elapsed by default. 
Need to route it through config\n # mask = ~field.isna()\n # df[prefix + \"Elapsed\"] = np.where(\n # mask, field.values.astype(np.int64) // 10 ** 9, None\n # )\n # added_features.append(prefix + \"Elapsed\")\n if drop:\n df.drop(field_name, axis=1, inplace=True)\n\n # Removing features woth zero variations\n # for col in added_features:\n # if len(df[col].unique()) == 1:\n # df.drop(columns=col, inplace=True)\n # added_features.remove(col)\n return df, added_features", "def time_split_dataset(df, train_start_date, train_end_date, holdout_end_date, date_col):\n\n train_set = df.copy()[\n (df[date_col] >= train_start_date) & (df[date_col] <= train_end_date)]\n\n test_set = df.copy()[\n (df[date_col] > train_end_date) & (df[date_col] <= holdout_end_date)]\n\n return train_set, test_set", "def create_feature_based_on_spent_by_timestamp(data):\n utils.save_log('{0} :: {1}'.format(\n create_feature_based_on_spent_by_timestamp.__module__,\n create_feature_based_on_spent_by_timestamp.__name__))\n\n data = data.withColumn('RatioValueSpentByWeekOfYear',\n (data['Value'] / data['TransactionWeekOfYear']))\n data = data.withColumn('RatioValueSpentByDayOfWeek',\n (data['Value'] / data['TransactionDayOfWeek']))\n data = data.withColumn('RatioValueSpentByDayOfYear',\n (data['Value'] / data['TransactionDayOfYear']))\n\n update_list_features(\"numerical\", ['RatioValueSpentByWeekOfYear',\n 'RatioValueSpentByDayOfWeek',\n 'RatioValueSpentByDayOfYear'])\n\n return data", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def get_lat_lon_date_features(gt_ids=[], gt_masks=None, gt_shifts=None,\n forecast_ids=[], forecast_masks=None, forecast_shifts=None,\n anom_ids=[], anom_masks=None, anom_shifts=None,\n first_year = None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n if not isinstance(forecast_masks, list):\n forecast_masks = itertools.repeat(forecast_masks)\n if not isinstance(forecast_shifts, list):\n forecast_shifts = itertools.repeat(forecast_shifts)\n if not isinstance(anom_masks, list):\n anom_masks = itertools.repeat(anom_masks)\n if not isinstance(anom_shifts, list):\n anom_shifts = itertools.repeat(anom_shifts)\n\n # Define canonical name for target start date column\n date_col = \"start_date\"\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, shift=gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n df = df_merge(df, gt)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # Add each forecast feature to dataframe\n for forecast_id, 
forecast_mask, forecast_shift in zip(forecast_ids,\n forecast_masks,\n forecast_shifts):\n print \"Getting {}_shift{}\".format(forecast_id, forecast_shift)\n t = time.time()\n # Load forecast with years >= first_year\n forecast = get_forecast(forecast_id, forecast_mask, shift=forecast_shift)\n # Rename target start date column to \"start_date\"\n fcst_date_col = get_target_start_date_col(forecast_id)\n forecast.rename(columns={fcst_date_col: date_col}, inplace=True)\n # Discard years prior to first_year\n forecast = year_slice(forecast, first_year = first_year)\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n df = df_merge(df, forecast)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # Add anomaly features and climatology last so that climatology\n # is produced for all previously added start dates\n for anom_id, anom_mask, anom_shift in zip(anom_ids, anom_masks, anom_shifts):\n print \"Getting {}_shift{} with anomalies\".format(anom_id, anom_shift)\n t = time.time()\n # Check if ground truth column already exists\n gt_col = get_measurement_variable(anom_id, shift=anom_shift)\n if df is None or gt_col not in df.columns:\n # Add masked ground truth data if absent\n gt = get_ground_truth(anom_id, anom_mask, shift=anom_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n df = df_merge(df, gt)\n\n # Load masked ground truth data climatology\n climatology = get_climatology(anom_id, anom_mask, anom_shift)\n # Merge climatology into dataset\n df = pd.merge(df, climatology[[gt_col]],\n left_on=['lat', 'lon', df[date_col].dt.month,\n df[date_col].dt.day],\n right_on=[climatology.lat, climatology.lon,\n climatology[date_col].dt.month,\n climatology[date_col].dt.day],\n how='left', suffixes=('', '_clim'))\n clim_col = gt_col+\"_clim\"\n # Compute ground-truth anomalies\n anom_col = gt_col+\"_anom\"\n df[anom_col] = df[gt_col] - df[clim_col]\n print \"Elapsed: {}s\".format(time.time() - t)\n\n return df", "def month_lag_distribution(source_df, field=\"month_lag\", path=path.path, nrows=None):\n _log.info(\"Creating features from {}\".format(field))\n prefix = source_df.split(\"_\")[0]\n source_df = \"{}/{}\".format(path, source_df)\n\n _log.info(\"Reading from {}\".format(source_df))\n try:\n df = pd.read_csv(source_df, usecols=[\"card_id\", field], nrows=nrows)\n _log.info(\"Successfully read from {}\".format(source_df))\n except Exception as e:\n _log.exception(e)\n\n _log.info(\"Computing distribution of month lag\")\n func_to_be_applied = [min, max, pd.Series.nunique]\n func_to_be_applied_dummy = [max, np.mean]\n rename_dict = create_rename_dict(prefix, field, func_to_be_applied)\n rename_dict_dummy = create_rename_dict(prefix, \"dummy\", func_to_be_applied_dummy)\n\n df[\"dummy\"] = 1\n df_features = df.groupby(\"card_id\").agg({field:func_to_be_applied}).reset_index()\n df_features = pd.concat([pd.DataFrame(df_features[\"card_id\"]), df_features[field]], axis=1, sort=False)\n\n _log.info(\"Renaming columns: {}\".format(rename_dict))\n df_features.rename(columns=rename_dict, inplace=True)\n\n _log.info(\"Computing time in month between transactions\")\n df_freq = (df.groupby([\"card_id\", field]).agg({\"dummy\": np.sum}).reset_index().groupby(\"card_id\")\n .agg({\"dummy\": func_to_be_applied_dummy}).reset_index())\n df_freq = pd.concat([pd.DataFrame(df_freq[\"card_id\"]), df_freq[\"dummy\"]], axis=1, 
sort=False)\n df_freq.rename(columns=rename_dict_dummy, inplace=True)\n\n _log.info(\"Creating final df\")\n df_features = df_features.merge(df_freq, how=\"inner\", on=\"card_id\")\n return df_features", "def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n #-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return", "def feature_engineer_ts(self, month=12):\n st_data_dt = self.get_st_data_dt()\n end_data_dt = self.get_end_data_dt()\n date_list = pd.date_range(*(pd.to_datetime([st_data_dt, end_data_dt]) + pd.offsets.MonthEnd()), freq='M').to_list()\n population = self.get_population()\n is_raw_partition = self.get_is_raw_partition()\n# Lag 2 months\n all_data = []\n# join past is_raw columns\n for d in date_list:\n \n population_partition = population[population['ft_data_dt'] == d] \n old_date = d - relativedelta(months=month)\n y = old_date.year\n m = old_date.month\n day = calendar.monthrange(y, m)[1]\n old_date = date(y, m, day)\n old_date = max(old_date, st_data_dt)\n date_list_join = pd.date_range(*(pd.to_datetime([old_date, d]) + pd.offsets.MonthEnd()), freq='M').to_list()\n date_list_join.reverse()\n for index, date_join in enumerate(date_list_join):\n if date_join.strftime(\"%Y-%m-%d\") not in is_raw_partition.keys():\n continue\n \n tmp_is_raw_partition = is_raw_partition[date_join.strftime(\"%Y-%m-%d\")]\n \n rename_col = [c for c in list(tmp_is_raw_partition.columns) if c not in ['idd', 'ft_data_dt']]\n new_col = [c+'_'+str(index+1) for c in rename_col]\n name_dict = dict(list(zip(rename_col, new_col)))\n tmp_is_raw_partition = tmp_is_raw_partition.rename(columns = name_dict)\n population_partition = population_partition.merge(tmp_is_raw_partition.drop(columns=['ft_data_dt']), on=['idd'], how='left')\n all_data.append(population_partition)\n ts_df = pd.concat(all_data)\n threshold_null = len(ts_df.columns) - 4\n ts_df = ts_df[ts_df.isnull().sum(axis=1) < threshold_null]\n \n def sum_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_sum_'+str(duration)+'mth'\n tmp_df = df[col_list].sum(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def mean_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_avg_'+str(duration)+'mth'\n tmp_df = df[col_list].mean(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def std_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_std_'+str(duration)+'mth'\n tmp_df = df[col_list].std(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def med_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_med_'+str(duration)+'mth'\n tmp_df = df[col_list].std(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def min_ts(self, df, col_list, feature, duration):\n ft_name = feature+ 
'_min_'+str(duration)+'mth'\n tmp_df = df[col_list].min(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def max_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_max_'+str(duration)+'mth'\n tmp_df = df[col_list].max(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def q1_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_q1_'+str(duration)+'mth'\n tmp_df = df[col_list].quantile(q=0.25, axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def q3_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_q3_'+str(duration)+'mth'\n tmp_df = df[col_list].quantile(q=0.75, axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def last_ts(self, df, feature):\n ft_name = feature+ '_last'\n tmp_df = df[feature+'_'+str(1)].to_frame(name=ft_name)\n return tmp_df\n \n ts_duration = [1, 3, 6, 9, 12]\n feature_list = self.get_is_raw_col()\n df = ts_df[['idd', 'ft_data_dt']]\n# Time Series Features\n for duration in ts_duration:\n for col in feature_list:\n col_list = [col+'_'+str(i) for i in range(1, duration+1)]\n df = pd.concat([df\\\n , sum_ts(self, ts_df, col_list, col, duration)\\\n , mean_ts(self, ts_df, col_list, col, duration)\\\n , med_ts(self, ts_df, col_list, col, duration)\\\n , q1_ts(self, ts_df, col_list, col, duration)\\\n , q3_ts(self, ts_df, col_list, col, duration)\\\n , min_ts(self, ts_df, col_list, col, duration)\\\n , max_ts(self, ts_df, col_list, col, duration)]\n , axis=1)\n self.set_all_data(df)", "def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) & (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? 
{df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end", "def create_lag_features_with_time_feature(df = None, cols = None, time = None, n = 5, fillna = True):\n # assert\n assert(df is not None and cols is not None)\n # set attributes\n cols_to_rename = cols\n print(cols_to_rename)\n if type(n) == list:\n shift_range = n\n elif type(n) == int:\n shift_range = range(1, n+1)\n else:\n print(\"type of n is flase, set it to default: 5\")\n shift_range = range(1, 6)\n # try to get the new features\n for month_shift in tqdm(shift_range):\n train_shift = df.copy()\n train_shift[time] = train_shift[time] + month_shift\n foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x\n train_shift = train_shift.rename(columns=foo)\n df = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0)\n del train_shift\n gc.collect()\n return df", "def create_feature_names_list(df):\n features = ['date_block_num', 'shop_id', 'item_id', 'Year', 'Month', 'shop_type_1',\n 'shop_type_2', 'shop_city_type', 'shop_city', 'item_category_id',\n 'item_category_main', 'is_category_digital', 'is_category_ps_related', 'item_price_avg',\n 'when_first_sold',\n 'number_of_mondays', 'number_of_saturdays', 'number_of_sundays', 'number_of_days_in_month']\n lag_cols = [x for x in df.columns if 'lag' in x]\n features = features + lag_cols\n\n return features", "def FE_create_time_series_features(dft, ts_column, ts_adds_in=[]):\r\n dtf = copy.deepcopy(dft)\r\n reset_index = False\r\n try:\r\n # ts_column = None assumes that that index is the time series index\r\n reset_index = False\r\n if ts_column is None:\r\n reset_index = True\r\n ts_column = dtf.index.name\r\n dtf = 
dtf.reset_index()\r\n\r\n ### In some extreme cases, date time vars are not processed yet and hence we must fill missing values here!\r\n null_nums = dtf[ts_column].isnull().sum()\r\n if null_nums > 0:\r\n # missing_flag = True\r\n new_missing_col = ts_column + '_Missing_Flag'\r\n dtf[new_missing_col] = 0\r\n dtf.loc[dtf[ts_column].isnull(),new_missing_col]=1\r\n dtf[ts_column].fillna(method='ffill', inplace=True)\r\n print(' adding %s column due to missing values in data' %new_missing_col)\r\n if dtf[dtf[ts_column].isnull()].shape[0] > 0:\r\n dtf[ts_column].fillna(method='bfill', inplace=True)\r\n\r\n if dtf[ts_column].dtype == float:\r\n dtf[ts_column] = dtf[ts_column].astype(int)\r\n\r\n ### if we have already found that it was a date time var, then leave it as it is. Thats good enough!\r\n items = dtf[ts_column].apply(str).apply(len).values\r\n #### In some extreme cases,\r\n if all(items[0] == item for item in items):\r\n if items[0] == 4:\r\n ### If it is just a year variable alone, you should leave it as just a year!\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column],format='%Y')\r\n ts_adds = []\r\n else:\r\n ### if it is not a year alone, then convert it into a date time variable\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column], infer_datetime_format=True)\r\n ### this is where you create the time series features #####\r\n dtf, ts_adds = _create_ts_features(df=dtf, tscol=ts_column)\r\n else:\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column], infer_datetime_format=True)\r\n ### this is where you create the time series features #####\r\n dtf, ts_adds = _create_ts_features(df=dtf, tscol=ts_column)\r\n if not ts_adds_in:\r\n ts_adds_copy = dtf[ts_adds].select_dtypes(include='number').columns.tolist()\r\n ### drop those columns where all rows are same i.e. zero variance ####\r\n for col in ts_adds_copy:\r\n if dtf[col].std() == 0:\r\n dtf.drop(col, axis=1, inplace=True)\r\n print(' dropping column due to zero variance in %s column' %col)\r\n ts_adds.remove(col)\r\n else:\r\n rem_cols = left_subtract(dtf.columns.tolist(), ts_adds_in)\r\n dtf = dtf[rem_cols+ts_adds_in]\r\n\r\n # If you had reset the index earlier, set it back before returning\r\n # to make it consistent with the dataframe that was sent as input\r\n if reset_index:\r\n dtf = dtf.set_index(ts_column)\r\n elif ts_column in dtf.columns:\r\n dtf.drop(ts_column, axis=1, inplace=True)\r\n else:\r\n pass\r\n except Exception as e:\r\n print(e)\r\n print('Error in Processing %s column for date time features. Continuing...' 
%ts_column)\r\n return dtf, ts_adds", "def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features", "def generate_features(df, suffix = '_diff_', step=1, relevant_features=[], ignore_columns=[]):\n # cols = self.get_active_columns(df, ignore_columns)\n cols = relevant_features\n deltas = {}\n for c in cols:\n deltas['%s%s'% (c, suffix)] = subtract_from_prev_val(df, c, step=step)\n df_new = pd.DataFrame(deltas)\n return df_new", "def dataframe_features(df, db):\n def generator():\n for gene_id in df.index:\n yield asinterval(db[gene_id])\n\n return pybedtools.BedTool(generator())", "def _extract_features_for_peak_estimation(self, groups=(), tail=0.4, skip_if_shorter_than=6, **kwargs):\n assert 0 < tail < 1, \"Tail is a fraction, it should be between 0 and 1\"\n assert 0 < skip_if_shorter_than, \"skip_if_shorter_than should be a positive int\"\n\n groups = 
self.df[self.col_group].unique() if len(groups) == 0 else groups\n features = pd.DataFrame({self.col_group: groups})\n\n for idx, row in features.iterrows():\n group = row[self.col_group]\n df_loc = self.df[self.df[self.col_group] == group]\n\n length = len(df_loc[self.col_t])\n if length < skip_if_shorter_than:\n continue\n\n model_full = HuberRegressor()\n\n x_full = df_loc[self.col_t].to_numpy().reshape((-1, 1))\n y_full = df_loc[self.col_obs].to_numpy()\n model_full.fit(x_full, y_full)\n slope_full = model_full.coef_[0]\n\n tail_len = int(tail * length)\n\n x_tail = df_loc[self.col_t].to_numpy()[-tail_len:].reshape((-1, 1))\n y_tail = df_loc[self.col_obs].to_numpy()[-tail_len:]\n x_head = df_loc[self.col_t].to_numpy()[:-tail_len].reshape((-1, 1))\n y_head = df_loc[self.col_obs].to_numpy()[:-tail_len]\n\n r2_full_score = model_full.score(x_full, y_full)\n r2_head_score = model_full.score(x_head, y_head)\n r2_tail_score = model_full.score(x_tail, y_tail)\n\n model_head = HuberRegressor()\n model_head.fit(x_head, y_head)\n slope_head = model_head.coef_[0]\n\n model_tail = HuberRegressor()\n model_tail.fit(x_tail, y_tail)\n slope_tail = model_tail.coef_[0]\n features.at[idx, \"R2_full\"] = r2_full_score\n features.at[idx, \"R2_head\"] = r2_head_score\n features.at[idx, \"R2_tail\"] = r2_tail_score\n features.at[idx, \"R2_tail_own\"] = model_tail.score(x_tail, y_tail)\n features.at[idx, \"slope_full\"] = slope_full\n features.at[idx, \"slope_head\"] = slope_head\n features.at[idx, \"slope_tail\"] = slope_tail\n\n y_pred_full = model_full.predict(x_full)\n self._statistics[\"linear_r2\"][group] = r2_full_score\n self._statistics[\"linear_rmse\"][group] = np.linalg.norm(np.exp(y_full) - np.exp(y_pred_full))**2\n self._statistics[\"linear_slope\"][group] = slope_full\n\n fraction_below_score = np.mean(model_full.predict(x_tail) > y_tail)\n weights = np.array([1 / (1 + i) ** 2 for i in range(1, tail_len + 1)][::-1])\n weighted_fraction_below_score = np.dot(weights, model_full.predict(x_tail) > y_tail)\n features.at[idx, \"fraction_below\"] = fraction_below_score\n features.at[idx, \"weighted_fraction_below\"] = weighted_fraction_below_score\n\n return features.dropna()", "def calculate_timebase_features(self, X: pd.DataFrame) -> pd.DataFrame:\n X = self._add_lagged_features(X, [1, 3, 7, 14, 21, 365])\n\n X = self._add_rolling(X, 'mean', [5, 50])\n X = self._add_rolling(X, 'min', [5, 50])\n X = self._add_rolling(X, 'max', [5, 50])\n\n return X" ]
[ "0.66726613", "0.5643213", "0.55374706", "0.5524301", "0.5516429", "0.54855764", "0.5438203", "0.538857", "0.53348887", "0.53311694", "0.5320984", "0.5259109", "0.5249251", "0.52357846", "0.52223134", "0.5205125", "0.5173679", "0.5165818", "0.51619375", "0.5140185", "0.5136399", "0.51243925", "0.510697", "0.5103279", "0.5085388", "0.5066197", "0.5062592", "0.50466233", "0.49861452", "0.49712297" ]
0.75834435
0
Ceilings datetime dt to interval num_minutes, then returns the unix timestamp.
def rounded_unix_timestamp(dt, num_minutes=15): nsecs = dt.minute * 60 + dt.second + dt.microsecond * 1e-6 delta = math.ceil(nsecs / (60 * num_minutes)) * (60 * num_minutes) - nsecs return int((dt + timedelta(seconds=delta)).timestamp())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def _get_milleseconds(self):\n return int(round(time.time() * 1000))", "def minutes_in(sec):\r\n return int((sec - (hours_in(sec)*3600))//60)", "def calculate_seconds_in_minutes(minutes):\n return int(minutes * 60)", "def to_minutes(delta):\n return int(math.ceil(delta.total_seconds() / 60))", "def get_closest_minute(t):\n ts = dt.datetime.utcfromtimestamp(t/1000)\n s = ts.second\n if s < 30:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute)\n else:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute) + dt.timedelta(minutes=1)", "def round_time(dt=None, roundTo=60): # IGNORE:W0621\n\n if dt is None:\n dt = datetime.now()\n\n dt = np.asarray(dt, dtype='datetime64[s]').reshape(-1)\n\n for li in range(len(dt)):\n date = dt[li].astype(object)\n seconds = (date - date.min).seconds\n\n # // is a floor division, not a comment on following line:\n rounding = (seconds + roundTo / 2) // roundTo * roundTo\n\n dt[li] = date + timedelta(0, rounding - seconds, -date.microsecond)\n\n return len(dt) == 1 and dt[0].astype(object) or dt", "def timestamp_floor(ts: int, how: str = 'day', unit: str = 'ms'):\n dt = datetime.fromtimestamp(ts / 1000 if unit == 'ms' else ts,\n tz = timezone.utc)\n if how == 'second':\n new_dt = datetime(year = dt.year, month = dt.month, day = dt.day,\n hour = dt.hour, minute = dt.minute, second = dt.second,\n tzinfo = timezone.utc)\n elif how == 'minute':\n new_dt = datetime(year = dt.year, month = dt.month, day = dt.day,\n hour = dt.hour, minute = dt.minute, tzinfo = timezone.utc)\n elif how == 'hour':\n new_dt = datetime(year = dt.year, month = dt.month, day = dt.day,\n hour = dt.hour, tzinfo = timezone.utc)\n else:\n new_dt = datetime(year = dt.year, month = dt.month, tzinfo = timezone.utc)\n\n timestamp = dt.replace(tzinfo = timezone.utc).timestamp()\n return int(timestamp * 1000 if unit == 'ms' else timestamp)", "def calculate_minutes(time):\n return int(time / 60)", "def timeToMinutes(timestamp):\n if len(timestamp) == 5: \n return int(timestamp[0])*600 + int(timestamp[1])*60 + int(timestamp[3])*10 + int(timestamp[4])\n return None", "def get_minutes(self, datetime):\n return datetime.hour*60.0+datetime.minute+datetime.second/60", "def floor_time(self, ts):\n return datetime.datetime.fromtimestamp(\n int(ts.timestamp()) // self.interval * self.interval\n )", "def multMinuteAlign(ts, min):\n\tintv = secInMinute * min\n\treturn int((ts / intv)) * intv", "def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def _floor_to_dt(value: np.datetime64) -> np.datetime64:\n integral = int(value.astype(\"<M8[h]\").astype(\"int64\") /\n 3) # type: ignore\n return np.datetime64(integral * 3, \"h\")", "def _to_minutes(seconds):\n return '%d:%d' % divmod(seconds, 60)", "def get_minutes(video: Video) -> int:\n mins = re.findall(r'PT(\\d+)M', video.duration)\n if mins:\n return int(mins[0])\n return 1000", "def minutes_in_day_to_time(minutes):\n return seconds_in_day_to_time(minutes*60)", "def minutes(self):\n return int((self.end - self.start).total_seconds()) / 60", "def _round(self, x):\n return x - x % self.minutes_per_step", "def get_time_to_end_stream(minutes):\n time_now = 
datetime.datetime.now()\n now_plus_10 = time_now + datetime.timedelta(minutes=minutes)\n return now_plus_10.strftime('%H:%M')", "def minutes_to_seconds(minutes) -> int:\n return int(minutes) * 60", "def total_minutes(td):\n return total_seconds(td) / 60", "def sct_numericdate(cls, d):\n return (d-cls.SCT_EPOCH).total_seconds() / 60", "def seconds_to_minutes(seconds: int, round: Optional[bool] = True) -> Union[int, float]:\n return int(seconds / 60) if round else seconds / 60", "def clock_helper(total_seconds):\n seconds_in_minute = total_seconds % 60", "def unixTimeMs(dateAndTime):\n dateAndTime = dateAndTime + datetime.timedelta(hours=HOUR_ADJUSTMENT)\n return int((dateAndTime - EPOCH).total_seconds() * 1000.0)", "def datetime2UnixTime(dt):\n\n # UTC unix timestamp\n unix_timestamp = (dt - datetime(1970, 1, 1)).total_seconds()\n\n return unix_timestamp" ]
[ "0.57981074", "0.5748822", "0.5663159", "0.56519955", "0.5603236", "0.5469628", "0.5442284", "0.54418385", "0.5433084", "0.5365743", "0.5359583", "0.53497386", "0.53274846", "0.5236749", "0.5208083", "0.520233", "0.51816577", "0.5176535", "0.51646566", "0.51259786", "0.51006633", "0.5091771", "0.5088361", "0.50492877", "0.5043618", "0.50284106", "0.5025343", "0.5017732", "0.5005033", "0.5000358" ]
0.7375313
0
Return current sample rate in Sa/s
def sample_rate(self): return self.query_float('ENTER Current Sample Rate (Sa/s)')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_rate(self):\r\n return self.config.sample_rate", "def sample_rate(self):\n return self._sample_rate", "def sample_rate(self):\n return self._sample_rate", "def sample_rate(self):\n return self._sample_rate", "def sample_rate(self, sr=None):\n return self._sample_rate", "def samp_rate(self):\n return self._samp_rate", "def sampling_rate(self):\n return self.track.sampling_rate", "def get_samp_rate(self):\n return _uhd_swig.usrp_sink_get_samp_rate(self)", "def get_sample_rate(self):\n return 1", "def get_samp_rate(self):\n return _uhd_swig.usrp_source_get_samp_rate(self)", "def get_samplerate(self):\n\t\treturn _PM_UPDATE_RATE / self.output_decimation", "def sampling_rate(self):\n with audioread.audio_open(self.path) as f:\n return f.samplerate", "def get_samp_rate(self):\n return _uhd_swig.usrp_sink_sptr_get_samp_rate(self)", "def sample_interval(self):\n\n if self.sample_rate != 0:\n return 1.0 / self.sample_rate\n return 0.0", "def get_samp_rate(self):\n return _uhd_swig.usrp_source_sptr_get_samp_rate(self)", "def samplerate(self):\n return self.sound.samplerate", "def sample_rate(self):\n if self.has_data():\n try:\n return round(\n 1.0\n / np.float64(\n (\n np.median(\n np.diff(self.dataset.coords[\"time\"].to_index())\n / np.timedelta64(1, \"s\")\n )\n )\n ),\n 0,\n )\n except AttributeError:\n self.logger.warning(\n \"Something weird happend with xarray time indexing\"\n )\n\n raise ValueError(\n \"Something weird happend with xarray time indexing\"\n )\n return self.run_metadata.sample_rate", "def rate(self):\n if self._rate:\n return self._rate\n else:\n return self._wave.getframerate()", "def get_channel_sampling_rate(self)->float:\n return self.__sampling_rate", "def input_data_sample_rate(self):\n return self._input_data_sample_rate", "def rate(self):\n return self.__rate", "def rate(self):\n return self._rate", "def get_current_rate(self):\n pass", "def update_rate_hz(self) -> float:\n return self._update_rate_hz", "def get_scan_rate(self):\n raise NotImplementedError", "def read(self):\n beats, interval_ms = self.read_raw()\n if 0 < interval_ms < 2500:\n rate = 60000.0 / interval_ms\n else:\n raise RuntimeError(\"Value out of range or device not connected.\")\n return rate", "def update_rate(self):\n self._rate = (\n (self._received - self._samples[0]) / float(self.sample_size)\n )\n self._samples.append(self._received)", "def data_rate(self):\n return self._data_rate", "def get_samp_rates(self):\n return _uhd_swig.usrp_sink_get_samp_rates(self)", "def bandwidth(self):\n return self.stop_hz - self.start_hz" ]
[ "0.81212515", "0.8000023", "0.8000023", "0.7969299", "0.7864392", "0.7847092", "0.782177", "0.7783444", "0.7727639", "0.76789045", "0.763086", "0.74798954", "0.7479725", "0.7386037", "0.7357198", "0.73316765", "0.7272404", "0.72663385", "0.7222648", "0.70849174", "0.6931208", "0.68862706", "0.68820935", "0.68451726", "0.68282443", "0.6816689", "0.67112553", "0.66999406", "0.66733843", "0.6672461" ]
0.86877906
0
assert unexpected_content has not been written to stdout
def assertStdoutDoesNotContain(self, unexpected_content): if type(unexpected_content) is not types.ListType: unexpected_content = [ unexpected_content ] stdout_message = sys.stdout.getvalue() for the_text in unexpected_content: self.assertNotIn(the_text, stdout_message,('Stdout "%s" contains text "%s"' % (stdout_message, the_text)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_cot_output(self, expected):\n sys.stdout = StringIO.StringIO()\n output = None\n try:\n self.instance.run()\n except (TypeError, ValueError, SyntaxError, LookupError):\n self.fail(traceback.format_exc())\n finally:\n output = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n self.maxDiff = None\n self.assertMultiLineEqual(expected.strip(), output.strip())", "def check_stdout(self, expected: str):\n assert self._std_out is not None, f\"You first need to `execute` the program before checking stdout!\"\n self._test.assertEqual(self._std_out.strip(), expected.strip())", "def test_debug_output(self):\n assert output(self.msg) is not None", "def test_stdout(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n\n # Suppress warnings so that if there are any old-style plugins that\n # lore queries for don't confuse the assertion below. See #3070.\n self.patch(warnings, 'warn', lambda *a, **kw: None)\n self.test_buildTeX()\n self.assertEqual(stdout.getvalue(), '')", "def assertOutput(cls, expected, actual):\n if expected != actual:\n raise Exception(\"'\" + expected + \"' != '\" + actual + \"'\")", "def assert_output(self, parser_args, expected_output):\n c = count_nginx_log_frequency(\n parser_args.file,\n parser_args.segment,\n NGINX_ACCESS_LOG_REGEX\n )\n saved_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print_report(\n c,\n parser_args.segment,\n parser_args.limit,\n parser_args.file\n )\n output = out.getvalue().strip()\n assert output == expected_output\n finally:\n sys.stdout = saved_stdout", "def assertStdoutContains(self, expected_content):\n if type(expected_content) is not types.ListType:\n expected_content = [ expected_content ]\n stdout_message = sys.stdout.getvalue()\n for the_text in expected_content:\n self.assertIn(the_text, stdout_message,('Stdout \"%s\" does not contain text \"%s\"' % (stdout_message, the_text)))", "def test_ignore_capture():\n\n sys.stdout.write('Print to stdout')\n sys.stderr.write('Print to stderr')\n\n assert True", "def test_is_information_written_through_stderr_methods(self):\n\n io = BufferedSystemIO()\n io._stdout = lambda *args, **kwargs: None\n\n try:\n raise IndexError('Invalid index 5')\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('IndexError', io.get_value())\n self.assertIn('Invalid index 5', io.get_value())\n self.assertIn('Retry with \"-rl debug\" switch before failed task to see stacktrace', io.get_value())", "def test_output_interception(self):\n expected_output = 'testing, 1, 2, 3 ..'\n actual_output = capture(['echo', expected_output])\n assert actual_output.strip() == expected_output.strip()", "def test_capture_stdout():\n\n sys.stdout.write('Print to stdout')\n\n assert False", "def test_no_eof(self):", "def test_unknown(self):\n msg = \"*** Unknown syntax: asd\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"asd\")\n st = f.getvalue()\n self.assertEqual(msg, st)", "def assertContent(self, response, expected_response):\n self.assertEqual(list(response.streaming_content)[0], expected_response)", "def test_stdout_pattern(f, result):\n if not os.path.exists(f):\n return\n\n expected = open(f, encoding=\"utf-8\").read()\n\n # curl debug logs are too dependent on the context, so we filter\n # them and not take them into account for testing differences.\n expected = remove_curl_debug_lines(expected)\n expected_lines = expected.split(\"\\n\")\n expected_pattern_lines = [parse_pattern(line) for line in 
expected_lines]\n\n actual = decode_string(result.stdout)\n actual = remove_curl_debug_lines(actual)\n actual_lines = re.split(r\"\\r?\\n\", actual)\n\n if len(actual_lines) != len(expected_pattern_lines):\n print(\">>> error in stdout / mismatch in number of lines\")\n print(\n f\"actual: {len(actual_lines)} lines\\nexpected: {len(expected_pattern_lines)} lines\"\n )\n print(f\"actual <{actual}>\")\n print(\"# Actual lines\")\n for i, line in enumerate(actual_lines):\n print(\"%2d: %s\" % (i, line))\n print(\"# Expected lines\")\n for i, line in enumerate(expected_lines):\n print(\"%2d: %s\" % (i, line))\n print(\"# Expected Pattern lines\")\n for i, line in enumerate(expected_pattern_lines):\n print(\"%2d: %s\" % (i, line))\n\n sys.exit(1)\n for i in range(len(expected_pattern_lines)):\n if not re.match(expected_pattern_lines[i], actual_lines[i]):\n print(f\">>> error in stdout in line {i+1}\")\n print(f\"actual: <{actual_lines[i]}>\")\n print(\n f\"expected: <{expected_lines[i]}> (translated to regex <{expected_pattern_lines[i]}>)\"\n )\n sys.exit(1)", "def testStdoutAndStderr(self):\n with self.OutputCapturer():\n print('foo')\n print('bar', file=sys.stderr)\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)", "def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF\")\n out = f.getvalue()\n self.assertTrue(len(out) == 1)\n self.assertEqual(\"\\n\", out)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF fake\")\n msj = f.getvalue().strip()\n self.assertFalse(len(msj) == 1)\n self.assertEqual(\"\", msj)", "def _verify_export_failure(self, expectedText):\r\n resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz')\r\n self.assertEquals(resp.status_code, 200)\r\n self.assertIsNone(resp.get('Content-Disposition'))\r\n self.assertContains(resp, 'Unable to create xml for module')\r\n self.assertContains(resp, expectedText)", "def compare_output(self, input, output, expected):\n if type(input) == UnicodeType:\n input = input.encode('raw_unicode_escape')\n if type(output) == UnicodeType:\n output = output.encode('raw_unicode_escape')\n if type(expected) == UnicodeType:\n expected = expected.encode('raw_unicode_escape')\n # Remove \"generated on\" lines.\n output = self.remove_lines(output, ('generated on --',))\n expected = self.remove_lines(expected, ('generated on --',))\n try:\n self.assertEquals('\\n' + output, '\\n' + expected)\n except AssertionError:\n print >>sys.stderr, '\\n%s\\ninput:' % (self,)\n print >>sys.stderr, input\n print >>sys.stderr, '-: expected\\n+: output'\n print >>sys.stderr, ''.join(self.compare(expected.splitlines(1),\n output.splitlines(1)))\n raise", "def test_print_mimic_no_newlines(self):\n d = self.module.create_mimic_dict(\"imdev.txt\")\n buffer = StringIO()\n with redirect_stdout(buffer):\n self.module.print_mimic_random(d, 200)\n output = buffer.getvalue()\n self.assertNotIn(\n '\\n', output,\n \"There should not be any newline (\\\\n) characters in output\"\n )", "def test_do_not_need_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-fail.xml'\n ))\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-success.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(1, actual)\n self.assertIn('E999 lint error from txt-file.', self.errors[0])", "def nostdout():\n f = io.StringIO()\n with redirect_stdout(f):\n try:\n yield\n except 
Exception as err:\n raise err", "def test_handle_print_rich_exception(self):\n\n with io.StringIO() as buf:\n # Capture stdout logs (rich logs to stdout)\n with contextlib.redirect_stdout(buf):\n _print_rich_exception(Exception(\"boom!\"))\n # Capture the stdout output\n captured_output = buf.getvalue()\n\n assert \"Exception:\" in captured_output\n assert \"boom!\" in captured_output", "def test_normal_goes_normal(self):\n eq_(self.msg, output(self.msg,\"OUTPUT\"))", "def expect_output(self, file, parse_json=False):\n contents = self._data_file(file)\n patcher = mock.patch('sys.stdout', new_callable=StringIO)\n output = patcher.start()\n yield\n patcher.stop()\n if parse_json:\n self.assertEqual(json.loads(output.getvalue()),\n json.loads(contents))\n else:\n self.assertEqual(output.getvalue().split('\\n'), contents.split('\\n'))", "def test_capture_both():\n\n sys.stdout.write('Print to stdout')\n sys.stderr.write('Print to stderr')\n\n assert False", "def test_is_not_google_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')", "def test_add_unexpected_success(self):\n self.protocol.addUnexpectedSuccess(self.test)\n self.assertEqual(\n self.io.getvalue(), compat._b(\"uxsuccess: %s\\n\" % self.test.id()))", "def test_output_invalid(self):\n assert (\n self.route.output_invalid(hug_core.output_format.json).route[\"output_invalid\"]\n == hug_core.output_format.json\n )", "def out_test(self, func, arg, expect):\n std_out = StringIO()\n sys.stdout = std_out\n func(arg)\n output = std_out.getvalue()\n self.assertEqual(output, expect + '\\n')\n return output" ]
[ "0.6872373", "0.65048116", "0.6423676", "0.6372315", "0.63059235", "0.6282092", "0.62726283", "0.62563837", "0.61430126", "0.6134692", "0.61007786", "0.60741216", "0.6065867", "0.60580695", "0.6008732", "0.597988", "0.58616245", "0.58479875", "0.5832719", "0.5825116", "0.5822783", "0.58165854", "0.5811914", "0.5793671", "0.578525", "0.57807565", "0.5743345", "0.5714698", "0.57134557", "0.5711795" ]
0.69911766
0
Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap.
def render_image(self, rgbobj, dst_x, dst_y): self.logger.debug("redraw pixmap=%s" % (self.pixmap)) if self.pixmap is None: return self.logger.debug("drawing to pixmap") # Prepare array for rendering arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8) (height, width) = arr.shape[:2] return self._render_offscreen(self.pixmap, arr, dst_x, dst_y, width, height)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def draw(self, surface):\r\n surface.blit(self.image, self.rect)", "def draw(self):\n self.screen.blit(self.image, self.rect)", "def draw(self, surface):\n surface.blit(self.image, self.rect)", "def draw(self, surface):\n surface.blit(self.image, self.rect)", "def paint(self, game_screen: pygame.Surface) -> None:\r\n game_screen.blit(self._img, (self._x - self._l_w / 2, self._y - self._l_w / 2,\r\n self._l_w, self._l_w))", "def draw(self, screen):\n\n if self.exist:\n screen.blit(self._img, self._rect)", "def blit(self, screen):\r\n self.image.blit(screen, (self.rect.x, self.rect.y))\r\n pygame.display.update()", "def blitme(self):\r\n #draw the image to the screen at the position specifid by self.rect.\r\n self.screen.blit(self.image,self.rect)", "def blit(self):\n self.screen.blit(self.image, self.rect)", "def draw(self):\n if self.dirty or (self.image is None):\n self._render()\n self.screen.blit(self.image, self.rect)", "def draw(self, screen):\n pg.draw.rect(screen, self.bg_color, self.rect)\n\n for y, surf in enumerate(self.images):\n # Don't blit below the rect area.\n if y * self.font_height + self.font_height > self.rect.h:\n break\n screen.blit(surf, (self.rect.x, self.rect.y+y*self.font_height))", "def draw(self, surface):\n\n\t\tsurface.blit(self.image, self.rect.topleft)", "def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))", "def blitme(self):\n self.screen.blit(self.image, self.rect)\n # print('y = ' + str(self.rect.centery))\n # print('x = ' + str(self.rect.centerx))", "def blitme(self):\r\n self.screen.blit(self.image, self.rect)", "def blitme(self):\r\n self.screen.blit(self.image, self.rect)", "def blitme(self):\r\n self.screen.blit(self.image, self.rect)", "def blitme(self):\r\n self.screen.blit(self.image, self.rect)", "def draw(self, frame):\n xpos = OFS + self.x * TILE_SIZE\n ypos = OFS + self.y * TILE_SIZE\n frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image", "def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))", "def render_image(self,\n frame=None,\n factor=4,\n antialias=True,\n trim=False,\n transparent=False):\n if frame is not None:\n self.frame = frame\n params = dict(\n factor=factor,\n antialias=antialias,\n trim=trim,\n transparent=transparent)\n self._remote_call('_exportImage', target='Widget', kwargs=params)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)", "def blitme(self):\n self.screen.blit(self.image, self.rect)" ]
[ "0.61686844", "0.5998365", "0.5990171", "0.59325355", "0.59325355", "0.5924451", "0.5898981", "0.5876628", "0.58228207", "0.5799751", "0.5746103", "0.5726643", "0.5718437", "0.56835777", "0.565392", "0.5641117", "0.5641117", "0.5641117", "0.5641117", "0.56196475", "0.5606678", "0.5602619", "0.55967903", "0.55967903", "0.55967903", "0.55967903", "0.55967903", "0.55967903", "0.55967903", "0.55967903" ]
0.8753144
0
Called when a mouse button is pressed in the widget. Adjust method signature as appropriate for callback.
def button_press_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates where the button was pressed self.last_win_x, self.last_win_y = x, y button = 0 # Prepare a button mask with bits set as follows: # left button: 0x1 # middle button: 0x2 # right button: 0x4 # Others can be added as appropriate self.logger.debug("button down event at %dx%d, button=%x" % (x, y, button)) data_x, data_y = self.check_cursor_location() return self.make_ui_callback('button-press', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_press(self, x, y, button):\n\n pass", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def handle_mouse_press(self, event):", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def _press(self, event):", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def on_mouse_release(self, x, y, button):\n pass", "def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = self._images[ButtonImages.DEFAULT_IMAGE.value]", "def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def bind(self):\n self.canvas.bind(\"<ButtonPress-1>\", self.click)", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def emitPressEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mousePressEvent signal\n self.mousePress.emit(self, clickLocation, button, currentKbKey, items)", "def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)", "def HandlePress(self, event: tkEvent):\n pass", "def HandButton(self, event):\n pass", "def input(self, event: pygame.event) -> None:\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.user_clicked = True", "def button_press_cb(self, source, event):\n\n if event.button == MOUSE_BUTTON_RIGHT:\n pass\n return True\n elif event.button == MOUSE_BUTTON_MIDDLE:\n self.emit('begin-move')\n return True", "def handle_mouse_click(self, button: Button) -> None:\n if button.name == 'BACK':\n self._clear_all_input()\n self.current_page -= 1\n self._focused_button = None\n if self.current_page == len(self.pages) - 2:\n self.current_page -= 1\n elif button.name == 'Show Graph':\n self._plot_graph()\n elif button.name == 'Multiple Regression':\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 2\n self._update_ghg_coefs()\n elif button.tag == 'normal' and self.current_page < len(self.pages) - 2:\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 1\n elif isinstance(button, InputButton):\n self._focused_button = button", "def on_press(self):\n self.pressed = True", "def on_press(self):\n self.pressed = True", "def button_release_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where the button was released\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # prepare button mask as in button_press_event()\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-release', button, data_x, data_y)", "def mousePressEvent(self, mouse_event):\r\n return", "def __on_click(self, evt):\n if evt.button() == Qt.LeftButton:\n return self._on_left_click(evt)\n if evt.button() == Qt.RightButton:\n return 
self._on_right_click(evt)", "def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False", "def button_press_cb(self, darea, event):\n x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True", "def m_press(self, button: MButton):\n pass", "def ev_MOUSEDOWN(self, event):" ]
[ "0.8186688", "0.7803107", "0.7685904", "0.7667033", "0.7550329", "0.75264764", "0.74540734", "0.74537903", "0.7434162", "0.71306", "0.71141076", "0.7086629", "0.70717835", "0.70475805", "0.70216006", "0.70136315", "0.69730556", "0.69179136", "0.69159424", "0.69106615", "0.69092005", "0.69086593", "0.69086593", "0.6883387", "0.6881508", "0.687479", "0.68471086", "0.6836545", "0.6808178", "0.67737955" ]
0.786055
1
Called when a drop (drag/drop) event happens in the widget. Adjust method signature as appropriate for callback.
def drop_event(self, widget, event): # make a call back with a list of URLs that were dropped #self.logger.debug("dropped filename(s): %s" % (str(paths))) #self.make_ui_callback('drag-drop', paths) raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dropEvent(self, de):\n # dragging a track\n if hasattr(Globals.dragObject, \"trackFrame\"):\n de.accept()\n trackFrame = Globals.dragObject.trackFrame\n oldParent = trackFrame.parentWidget()\n if oldParent:\n args = (trackFrame, self, oldParent.parentWidget())\n else:\n args = (trackFrame, self, None)\n self.emit(PYSIGNAL('dropped'), (args))\n # not yet used\n #Animation.animate(trackFrame, self, doneFunc=self.slotAnimationDone)", "def on_drop(self):\n print(\"You have dropped\", self.name)", "def dropEvent(self, event):\n\n # Get the id color to drop the items into\n drop_id_color = self.itemAt(event.pos())\n drop_id_color = self.invisibleRootItem() \\\n if drop_id_color is None else drop_id_color\n\n # If the drop position is not valid we pass\n if drop_id_color is None:\n event.ignore()\n return\n\n # If the drop position is not an id color item we pass\n if drop_id_color.data(0, QtCore.Qt.UserRole) != \"color\":\n event.ignore()\n return\n\n # Get the drop items - the selected tree items\n drop_items = [x for x in self.selectedItems()\n if x.data(0, QtCore.Qt.UserRole) == \"object\"] or None\n\n # If not items selected we pass\n if drop_items is None:\n event.ignore()\n return\n\n # Drop the items into the new tree parent\n self._drop_tree_items(drop_items, drop_id_color)\n\n event.accept()\n\n return None", "def dropEvent(self, event):\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n file_path = event.mimeData().urls()[0].toLocalFile()\n self.set_image(file_path)\n self.folderLocation.setText(file_path)\n \n event.accept()\n else:\n event.ignore()", "def dropEvent(self, event):\r\n source = event.mimeData()\r\n if source.hasUrls():\r\n files = mimedata2url(source)\r\n if files:\r\n files = [\"r'%s'\" % path for path in files]\r\n if len(files) == 1:\r\n text = files[0]\r\n else:\r\n text = \"[\" + \", \".join(files) + \"]\"\r\n self.shell.insert_text(text)\r\n elif source.hasText():\r\n lines = unicode(source.text())\r\n self.shell.set_cursor_position('eof')\r\n self.shell.execute_lines(lines)\r\n event.acceptProposedAction()", "def dropEvent(self, event):\r\n source = event.mimeData()\r\n if source.hasUrls():\r\n files = mimedata2url(source)\r\n if files:\r\n self.plugin.load(files)\r\n elif source.hasText():\r\n editor = self.currentWidget()\r\n if editor is not None:\r\n editor.insert_text( source.text() )\r\n event.acceptProposedAction()", "def _on_drop(self, event):\n data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)\n if not data.isNull():\n data_stream = QDataStream(data, QIODevice.ReadOnly)\n parsed = json.loads(data_stream.readString().decode('utf8'))\n\n # Refer to `mime.py` for docs about format\n version = parsed['version']\n if version not in (1, 2):\n raise ValueError(\"Unsupported version of QmxGraph MIME data: {}\".format(version))\n\n x = event.pos().x()\n y = event.pos().y()\n\n if version in (1, 2):\n vertices = parsed.get('vertices', [])\n scale = self.api.get_zoom_scale()\n for v in vertices:\n # place vertices with an offset so their center falls\n # in the event point.\n vertex_x = x + (v['dx'] - v['width'] * 0.5) * scale\n vertex_y = y + (v['dy'] - v['height'] * 0.5) * scale\n self.api.insert_vertex(\n x=vertex_x,\n y=vertex_y,\n width=v['width'],\n height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n if version in (2,):\n decorations = parsed.get('decorations', [])\n for v in decorations:\n self.api.insert_decoration(\n x=x,\n y=y,\n width=v['width'],\n 
height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n event.acceptProposedAction()\n else:\n event.ignore()", "def dropEvent(self, event: QtGui.QDropEvent) -> None:\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n self.image = event.mimeData().urls()[0].toLocalFile()\n x = self.width()\n y = self.height()\n im = QPixmap(self.image).scaled(x, y) # , aspectRatioMode=Qt.KeepAspectRatio)\n im.save(os.getcwd() + \"/tmp.jpg\")\n self.image = (os.getcwd() + \"/tmp.jpg\")\n self.setPixmap(im)\n # self.setPixmap(QPixmap(self.image))\n self.setStyleSheet(\"\")\n event.accept()\n else:\n event.ignore()", "def addDropListener(self, callback: 'callable'):\n self.getView().addDropListener(callback)", "def dropEvent(self, QDropEvent):\n srcItems = self.selectedItems()\n dstInd = (self.indexAt(QDropEvent.pos()).row() + 1)\n kbMod = QDropEvent.keyboardModifiers()\n #-- Create New Items --#\n for n, srcItem in enumerate(srcItems):\n itemDict = self.treeParent.getItemDict(srcItem)\n newItem = self.treeParent.on_addVar(index=(dstInd + n))\n self.treeParent.setItem(newItem, **itemDict)\n #-- Remove Items --#\n if not kbMod == QtCore.Qt.ControlModifier:\n for srcItem in srcItems:\n self.takeTopLevelItem(self.indexOfTopLevelItem(srcItem))\n self.treeParent.reindexVar()", "def dropMimeData(self, p_int, QMimeData, Qt_DropAction): # real signature unknown; restored from __doc__\r\n return False", "def on_item_dropped(self, url):\n print 'Weld.on_item_dropped:', url\n #make sure all struct are present\n if not(self.project and self.project.level):\n print >> sys.stderr, 'it\\'s too early to drop stuff: '\\\n 'create a project and a level first !'\n return\n\n #retrieve data if it comes from weld\n if url in self.resMan:\n props = self.resMan.file_props(url)\n if props is None:\n print >> sys.stderr, curr_f(), ': url(\\'%s\\') in self.resMan '\\\n 'but can\\'t retrieve props.' 
% (url)\n return\n props = self.project.level.resMan.add_resource(self.resMan.base_path,\n props)\n url = props['url']\n if props == {} or url not in self.project.level.resMan:\n print >> sys.stderr, curr_f(), 'could not retrieve file and/or '\\\n 'dependencies for props:', pp(props)\n return\n\n #instanciate it\n if url in self.project.level.resMan:\n props = self.project.level.resMan.file_props(url)\n dtp = self.project.level.qsteelwidget.dropTargetPosition(Config.instance().drop_target_vec)\n props['position'] = dtp\n props['rotation'] = self.project.level.qsteelwidget.dropTargetRotation()\n if props['resource_type'] == 'meshes':\n props['meshName'] = props['name']\n self.project.level.instanciate(props)\n s = 'dropped agent \\'%s\\' with id %i' % (props['name'], props['agentId'])\n print s\n Ui.instance().show_status(s)\n else:\n Ui.instance().show_status('can only drop meshes so far')", "def DoDrop(self, docks, panes, target, pt, offset=wx.Point(0, 0)):\r\n\r\n if target.IsToolbar():\r\n return self.DoDropToolbar(docks, panes, target, pt, offset)\r\n elif target.IsFloating():\r\n return self.DoDropFloatingPane(docks, panes, target, pt)\r\n else:\r\n return self.DoDropNonFloatingPane(docks, panes, target, pt)", "def drag_and_drop_attempt():\n\n class InitialState(BaseState):\n \"\"\"\n Initial state for the SimpleGUI.\n \"\"\"\n\n def _on_enter(self, gui):\n \"\"\"\n Construct the buttons upon entering the state.\n\n :return:\n \"\"\"\n print(\"In initial state.\")\n\n '''Create drag and drop window'''\n gui.entry_sv = tk.StringVar()\n gui.drop_box_list = []\n gui.drop_box_items = tk.Listbox(master=gui.root, listvariable=gui.drop_box_list)\n gui.drop_box_text = tk.StringVar()\n gui.drop_box_text.set(\"Drop images here\")\n gui.entry = tk.Entry(gui.root, textvar=gui.drop_box_text, justify='center')\n gui.entry.config(font=(\"Courier\", 44))\n gui.entry.place(x = 200, y=200, width=800, height=800)\n #gui.entry.pack()\n gui.entry.drop_target_register(DND_FILES)\n gui.entry.dnd_bind('<<Drop>>', self.drop(gui))\n gui.update()\n\n def _on_exit(self, gui):\n \"\"\"\n Return the next state.\n\n :param gui:\n :return:\n \"\"\"\n gui.update()\n return WaitForDrop()\n\n def drop(self, gui):\n def _drop(event):\n files = root.tk.splitlist(event.data)\n gui.entry_sv.set(files)\n return _drop\n\n class WaitForDrop(BaseState):\n \"\"\"\n State for having buttons on.\n \"\"\"\n\n def _on_enter(self, gui):\n \"\"\"\n\n :param gui:\n :return:\n \"\"\"\n print(\"In wait for drop state.\")\n\n def _state_main(self, gui):\n \"\"\"\n The main code for the ButtonsOn state.\n\n :param gui:\n :return:\n \"\"\"\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # 
panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 
3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n def _on_exit(self, gui):\n if gui.program_running:\n gui.update()\n return WaitForDrop()\n else:\n return None\n\n class DragAndDropGUI:\n \"\"\"\n Object for a simple gui.\n \"\"\"\n\n def __init__(self, root):\n \"\"\"\n Initializing the SimpleGUI object.\n \"\"\"\n self.root = root\n w, h = root.winfo_screenwidth(), self.root.winfo_screenheight()\n self.root.geometry(\"%dx%d+0+0\" % (w, h))\n self.root.protocol(\"WM_DELETE_WINDOW\", self.end_program)\n self.program_running = True\n\n def update(self):\n \"\"\"\n Update the GUI.\n\n :return:\n \"\"\"\n self.root.update_idletasks()\n self.root.update()\n return self.root\n\n def end_program(self):\n \"\"\"\n Ends the program.\n\n :return:\n \"\"\"\n if self.entry_sv.get() != \" \":\n self.entry_sv.set(\" \")\n else:\n self.entry_sv.set(\"!\")\n self.root.destroy()\n self.program_running = False\n\n '''Initialize and run GUI object'''\n root = tkinterdnd2.Tk()\n # Maximize window while maintaining title bar\n gui = DragAndDropGUI(root)\n state_machine = 
StateMachine(initial_state=InitialState())\n state_machine.run(gui)", "def dropEvent(self, e: QtGui.QDropEvent):\n src = e.source()\n if src is not self:\n for item in src.selectedItems():\n clone = item.clone()\n clone.setFlags(clone.flags() | Qt.ItemIsEditable)\n self.addTopLevelItem(clone)\n super().dropEvent(e) # Call the original function", "def drop(self, event):\n self.config(cursor='arrow')", "def player_drop(self, item):\n dropped = self.drop(item)\n if dropped:\n self.handler.message_box.add_msg(\"You drop the {}!\".format(dropped),\n data.COLOURS['player_item_text'])", "def mouseReleaseEvent (self, event):\n if self.itemMoved:\n self.parentWidget.DataChanged.emit()\n self.itemMoved = False; \n super(DiagramItem, self).mouseReleaseEvent(event)", "def mouseMoveEvent(self, e):\n if e.buttons() != Qt.LeftButton:\n return\n\n mimeData = QtCore.QMimeData()\n mimeData.setData(\n app.NODE_MIMETYPE,\n QtCore.QByteArray(bytes('data string', 'utf-8')),\n )\n\n drag = QtGui.QDrag(self)\n drag.setMimeData(mimeData)\n drag.setHotSpot(e.pos() - self.rect().topLeft())\n \n dropAction = drag.exec_(Qt.MoveAction)", "def drop(self):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n drop_z = self.drop_height\n \n #drop to given height\n self.move_to(init_x, init_y, drop_z)\n \n #open gripper\n self.gripper.command_position(100)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)", "def dragEnterEvent(self, event):", "def drag_and_drop(self, droppable):\n self.scroll_to()\n ActionChains(self.driver).drag_and_drop(\n self._element,\n droppable._element,\n ).perform()", "def slider_dragged(self):\n pass", "def SetCallbackFunc( self, dropCallbacFunc=None ) :\n \n # Create a dropFiles event association for this control.\n # [ SetDropTarget ] is a built-in method for (all ?) controls.\n self.folderDropTarget.SetDropTarget( ddt.FilesDropTarget( self.folderDropTarget ) )\n \n # Install the callback-function for this class's parent-widget dropFiles-event.\n self.folderDropTarget.dropFunc = dropCallbacFunc", "def drag_data_received(self, widget, context, x, y, sel_data, info, time):\n if not sel_data:\n return\n #modern file managers provide URI_LIST. 
For Windows split sel_data.data\n files = sel_data.get_uris()\n for file in files:\n if win():\n clean_string = conv_to_unicode(\n file.replace('\\0',' ').replace(\"\\r\", \" \").strip(),\n None)\n else:\n clean_string = file\n protocol, site, mfile, j, k, l = urlparse(clean_string)\n if protocol == \"file\":\n name = url2pathname(mfile)\n mime = get_type(name)\n if not is_valid_type(mime):\n return\n photo = MediaObject()\n self.uistate.set_busy_cursor(True)\n photo.set_checksum(create_checksum(name))\n self.uistate.set_busy_cursor(False)\n base_dir = cuni(media_path(self.dbstate.db))\n if os.path.exists(base_dir):\n name = relative_path(name, base_dir)\n photo.set_path(name)\n photo.set_mime_type(mime)\n basename = os.path.basename(name)\n (root, ext) = os.path.splitext(basename)\n photo.set_description(root)\n with DbTxn(_(\"Drag Media Object\"), self.dbstate.db) as trans:\n self.dbstate.db.add_object(photo, trans)\n widget.emit_stop_by_name('drag_data_received')", "def dragMoveEvent(self, event):\n if event.mimeData().hasImage:\n event.accept()\n else:\n event.ignore()", "def drag_and_drop(self,param,ignore_error_handle = False):\n message = {}\n origin_element = param.get('origin',None);\n destination_element = param.get('destination',None);\n step = 'drag a element to another element'\n try:\n self.driver.drag_and_drop(origin_element,destination_element);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def _fire_dropping(self):\n\t\tif len(self.droppings) < self.settings.droppings_allowed:\n\t\t\tnew_dropping = Dropping(self)\n\t\t\tself.droppings.add(new_dropping)", "def button_press_cb(self, darea, event):\n x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True" ]
[ "0.72224736", "0.7178574", "0.7033632", "0.69308865", "0.69137365", "0.68517953", "0.66538286", "0.660371", "0.6481881", "0.6399347", "0.6314507", "0.63019335", "0.6220606", "0.6072313", "0.60482043", "0.6036351", "0.59223694", "0.59200025", "0.58687717", "0.58277893", "0.5734516", "0.56892866", "0.56366456", "0.56327146", "0.5616902", "0.55873185", "0.5570762", "0.5556108", "0.5525009", "0.55130804" ]
0.81672764
0
Gets details on currently logged in athlete.
def get_athlete(token):
    url = "https://www.strava.com/api/v3/athlete"
    params = {'access_token': token}
    response = return_json(url, "GET", parameters=params, timeout=10)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete", "def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }", "def get_account_details(self):\n pass", "def user_info(self):\n return self.auth.get_user_by_session()", "def getInfo(self):\n self.name, self.description = achievements[self.id]", "def getAccidental(self):\n return self.accidental", "def get_teacher(self) -> str :\n return self.teacher", "def display_accounts_details():\n return Credentials.display_credentials()", "def get_user_details(self, response):\n\n log.info(str(response) + \"-\" * 80)\n log.info(str(dir(self)) + \"-\" * 80)\n\n return response", "def get_teacher():\n\n rows = db.engine.execute(f\"SELECT * FROM teacher_login WHERE loginid = {g.user.loginid}\")\n res = []\n for row in rows:\n res.append(dict(row))\n return jsonify(res)", "def details(self):\n logging.info(self.user)", "def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return", "def user_info(self):\n \n return self.auth.get_user_by_session()", "def get (self):\n\n logged_in, db_user = ADayThere.logged_in_user ()\n if not logged_in:\n self.response.status = 401\n return\n\n res = self.__build_response (db_user)\n self.response.write (json.dumps (res))", "def account_info(request):\r\n user = request.user\r\n\r\n return _api_response(request, user.safe_data())", "def get_user_info(self) -> str:\n return self._searcher.get_user_info()", "def _athlete_endpoint(self, athlete):\n return '{host}{athlete}'.format(\n host=self.host,\n athlete=quote_plus(athlete)\n )", "def get_adventure_detail(request):\n if request.is_ajax():\n user = request.user\n game_saved = user.game_saved\n adventure_id = game_saved.adventure_saved\n task_num = game_saved.task_saved\n adventure = Adventure.objects.get(adventure_id=adventure_id)\n Adventures_info = adventures_info.objects.get(adventure_name=adventure)\n task = Task.objects.get(adventure_name=adventure, task_number=task_num)\n\n\n alist =[\n {\n \"name\" : str(adventure.adventure_name),\n \"items\" : str(Adventures_info.items_needed),\n \"expenses\" : str(Adventures_info.expenses),\n \"locations\" : Adventures_info.locations,\n \"mapaddress\" : str(task.google_map),\n \"theme_character_url\" : str(adventure.theme_character_url)\n }\n\n ]\n\n return JsonResponse(alist, safe=False)\n else:\n raise PermissionDenied()", "def get(self):\r\n return get_user(request)", "def fetch_stats(access_token, athlete_id):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete stats\n r = requests.get(API_URL + \"/athletes/{}/stats\".format(athlete_id), headers=headers)\n 
stats = r.json()\n if \"errors\" in stats:\n raise AuthError(stats[\"message\"])\n\n return {\n \"recentRuns\": stats[\"recent_run_totals\"],\n \"yearRuns\": stats[\"ytd_run_totals\"],\n \"allRuns\": stats[\"all_run_totals\"],\n }", "def get_self_account_details(self):\n return self.mrr_obj.get('/whoami')", "def get():\n return prepare_response(get_user_info())", "def fusion_api_get_active_user(self):\n return self.loginsession.get_active_user()", "def get(self):\n return self.context.as_dict(self.user)", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def get_profile_details(self):\n cursor = self.__connection.cursor()\n cursor.execute(\n \"select first_name, last_name, purchased_products from neutron_buyer where buyer_id=%s\",\n (self.__buyer_id,)\n )\n result = cursor.fetchone()\n if result:\n return result\n raise IDNotFoundException", "def get_user_details():\n rv = query_db('select * from user')\n return rv[0] if rv else None", "def get_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())", "def get_amenity(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())", "def user_data(self, access_token, *args, **kwargs):\n headers = {'Authorization': 'Bearer %s' % access_token}\n try:\n resp = requests.get(ASANA_USER_DETAILS_URL,\n headers=headers)\n resp.raise_for_status()\n return resp.json()['data']\n except ValueError:\n return None" ]
[ "0.62125313", "0.603639", "0.5979062", "0.5663854", "0.5658526", "0.5654593", "0.55721027", "0.5450002", "0.5400566", "0.5396792", "0.5345879", "0.5323987", "0.53042555", "0.52950203", "0.5274707", "0.5259616", "0.52581507", "0.5236326", "0.52285975", "0.52084494", "0.519947", "0.51918435", "0.5183672", "0.5168598", "0.51325107", "0.51316017", "0.5130489", "0.51264215", "0.51264215", "0.5110099" ]
0.636169
0
Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.
def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp):  # pylint: disable=too-many-arguments
    url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'
    headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'}
    payload = [{"_key": athlete_id, "id": athlete_id, "firstname": firstname, "lastname": lastname, "fullname": firstname + " " + lastname, "weight": weight, "ftp": ftp}]
    helper.send_http_request(url, "POST", headers=headers, payload=payload, verify=False, use_proxy=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete", "def save(self):\n for t in self.ace_types:\n self.api.api_request(\"PUT\", self.url + t, data={t: self[t]})", "def log_strava_event(athlete_id, action):\n strava_event = StravaEvent(athlete_id=athlete_id, action=action, timestamp=datetime.utcnow())\n db.session.add(strava_event)\n db.session.commit()", "def store_in_db(offers):\n with open(OFFERS_FILE, 'w', encoding='utf8') as f:\n json.dump(offers, f, ensure_ascii=False, indent=4)", "def store_triples(self, triples):\n cursor = self.db.cursor()\n cursor.executemany(\"INSERT INTO triples VALUES (?, ?, ?)\", triples)\n self.db.commit()", "def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-alphabay-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"alphabay_listing\",\n body=item\n )", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())", "def save_credentials(self):\n Stores.account_store.append(self.register_stores())", "def store_all_to_database(self, session):\n\n description = 'Established in 1974, JSM is a family-owned provider of quality apartments. We offer a variety of units from studios to five bedrooms with every location benefitting from our award winning amenities, responsive 24 hour maintenance, and friendly property management staff. JSM Development began in Champaign, IL, and manages roughly 1,500 apartments and 450,000 sq/ft of commercial space. JSM has been a major contributor to the development of Campustown in Champaign and the East Campus area in Urbana at the University of Illinois. 
These popular locations are now home to major national retailers such as Urban Outfitters, Chipotle, Panera, Cold Stone Creamery, and Noodles & Co.'\n\n # Insert a JSM company instance into the database\n current_company = Company(\n name='JSM',\n baseurl='https://apartments.jsmliving.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n\n # Write all queries to the database\n session.commit()", "def save(self):\n self.lock()\n\n trader = self.strategy.trader()\n\n for trade in self.trades:\n t_data = trade.dumps()\n ops_data = [operation.dumps() for operation in trade.operations]\n\n # store per trade\n Database.inst().store_user_trade((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, trade.id, trade.trade_type, t_data, ops_data))\n\n # dumps of regions\n trader_data = {}\n regions_data = [region.dumps() for region in self.regions]\n\n Database.inst().store_user_trader((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, self.activity, trader_data, regions_data))\n\n self.unlock()", "def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def Store(self):\n\n if FLAGS.verbose or FLAGS.verbose_writes:\n print 'Writing track:'\n for key in sorted(self.persistant):\n print ' %s = %s' %(key, self.persistant[key])\n\n if not self.persistant:\n return\n \n try:\n self.db.WriteOneRow('tracks', 'id', self.persistant)\n except MySQLdb.Error, (errno, errstr):\n if errno != 1064:\n raise TrackException(self.db, 'Could not store track %s: %s \"%s\"'\n %(self.persistant['id'], errno, errstr))\n except sql.FormatException, e:\n raise e\n except Exception, e:\n raise TrackException(self.db, 'Could not store track: %s: \"%s\" (%s)'\n %(self.persistant['id'], e, type(e)))", "def persist_db(database, tweets):\n log.debug(\"{} tweets to db\".format(len(tweets)))\n\n for tweet in tweets:\n 
tweet['_id'] = tweet['id_str']\n database.update(tweets)", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def _persist(self):\n trunk.set(self.uuid, self.json)", "def save_favorited_trail(hike_id, user_id):\n\n trail = Trail(hike_id = hike_id, user_id = user_id)\n\n db.session.add(trail)\n db.session.commit()\n\n return (trail)", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def add_to_db(ark_obj):\n session = Session()\n session.add(ark_obj)\n session.commit()\n session.close()", "def _athlete_endpoint(self, athlete):\n return '{host}{athlete}'.format(\n host=self.host,\n athlete=quote_plus(athlete)\n )", "def save_aliment(self, aliment_name):\n aliment = Aliment.objects.get(name=aliment_name)\n self.aliments_pref.add(aliment)", "def save(self, db):\n db.googleResults.insert_one(\n {\n \"searchQuery\": self.search_query,\n \"title\": self.title,\n \"link\": self.link,\n \"subtext\": self.subtext,\n \"searchterms\" : self.searchterms, # array\n \"queryTime\": datetime.datetime.now(),\n \"details\": self.link_scripts\n }\n )", "def save_data(db, dict_key, url, data_to_store):\n if dict_key not in db:\n db[dict_key] = []\n data = db[dict_key]\n data.append({\n 'url': url,\n 'data': data_to_store,\n })\n db[dict_key] = data", "def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )", "def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata", "def store_offers(offers):\n reducer((serialize, store_in_db), offers)", "def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)", "def _save_tally_share(\n self, guardian_id: GuardianId, guardians_tally_share: DecryptionShare\n ) -> None:\n self._tally_shares[guardian_id] = guardians_tally_share", "def store_trades(self, trades):\n trades_file = self.current_trades_path()\n fo = trades_file.open(\"wb\")\n LOGGER.info(f\"storing {len(trades)} trades on disk for league {self.league_id}\")\n pickle.dump(trades, fo)" ]
[ "0.65488034", "0.5803945", "0.56478375", "0.52747154", "0.5271561", "0.52378386", "0.51092994", "0.50849545", "0.50378954", "0.5033445", "0.4992036", "0.49815983", "0.4975764", "0.4963135", "0.49445814", "0.48963758", "0.48822185", "0.48742172", "0.48721516", "0.48705444", "0.4864023", "0.48592743", "0.48569056", "0.48561573", "0.48419502", "0.48218003", "0.48206583", "0.48117527", "0.48082826", "0.48065695" ]
0.8156723
0
Creates dict with athlete details, including token expiry.
def set_athlete(response):
    name = response['athlete']['firstname'] + " " + response['athlete']['lastname']
    athlete = {
        'id': response['athlete']['id'],
        'name': name,
        'access_token': response['access_token'],
        'refresh_token': response['refresh_token'],
        'expires_at': response['expires_at'],
        'ts_activity': 0}
    return athlete
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def asdict(self):\n return {\n \"access_token\": self.access_token,\n \"audience\": self.audience,\n \"token_type\": self.token_type,\n \"expires_in\": self.expires_in,\n \"expires_at\": self.expires_at,\n }", "def alpaca_create(self, keyname = \"ALPACA_API_KEY\", secret = \"ALPACA_SECRET_KEY\"):\n aak = os.getenv(keyname)\n ask = os.getenv(secret)\n if type(aak) is not str | type(aak) is not str:\n raise Exception(\"Could not load API or Secret Key\")\n #try to create object regardless \n alpaca = tradeapi.REST(\n aak,\n ask,\n api_version=\"v2\"\n )\n self.alpaca_api = alpaca\n return alpaca", "def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return", "def get_sso_data(self):\n return {\n 'access_token': self.access_token,\n 'refresh_token': self.refresh_token,\n 'expires_in': (\n self.access_token_expires - datetime.utcnow()\n ).total_seconds()\n }", "def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }", "def create_access_token(\n data: tp.Mapping[str, tp.Any],\n *,\n expires_delta: tp.Optional[timedelta] = None\n) -> str:\n to_encode = data.copy()\n expires_delta = expires_delta or DEFAULT_EXPIRES_DELTA\n expires = datetime.utcnow() + expires_delta\n to_encode.update({\"exp\": expires, \"sub\": ACCESS_TOKEN_SUBJECT})\n return jwt.encode(\n to_encode,\n config.SECRET_KEY,\n algorithm=ALGORITHM,\n json_encoder=JSONEncoderUUID\n )", "def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token", "def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers", "def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n 
expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)", "def test_to_dict_amenity(self):\n format = \"%Y-%m-%dT%H:%M:%S.%f\"\n holi = Amenity()\n d = holi.to_dict()\n self.assertIsInstance(d, dict)\n for keys in d:\n self.assertTrue(keys, d)\n self.assertTrue('__class__' in d)\n self.assertEqual(d[\"__class__\"], \"Amenity\")\n self.assertIsInstance(d[\"created_at\"], str)\n self.assertIsInstance(d[\"updated_at\"], str)\n self.assertEqual(d[\"created_at\"], holi.created_at.strftime(format))\n self.assertEqual(d[\"updated_at\"], holi.updated_at.strftime(format))", "def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n return voter_in_dict", "def post_amenity_obj():\n dic = {}\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n if \"name\" not in dic.keys():\n abort(400, \"Missing name\")\n new_ame = amenity.Amenity()\n for k, v in dic.items():\n setattr(new_ame, k, v)\n storage.new(new_ame)\n storage.save()\n return jsonify(new_ame.to_dict()), 201", "def set_auth(self):\n timestamp = str(int(time.time()))\n unique = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(16))\n hashstr = sha1((self.callerid + timestamp +\n self.privatekey + unique).encode('utf8')).hexdigest()\n logger.debug(\"Time from api {}\".format(timestamp))\n\n return {\"callerId\": self.callerid,\n \"time\": timestamp,\n \"unique\": unique,\n \"hash\": hashstr\n }", "def generate_pair(cls, user: User) -> Dict[str, str]:\n if not isinstance(user, User):\n raise PermissionDenied()\n\n refresh_token = RefreshToken.objects.create(user=user)\n access_payload = refresh_token.get_payload_by_token()\n access_payload['type'] = 'access'\n access_token = jwt_encode(access_payload)\n\n return {\n 'access_token': access_token,\n 'refresh_token': refresh_token.token,\n }", "def create_access_token(self):\n\t\t# Wraper for also caching invalid results\n #def getMetadataRofs(path):\n #\ttry:\n # \treturn self.client.metadata(path)\n # except Exception, e:\n # log.write('Exception at getMetadataRofs for path '+ path + '\\n')\n # pprint(e, log)\n # return False\n\n\t\ttry:\n\t\t\trequest_token = self.session.obtain_request_token()\n\t\t\turl = self.session.build_authorize_url(request_token)\n\t\t\tprint url\n\t\t\traw_input()\n\t\t\taccess_token = self.session.obtain_access_token(request_token)\n\t\t\tself.client = client.DropboxClient(self.session)\n\t\t\t\n\t\t\t# Build cache for metadata querying\n\n\t\t\t# Wraper for also caching invalid results\n\t\t\tdef getMetadataRofs(path):\n\t\t\t\ttry:\n\t\t\t\t\treturn self.client.metadata(path)\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error('Exception at getMetadataRofs for path '+ path + '\\n')\n\t\t logger.debug(sys.exc_info()[0])\n\t\t\t\t\treturn False\n\n\t\t\tself.cache_metadata = Cache(getMetadataRofs)\n\t\t\tself.cache_files = {}\n\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception %s at create_access_token' % (sys.exc_info()[0]))\n\t\t\tlogger.debug(pformat(sys.exc_info()))", "def create_temporary_access_token(self, api_token: str) -> dict:\n query = \"\"\"\n mutation CreateToken {\n createMyProfileTemporaryReadAccessToken(input: {}) {\n temporaryReadAccessToken {\n token\n expiresAt\n }\n }\n }\n \"\"\"\n\n path = jmespath.compile(\n \"\"\"\n 
data.createMyProfileTemporaryReadAccessToken.temporaryReadAccessToken.{\n token: token\n expires_at: expiresAt\n }\n \"\"\"\n )\n data = self.do_query(query, api_token=api_token)\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"token\", \"expires_at\"])\n parsed_data[\"expires_at\"] = parse_datetime(parsed_data[\"expires_at\"])\n return parsed_data", "def create_amenity():\n new_amenity_dict = request.get_json(silent=True)\n if new_amenity_dict is None:\n return jsonify({\"error\": \"Not a JSON\"}), 400\n if 'name' not in request.json:\n return jsonify({\"error\": \"Missing name\"}), 400\n new_amenity = Amenity(**new_amenity_dict)\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201", "def get_initial_author_dict():\n adict = {}\n try:\n ah = run_sql(\"select aterm,hitlist from rnkAUTHORDATA\")\n for (a, h) in ah:\n adict[a] = deserialize_via_marshal(h)\n return adict\n except:\n register_exception(prefix=\"could not read rnkAUTHORDATA\", alert_admin=True)\n return {}", "def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:\n to_encode = data.copy()\n if expires_delta:\n expire = datetime.utcnow() + expires_delta\n else:\n expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n\n to_encode.update({\"exp\": expire})\n return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)", "def fetch_stats(access_token, athlete_id):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete stats\n r = requests.get(API_URL + \"/athletes/{}/stats\".format(athlete_id), headers=headers)\n stats = r.json()\n if \"errors\" in stats:\n raise AuthError(stats[\"message\"])\n\n return {\n \"recentRuns\": stats[\"recent_run_totals\"],\n \"yearRuns\": stats[\"ytd_run_totals\"],\n \"allRuns\": stats[\"all_run_totals\"],\n }", "def create_amenity():\n my_dict = request.get_json()\n if my_dict is None:\n abort(400, \"Not a JSON\")\n elif \"name\" not in my_dict:\n abort(400, \"Missing name\")\n new_amenity = Amenity(**my_dict)\n new_amenity.save()\n return jsonify(new_amenity.to_dict()), 201", "def generate_oauth_headers(access_token: str) -> dict:\n return {'Authorization': 'Bearer ' + access_token}", "def create_amenity():\n amenity_json = request.get_json()\n if amenity_json is None:\n abort(400, 'Not a JSON')\n if amenity_json.get('name') is None:\n abort(400, \"Missing name\")\n amenity = Amenity(**amenity_json)\n storage.new(amenity)\n storage.save()\n return jsonify(amenity.to_dict()), 201", "def get_agol_token():\n params = {\n 'client_id': app.config['ESRI_APP_CLIENT_ID'],\n 'client_secret': app.config['ESRI_APP_CLIENT_SECRET'],\n 'grant_type': \"client_credentials\"\n }\n request = requests.get(\n 'https://www.arcgis.com/sharing/oauth2/token',\n params=params\n )\n token = request.json()\n print(\"AGOL token acquired: {0}\".format(token))\n return token", "def _standard_token(self):\n return {\n 'iss': 'https://iam-test.idc.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 200000\n }", "def amenity_ret():\n ame_list = []\n all_objs = storage.all(\"Amenity\")\n for obj in all_objs.values():\n ame_list.append(obj.to_dict())\n return jsonify(ame_list)", "def __extract_athletes(self):\n for ath in self.athletes:\n if dl.get_squad_id(ath) not in self.data_engine:\n # Athlete has no squad. 
Just skip over it.\n continue\n\n team_criteria = \\\n self.data_engine[dl.get_squad_id(ath)][\"team_criteria\"]\n\n if not team_criteria:\n # Probably already generated a team for athlete[\"squad_id\"]\n continue\n\n if athlete_match(ath, make_athlete_criteria(team_criteria)):\n self.__update_team_criteria(team_criteria, ath)\n yield ath", "def create_enrollment(context: dict) -> dict:\n enrollment = Enrollment()\n\n for attr in context.keys():\n setattr(enrollment, attr, context[attr])\n\n enrollment.save()\n return enrollment.asdict()", "def to_dictionary(apartment_obj):\n if isinstance(apartment_obj, Apartment):\n return {\"expenses\": apartment_obj.expenses}\n\n raise Exception(\"apartment_obj is not of type Apartment\")" ]
[ "0.6095435", "0.5521886", "0.5451787", "0.5388297", "0.5352944", "0.5180581", "0.5166357", "0.5155647", "0.5131322", "0.51014733", "0.50938517", "0.50851166", "0.5080323", "0.5078669", "0.505904", "0.50572944", "0.5016414", "0.50079054", "0.49948767", "0.49728522", "0.49649662", "0.49618575", "0.49447498", "0.49316362", "0.49138573", "0.4899521", "0.48984912", "0.48825777", "0.48758575", "0.4871646" ]
0.7620931
0
Writes activity to Splunk index.
def write_to_splunk(**kwargs):
    event = helper.new_event(**kwargs)
    ew.write_event(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, data):\n activities = [json.loads(activity['Json']) for activity in data]\n\n for i in range(len(activities)):\n activities[i]['created_at'] = to_datetime(activities[i]['created_at'])\n\n with Elastic(index='wink', doc_type='activity') as elastic:\n elastic.upload(activities, 'created_at')\n\n Log.info(\"Successfully uploaded wink activity data into elasticsearch.\")", "def write(self, record):\n # Make Splunk ready payload data and append it to self._buffers list.\n self._buffer.append({\n 'index': self._index,\n 'sourcetype': 'json',\n 'event': record\n })\n\n # If the records count in self._buffer is more than allowed by\n # self._buffer_size, send those records to Splunk.\n if len(self._buffer) >= self._buffer_size:\n self._flush()", "def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)", "def write(self, host, index):\n msg = []\n operation = \"WRITE\"\n if not self.create_uid(host, index):\n return False\n url = \"%s%s%s\" % (\"http://\", host, \"/api/put\")\n payload = {\"metric\": METRIC_NAME, \"timestamp\": TIMESTAMP_MILLIS(), \\\n \"value\": METRIC_VAL, \"tags\":{TAGK: \"%s.%d\" % (TAGV, index)}}\n headers = {\"content-type\": \"application/json\"}\n try:\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n if response.status_code == 204:\n LOGGER.debug(\"Value 1 inserted to metric %s\", METRIC_NAME)\n self.process_resp([], operation, \"1\", index)\n return True\n response_dict = json.loads(response.text)\n msg.append(response_dict[\"error\"][\"message\"])\n LOGGER.warning(\"Unable to write 1, error message is %s\", \\\n response_dict[\"error\"][\"message\"])\n self.process_resp(msg, operation, \"0\", index)\n return False\n except requests.exceptions.ConnectionError as ex_message:\n LOGGER.warning(\"Unable to write 1, error message is %s\", str(ex_message))\n self.process_resp([str(ex_message)], operation, \"0\", index)\n return False", "def save_index(self):\n vsn_objs = [dict(Id = v['id'], Name = v['name']) for v in self.versions]\n self.backend.write_json(dict(\n Versions = vsn_objs,\n Channels = [], # This is unused.\n ApiVersion = 0,\n ), self.index_path())", "def write(self, index, data):\n isNotFirstCmd = False\n # Write Opcode\n self.__ser_wr_trans(RG_WR, isNotFirstCmd)\n isNotFirstCmd = True\n # Write Address\n self.__ser_wr_trans(index, isNotFirstCmd)\n # Write Data\n self.__ser_wr_trans(data, isNotFirstCmd)", "def write(cls, activity_type, actor, target, data):\n\n activity_log = ActivityLog.objects.create(\n activity_type=activity_type, actor=actor, target=target, data=data\n )\n cls.notify(activity_log)", "def write_activityMessage(self, value):\n # PROTECTED REGION ID(SdpMasterLeafNode.activityMessage_write) ENABLED START #\n self.update_attr_map(\"activityMessage\", value)\n # PROTECTED REGION END # // SdpMasterLeafNode.activityMessage_write", "def _write_shard(filename, dataset, indices):\n with tf.io.TFRecordWriter(filename) as writer:\n for j in indices:\n writer.write(dataset[j])", "def _write_shard(filename, dataset, indices):\n with tf.python_io.TFRecordWriter(filename) as writer:\n for j in indices:\n writer.write(dataset[j])", "def _flush(self):\n buffer_len = len(self._buffer)\n\n if buffer_len == 0:\n _log.info('No pending records to index; URI: %s; index: %s',\n self._uri, self._index)\n return\n\n _log.info('Indexing %d records; URI: %s; index: %s ...',\n buffer_len, 
self._uri, self._index)\n\n headers = {'Authorization': 'Splunk ' + self._token}\n\n try:\n response = self._session.post(self._uri,\n headers=headers,\n data=json.dumps(self._buffer),\n verify=self._ca_cert)\n\n log_data = ('URI: {}; index: {}; response status: {}; '\n 'response content: {}'\n .format(self._uri, self._index,\n response.status_code, response.text))\n\n if response.status_code != 200:\n _log.error('Failed to index %d records; HTTP status '\n 'code indicates error; %s',\n buffer_len, log_data)\n return\n\n try:\n j = response.json()\n except Exception as e:\n _log.error('Failed to get JSON from response; %s; '\n 'error: %s; %s', log_data, type(e).__name__, e)\n return\n\n if j['code'] != 0:\n _log.error('Failed to index %d records; Splunk status '\n 'code in JSON indicates error; %s',\n buffer_len, log_data)\n return\n\n _log.info('Indexed %d records; %s', buffer_len, log_data)\n del self._buffer[:]\n\n except requests.ConnectionError as e:\n _log.error('Failed to index %d records; connection error; '\n 'URI: %s; index: %s; error: %s: %s; ',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)\n\n except Exception as e:\n _log.error('Failed to index %d records; unexpected error; '\n 'URI: %s; index: %s; error: %s: %s',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)", "def write(self, segment, result):\n pass", "def write(self, batch):\n time.sleep(self.WRITE_DELAY)", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def sendIndex(self):\n self.updateIndex()\n outpkg = json.dumps(self.serverindex)\n self.send(outpkg)", "def push_write(self, s):\n ...", "def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)", "def _write(self, location, data):\n self._connector.write(location=location, data=data)", "def write_activityMessage(self, value):\n self.update_attr_map(\"activityMessage\", value)", "def write(self):", "def write(self):", "def save_index(self, fn):\n utils.save_obj(self.tweetTerms, \"TweetTerm_%s\" % (self.counterOfTweetTermsFiles))\n self.computeTfIdf(self.counterOfTweets)\n self.deleteSingleEntities()\n inv_dict = {'inverted_idx': self.inverted_idx, 'posting': self.postingFiles}\n utils.save_obj(inv_dict, fn)", "def write(self):\n\t\traise NotImplementedError('%s: No write function implemented!' 
% self.name)", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def _write_stream(self):\n enrich_df = self._process_stream()\n df_writer = enrich_df \\\n .writeStream \\\n .queryName(\"Agro Data Writer\") \\\n .foreachBatch(db_utils.foreach_batch_function) \\\n .option(\"checkpointLocation\", \"chk-point-dir\") \\\n .trigger(processingTime=\"1 minute\") \\\n .start()\n\n df_writer.awaitTermination()", "def write_index(self, file_name):\n self.df_index.to_csv(file_name, sep='\\t')", "def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")", "def write_to_index(self,write_dict):\n self.__mode = self.WRITE_MODE\n if not self.__storage:\n self.__load_index()\n try:\n for key,value in write_dict.iteritems():\n self.__storage[key]=value\n except Exception,e:\n print e\n self.__storage = None\n return False\n\n self.__close_storage()\n return True", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )" ]
[ "0.6238934", "0.5661036", "0.56116706", "0.55407304", "0.54127765", "0.5401947", "0.5389567", "0.5381006", "0.53470606", "0.53407896", "0.5288701", "0.5285223", "0.5240927", "0.5197521", "0.5191341", "0.51774335", "0.5176707", "0.51560175", "0.5123677", "0.50669056", "0.50669056", "0.50389946", "0.5025154", "0.5023109", "0.5019835", "0.501346", "0.50076485", "0.5005588", "0.50030744", "0.50013804" ]
0.5662735
1
Takes the object's queryset. Exports the queryset's data as of the given date_time to Excel.
def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):
    if queryset:
        [row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)
        for q in queryset:
            # object_excel_write converts the history object as of date_time into the Excel sheet
            [row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)
    else:
        worksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_ho_dan_as_excel_action(fields=None, exclude=None, header=True):\n def export_as_excel(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [\"name\", \"status\", \"location\", \"tinh\",\n \"xa\", \"huyen\", \"phone\", \"cuuho\", \"update_time\", \"note\"]\n display_names = [\"Tên hộ dân\", \"Tình trạng\", \"Vị trí\", \"Tỉnh\", \"Xã\",\n \"Huyện\", \"Sdt\", \"hỗ trợ\", \"Thời gian cuối cùng cập nhật\", \"Ghi chú\"]\n file_name = \"Danh_sach_ho_dan\"\n\n output = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n row = 0\n if header:\n write_a_row(worksheet, row, display_names)\n row += 1\n for obj in queryset:\n arr = []\n for field in field_names:\n if field == \"status\" and obj.status:\n arr.append(obj.status.name)\n elif field == \"update_time\":\n utc_time = getattr(obj, field)\n local_datetime = utc_to_local(utc_time)\n arr.append(local_datetime.strftime(\"%d/%m/%Y %H:%M:%S\"))\n else:\n arr.append(str(getattr(obj, field) or \"\"))\n write_a_row(worksheet, row, arr)\n row += 1\n\n workbook.close()\n\n output.seek(0)\n\n response = HttpResponse(output.read(\n ), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n response['Content-Disposition'] = f\"attachment; filename={file_name}.xlsx\"\n\n output.close()\n\n return response\n\n export_as_excel.short_description = \"Xuất file excel\"\n return export_as_excel", "def export(self, queryset=None):\n self.queryset = queryset or self.queryset\n exported_datetime = get_utcnow()\n filename = self.get_filename(exported_datetime)\n path = os.path.join(self.export_folder, filename)\n with open(path, 'w') as f:\n csv_writer = csv.DictWriter(\n f, fieldnames=self.field_names, delimiter=self.delimiter)\n csv_writer.writeheader()\n for model_obj in self.queryset:\n object_helper = self.object_history_helper_cls(\n model_obj=model_obj, create=True)\n objects = object_helper.get_not_exported()\n for obj in objects:\n row = self.prepare_row(\n model_obj=model_obj,\n exported_datetime=exported_datetime,\n export_change_type=obj.export_change_type)\n csv_writer.writerow(row)\n object_helper.update_as_exported(\n objects=objects, exported_datetime=exported_datetime)\n file_history_updater = self.file_history_updater_cls(\n path=path,\n delimiter=self.delimiter,\n model=self.model_cls._meta.label_lower,\n filename=filename)\n file_history_updater.update()\n return path", "def export_any_queryset(request, queryset, filename, excluded_fields=[], included_fields=[], csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset)\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n # # Write Spreadsheet\n # writer.write_headers_from_strings(\n # ['Cliente', 'Commessa', 'Progetto', 'Attività', ] +\n # ['Totale', ],\n # )\n # writer.apply_autofit()\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset, excluded_fields=excluded_fields, 
included_fields=included_fields)\n writer.apply_autofit()\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)", "def export_to_excel(self, workbook, tailan_queryset):\n\t\t# workbook argumentdaa avna\n\t\tif tailan_queryset:\n\t\t\t#[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\t\n\t\t\tworksheet = workbook.add_worksheet(u'Гүний худаг')\n\t\t\tqueryset = Hudag.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.gunii_hudags:\n\t\t\t\t\tqueryset = tailan.gunii_hudags.hudags.all()\n\t\t\t\t\t[row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 
1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsevershuuleh:\n\t\t\t\t\tqueryset = tailan.tsevershuuleh.tsevershuuleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tseverleh:\n\t\t\t\t\tqueryset = tailan.tseverleh.tseverleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Усан сан')\n\t\t\tqueryset = UsanSan.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.usansan:\n\t\t\t\t\tqueryset = tailan.usansan.usan_sans.all()\n\t\t\t\t\t[row_write, col_write] = UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_nasos_stants:\n\t\t\t\t\tqueryset = tailan.tsever_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_nasos_stants:\n\t\t\t\t\tqueryset = tailan.bohir_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Лаборатори')\n\t\t\tqueryset = Lab.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.lab:\n\t\t\t\t\tqueryset = tailan.lab.labs.all()\n\t\t\t\t\t[row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, 
date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.tsever_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.bohir_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'АХББ')\n\t\t\tqueryset = ABB.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.abb:\n\t\t\t\t\tqueryset = tailan.abb.abbs.all()\n\t\t\t\t\t[row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв')\n\t\t\tqueryset = UsDamjuulahBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_damjuulah_tov:\n\t\t\t\t\tqueryset = tailan.us_damjuulah_tov.usDamjuulahBair.all()\n\t\t\t\t\t[row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус түгээх байр')\n\t\t\tqueryset = UsTugeehBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_tugeeh:\n\t\t\t\t\tqueryset = tailan.us_tugeeh.us_tugeeh_bairs.all()\n\t\t\t\t\t[row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны машин')\n\t\t\tqueryset = WaterCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.water_car:\n\t\t\t\t\tqueryset = 
tailan.water_car.water_cars.all()\n\t\t\t\t\t[row_write, col_write] = WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны машин')\n\t\t\tqueryset = BohirCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_car:\n\t\t\t\t\tqueryset = tailan.bohir_car.bohir_cars.all()\n\t\t\t\t\t[row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ажилчдын судалгаа')\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.ajiltans:\n\t\t\t\t\tqueryset = tailan.ajiltans.ajiltans.all()\n\t\t\t\t\t[row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\t\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def export_outstanding_fires(request, region_id, queryset):\n #regions = Region.objects.filter(id=region_id) if region_id else Region.objects.all()\n regions = Region.objects.filter(id=region_id) if region_id else Region.objects.filter(dbca=True)\n region_name = regions[0].name if region_id else 'All-Regions'\n\n rpt_date = datetime.now()\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name, rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n for region in regions:\n outstanding_fires(book, region, queryset, rpt_date)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_to_csv(self, request, queryset):\n fields = self.get_table_fields()\n field_names = [field.name for field in fields]\n field_verbose_names = [field.verbose_name.encode(\n 'utf-8'\n ) for field in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; \\\nfilename=%s.csv' % unicode(self.model._meta).replace('.', '_')\n\n writer = csv.writer(response)\n writer.writerow(field_verbose_names)\n for obj in queryset:\n writer.writerow([unicode(getattr(obj, field)).encode(\n \"utf-8\",\n \"replace\"\n ) for field in field_names])\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n 
self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export_repayment_csv(request):\n import csv\n from django.utils.encoding import smart_str\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = 'attachment; filename=Repayment_report.csv'\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").iterator()\n # writer = csv.writer(response, csv.excel)\n # response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n def stream():\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow([\n smart_str(u\"FIRST NAME\"),\n smart_str(u\"LAST NAME\"),\n smart_str(u\"USERNAME\"),\n smart_str(u\"EMAIL\"),\n smart_str(u\"DATE\"),\n smart_str(u\"NAME OF PROJECT\"),\n smart_str(u\"DONATION AMOUNT\"),\n smart_str(u\"REPAYMENT AMOUNT\"),\n\n ])\n\n for payment in repayments:\n writer.writerow([\n smart_str(payment.user.user.first_name),\n smart_str(payment.user.user.last_name),\n smart_str(payment.user.user.username),\n smart_str(payment.user.user.email),\n smart_str(payment.created_at),\n smart_str(payment.project.title),\n smart_str(round(\n Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2)),\n smart_str(round(payment.amount, 2)),\n ])\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n # Create the streaming response object with the appropriate CSV header.\n response = StreamingHttpResponse(stream(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Repayment_report.csv\"'\n return response", "def get_export_data(self, file_format, queryset, *args, **kwargs):\n request = kwargs.pop(\"request\")\n resource_class = self.get_export_resource_class()\n data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)\n export_data = file_format.export_data(data)\n return export_data", "def export_any_dataset(request, *fields, queryset, filename, csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n headers, rows = render_queryset_as_data(*fields, queryset=queryset)\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = 
csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(headers)\n for row in rows:\n writer.writerow(row)\n\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n\n writer.write_headers_from_strings(headers)\n for row in rows:\n writer.writerow(row)\n writer.apply_autofit()\n\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response, delimiter=';')\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n values = []\n for field in field_names:\n value = (getattr(obj, field))\n if callable(value):\n try:\n value = value() or ''\n except:\n value = 'Error retrieving value'\n if value is None:\n value = ''\n values.append(unicode(value).encode('utf-8'))\n writer.writerow(values)\n #writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def dump_to_file_format(queryset, file_format, data_zip):\n # create a temporary output stream (temp file)\n if file_format == 'xls':\n # it seems that an excel file needs to be written on a BytesIo even if on the xlwt they write exactly\n # the opposite (I was about to become fool)\n output = io.BytesIO()\n else:\n output = io.StringIO()\n\n # get queryset model\n model = queryset.model\n # the export code depends on the file format\n if file_format == 'csv':\n # create an instance of csv writer that writes on the stream 'output' opened above\n csv_writer = csv.writer(output, dialect='excel', delimiter=';')\n\n # there are some things that may be different from a model to another\n\n # for example, I also want to write in the project csv the username of the members\n if model == Projet:\n csv_writer.writerow(['ID', 'NAME', 'MEMBERS'])\n for project in queryset:\n # build a comma separated list with all the users and the tasks that are in the project\n members = ', '.join([member.username for member in project.members.all()])\n csv_writer.writerow([project.id, project.name, members])\n # if the model is User, only export non confidential fields\n if model == User:\n csv_writer.writerow(['USERNAME', 'NAME', 'SURNAME', 'E-MAIL'])\n for user in queryset:\n csv_writer.writerow([user.username, user.first_name, user.last_name, user.email])\n # for the other models that's what is going to happen\n else:\n # get all the field names and write them as headers\n field_names = [field.name for field in model._meta.fields]\n csv_writer.writerow(field.upper() for field in field_names)\n # for each instance in the queryset\n for obj in queryset:\n # 
\"\"\"general backup code\"\"\"\n # csv_writer.writerow([getattr(obj, field) for field in field_names])\n\n row = [] # create an empty row list\n # for each field of the model\n for field in field_names:\n # get the field value\n field_value = getattr(obj, field)\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n row.append(field_value) # append the field value to the end of the row list\n\n csv_writer.writerow(row)\n\n # the .json and .xml formats are generated with the django serializers utilities\n elif file_format == 'json' or file_format == 'xml':\n # if the model is User, only export non confidential fields\n if model == User:\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True,\n fields=('username', 'first_name', 'last_name', 'email'))\n else:\n # use_natural_foreign_keys=True means that the foreign keys won't be written as just numbers\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True)\n\n output.write(json_xml)\n\n elif file_format == 'xls':\n wb = xlwt.Workbook(encoding='utf-8') # create excel workbook\n ws = wb.add_sheet(model._meta.model.__name__) # create sheet\n\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n '''This code is pretty similar to the code to export in .csv, but in excel each cell (row and column) \n must written separately'''\n # get all the field names and write them as headers\n # if User only confidential data\n if model == User:\n field_names = ['username', 'first_name', 'last_name', 'email']\n else:\n field_names = [field.name for field in model._meta.fields]\n for col_num in range(len(field_names)):\n ws.write(row_num, col_num, field_names[col_num].upper(), font_style)\n\n # add a column for the members of the project\n # (otherwise it won't be done automatically because it's ManytoMany)\n if model == Projet:\n ws.write(row_num, col_num + 1, 'MEMBERS', font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n # for each instance in the queryset\n for obj in queryset:\n row_num += 1\n # for each field of the model\n for col_num in range(len(field_names)):\n # get the field value\n field_value = getattr(obj, field_names[col_num])\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n ws.write(row_num, col_num, field_value.__str__(), font_style)\n\n # add the column with the members of the project\n if model == Projet:\n members = ', '.join([member.username for member in obj.members.all()])\n ws.write(row_num, col_num + 1, members, font_style)\n\n # save the excel file on the output stream\n wb.save(output)\n\n # generates the name of the output file depending on the model and the file format\n file_name = model._meta.model.__name__.lower() + '_data.' 
+ file_format\n # add the file to the zip archive and close the output stream\n data_zip.writestr(file_name, output.getvalue())\n output.close()", "def uploader_actividad(df,to_model):\n\tengine = create_engine(\"mssql+pyodbc://sa:[email protected]:1433/vpcanales?driver=SQL+Server+Native+Client+11.0\")\n\n\tfecha = df.loc[0,'Fecha']\n\tprint(fecha.month)\n\tprint(fecha.year)\n\n\tif to_model.__name__==\"Activacion\":\n\n\t\tActivacion.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_activacion\n\t\t\t@fecha_actividad='{0}',\n\t\t\t@plataforma='{1}',\n\t\t\t@tecnologia='{2}',\n\t\t\t@terminal='{3}',\n\t\t\t@cantidad='{4}',\n\t\t\t@codigo_plan='{5}',\n\t\t\t@mes={6},\n\t\t\t@ano={7},\n\t\t\t@codigo_agente='{8}'\n\t\t\t \"\"\".format(row[2],\n\t\t\trow[5],\n\t\t\trow[6],\n\t\t\trow[7],\n\t\t\trow[-2],\n\t\t\trow[4],\n\t\t\trow[2].month,\n\t\t\trow[2].year,\n\t\t\trow[3])\n\t\t\tcursor.execute(string).commit()\n\n\t\tresults = Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\t\treturn results\n\n\n\n\telse:\n\n\t\tAlta.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_alta\n\t @fecha_actividad='{0}',\n\t @plataforma='{1}',\n\t @tecnologia='{2}',\n\t @terminal='{3}',\n\t @cantidad='{4}',\n\t @codigo_plan='{5}',\n\t @mes={6},\n\t @ano={7},\n\t @codigo_agente='{8}' \"\"\".format(row[2],\n\t row[5],\n\t row[6],\n\t row[7],\n\t row[-2],\n\t row[4],\n\t row[2].month,\n\t row[2].year,\n\t row[3])\n\t\t\tcursor.execute(string).commit()\n\n\n\t\tresults = Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\n\n\t\treturn results", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def download_queryset(self, queryset, export_format):\n dataset = StockItemResource().export(queryset=queryset)\n\n filedata = dataset.export(export_format)\n\n filename = 'InvenTree_StockItems_{date}.{fmt}'.format(\n date=datetime.now().strftime(\"%d-%b-%Y\"),\n fmt=export_format\n )\n\n return DownloadFile(filedata, filename)", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def export_xlsx(request):\n import openpyxl\n try:\n from openpyxl.cell import get_column_letter\n except ImportError:\n from openpyxl.utils import get_column_letter\n\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import 
pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n payments = Payment.objects.filter(\n created_at__range=[datetime.datetime(date1.year, date1.month, date1.day, 8, 15, 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15, 12, 0,\n pytz.UTC)]).order_by('-created_at').filter(search_query)\\\n .select_related(\"user\", \"project\", \"admin_reinvestment\", \"user_reinvestment\", \"tip\", \"user__user\").iterator()\n else:\n payments = Payment.objects.filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"admin_reinvestment\", \"user_reinvestment\", \"tip\", \"user__user\")\\\n .iterator()\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=RE-volv.xlsx'\n wb = openpyxl.Workbook()\n ws = wb.get_active_sheet()\n ws.title = \"RE-volv\"\n\n row_num = 0\n\n columns = [\n (u\"FIRST NAME\", 30),\n (u\"LAST NAME\", 30),\n (u\"USERNAME\", 30),\n (u\"EMAIL\", 30),\n (u\"DATE\", 30),\n (u\"NAME OF PROJECT\", 30),\n (u\"DONATION TO SOLAR SEED FUND\", 30),\n (u\"REINVESTMENT IN SOLAR SEED FUND\", 20),\n (u\"ADMIN REINVESTMENT IN SOLAR SEED FUND\", 20),\n (u\"DONATION TO OPERATION\", 20),\n (u\"TOTAL DONATIONS\", 20),\n ]\n\n for col_num in xrange(len(columns)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = columns[col_num][0]\n ws.column_dimensions[get_column_letter(col_num + 1)].width = columns[col_num][1]\n\n for payment in payments:\n if payment.admin_reinvestment:\n admin_reinvestment = round(payment.amount, 2)\n else:\n admin_reinvestment = 0\n\n if payment.user_reinvestment:\n user_reinvestment = round(payment.user_reinvestment.amount, 2)\n else:\n user_reinvestment = 0\n\n if payment.admin_reinvestment or payment.user_reinvestment:\n donation_amount = 0\n else:\n donation_amount = payment.amount\n\n if payment.tip:\n tip = round(payment.tip.amount, 2)\n else:\n tip = 0\n\n if payment.tip and payment.amount:\n total = round(payment.tip.amount + payment.amount, 2)\n if payment.tip and not payment.amount:\n total = round(payment.tip.amount, 2)\n if payment.amount and not payment.tip:\n total = round(payment.amount, 2)\n if not payment.amount and not payment.tip:\n total = 0\n if AnonymousUserDonation.objects.filter(payment_id=payment.id):\n email = AnonymousUserDonation.objects.get(payment_id=payment.id).email\n else:\n email = payment.user.user.email\n\n row_num += 1\n row = [\n payment.user.user.first_name,\n payment.user.user.last_name,\n payment.user.user.username,\n email,\n payment.created_at,\n payment.project.title,\n donation_amount,\n user_reinvestment,\n admin_reinvestment,\n tip,\n total,\n ]\n for col_num in xrange(len(row)):\n c = ws.cell(row=row_num + 1, column=col_num + 1)\n c.value = row[col_num]\n\n wb.save(response)\n payments.close()\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = 
wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. 
If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def action_date_ret(self):\n for wh in self.browse():\n if not wh.date_ret:\n self.write([wh.id],\n {'date_ret': time.strftime('%Y-%m-%d')})\n return True", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def make_csv(user_id, fobj):\n data = show_history(user_id)\n report = csv.writer(fobj)\n report.writerow([\n 'Status',\n 'Date',\n 'Amount',\n 'From Curr',\n 'To Curr',\n 'To Address',\n ])\n for row in data:\n report.writerow([\n row.exchange_status.capitalize(),\n row.created_at.strftime('%Y-%m-%d %H:%I:%M'),\n row.amount,\n row.from_curr,\n row.to_curr,\n row.address_out\n ])", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def download_data(request, file_format, exp_p=False, exp_m=False, exp_t=False, exp_j=False, exp_s=False,\n querysets=None):\n\n # set the response so that the browser will understand that the user is receiving a zip file to download\n response = HttpResponse(content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=\"data.zip\"'\n\n # create the zip archive by using the python library ZipFile\n data_zip = ZipFile(response, 'w')\n\n file_format = file_format.lower() # it may be helpful\n\n \"\"\" ONLY the data that refers to the projects of which the AUTHENTICATED USER is MEMBER will be exported\"\"\"\n user = request.user\n # models queryset to be used to generate to export the database\n projects_queryset = user.projets.all() # only projects that the user has access to\n projects_members_queryset = User.objects.filter(\n projets__in=projects_queryset).distinct() # infos about project members\n tasks_queryset = Task.objects.filter(projet__in=projects_queryset) # all the tasks in these projects\n journals_queryset = Journal.objects.filter(task__in=tasks_queryset) # all the journals in these tasks\n status_queryset = Status.objects.all()\n\n def dump_to_file_format(queryset, 
file_format, data_zip):\n \"\"\" Subfunction used not to repeat the same code for the export process\n\n :param queryset: a generic queryset of a model\n :param file_format:\n :param data_zip: a zip archive\n\n \"\"\"\n # create a temporary output stream (temp file)\n if file_format == 'xls':\n # it seems that an excel file needs to be written on a BytesIo even if on the xlwt they write exactly\n # the opposite (I was about to become fool)\n output = io.BytesIO()\n else:\n output = io.StringIO()\n\n # get queryset model\n model = queryset.model\n # the export code depends on the file format\n if file_format == 'csv':\n # create an instance of csv writer that writes on the stream 'output' opened above\n csv_writer = csv.writer(output, dialect='excel', delimiter=';')\n\n # there are some things that may be different from a model to another\n\n # for example, I also want to write in the project csv the username of the members\n if model == Projet:\n csv_writer.writerow(['ID', 'NAME', 'MEMBERS'])\n for project in queryset:\n # build a comma separated list with all the users and the tasks that are in the project\n members = ', '.join([member.username for member in project.members.all()])\n csv_writer.writerow([project.id, project.name, members])\n # if the model is User, only export non confidential fields\n if model == User:\n csv_writer.writerow(['USERNAME', 'NAME', 'SURNAME', 'E-MAIL'])\n for user in queryset:\n csv_writer.writerow([user.username, user.first_name, user.last_name, user.email])\n # for the other models that's what is going to happen\n else:\n # get all the field names and write them as headers\n field_names = [field.name for field in model._meta.fields]\n csv_writer.writerow(field.upper() for field in field_names)\n # for each instance in the queryset\n for obj in queryset:\n # \"\"\"general backup code\"\"\"\n # csv_writer.writerow([getattr(obj, field) for field in field_names])\n\n row = [] # create an empty row list\n # for each field of the model\n for field in field_names:\n # get the field value\n field_value = getattr(obj, field)\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n row.append(field_value) # append the field value to the end of the row list\n\n csv_writer.writerow(row)\n\n # the .json and .xml formats are generated with the django serializers utilities\n elif file_format == 'json' or file_format == 'xml':\n # if the model is User, only export non confidential fields\n if model == User:\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True,\n fields=('username', 'first_name', 'last_name', 'email'))\n else:\n # use_natural_foreign_keys=True means that the foreign keys won't be written as just numbers\n json_xml = serializers.serialize(file_format, queryset, use_natural_foreign_keys=True)\n\n output.write(json_xml)\n\n elif file_format == 'xls':\n wb = xlwt.Workbook(encoding='utf-8') # create excel workbook\n ws = wb.add_sheet(model._meta.model.__name__) # create sheet\n\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n '''This code is pretty similar to the code to export in .csv, but in excel each cell (row and column) \n must written separately'''\n # get all the field names and write them as headers\n # if User only confidential data\n if model == User:\n field_names = ['username', 'first_name', 'last_name', 'email']\n else:\n 
field_names = [field.name for field in model._meta.fields]\n for col_num in range(len(field_names)):\n ws.write(row_num, col_num, field_names[col_num].upper(), font_style)\n\n # add a column for the members of the project\n # (otherwise it won't be done automatically because it's ManytoMany)\n if model == Projet:\n ws.write(row_num, col_num + 1, 'MEMBERS', font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n # for each instance in the queryset\n for obj in queryset:\n row_num += 1\n # for each field of the model\n for col_num in range(len(field_names)):\n # get the field value\n field_value = getattr(obj, field_names[col_num])\n # this is to control the format of the date that will be written in the csv\n if isinstance(field_value, datetime.datetime):\n field_value = field_value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n ws.write(row_num, col_num, field_value.__str__(), font_style)\n\n # add the column with the members of the project\n if model == Projet:\n members = ', '.join([member.username for member in obj.members.all()])\n ws.write(row_num, col_num + 1, members, font_style)\n\n # save the excel file on the output stream\n wb.save(output)\n\n # generates the name of the output file depending on the model and the file format\n file_name = model._meta.model.__name__.lower() + '_data.' + file_format\n # add the file to the zip archive and close the output stream\n data_zip.writestr(file_name, output.getvalue())\n output.close()\n\n '''\n uses the function defined above the export the data\n '''\n if exp_p:\n dump_to_file_format(projects_queryset, file_format, data_zip)\n if exp_m:\n dump_to_file_format(projects_members_queryset, file_format, data_zip)\n if exp_t:\n dump_to_file_format(tasks_queryset, file_format, data_zip)\n if exp_j:\n dump_to_file_format(journals_queryset, file_format, data_zip)\n if exp_s:\n dump_to_file_format(status_queryset, file_format, data_zip)\n\n # it is also possible to pass whatever list of querysets to this function\n if not querysets is None:\n for queryset in querysets:\n dump_to_file_format(queryset, file_format, data_zip)\n\n # closes the zip file\n data_zip.close()\n\n # finally send the zip file as a the HTTP response\n return response", "def export_as_csv(modeladmin, request, queryset):\n if not request.user.is_staff:\n raise PermissionDenied\n opts = modeladmin.model._meta\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscrits%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n person_list = Person.objects.all()\n\n table = ExportPersonTable(person_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n 
row.append(value.encode('utf8'))\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response" ]
[ "0.679835", "0.6526606", "0.6513021", "0.63559425", "0.6347008", "0.6267613", "0.61500996", "0.604096", "0.59455335", "0.5921458", "0.58053875", "0.5804869", "0.57998806", "0.5739592", "0.57366", "0.5705012", "0.5652193", "0.5652061", "0.564152", "0.56380385", "0.5635472", "0.5631188", "0.56273067", "0.56179833", "0.5613689", "0.56049776", "0.55923617", "0.5577416", "0.5553695", "0.5525446" ]
0.75960785
0
Durations are 'dict string keys'. The keys need to be converted to floats. The keys need to be ordered and the scenes returned with calculated durations
def parse_scene_order(self, data, timesigniture): if not data: return () num_scenes = len(data) def attempt_parse_key_timecode(value): if not value: return value try: return float(value) except (ValueError, TypeError): pass try: return timecode_to_beat(value, timesigniture) except (AssertionError, ValueError, AttributeError): pass return value # Surface the original key value in the dict (useful for debugging) for key, value in data.items(): if value: value['key'] = key data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()} assert len(data_float_indexed) == num_scenes sorted_keys = sorted(data_float_indexed.keys()) assert len(sorted_keys) == num_scenes def normalise_duration(index): """ Convert any time code or alias to a linear float value. e.g. '1.2' parses to -> 1.5 'match_next' resolves to -> 4.0 """ key = sorted_keys[index] item = data_float_indexed[key] if not item: item = {'duration': 'auto'} data_float_indexed[key] = item duration = attempt_parse_key_timecode(item.get('duration')) if duration == 'match_next': duration = normalise_duration(index+1) if duration == 'match_prev': duration = normalise_duration(index-1) if isinstance(duration, str) and duration.startswith('match '): duration = normalise_duration(sorted_keys.index(float(duration.strip('match ')))) if (not duration or duration == 'auto') and index < len(sorted_keys)-1: duration = sorted_keys[index+1] - key if not isinstance(duration, float): #log.info('Unparsed duration: {0}'.format(duration)) duration = self.DEFAULT_DURATION if duration != item.get('duration'): item['duration'] = duration return duration for index in range(len(sorted_keys)): normalise_duration(index) scene_items = [] for key in sorted_keys: scene_item = data_float_indexed[key] assert scene_item and scene_item.get('duration') >= 0, "All scene must have durations. Something has failed in parsing. {0}:{1}".format(key, scene_item) scene_items.append(scene_item) return scene_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise_duration(index):\n key = sorted_keys[index]\n item = data_float_indexed[key]\n if not item:\n item = {'duration': 'auto'}\n data_float_indexed[key] = item\n duration = attempt_parse_key_timecode(item.get('duration'))\n if duration == 'match_next':\n duration = normalise_duration(index+1)\n if duration == 'match_prev':\n duration = normalise_duration(index-1)\n if isinstance(duration, str) and duration.startswith('match '):\n duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))\n if (not duration or duration == 'auto') and index < len(sorted_keys)-1:\n duration = sorted_keys[index+1] - key\n if not isinstance(duration, float):\n #log.info('Unparsed duration: {0}'.format(duration))\n duration = self.DEFAULT_DURATION\n if duration != item.get('duration'):\n item['duration'] = duration\n return duration", "def test_durations_per_type(self):\n sim = ss.Simulation()\n assert type(sim.durations_per_type()) == dict", "def _get_dur(inst):\n for fil, sig in inst['localization'].items():\n ke = sorted([int(i) for i in sig.keys()], key=int)\n if (len(ke) != 2):\n log(0, \"Error: Instance has two ranges\\n%s\" % (str(inst)))\n exit(1)\n dur = ke[1] - ke[0]\n assert dur > 0, \"Duration <= 0\"\n return(dur)", "def stats():\r\n times_lst = []\r\n time_dict = {}\r\n for album, details in dbase().items():\r\n time_m = 0\r\n time_s = 0\r\n for songs, details_s in details[0].items():\r\n time = details_s[1].split(\":\")\r\n min = int(time[0])\r\n sec = int(time[1])\r\n time_m += min\r\n time_s += sec\r\n time_s = datetime.timedelta(seconds=time_s)\r\n time_m = datetime.timedelta(seconds=time_m)\r\n time = time_m + time_s\r\n time = str(time)\r\n times_lst.append(time)\r\n time_dict[album] = time\r\n\r\n time_dict = sorted(time_dict.items(), key=lambda x: x[1], reverse=True)\r\n return time_dict", "def create_event_dur_score(self):\n for inst in self.instruments:\n #[rest/midipitch, dur, vel]\n inst_score=[]\n running_clock = 0\n for n, note in enumerate(inst.notes):\n freq = mp_to_adjusted_freq(note[0], self.ratios)\n if type(freq) != int: freq = np.asscalar(freq)\n if type(note[0]) != int: inst.notes[n][0] = np.asscalar(note[0])\n if type(note[1]) != int: inst.notes[n][1] = np.asscalar(note[1])\n if type(note[2]) != int: inst.notes[n][2] = np.asscalar(note[2])\n # if type(note[3]) != int: inst.notes[n][3] = np.asscalar(note[3])\n if note[1] != running_clock:\n inst_score.append(['Rest()', note[1] - running_clock, 0])\n inst_score.append([freq, note[2], note[3]])\n running_clock = note[1] + note[2]\n inst.event_dur_score = inst_score", "def durations_per_type(self):\n pass", "def dictagdur2(kind, fname):\n\n #x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n #'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n #'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n\n d = {}\n y = '1'\n b = []\n dur = []\n \n with open(fname) as f:\n for l in f:\n #print(l)\n adict = agline2(l)\n \n if adict['well'] != y:\n if len(dur) > 0:\n agdurcmd(kind, b, dur, d[gen])\n b = []\n dur = []\n \n if adict['agtype'] != '-' and adict['agtype'] != 'x' and \\\n adict['agdur'] != '':\n b.append(adict['agtype'])\n dur.append(adict['agdur'])\n \n if adict['esctype'] != '' and adict['escdur'] != '':\n b.append(adict['esctype'])\n dur.append(adict['escdur'])\n\n gen = adict['gen']\n #print(gen)\n if gen not in d:\n d[gen] = []\n \n y = adict['well']\n \n agdurcmd(kind, b, dur, d[gen])\n\n return(d)", "def 
sort_duration(self):\n self.sort('duration')", "def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_):\n self.scene_items = scene_items\n self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items)\n self.timesigniture = timesigniture", "def get_state(self, duration):\n metrics = []\n\n if duration:\n for count_key in self.kv_counts:\n metrics.append(\n MetricObject(\n count_key,\n self.kv_counts[count_key] / duration\n )\n )\n\n for time_key in self.kv_times:\n values = self.kv_times[time_key]['values']\n unit = self.kv_times[time_key]['unit']\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'mean']),\n stats_helper.find_mean(values),\n unit\n )\n )\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'median']),\n stats_helper.find_median(values),\n unit\n )\n )\n\n for pct in self.percentiles:\n metrics.append(\n MetricObject(\n '.'.join([time_key, \"%sth_percentile\" % pct]),\n stats_helper.find_percentile(values, int(pct)),\n unit\n )\n )\n\n return metrics", "def _generate_case_durations(self):\n return pd.Series(self.df_cases[\"Median Duration\"].values, index=self.df_cases[\"CaseID\"]).to_dict()", "def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")", "def dictagdur(kind, fname):\n\n with open(fname, 'r') as g:\n g.next()\n g.next()\n m = g.next()\n startdict = agline(m)\n genold = startdict['gen']\n\n f = open(fname)\n f.next()\n f.next()\n d = {}\n y = '1'\n nb = []\n for l in f:\n adict = agline(l)\n kdur = kind + 'dur'\n gen = adict['gen']\n well = adict['well']\n\n if adict['gen'] not in d:\n d[gen] = []\n \n if gen != genold:\n d[genold].append(sum(nb))\n nb = []\n else: \n if adict['well'] != y:\n #print(sum(nb))\n d[gen].append(sum(nb))\n nb = []\n \n if adict[kdur] == '':\n nb.append(0)\n elif int(adict[kdur]) >= 0:\n nb.append(int(adict[kdur]))\n elif adict[ks] == '-':\n pass\n \n\n y = adict['well']\n genold = adict['gen']\n \n d[gen].append(sum(nb))\n\n return(d)", "def test_duration(self):\n for duration_, _, _ in self.test_cases:\n self.assertEqual(Rest(duration_).duration, duration_)", "def times(self):\n ret = {}\n for tag in self.TIMETAGLIST:\n if self.has_tag(tag):\n try:\n ret[tag] = safeInt(self.tag(tag))\n except TypeError:\n pass\n return ret", "def getTranslationKeyTimes(self, view) -> list[float]:\n ...", "def get_timestamps(filename, dictionary):\n \n with open(filename, 'r') as f_obj:\n text = f_obj.readlines()\n inferred_name = re.sub(r'[0-9_\\-]+', ' ', filename).split('/')[-1].split('.lab')[0].split('CD')[-1].strip().lower()\n end_stamp = float(text[-1].split()[1]) # relic of an old idea.\n for line in text:\n line = line.split() \n start = float(line[0])\n stop = float(line[1])\n musical_key = line[2]\n new_key = (inferred_name, start, stop)\n dictionary[new_key] = musical_key", "def get_event_start_idxs_durations(self):\n durations = []\n start_idxs = []\n prev = 0\n count = 0\n for idx, score in enumerate(self.summary):\n if score == 1 and prev == 0:\n count += 1\n start_idxs.append(idx)\n if score == 1 and prev == 1:\n count += 1\n elif score == 0 and prev == 1:\n durations.append(count)\n count = 0\n prev = score\n return dict(zip(start_idxs, durations))", "def get_dur(self):\n return 
[char.get_dur() for char in self.string]", "def process_notes_in_song(dict_time_notes, seq_len = 50):\n list_of_dict_keys_time = []\n \n for key in dict_time_notes:\n sample = dict_time_notes[key]\n times = np.unique(np.where(sample > 0)[1])\n index = np.where(sample > 0)\n dict_keys_time = {}\n\n for time in times:\n index_where = np.where(index[1] == time)\n notes = index[0][index_where]\n dict_keys_time[time] = notes\n list_of_dict_keys_time.append(dict_keys_time)\n return list_of_dict_keys_time", "def _generate_session_durations(self):\n return pd.Series(self.df_sessions[\"Duration\"].values, index=self.df_sessions[\"SessionID\"]).to_dict()", "def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"", "def testHrtDuration(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"duration\")\n\n self.util.stringPropertyTest(self, attr, \"duration\")", "def plotagdur(kind, d, iskeyfile='True', keyfile='keylist', type='b'):\n md = cl.dictmeans(d)\n\n if iskeyfile == 'True':\n keylist = cmn.load_keys(keyfile)\n else:\n keylist = sorted(d.keys())\n\n ylabel = 'Seconds'\n \n ftitle = 'Mean duration of behavior'\n\n if kind == 'escd':\n ftitle = 'Mean duration of dominant escalation'\n \n if kind == 'escm':\n ftitle = 'Mean duration of mutual escalation'\n\n fig1 = gpl.plotdata(d, md, keylist, type, ylabel=ylabel, ftitle=ftitle, \n titlesize='large', err='none', figw=10, figh=8)\n \n plt.ylim(0)", "def getDurations(self):\n return self.durations", "def duration(timedelta):\r\n duration = {}\r\n duration['day'] = timedelta.days\r\n minutes_temp, duration['second'] = divmod(timedelta.seconds, 60)\r\n duration['hour'], duration['minute'] = divmod(minutes_temp, 60)\r\n\r\n return duration", "def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]]\r\n\t\t\tmag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2)\r\n\t\t\tif a[0] == \"jump\":\r\n\t\t\t\ttime += mag/laser[\"jump_speed\"]\r\n\t\t\telse:\r\n\t\t\t\ttime += mag/laser[\"mark_speed\"]\r\n\t\t\tcoordinate_array = [float(a[1]), float(a[2])]\r\n\t\telif a[0] == \"z_abs\" or a[0] == \"z_rel\":\r\n\t\t\tzSet = float(a[1])\r\n\t\telif a[0] == \"c_abs\" or a[0] == \"c_rel\":\r\n\t\t\tcSet = float(a[1])\r\n\t\telif a[0] == \"a_abs\" or a[0] == \"a_rel\":\r\n\t\t\taSet = float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\treturn str(datetime.timedelta(seconds=int(time)))", "def getTransformKeyTimes(self, view) -> list[float]:\n ...", "def get_duration(self):\n duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration", "def test_time_dicts():\n dmd = DMD()\n dmd.fit(X=sample_data_1, Y=sample_data_2)\n expected_dict = {\"dt\": 1, \"t0\": 0, \"tend\": 13}\n np.testing.assert_equal(dmd.original_time, expected_dict)\n np.testing.assert_equal(dmd.dmd_time, expected_dict)" ]
[ "0.6176038", "0.59368414", "0.5897897", "0.58419317", "0.5807034", "0.5672501", "0.56141585", "0.5577464", "0.555346", "0.5521533", "0.5519391", "0.55059147", "0.5491283", "0.54520303", "0.5369046", "0.5366904", "0.53086877", "0.52912253", "0.52870804", "0.5256063", "0.5221701", "0.5210851", "0.51985115", "0.51917607", "0.51570517", "0.5146656", "0.5144112", "0.5126075", "0.51170737", "0.511182" ]
0.6748647
0
Once the order of the items is known, we can iterate over the scenes calculating/prerendering the dmx state for each section This make seeking much faster
def pre_render_scene_item(self, current_scene_item, previous_scene_item): assert current_scene_item current_scene_dmx = current_scene_item.setdefault(Scene.SCENE_ITEM_DMX_STATE_KEY, {}) # Aquire a reference to the previous DMX state current_scene_dmx['previous'] = copy.copy(previous_scene_item.get(Scene.SCENE_ITEM_DMX_STATE_KEY, {})['target']) if previous_scene_item else AbstractDMXRenderer.new_dmx_array() # The target state is a copy of the previous state current_scene_dmx['target'] = copy.copy(current_scene_dmx['previous']) # Modify the starting/previous state based on any overrides in this scene (this is a shortcut feature as I kept requireing this) self.render_state_dict(current_scene_item.get('state_start'), current_scene_dmx['previous']) # Modify the target state based on this scene item self.render_state_dict(current_scene_item.get('state'), current_scene_dmx['target'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def loadData(self, actions):\n # begin to clear the scene\n self.scene.clear()\n self.scene.drawGrid()\n \n # and draw all items\n maxItemId = self.itemId\n for graphicalItem in actions:\n\n # extract item info\n itemType = int(graphicalItem['item-type'])\n itemId = graphicalItem['item-id']\n if sys.version_info > (3,): # py3 support\n graphicalItem['item-text'] = graphicalItem['item-text']\n else:\n graphicalItem['item-text'] = graphicalItem['item-text'].decode('utf8')\n itemText = graphicalItem['item-text']\n posX = float(graphicalItem['pos-x'])\n posY = float(graphicalItem['pos-y'])\n itemData = graphicalItem['item-data']\n\n\n # define the color of the item\n color = self.getItemColor(itemType=itemType)\n \n # add item in first\n self.addItem( itemType=itemType, itemId=itemId, itemText=itemText, \n itemColor=QBrush(color), itemPos=QPointF(posX,posY), itemData=itemData )\n \n # kept the max id\n if int(itemId) > maxItemId:\n maxItemId = int(itemId)\n \n self.itemId = maxItemId\n\n # endly draw all arrows\n for curItem in self.scene.items():\n for saveItem in actions:\n if not isinstance(curItem, DiagramItem):\n continue\n if curItem.itemId == int(saveItem['item-id']):\n if 'item-links' in saveItem:\n if isinstance(saveItem['item-links'], dict):\n saveItem['item-links'] = [saveItem['item-links']]\n for lnk in saveItem['item-links']:\n itemId = lnk['next-item-id']\n toHotspotId = lnk['to-hotspot-id']\n fromHotspotId = lnk['from-hotspot-id']\n \n endItem = self.findItem(id=itemId)\n if endItem is not None:\n self.trace( \"Arrow: %s -> %s\" % (fromHotspotId,toHotspotId) )\n arrow = Arrow(curItem, endItem, toHotspotId=toHotspotId, fromHotspotId=fromHotspotId)\n arrow.setColor(self.scene.myLineColor)\n curItem.addArrow(arrow)\n endItem.addArrow(arrow)\n arrow.setZValue(-1000.0)\n self.scene.addItem(arrow)\n arrow.updatePosition()", "def __getitem__(self, index):\n\n #get the image name \n image_names = self.image_names[index]\n\n #make single name a list\n if(type(image_names) is not list):\n image_names = [image_names]\n\n image_target_list = []\n for image_name in image_names:\n\n #build the path to the image and annotation file\n #see format tab on Get Data page on AVD dataset website\n if image_name[0] == '0':\n scene_type = 'Home'\n else:\n scene_type = 'Office'\n scene_name = scene_type + \"_\" + image_name[1:4] + \"_\" + image_name[4]\n \n #read the image and bounding boxes for this image\n #(doesn't get the movement pointers) \n img = (Image.open(os.path.join(self.root,scene_name, \n images_dir,image_name)))\n with open(os.path.join(self.root,scene_name,annotation_filename)) as f:\n annotations = json.load(f)\n target = annotations[image_name]['bounding_boxes'] \n \n #apply target transform\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #crop images for classification if flag is set\n if self.classification:\n img = np.asarray(img)\n images = []\n ids = []\n for box in target:\n cur_img = Image.fromarray(img[box[1]:box[3],\n box[0]:box[2],\n :])\n if self.transform is not None:\n cur_img = self.transform(cur_img)\n images.append(cur_img)\n ids.append(box[4])\n\n img = images\n target = ids\n \n #apply image transform \n if self.transform is not None:\n img = self.transform(img)\n\n image_target_list.append([img,target])\n\n #special case for single image/label\n if(len(image_target_list) == 1):\n image_target_list = image_target_list[0]\n\n return 
image_target_list", "def calculate_scene(self):\n \n if self.is_game_going:\n for pl in self.player_list:\n pl.make_step()\n\n #obj_list = self.player_list + self.foot_list + self.border_list\n obj_list = self.player_list + self.grafik_item_list\n for pl in self.player_list:\n pl.check_for_intersection(obj_list)\n\n for pl in self.player_list:\n if pl.status_remove:\n print(\"x killll xxx\")\n self.stop_game()\n\n # filter removed/killed objecets\n for item in self.grafik_item_list:\n if item.status_remove:\n if item.type == \"food\":\n item.set_random_position()\n\n # end game if a player is removed", "def __handle_view_item(self, gamestate_component):", "def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += 
materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral", "def update_scenes(self) -> None:\n self.scenes.update(\n {\n f\"{group.id}_{scene.id}\": scene\n for group in self.groups.values() # type: ignore\n for scene in group.scenes.values()\n if f\"{group.id}_{scene.id}\" not in self.scenes\n }\n )", "def items(self):\n return _osgAnimation.mapVertexInfluence_items(self)", "def __getitem__(self, index):\n\n #get the image name and box\n #image_name,box_index = self.name_and_box_index[index]\n name_and_index = self.name_and_box_index[index]\n #name_and_index needs to be alist of lists\n if(len(name_and_index) >0 and type(name_and_index[0]) is not list): \n name_and_index = [name_and_index] \n \n image_target_list = []\n\n for image_name,box_index in name_and_index:\n #build the path to the image and annotation file\n #see format tab on Get Data page on AVD dataset website\n if image_name[0] == '0':\n scene_type = 'Home'\n else:\n scene_type = 'Office'\n scene_name = scene_type + \"_\" + image_name[1:4] + \"_\" + image_name[4]\n \n #read the image and bounding boxes for this image\n #(doesn't get the movement pointers) \n img = (Image.open(os.path.join(self.root,scene_name, \n images_dir,image_name)))\n with open(os.path.join(self.root,scene_name,annotation_filename)) as f:\n annotations = json.load(f)\n target = annotations[image_name]['bounding_boxes'] \n \n #apply target transform\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #get the single box\n target = target[box_index]\n\n #crop images for classification if flag is set\n if self.classification:\n img = np.asarray(img)\n img = img[target[1]:target[3],target[0]:target[2],:]\n img = Image.fromarray(img)\n target = target[4] \n \n \n #apply image transform \n if self.transform is not None:\n img = self.transform(img)\n\n image_target_list.append([img,target])\n\n #special case for single image/label\n if(len(image_target_list) == 1):\n image_target_list = image_target_list[0]\n\n return image_target_list", "def items():", "async def async_get_scenes(work_dir=None):\n scenes: Dict[Group, Dict[str, Union[Dict[ResponderAddress, LinkInfo], str]]] = {}\n if work_dir:\n await async_load_scene_names(work_dir=work_dir)\n for addr in devices:\n device = devices[addr]\n if device == 
devices.modem:\n continue\n for rec in device.aldb.find(\n target=devices.modem.address, is_controller=False, in_use=True\n ):\n if rec.group == 0:\n continue\n if not scenes.get(rec.group):\n scenes[rec.group] = {}\n scenes[rec.group][\"name\"] = _scene_names.get(\n rec.group, f\"Insteon Scene {rec.group}\"\n )\n scenes[rec.group][\"group\"] = rec.group\n scenes[rec.group][\"devices\"] = {}\n scene = scenes.get(rec.group)\n if not scene[\"devices\"].get(device.address):\n scene[\"devices\"][device.address] = []\n has_controller = False\n for _ in devices.modem.aldb.find(\n target=device.address, group=rec.group, is_controller=True, in_use=True\n ):\n has_controller = True\n break\n scene[\"devices\"][device.address].append(\n LinkInfo(rec.data1, rec.data2, rec.data3, has_controller, True)\n )\n return scenes", "def load_items(self):\n # LIST OF THE ITEMS TO COLLECT TO WIN\n list_items = [self.aiguille, self.ether, self.tube]\n # CALLING OF THE METHODS define_position\n list_items[0].define_position_item_1()\n list_items[1].define_position_item_2()\n list_items[2].define_position_item_3()\n # LOOP FOR, FOREACH ITEM IN THE LIST, WE DRAW IT ON THE SCREEN\n for item in list_items:\n # CALLING OF THE METHOD display_items\n item.display_items(self.window)\n # IF MACGVER COLLECTS AN ITEM...\n if (self.macgyver.position_x == list_items\n [list_items.index(item)].obj_sprite_x) \\\n and (self.macgyver.position_y == list_items\n [list_items.\n index(item)].obj_sprite_y):\n # IT MAKES A SOUND\n pygame.mixer.music.load(ITEM_SOUND)\n pygame.mixer.music.play()\n # IT INCREMENTS MACGYVER'S BAG\n self.macgyver.bag += 1\n # IT MOVES THE OBJECT TO THE BAG\n list_items[list_items.index(item)].obj_sprite_x = \\\n TILESIZE*(5 + list_items.index(item))\n list_items[list_items.index(item)].obj_sprite_y = \\\n NBCASES*TILESIZE\n # IT HIDES THE QUESTIONS MARK\n self.noitem.fill(TRANSPARENT)", "def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"", "def mosaicLoaded(self):\n for item in self.item_store.itemIterator(item_type = imageItem.ImageItem):\n if (item.getZValue() > self.current_z):\n self.current_z = item.getZValue() + self.z_inc", "def __getitem__(self, index):\n A_path = self.A_paths[index % self.A_size] # make sure index is within then range\n #if self.opt.serial_batches: # make sure index is within then range\n \n\n A_img = Image.open(A_path).convert('L')\n \n A = self.transform_A(A_img)\n # B20 = self.transform_B(B20_img)\n #B2 = self.transform_B(B2_img)\n\n\n index_B50 = index % self.B50_size\n B50_path = self.B50_paths[index_B50]\n B50_img = Image.open(B50_path).convert('L')\n B50 = self.transform_B(B50_img)\n\n\n\n index_B100 = index % self.B100_size\n B100_path = self.B100_paths[index_B100]\n B100_img = Image.open(B100_path).convert('L')\n B100 = self.transform_B(B100_img)\n\n index_B150 = index % self.B150_size\n B150_path = self.B150_paths[index_B150]\n B150_img = 
Image.open(B150_path).convert('L')\n B150 = self.transform_B(B150_img)\n\n\n \n\n index_m0 = 0\n m0_path = self.m0_paths[index_m0]\n m0_img = Image.open(m0_path).convert('L')\n m0 = self.transform_B(m0_img)\n \n index_m50 = 0\n m50_path = self.m50_paths[index_m50]\n m50_img = Image.open(m50_path).convert('L')\n m50 = self.transform_B(m50_img)\n\n index_m100 = 0\n m100_path = self.m100_paths[index_m100]\n m100_img = Image.open(m100_path).convert('L')\n m100 = self.transform_B(m100_img)\n\n index_m150 = 0\n m150_path = self.m150_paths[index_m150]\n m150_img = Image.open(m150_path).convert('L')\n m150 = self.transform_B(m150_img)\n\n\n\n return {'A': A, 'B50': B50,'B100': B100, 'B150': B150, 'A_paths': A_path, 'B50_paths': B50_path,'B100_paths': B100_path, 'B150_paths': B150_path, 'm0':m0, 'm50':m50,'m100':m100, 'm150':m150}", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def __getitem__(self, cur_episode):\n if self.platform == \"win\":\n env = lmdb.open(self.lmdb_file, subdir=False,\n readonly=True, lock=False,\n readahead=False, meminit=False)\n else:\n env = self.env\n # episode_set = self.episode_sets[episode]\n total_support_x = []\n total_query_x = []\n total_support_y = []\n total_query_y = []\n\n for t in range(self.t_task):\n # create a task (n_way*k_shot+ n_way*k_query)\n\n support_x = []\n query_x = []\n support_y = []\n query_y = []\n\n support_imgs = []\n query_imgs = []\n\n # select n_way classes randomly\n selected_classes = np.random.choice(self.total_cls, self.n_way)\n # select k_shot + k_query for each class\n for selected_class in selected_classes:\n selected_imgs = np.random.choice(\n self.dic_img_label[self.num2label[selected_class]], self.k_shot + self.k_query, False)\n support_imgs += selected_imgs[:self.k_shot].tolist()\n query_imgs += selected_imgs[self.k_shot:].tolist()\n\n with env.begin(write=False) as txn:\n for i, img_id in enumerate(support_imgs):\n res = pyarrow.deserialize(txn.get(u'{}'.format(img_id).encode('ascii')))\n support_x.append(self.transform(res[0]))\n support_y.append(np.array([self.label2num[res[1]]]))\n\n for i, img_id in enumerate(query_imgs):\n res = pyarrow.deserialize(txn.get(u'{}'.format(img_id).encode('ascii')))\n query_x.append(self.transform(res[0]))\n query_y.append(np.array([self.label2num[res[1]]]))\n support_x = torch.stack(support_x, 0)\n query_x = torch.stack(query_x, 0)\n support_y = np.array(support_y)\n query_y = np.array(query_y)\n\n # shuffle:\n index = np.random.permutation(len(support_y))\n support_x = support_x[index]\n if not self.fet_global:\n support_y = np.array([i for i in range(self.n_way) for j in range(self.k_shot)])\n support_y = support_y[index]\n\n index = np.random.permutation(len(query_y))\n query_x = query_x[index]\n if not self.fet_global:\n query_y = np.array([i for i in range(self.n_way) for j in range(self.k_query)])\n\n query_y = query_y[index]\n\n # a batch\n total_query_x.append(query_x)\n total_query_y.append(query_y)\n 
total_support_x.append(support_x)\n total_support_y.append(support_y)\n\n total_query_x = torch.cat(total_query_x, 0)\n total_query_y = np.hstack(total_query_y)\n total_support_x = torch.cat(total_support_x, 0)\n total_support_y = np.hstack(total_support_y)\n\n imgs = torch.cat([total_support_x, total_query_x], 0)\n labels = torch.from_numpy(np.hstack([total_support_y, total_query_y]).reshape([-1, 1]))\n return imgs, labels", "def update_all_states(self):\n \n for l in range(0,4):\n temp_s = \"/loop/{}/mode\".format(l+1) # stupid using loop 1 to 4\n temp_m = self.loop_modes[self.loop_states[l]]\n self.osc_client.send_message(temp_s,temp_m)\n print(\"sent_message {} {}\".format(temp_s,temp_m))\n self.set_loop_led(l)\n \n for l in range(0,4):\n for s in range(0,8):\n temp_s = self.osc_slice_string.format(l+1,s)\n temp_m = self.slice_modes[self.slice_states[l][s] ]\n self.osc_client.send_message(temp_s,temp_m)\n print(\"sent message {} {}\".format(temp_s, temp_m))\n self.set_slice_led(l,s)\n return", "def populateSceneRefs(*args):\n pi.referenceDictionary = {}\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, ra=True)\n\n #get reference paths\n refs = cmds.file(q=True, r=True)\n\n buff = []\n # loaded = []\n for ref in refs:\n #get the associated namespace\n ns = cmds.file(ref, q=True, ns=True)\n pi.referenceDictionary[ns] = ref\n\n # put files in buffer list to sort\n for g in pi.referenceDictionary.keys():\n buff.append(g)\n buff.sort()\n\n # now put the sorted namespaces in the list\n for b in buff:\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, append=b, dcc = selectRefs)\n\n # if ref is deferred(not loaded), change it's font\n for ref in refs:\n if cmds.file(ref, q=True, deferReference=True):\n ns = cmds.file(ref, q=True, ns=True) # get the namespace in order to get the item name\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, selectItem=ns) # sel the item in order to query it\n index = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, selectIndexedItem=True)[0] # query the index of sel\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, lineFont = [index, \"obliqueLabelFont\"])\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, deselectAll=True)\n\n # if we're in a lgt file, look through current refs and for each one of type \"anm\", check the frame rates, etc. 
and give option to change\n curr = paths.PathManager(cmds.file(q=True, sn=True))\n if curr.shotType == \"lgt\":\n for ref in refs:\n p=paths.PathManager(ref)\n if p.shotType == \"anm\":\n dict = cFuncs.getFileFrameInfo(cFuncs.fixPath(ref))\n csi.compareSceneInfo(dict)", "def on_draw( self ):\r\n self.clear()\r\n self.setup_3D()\r\n print \"DEBUG:\" , \"There are\" , len( self.renderlist ) , \"items in 'self.renderlist'\"\r\n for obj in self.renderlist:\r\n obj.draw()", "def get_items(self):\n\n to_process_mat_ids = self._find_to_process()\n\n self.logger.info(\n \"Updating all substrate calculations for {} materials\".format(\n len(to_process_mat_ids)\n )\n )\n\n for mpid in to_process_mat_ids:\n e_tensor = self.elasticity.query_one(\n criteria={self.elasticity.key: mpid},\n properties=[\"elasticity\", \"last_updated\"],\n )\n e_tensor = (\n e_tensor.get(\"elasticity\", {}).get(\"elastic_tensor\", None)\n if e_tensor\n else None\n )\n mat = self.materials.query_one(\n criteria={self.materials.key: mpid},\n properties=[\"structure\", \"deprecated\", \"material_id\", \"last_updated\"],\n )\n\n yield {\n \"structure\": mat[\"structure\"],\n \"material_id\": mat[self.materials.key],\n \"elastic_tensor\": e_tensor,\n \"deprecated\": mat[\"deprecated\"],\n \"last_updated\": max(\n mat.get(\"last_updated\"), e_tensor.get(\"last_updated\")\n ),\n }", "def get_section_sprites(self):\n visible = set()\n for rect_info in self.sections:\n if pg.Rect(rect_info).colliderect(self.view_rect):\n visible.update(self.sections[rect_info])\n return visible", "def __getitem__(self, index):\n ID = self.ID[index]\n turn_id = self.turn_id[index]\n turn_belief = self.turn_belief[index]\n turn_belief_dict = self.turn_belief_dict[index]\n sorted_domainslots = self.sorted_domainslots[index]\n turn_uttr = self.turn_uttr[index]\n context_plain = self.dialog_history[index] \n sorted_lenval = self.sorted_lenval[index]\n sorted_in_domains2 = self.sorted_in_domains2[index]\n sorted_in_slots2 = self.sorted_in_slots2[index]\n sorted_generate_y = self.sorted_generate_y[index]\n c = copy.deepcopy\n context = self.preprocess(context_plain, self.src_word2id)\n delex_context = None\n if self.args['delex_his']:\n temp = self.delex_dialog_history[index].split()\n original = self.dialog_history[index].split()\n if self.split == 'train' and 'p_delex_noise' in self.args and np.random.uniform() < self.args['p_delex_noise']:\n prob = np.random.uniform()\n if prob < 0.5:\n indices = [idx for idx,i in enumerate(temp) if len(i.split('-'))==2]\n if len(indices) > 0:\n random_idx = random.choice(indices)\n temp[random_idx] = original[random_idx] # removal \n else:\n random_token = random.choice(self.all_slots)\n out_words = list(self.mem_word2id.keys())[4:]\n indices = [idx for idx,i in enumerate(original) if i in out_words]\n if len(indices) > 0:\n index = random.choice(indices)\n temp[index] = random_token\n delex_context = ' '.join(temp)\n delex_context = self.preprocess(delex_context, self.src_word2id) \n tag_x, tag_y = None, None\n if not self.args['sep_input_embedding']:\n sorted_in_domains = self.preprocess_seq(self.sorted_in_domains[index], self.src_word2id)\n sorted_in_slots = self.preprocess_seq(self.sorted_in_slots[index], self.src_word2id)\n sorted_in_domains2 = self.preprocess_seq(sorted_in_domains2, self.src_word2id)\n sorted_in_slots2 = self.preprocess_seq(sorted_in_slots2, self.src_word2id)\n else:\n sorted_in_domains = self.preprocess_seq(self.sorted_in_domains[index], self.domain_word2id)\n sorted_in_slots = 
self.preprocess_seq(self.sorted_in_slots[index], self.slot_word2id)\n sorted_in_domains2 = self.preprocess_seq(sorted_in_domains2, self.domain_word2id)\n sorted_in_slots2 = self.preprocess_seq(sorted_in_slots2, self.slot_word2id)\n sorted_in_domainslots2_idx, y_in, y_out = None, None, None\n if args['auto_regressive']:\n sorted_in_domainslots2_idx = self.sorted_in_domainslots2_idx[index]\n y_in, y_out = self.preprocess_atrg_seq(self.atrg_generate_y[index], self.src_word2id) \n if self.args['pointer_decoder']:\n sorted_generate_y = self.preprocess_seq(sorted_generate_y, self.src_word2id)\n else:\n sorted_generate_y = self.preprocess_seq(sorted_generate_y, self.mem_word2id)\n sorted_gates = None\n if self.sorted_gates[index] is not None:\n sorted_gates = self.sorted_gates[index]\n user_uttr_plain, user_uttr = None, None\n turn_prev_bs_plain, turn_prev_bs = None, None\n \n item_info = {\n \"ID\":ID, \n \"turn_id\":turn_id, \n \"turn_belief\":turn_belief, #?\n \"context\":context,\n \"delex_context_plain\": self.delex_dialog_history[index],\n \"delex_context\": delex_context,\n \"context_plain\":context_plain, \n \"user_uttr\": user_uttr,\n \"user_uttr_plain\": user_uttr_plain, \n \"sorted_in_domains\": sorted_in_domains,\n \"sorted_in_domains2\": sorted_in_domains2,\n \"sorted_in_slots\": sorted_in_slots,\n \"sorted_in_slots2\": sorted_in_slots2,\n \"sorted_in_domainslots2_idx\": sorted_in_domainslots2_idx, \n \"sorted_lenval\": sorted_lenval,\n \"sorted_gates\": sorted_gates,\n \"sorted_generate_y\": sorted_generate_y,\n \"y_in\": y_in,\n \"y_out\": y_out\n }\n return item_info", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n img_size = img.size\n img_size = (400,400)\n\n loader = loadjson\n \n data = loader(txt, self.objectsofinterest,img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float() \n\n if len(points_all) == 0:\n points_all = torch.zeros(1, 10, 2).double()\n \n # self.save == true assumes there is only \n # one object instance in the scene. 
\n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1,3).float()\n rotations = torch.zeros(1,4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name,'_camera_settings.json')\n with open(path_cam) as data_file: \n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3,3))\n matrix_camera[0,0] = cam['fx']\n matrix_camera[1,1] = cam['fy']\n matrix_camera[0,2] = cam['cx']\n matrix_camera[1,2] = cam['cy']\n matrix_camera[2,2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name,'_object_settings.json')\n with open(path_set) as data_file: \n data = json.load(data_file)\n\n cuboid = torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy() \n\n \n def Reproject(points,tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0]/2, img.size[1]/2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n \n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result = cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test: \n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n \n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key,(12, 115, 170),7) \n \n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1,point2],fill=lineColor,width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0]-pointRadius, point[1]-pointRadius, point[0]+pointRadius, point[1]+pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, 
which_color = 0, color = None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color \n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) #lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n \n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n \n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255,255,255), pointRadius = 3)\n DrawDot(points[1], pointColor=(0,0,0), pointRadius = 3)\n\n # Draw all the found objects. \n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n \n return {\n \"img\":img,\n \"translations\":translations,\n \"rot_quaternions\":rotations,\n 'pointsBelief':np.array(points_all[0]),\n 'matrix_camera':matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name':name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap(\n img, \n pointsBelief=pointsBelief,\n nbpoints = 9,\n sigma = self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg),beliefsImg[0].size(1),beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n \n\n # Create affinity maps\n scale = 8\n if min (img.size) / 8.0 != min (img_size)/8.0:\n # print (scale)\n scale = min (img.size)/(min (img_size)/8.0)\n\n affinities = GenerateMapAffinity(img,8,pointsBelief,objects_centroid,scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0]+1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1]+1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0],self.normal[0],self.normal[0]),\n (self.normal[1],self.normal[1],self.normal[1])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n \n img = crop(img,h_crop,w_crop,img_size[1],img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop/8)\n h_crop = int(h_crop/8)\n\n affinities = affinities[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n beliefs = 
beliefs[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,1,50)],dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,50,1)],dim=2)\n\n return {\n 'img':img, \n \"affinities\":affinities, \n 'beliefs':beliefs,\n }", "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n #rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes", "def next_scene(self):\n if self.current_scene == len(self.scenes) - 1:\n self.current_scene = 0\n else:\n self.current_scene += 1\n print 'Scene: {}'.format(self.current_scene)\n self.redraw()", "def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece", "def _recompute_indexes(self, first_index=0, free_index=None):\n if free_index is None:\n free_index = self.index + 1\n\n # Cleanup the linkable_vars for all the pulses which will be reindexed.\n linked_vars = self.root.linkable_vars\n for var in linked_vars[:]:\n if var[0].isdigit() and int(var[0]) >= free_index:\n linked_vars.remove(var)\n\n for item in self.items[first_index:]:\n\n item.index = free_index\n prefix = '{}_'.format(free_index)\n linkable_vars = [prefix + var for var in item.linkable_vars]\n linked_vars.extend(linkable_vars)\n\n if 
isinstance(item, Sequence):\n item.unobserve('_last_index', self._item_last_index_updated)\n item._recompute_indexes()\n item.observe('_last_index', self._item_last_index_updated)\n free_index = item._last_index + 1\n\n # We have a non indexed item (pulse or template).\n else:\n free_index += 1\n\n self._last_index = free_index - 1" ]
[ "0.5440126", "0.5405451", "0.5320529", "0.5313562", "0.5288161", "0.52768517", "0.52676857", "0.52513534", "0.52176017", "0.521186", "0.5171025", "0.5155609", "0.5147708", "0.51323056", "0.51169014", "0.5096112", "0.5093142", "0.5089115", "0.50864947", "0.5068929", "0.50373554", "0.5031787", "0.50227576", "0.5019718", "0.5002441", "0.49724245", "0.49591285", "0.49512327", "0.49497825", "0.4947155" ]
0.56032324
0