query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Formats the input of a whisper post and converts all values to HEX | def inputPostFormatter(post):
post["ttl"] = from_decimal(post["ttl"])
post["workToProve"] = from_decimal(post.get("workToProve", 0))
post["priority"] = from_decimal(post["priority"])
if not is_array(post.get("topics")):
post["topics"] = [post["topics"]] if post.get("topics") else []
post["topics"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)
for topic in post["topics"]]
return post | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def outputPostFormatter(post):\n\n post[\"expiry\"] = to_decimal(post[\"expiry\"])\n post[\"sent\"] = to_decimal(post[\"sent\"])\n post[\"ttl\"] = to_decimal(post[\"ttl\"])\n post[\"workProved\"] = to_decimal(post[\"workProved\"])\n\n if not post.get(\"topics\"):\n post[\"topics\"] = []\n\n post[\"topics\"] = [decode_hex(topic) for topic in post[\"topics\"]]\n\n return post",
"def _encode_post(self):\n\t\tpost_data = self.config.get('post_data')\n\t\tif post_data is not None:\n\t\t\tpost_data = self._encode_data(post_data, self.config.get('post_data_bits'))\n\t\t\tif self.config.get('post'):\n\t\t\t\tpost_pulse = self._encode_tuple(self.config['post'])\n\t\t\t\treturn post_pulse + post_data\n\t\t\telse:\n\t\t\t\treturn post_data",
"def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode",
"def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)",
"def format_data(self, data):",
"def _encode_text(self):\n\n print(f\"Hex encode; received message is {self.message}\")\n return self.message.encode(\"utf-8\").hex()",
"def format(self, data):",
"def hex(space, w_val):\n return space.hex(w_val)",
"def _stata_hex_format(self, value):\n return self._convert_hex(float(value).hex())",
"def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])",
"def encode(self):\n color_str = []\n if self.brightness is not None:\n color_str.append(f\"brightness:{self.brightness}\")\n if self.hue is not None:\n color_str.append(f\"hue:{self.hue}\")\n if self.saturation is not None:\n color_str.append(f\"saturation:{self.saturation}\")\n if self.kelvin is not None:\n color_str.append(f\"kelvin:{self.kelvin}\")\n\n return ' '.join(color_str)",
"def _format_data(self, data, charset):\n\n return self._encode_data(data) if data else u''",
"def formatData(self, temp):\n \n bits = 32 # Required for this protocol\n temp = int(temp*100) # Multiply by 100 to preserve decimal places\n \n if temp == 0:\n r ='0x00000000'\n elif temp < 0: # 2's complement for negatives\n temp = 2**bits + temp\n r = hex(temp)[:-1] # Remove trailing L for Long\n else:\n temph = hex(temp)\n r = '0x'+'0'*(10-len(temph)) + temph[2:]\n \n return r[2:]",
"def hex_str (self):\n return \"#%02X%02X%02X\"%(self.r, self.g, self.b)",
"def __formatHex(self,hex_):\n output = []\n lineEnd = hex_.find('\\n')\n while lineEnd > 0:\n output.append(hex_[0:lineEnd])\n hex_ = hex_[lineEnd+1:len(hex_)]\n lineEnd = hex_.find('\\n')\n Flash, EEPROM,IDlocs,Config = self.__formatAsPICFlash(output)\n return Flash, EEPROM,IDlocs,Config",
"def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")",
"def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash",
"def format_value(text):\n return text.encode('utf8').replace('\\n', ' ').replace('\\r', ' ')",
"def encode_feed_hashes(self, params):\n body = \"\"\n\n for p in params:\n body += \"feed[]=%s&\" % p\n\n return body",
"def toHex(self):\r\n rgb = self.toRGB()\r\n return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],\r\n hex(rgb[2])[2:])).replace(' ', '0')",
"def escape_values(bfo):\n return 0",
"def escape_values(bfo):\n return 0",
"def escape_values(bfo):\n return 0",
"def phex(value, expected):\n return f\"{value:#0{expected}x}\"",
"def pretty_hebrew(val):\n return 'font-size:20px; font-family: Times New Roman; text-align: right; max-width: 500px'",
"def to_hex6_string(self):\n def c(x):\n return int(x * 255.0)\n return '#{:02x}{:02x}{:02x}'.format(c(self.r), c(self.g), c(self.b))",
"def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + (chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)",
"def toHex(self):\n \n t=self.boolVals[:]\n t.reverse()\n \n string=str(self)\n \n \n string=hex(int(string,2))\n string=string[2:]\n\n d=ceil(self.n/4)-len(string)\n string=d*\"0\"+string\n return string",
"def wkb_hex(self): # -> str:\n ...",
"def sanitizte_color(value):\n if len(value) == 7 and value[0] == '#':\n return \"#%06x\" % int(value[1:], 16)\n raise ValueError('invalid color')"
] | [
"0.6161056",
"0.6024747",
"0.5889979",
"0.5726662",
"0.5721067",
"0.5715071",
"0.5712564",
"0.56954545",
"0.56508905",
"0.5574359",
"0.5507912",
"0.5491416",
"0.5455812",
"0.543887",
"0.54006004",
"0.537959",
"0.536963",
"0.5341674",
"0.5320729",
"0.5312651",
"0.5311158",
"0.5311158",
"0.5311158",
"0.53070915",
"0.5299621",
"0.52676135",
"0.5263652",
"0.5242282",
"0.521867",
"0.5197538"
] | 0.6506106 | 0 |
Formats the output of a received post message | def outputPostFormatter(post):
post["expiry"] = to_decimal(post["expiry"])
post["sent"] = to_decimal(post["sent"])
post["ttl"] = to_decimal(post["ttl"])
post["workProved"] = to_decimal(post["workProved"])
if not post.get("topics"):
post["topics"] = []
post["topics"] = [decode_hex(topic) for topic in post["topics"]]
return post | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format(self, message):",
"def inputPostFormatter(post):\n\n post[\"ttl\"] = from_decimal(post[\"ttl\"])\n post[\"workToProve\"] = from_decimal(post.get(\"workToProve\", 0))\n post[\"priority\"] = from_decimal(post[\"priority\"])\n\n if not is_array(post.get(\"topics\")):\n post[\"topics\"] = [post[\"topics\"]] if post.get(\"topics\") else []\n\n post[\"topics\"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)\n for topic in post[\"topics\"]]\n\n return post",
"def post(self):\n return write_msg(request.json)",
"def render_POST(self, request):\n\t\tprint request.args[\"message\"][0]\n\t\tself.jabberClient.sendMessage(request.args[\"message\"][0])\n\t\treturn ''",
"def render_post(response, post):\n response.out.write('<b>' + post.subject + '</b><br>')\n response.out.write(post.content)",
"def render_post(response, post):\n\n response.out.write('<b>' + post.subject + '</b><br>')\n response.out.write(post.content)",
"def formatText(input_text):\n\tdata = {\"text\": input_text}\n\tprint 'Waiting for return ...'\n\treq = requests.post('http://34.212.39.136:5678/format', json = data)\n\n\toutput_text = req.json()['result']\n\treturn output_text",
"def input_post(): #TODO, error handling for privacy checks\n\n message = request.form['message']\n page_token = session['page']['access_token']\n resp = utils.post_message(message, page_token, session['visibility'])\n return render_template('success.html', post_id = resp['id'])",
"def process_post_result(resp):\n resp_json = resp.json()\n if 'message' in resp_json:\n message = resp_json['message']\n print_info(f'{message}.')\n return\n\n raise Exception(f'{response_message(resp_json)}')",
"def send_echo(self, post_data):\n # Get sent message\n message = self.extract_message(\"/echo\", post_data.text)\n return message",
"def format(self, data):",
"def _encode_post(self):\n\t\tpost_data = self.config.get('post_data')\n\t\tif post_data is not None:\n\t\t\tpost_data = self._encode_data(post_data, self.config.get('post_data_bits'))\n\t\t\tif self.config.get('post'):\n\t\t\t\tpost_pulse = self._encode_tuple(self.config['post'])\n\t\t\t\treturn post_pulse + post_data\n\t\t\telse:\n\t\t\t\treturn post_data",
"def printPost(self, id):\n enc = getpreferredencoding()\n output = self._extractPost(id)['formatted_text']\n print output.encode(enc)",
"def create_output(self, messages):",
"def postMessage(self, message):\n if self.BotOutputRequested:\n pass\n else:\n SiteDetailOutput.PrintStandardOutput(message, verbose=self._verbose)",
"def print_post():\n print('| | |'),",
"def post(self):\n user = request.form['user_name']\n text = request.form['text']\n return {\"text\": 'msg sent successfully.\\ntext:'\n + text + '\\nuser:' + user}",
"def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)",
"def format(self, message):\n\t\tif type(self.protocol[0]).__name__ == \"Raw\":\n\t\t\treturn self.name + \":\" + message\n\t\treturn message",
"def output(self, msg):",
"def post(self):\n r = request.get_json()['text']\n # Recupero dalla richiesta il JSON che mi è stato inviato\n # e salvo il valore contenuto \n # sotto la chiave text su una variabile r.\n # In reguito ritorno r in formato stringa.\n # In quanto il valore di ritorno deve essere una stringa.\n return str(r)",
"def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))",
"async def render_post(self, request):\n self._hx711.tare()\n return Message(code=CHANGED)",
"def format_response(response):\n start_line = _format_status_line(response.status, response.reason)\n msg = _format_message(start_line, response.header, response.body)\n return msg",
"def post_data():\n return json.loads('{\"success\":true, \"message\":\"Data created (but not really)\" }')",
"def post_command(self) -> str:\n rtn = ''\n if self.terminator:\n rtn += self.terminator\n\n if self.suffix:\n rtn += ' ' + self.suffix\n\n if self.pipe_to:\n rtn += ' | ' + self.pipe_to\n\n if self.output:\n rtn += ' ' + self.output\n if self.output_to:\n rtn += ' ' + self.output_to\n\n return rtn",
"def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))\n print('------------END------------\\n')",
"def create_series_msg(self, post_url: str) -> str:\n return self._render(\"series-pm\", post_url=post_url)",
"def horde_message(self, message):",
"def format_response_for_display(self, response, case):\n out_bits = []\n parsed = self.parse_response(response, case)\n\n request = parsed['request']\n out_bits.append(request['request_line'])\n for header, value in request['headers'].items():\n out_bits.append('%s: %s' % (header, value))\n if request['body']:\n out_bits.extend(('', request['body']))\n\n out_bits.extend([''] * 2)\n\n response = parsed['response']\n out_bits.append(response['response_line'])\n for header, value in response['headers'].items():\n out_bits.append('%s: %s' % (header, value))\n if response['body']:\n out_bits.extend(('', response['body']))\n\n return '\\n'.join(out_bits)"
] | [
"0.6517435",
"0.6332223",
"0.6274383",
"0.6151796",
"0.6082813",
"0.60550827",
"0.5994535",
"0.580875",
"0.58030015",
"0.57783115",
"0.5777758",
"0.57161885",
"0.56622404",
"0.5656379",
"0.56329155",
"0.5609236",
"0.5584325",
"0.5571311",
"0.55095625",
"0.5502143",
"0.5492511",
"0.54570204",
"0.54463303",
"0.5421501",
"0.5417131",
"0.541599",
"0.5402953",
"0.5394032",
"0.53855395",
"0.53842014"
] | 0.71348196 | 0 |
Tests the import from local file for cities works fine | def test_csv_import_city(self):
from django.contrib.messages import get_messages
path = reverse("import-csv")
user = mixer.blend(User, is_staff=True, is_superuser=True)
file = open("city.csv")
client = Client()
client.force_login(user)
r = client.post(path, {"title": "city", "csv_file": file})
messages = list(get_messages(r.wsgi_request))
assert r.status_code == 200
assert len(messages) == 1
assert str(messages[0]) == "Successfully Uploaded!" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')",
"def _import_insee_city(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'comsimp2011.csv'))\n city_obj = self.pool.get('insee.city')\n department_obj = self.pool.get('insee.department')\n with open(filepath, 'rb') as cityfile:\n reader = csv.DictReader(cityfile)\n for row in reader:\n args = [('dep', '=', row['DEP'])]\n department_ids = department_obj.search(cr, uid, args)\n department_id = department_ids and department_ids[0] or None\n ncc = row['ARTMAJ'] and row['ARTMAJ'].strip(\"()\") + \\\n row['NCC'] or row['NCC']\n nccenr = row['ARTMIN'] and row['ARTMIN'].strip(\"()\") + \\\n row['NCCENR'] or row['NCCENR']\n values = {\n 'cdc': row['CDC'],\n 'cheflieu': row['CHEFLIEU'],\n 'reg': row['REG'],\n 'dep': row['DEP'],\n 'department_id': department_id,\n 'com': row['COM'],\n 'ar': row['AR'],\n 'ct': row['CT'],\n 'tncc': row['TNCC'],\n 'artmaj': row['ARTMAJ'],\n 'ncc': ncc,\n 'artmin': row['ARTMIN'],\n 'nccenr': nccenr,\n }\n city_obj.create(cr, uid, values, context=context)",
"def GetUsCities():\n return GetDataFromCsvFile('us_cities.csv')",
"async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())",
"def load_cities (filename):\n if not os.path.isfile(filename):\n return None\n # try to decode a plain file\n try:\n with open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n # try to decode a gzipped file\n try:\n with gzip.open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n return None",
"def __init__(self):\n\n with open('../examples/streets.txt', 'r') as sf:\n self.streets = sf.read()\n self.streets = self.streets.lower()\n\n with open('../examples/cities.txt', 'r') as cf:\n self.cities = cf.read()\n self.cities = self.cities.lower()",
"def setUp(self):\n self.my_city = City()",
"def test_city_country(self):\n dublin_ireland = city_country('dublin', 'ireland')\n self.assertEqual(dublin_ireland, 'Dublin, Ireland')",
"def read_locations(db, openfile):\n pass",
"def test_city_country(self):\n formatted_city = get_full_city(\"santiago\", \"chile\")\n self.assertEqual(formatted_city, \"Santiago, Chile\")",
"def read_cities(filename):\n reader = csv.reader(open(filename, \"rb\")) # may raise IOError\n rows = [line for line in reader]\n cities = [City(r[2], index, r[3], float(r[0]), float(r[1])) for index, r in enumerate(rows[1:])]\n return cities",
"def test_path_to_location(self):\n\n print \"Starting import\"\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\n print \"finished import\"\n\n check_path_to_location(modulestore)",
"def get_cities(self, city_name: str = \"\"):",
"def test_path_to_location(self):\r\n\r\n print \"Starting import\"\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\r\n print \"finished import\"\r\n\r\n check_path_to_location(modulestore)",
"def get_cities(self, city_name: str = None):",
"def load_projector(self, projector_file):\n self._test(projector_file)",
"def load_random_cities(data):\n cities = list(set([elem['name'] for elem in data]))\n city_objects = [City(data=city) for city in cities]\n City.objects.bulk_create(city_objects)",
"def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def loadCity(fileid):\n dinf = {}\n root = etree.Element(\"city\")\n text = None\n statename = \"\"\n statefile = \"\"\n cityname = \"\"\n dinf['m'] = {}\n dinf['m']['events'] = {}\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"state\",\"statefile\",\"start\",\"scue\",\"end\",\"ecue\",\"place\",\"aspects\"]\n for tag in tags:\n dinf[tag] = [\"\",False]\n dinf['aspects'] = {}\n if not dinf.get(\"places\"): dinf['places'] = {}\n if not idExists(fileid):\n status.push(0,\"new city created... '%s'\" % fileid)\n return dinf\n fn = os.path.join(config['realmdir'],\"%s.xml\" % fileid)\n status.push(0,\"loading city from XML... '%s'\" % fn)\n try:\n with codecs.open(fn,'rU','utf-8') as f:\n tree = etree.parse(f)\n f.close()\n root = tree.getroot()\n except IOError as e:\n print \"c: Could not open configuration file: %s\" % e\n\n ir = 0\n for i in range(len(root)):\n if root[i].tag is not None:\n if root[i].tag == \"place\":\n if len(root[i]) > 0:\n node = \"\"\n node = root[i].find(\"file\")\n if node.text:\n node = node.text.strip()\n node = common.validateFileid(node)\n dinf['places'][node] = {}\n for j in root[i]:\n if j.tag and j.text and j.tag != \"file\":\n dinf['places'][node][j.tag] = [j.text.strip(),False]\n if config['debug'] > 3: print dinf['places'][node]\n else:\n if config['debug'] > 0:\n print \"Invalid place tag:\"\n for c in root[i]:\n print c.tag + ': ' + c.text,\n else: # no relat length\n if config['debug'] > 0: print \"Empty place tag.\"\n elif root[i].tag == \"events\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['m']['events']))\n dinf['m']['events'][k] = {}\n for j in node:\n if j.tag and j.text:\n dinf['m']['events'][k][j.tag] = [j.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid milestone tag:\"\n for c in node:\n print c.tag + ': ' + c.text,\n if config['debug'] > 3: printPretty(dinf['m']['events'])\n else: # no relat length\n if config['debug'] > 0: print \"Empty milestone tag.\"\n elif root[i].tag == \"aspects\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['aspects']))\n dinf['aspects'][k] = {}\n if node.tag and node.text:\n dinf['aspects'][k] = [node.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid aspects tag:\"\n print node.tag + ': ' + node.text,\n else: # no aspects length\n if config['debug'] > 0: print \"Empty aspects tag.\"\n elif root[i].text is not None:\n if root[i].tag == \"statefile\":\n statefile = root[i].text.strip()\n statefile = common.validateFileid(statefile)\n if statefile is None: statefile = \"\"\n elif root[i].tag == \"state\":\n statename = root[i].text.strip()\n elif root[i].tag == \"name\":\n cityname = root[i].text.strip()\n dinf[root[i].tag] = [root[i].text.strip(), False]\n if config['debug'] > 2: print str(i) + \" \",\n if len(statefile) > 0: pushLoc(statefile,statename,fileid,cityname)\n return dinf",
"def test_init(self, fixture_environment):\n\n # Generate city object\n city_object = cit.City(environment=fixture_environment)\n\n # Check inheritance from citydistrict object of pycity\n assert city_object._kind == 'citydistrict'",
"def __import_locustfile__(filename, path):\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported",
"def test_setting_csv_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"setting-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"url\": \"http://rachel.maykinmedia.nl/djangocase/city.csv\",\n \"username\": \"python-demo\", \"password\": \"claw30_bumps\", \"save\": \"on\"})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"",
"def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = df[df['Day'] == day]\n\n return df",
"def load_data(city, month, day):\n if city == 'chicago':\n city_df = pd.read_csv('chicago.csv')\n elif city == 'new york city':\n city_df = pd.read_csv('new_york_city.csv')\n else:\n # city_df = pd.read_csv('washington.csv')\n print(\"else is running\")\n\n print(city_df.head())\n\n return city_df",
"def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def test_find_cities(self):\n\n # Given\n game_state: CarcassonneGameState = CarcassonneGameState()\n\n city_one_side_straight_road = base_tiles[\"city_top_straight_road\"].turn(3)\n city_with_road = inns_and_cathedrals_tiles[\"ic_15\"].turn(3)\n\n game_state.board = [[None for column in range(2)] for row in range(1)]\n\n game_state.board[0][0] = city_with_road\n game_state.board[0][1] = city_one_side_straight_road\n\n # When\n cities: [City] = CityUtil.find_cities(\n game_state=game_state,\n coordinate=Coordinate(0, 0)\n )\n\n # Then\n self.assertEqual(1, len(cities))\n self.assertEqual(2, len(cities[0].city_positions))\n self.assertTrue(cities[0].finished)",
"def expected_city_names_fixture():\n return {'b', 'a', 'c'}"
] | [
"0.6581644",
"0.64106566",
"0.6348623",
"0.6177727",
"0.59844106",
"0.58887535",
"0.58847845",
"0.5876422",
"0.58704174",
"0.5821457",
"0.5782551",
"0.57631767",
"0.5752825",
"0.573229",
"0.5729731",
"0.5711235",
"0.56939304",
"0.5689702",
"0.5689702",
"0.5689702",
"0.5674587",
"0.56686014",
"0.56476337",
"0.562758",
"0.5616515",
"0.5608479",
"0.5598399",
"0.55836976",
"0.55825526",
"0.55596155"
] | 0.656946 | 1 |
Log control data at each step during evaluation. | def _log_control_data(self, action, global_reward):
action_r = ','.join(['%d' % a for a in action])
cur_control = {'episode': self.cur_episode,
'step': self.t,
'action': action_r,
'reward': global_reward}
self.control_data.append(cur_control) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)",
"def on_eval_batch_begin(self, step, logs=None):",
"def on_eval_begin(self, logs=None):",
"def log_eval(self, epoch, dataset_name):\n pass",
"def record(self, step):",
"def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained",
"def logStep(self):\n n = self.mirror.cv['dp']\n self.r_Vm[n] = self.cv['Vm']\n self.r_Va[n] = self.cv['Va']",
"def log(self, step, data=''):\n if self.debug:\n print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'\n for k in range(0, len(step), 68):\n print '+{:^68.68}+'.format(step[k:k + 68])\n for k in range(0, len(data), 68):\n print '+{:^68.68}+'.format(data[k:k + 68])\n print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'\n print",
"def on_x(self):\r\n self.log()",
"def log_all(self):\n self.save_raw()\n self.log()",
"def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). 
New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)",
"def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()",
"def log_trainable_variables(self):\n var_names = list(self.trainable_variables.keys())\n self.logger.log_trainable_variables(var_names)",
"def simulation_step(self):\n if self.data_valid.get():\n print(\"Output pin %s writing %s\" % (self.name, self.debug_data.get()))",
"def log(self, step):\n # log mean\n tf.summary.scalar(self.name, self.result(), step=step)\n # call log method of each child\n for child in self.children_real_fake:\n child[0].log(step)\n child[1].log(step)",
"def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)",
"def logStuff(self, i, epoch, numEpochs, trainData):\r\n step = i + epoch*self.numBatchesPerEpoch\r\n numSteps = numEpochs*self.numBatchesPerEpoch\r\n if step%2000==0:\r\n self.metricLog['G_loss'] = self.genLoss(*trainData).cpu().data[0]\r\n self.metricLog['D_loss'] = self.discLoss(*trainData).cpu().data[0]\r\n if len(self.lab_train):\r\n xy_lab = self.getLabeledXYonly(trainData)\r\n self.metricLog['Train_Acc(Batch)'] = self.batchAccuracy(*xy_lab)\r\n self.metricLog['Val_acc'] = self.getDevsetAccuracy()\r\n #TODO: add Inception and FID\r\n self.writer.add_scalars('metrics', self.metricLog, step)\r\n prettyPrintLog(self.metricLog, epoch, numEpochs, step, numSteps)\r\n\r\n self.scheduleLog['lr'] = self.lr_scheduler.get_lr()[0]\r\n self.writer.add_scalars('schedules', self.scheduleLog, step)\r\n\r\n fakeImages = self.G(self.fixed_z).cpu().data\r\n self.writer.add_image('fake_samples', \r\n vutils.make_grid(fakeImages, normalize=True), step)",
"def on_eval_end(self, logs=None):",
"def log(self, report, epoch):\n train_return_values = np.asarray([trajectory['reward'].sum()\n for trajectory in report['training_trajectories']])\n trajectories_infos = [trajectory['info'] for trajectory in report.pop('training_trajectories')]\n sum_costs = np.asarray([sum(list(map(lambda info: info.get('cost', 0.0), trajectory)))\n for trajectory in trajectories_infos])\n report.update(dict(\n training_rl_objective=train_return_values.mean(),\n sum_rewards_stddev=train_return_values.std(),\n mean_sum_costs=sum_costs.mean()\n ))\n training_step = report.pop('total_training_steps')\n for key, value in report.items():\n self.training_logger.log_scalar(value, key, training_step)\n self.training_logger.flush()",
"def _logging(self):\n msgs = []\n # patch to log stdout spawned processes of dataloader\n logger = init_logger()\n for ds_name, ds_count in self._counts.items():\n msgs.append(f\"\\t\\t\\t* {ds_name}: {ds_count}\")\n logger.info(\"Weighted corpora loaded so far:\\n\" + \"\\n\".join(msgs))",
"def on_eval_batch_end(self, step, logs=None):",
"def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')",
"def _log(self, action: types.NestedArray) -> None:\n if self._logger is None:\n return\n self._logger.info('{}, {}, {}, {}, {}, {}, {}'.format(\n self._last_timestep.observation['STAGE'],\n self._last_timestep.observation['CHIPS'],\n self._last_timestep.observation['PLAYER_TOTAL'],\n self._last_timestep.observation['PLAYER_ACES'],\n self._last_timestep.observation['DEALER_TOTAL'],\n action,\n self._deck_distribution))",
"def callback(_locals, _globals):\n global n_steps\n # Print stats every 20 calls\n if (n_steps + 1) % 1 == 0:\n # Evaluate policy training performance\n episode_rewards, episode_lengths = evaluate_policy(_locals['self'], eval_real_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at target: {:.2f}\".format(episode_rewards))\n\n episode_rewards_grnd, episode_lengths_grnd = evaluate_policy(_locals['self'], eval_grnd_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at grounded environment: {:.2f}\".format(episode_rewards_grnd))\n\n with open(os.path.join(log_dir, 'eval_at_target.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards, episode_lengths/n_eval_episodes))\n f.close()\n with open(os.path.join(log_dir, 'eval_at_grnd.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards_grnd, episode_lengths_grnd/n_eval_episodes))\n f.close()\n n_steps += 1\n return True",
"def log_tensorboard(self, value_dict, step):\n for key, value in value_dict.items():\n summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])\n self.writer.add_summary(summary, step)",
"def on_L3(self):\r\n self.log()",
"def compute_debug(self):",
"def simulate(self):\n #loop to perform additional steps until the current temperature is no longer greater than the ending_temperature\n while self.current_T >= self.end_temp: \n self.step(self.current_T)\n \n #log various parameters that changed in the MCMCSampler object after a single step\n self.temperature.append(self.current_T)\n self.iteration.append(self.current_iteration)\n self.energy.append(self.current_energy)\n #return a pandas dataframe that will hold all of the information requested above\n log_table = pd.DataFrame(list(zip(self.iteration, self.energy, self.temperature)), columns =['iteration', 'energy', 'temperature']) \n return(log_table)",
"def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())",
"def internal_event (self):\n self.clock_time += 1\n self.log()"
] | [
"0.6422396",
"0.6307403",
"0.6306143",
"0.6290289",
"0.6281441",
"0.6219549",
"0.61849844",
"0.61498964",
"0.6084978",
"0.6076392",
"0.60656595",
"0.60503274",
"0.5975851",
"0.597538",
"0.59376127",
"0.5935147",
"0.5904615",
"0.58918566",
"0.5876711",
"0.5858225",
"0.58474666",
"0.5846372",
"0.58054787",
"0.57902116",
"0.5784475",
"0.5778492",
"0.57668",
"0.5735868",
"0.5714438",
"0.57084805"
] | 0.65032554 | 0 |
Get actions of each agents neighbour in the graph. | def get_neighbor_action(self, action):
naction = []
for i in range(self.n_agent):
naction.append(action[self.neighbor_mask[i] == 1])
return naction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actions(self) -> list:\n if self.debug: print(f\"StateNode.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._edges = self.state.actions()\n for e in self._edges:\n e: Action\n e.source = self\n e.cost = self.get_cost(e)\n self._examined = True\n return self._edges",
"def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")",
"def act(self, states: np.ndarray, eps: float = 0.0) -> List[np.ndarray]:\n actions = [\n agent.act(state.reshape(-1, 1).T, eps)\n for agent, state in zip(self.agents, states)\n ]\n return actions",
"def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list",
"def get_actions(self):\n return self.agent.get_actions()",
"def get_possible_actions(self, world_state,agent_host):\n action_list = []\n possibilities = {'movenorth 1': -3,'movesouth 1': 3,'moveeast 1': 1,'movewest 1': -1}\n #check walls to see whether can move left,right,back,forward\n #check floor beneath to see whether should do anything at all, or just nothing and sink\n obs_text = world_state.observations[-1].text\n obs = json.loads(obs_text)\n grid = load_grid(world_state)\n print 'GRID SIZE: ', len(grid)\n for k,v in possibilities.items():\n #with current grid, index 31 will always be our agent's current location\n #check walls to see whether can move left,right,back,forward\n if grid[31+v+9] == 'water' or grid[31+v+9] == 'wooden_door': #+9 because we want to check\n action_list.append(k) #where our feet are located\n #check if you can teleport down a level\n if grid[31-27] == 'water' or grid[31-27] == 'wooden_door':\n action_list.append(self.teleport(agent_host,False))\n #check if you can teleport up a level\n if grid[31+45] == 'water' or grid[31+45] == 'wooden_door':\n action_list.append(self.teleport(agent_host,True))\n\n print(\"ACTION LIST: {}\".format(action_list))\n return action_list",
"def traverse(self, action_details: Dict):\n agent = action_details[\"agent_id\"]\n self.agents[agent-1].traversing = True\n # distanation node\n dest_node = action_details[\"to\"]\n\n # TODO add checks for from and to nodes\n\n node1, node2, distance = self.agents_location[agent]\n # people_collected = 0\n \n # If the agent is in node ( not on the edge ) check if the distination node is its neighbor\n if node1 == node2 and self.graph.is_neighbours(node1, dest_node) and not (node2,dest_node) in self.blocked_edges :\n # Get (node1,dest_node) edge weight\n\n edge_weight = self.graph.get_weight(node1, dest_node)\n\n # Move the agent into the edge (node1,dest_node)\n distance = edge_weight - 1\n self.agents_location[agent] = [node1, dest_node, distance]\n action_succeed = True\n\n # If the agent is already inside the edge , check whether destination node is correct\n elif node1 != node2 and node2 == dest_node:\n\n # Move the agent one step on the edge\n distance -= 1\n self.agents_location[agent][2] = distance\n\n action_succeed = True\n else:\n # If the destination node is wrong\n action_succeed = False\n # TODO write warning\n\n # If the agent arrived to some node , collect all the people there and change the location from [node1,node2,X]\n # to [dest_node,dest_node,0]\n if distance == 0 and action_succeed:\n self.agents_location[agent] = [dest_node, dest_node, 0]\n self.agents[agent-1].traversing = False\n self.agents[agent-1].location = dest_node\n action_succeed = True\n\n self.agents_last_action[agent] = action_succeed\n\n new_observation = self.get_observation({})\n\n return new_observation",
"def process_actions(self, n_steps, actions):\n # Each row of actions is one time step,\n # row contains action indices for all agents\n # Convert to [time, agents, l_action]\n # so each agent gets its own 1-hot row vector\n actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)\n grid = np.indices((n_steps, self.n_agents))\n actions_1hot[grid[0], grid[1], actions] = 1\n # Convert to format [time*agents, agents-1, l_action]\n # so that the set of <n_agent> actions at each time step\n # is duplicated <n_agent> times, and each duplicate\n # now contains all <n_agent>-1 actions representing\n # the OTHER agents actions\n list_to_interleave = []\n for n in range(self.n_agents):\n # extract all actions except agent n's action\n list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )\n # interleave\n actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])\n for n in range(self.n_agents):\n actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]\n # In-place reshape of actions to [time*n_agents, l_action]\n actions_1hot.shape = (n_steps*self.n_agents, self.l_action)\n\n return actions_1hot, actions_others_1hot",
"def _obtain_OtherAgentsActionsSummationTensor(self):\n dim = np.concatenate(([self.N], # agent i\n [self.N for _ in range(self.N-1)], # other agnt\n [self.M], # agent a of agent i\n [self.M for _ in range(self.N)], # all acts\n [self.M for _ in range(self.N-1)])) # other a's\n Omega = np.zeros(dim.astype(int), int)\n\n for index, _ in np.ndenumerate(Omega):\n I = index[0]\n notI = index[1:self.N]\n A = index[self.N]\n allA = index[self.N+1:2*self.N+1]\n notA = index[2*self.N+1:]\n\n if len(np.unique(np.concatenate(([I], notI)))) is self.N:\n # all agents indicides are different\n\n if A == allA[I]:\n # action of agent i equals some other action\n cd = allA[:I] + allA[I+1:] # other actionss\n areequal = [cd[k] == notA[k] for k in range(self.N-1)]\n if np.all(areequal):\n Omega[index] = 1\n\n return Omega",
"def getNeighbors(self):\n targets = set()\n for arc in self._arcsFrom:\n targets.add(arc.getFinish())\n return [ node for node in sorted(targets) ]",
"def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions",
"def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions",
"def actions(self):\n x, y = self._empty\n\n actions = []\n\n if x > 0: actions.append((x - 1, y))\n if y > 0: actions.append((x, y - 1))\n if x < self._size - 1: actions.append((x + 1, y))\n if y < self._size - 1: actions.append((x, y + 1))\n\n return actions",
"def act(self, obs_all_agents, noise=0.0):\n actions_next = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions_next",
"def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]",
"def act(self, states, add_noise=True):\n actions = np.zeros([self.num_agents, self.action_size])\n for index, agent in enumerate(self.agents):\n actions[index, :] = agent.act(states[index], add_noise)\n return actions",
"def actions(self):\n return {0, 1, 2, 3, 4, 5, 11, 12}",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def solution(self):\n return [node.action for node in self.path()[1:]]",
"def actions(self, state):\n\n\t\tpossibleActions = []\n\n\t\tflashlightLocation = state[0]\n\n\t\t\"\"\"\n\t\t\tIf a person is on the side of the flashlight, then they can cross the bridge by themselves or \n\t\t\tthey can cross with another person who is also on their side (the side of the flashlight).\n\t\t\t-\tSo we add an action for this person crossing by themselves, and also actions for them crossing\n\t\t\t\twith other people (each of these actions is them crossing with one of these other \n\t\t\t\tpeople, making 2 of them crossing the bridge)\n\t\t\t\t\n\t\t\tNote that person i and person j crossing the bridge is the same action as person j and person i crossing, \n\t\t\tand we only want to add this action once so when determining the people that person i can cross with \n\t\t\twe look at people who come after this person i (a person j where j > i) \n\t\t\"\"\"\n\n\t\tfor personI in range(1, self.n+1): # exclude the flashlight - only traverse the peoples' locations\n\t\t\tif state[personI] == flashlightLocation: #This person can cross the bridge\n\t\t\t\taction = [personI] # This person (person i) can cross bridge on their own (with the flashlight)\n\t\t\t\tpossibleActions.append(action)\n\t\t\t\tfor personJ in range(personI+1, self.n+1):\n\t\t\t\t\tif state[personJ] == flashlightLocation: # This person (person j) can cross the bridge\n\t\t\t\t\t\taction = [personI, personJ] # person i can cross the bridge with person j (and the flashlight)\n\t\t\t\t\t\tpossibleActions.append(action)\n\n\t\treturn possibleActions",
"def solution(self):\n\t\treturn [node.action for node in self.path()[1:]]",
"def actions(self, states, agent_indices):\n return NotImplementedError()",
"def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target",
"def neighbours(self):\n return [x.node for x in self.edges]",
"def neighbors(\n self, state: Grid2D.State\n ) -> Iterable[Tuple[Grid2D.Action, Grid2D.State]]:\n # pylint: disable=invalid-name\n for a, cell in self.adjacent_coordinates(cell=state.agent_position):\n if not self.is_wall(cell):\n yield (a, Grid2D.State(cell))",
"def actions(self):\n return self._action_list",
"def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return np.array(actions)",
"def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for l in range(self.u0_n):\n for m in range(self.u1_n):\n \n u = np.array([ self.ud[0][l] , self.ud[1][m] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = np.array([l,m])\n \n # Increment node number\n action = action + 1"
] | [
"0.7049442",
"0.6467031",
"0.643074",
"0.6362425",
"0.62686896",
"0.6207406",
"0.6155371",
"0.6118249",
"0.6094078",
"0.6003385",
"0.59684646",
"0.59684646",
"0.59386194",
"0.5927922",
"0.5917781",
"0.58938307",
"0.58807933",
"0.5880295",
"0.5880295",
"0.5880295",
"0.5880295",
"0.58579725",
"0.5857972",
"0.58551204",
"0.5835745",
"0.58263546",
"0.5824062",
"0.5822234",
"0.57835925",
"0.5774486"
] | 0.71311367 | 0 |
! resources object of Resources class contain resources from config file options object of MergeOptions class contain merge options from config file str_name default value same as the class name "SynsetsSUMOMerger2" | def __init__(self, resources, options, str_name = 'SynsetsSUMOMerger2'):
super(SynsetsSUMOMerger2, self).__init__(resources, options, str_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resources(self):",
"def register_resources(self, resources):\n from tw.api import merge_resources\n merge_resources(self.request_local.resources, resources)",
"def MergeLogic(self) -> str:",
"def _merge_resource(self, resource, desired, unmanaged):\n unmanaged_resource = unmanaged[resource] # this always exists\n desired_resource = desired.get(resource)\n if desired_resource is None:\n desired_data = {}\n else:\n desired_data = desired_resource.data\n\n # determine if any changes occurred after merging\n if unmanaged_resource.merge(desired_data):\n return unmanaged_resource\n return None",
"def mergeConfig(self):\n config = \\\n \"from Configuration.DataProcessing.Merge import mergeProcess\\nprocess = mergeProcess(\\n \"\n config += \",\".join(self.merge_inputs)\n config += \",\\n\"\n config += \" output_file = \\\"%s\\\",\\n\" % os.path.basename(self.lfn)\n config += \" output_lfn = \\\"%s\\\"\\n) \" % self.lfn\n return config",
"def merge(): #Status: WIP\r\n pass",
"def copyResource(game, channel, packageName, sdkDir, decompileDir, operations, name, pluginInfo=None):\n\n if operations != None:\n for child in operations:\n if child['type'] == 'mergeManifest':\n manifestFrom = utils_file.getFullPath(os.path.join(sdkDir, child['from']))\n manifestFromTemp = manifestFrom\n manifestTo = utils_file.getFullPath(os.path.join(decompileDir, child['to']))\n\n if 'orientation' in game:\n if game['orientation'] == 'portrait':\n manifestFrom = manifestFrom[:-4] + \"_portrait.xml\"\n else:\n manifestFrom = manifestFrom[:-4] + \"_landscape.xml\"\n\n if not os.path.exists(manifestFrom):\n manifestFrom = manifestFromTemp\n\n utils_log.info(\"The sdk manifest file is %s\", manifestFrom)\n\n # merge into xml\n bRet = mergeManifest(channel, manifestTo, manifestFrom)\n if bRet:\n utils_log.info(\"merge manifest file success.\")\n else:\n utils_log.error(\"merge manifest file failed.\")\n return 1\n\n elif child['type'] == 'copyRes':\n\n if child['from'] == None or child['to'] == None:\n utils_log.error(\"the sdk config file error. 'copyRes' need 'from' and 'to'.sdk name:%s\", name)\n return 1\n\n copyFrom = utils_file.getFullPath(os.path.join(sdkDir, child['from']))\n copyTo = utils_file.getFullPath(os.path.join(decompileDir, child['to']))\n\n if child['to'] == 'lib':\n copyLibs(game, copyFrom, copyTo, decompileDir)\n else:\n copyResToApk(copyFrom, copyTo)\n\n elif child['type'] == 'script' and pluginInfo != None:\n # now only third-plugin support script\n if child['from'] == None:\n utils_log.error(\"the sdk config file is error. 'script' need 'from' attrib to specify script.py\")\n return 1\n\n scriptName = child['from']\n utils_log.info(\"now to execute plugin script. name:%s\", scriptName)\n doScript(channel, pluginInfo, decompileDir, packageName, sdkDir, scriptName)\n\n return 0",
"def createMergedConfigFile(self):\n # Read config data\n if os.path.isfile(self.config_file):\n with open(self.config_file, 'r') as stream:\n try:\n cfg = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n if debug:\n print(\"Using Config file: \" + self.config_file)\n else:\n if debug:\n print(\"Config file does not exist: \" + self.config_file)\n exit(1)\n\n # If project namespace was not in the config file, set a default\n if (cfg is not None\n and 'generic' in cfg\n and 'project_namespace' in cfg['generic']\n and cfg['generic']['project_namespace'] is not None\n and len(cfg['generic']['project_namespace']) > 0):\n if debug:\n print(\"Using specified namespace\")\n else:\n conf_dir = os.path.dirname(self.config_file)\n cmd = \"cd \" + conf_dir + ' && basename `git rev-parse --show-toplevel`'\n try:\n result_bytes = subprocess.check_output(cmd,\n timeout=300,\n shell=True)\n project_namespace = result_bytes.decode('UTF-8').rstrip()\n if debug:\n print(\"Derived namespace from git: \" + project_namespace)\n except subprocess.CalledProcessError as e:\n if debug:\n print(\"Error deriving project namespace from git: \", e.output)\n sys.exit(1)\n # Insert the project_namespace into the config data\n if cfg is None:\n cfg = {}\n if 'generic' not in cfg:\n cfg['generic'] = {}\n cfg['generic']['project_namespace'] = project_namespace\n\n # Confirm project namespace\n if debug:\n print(\"Project Namespace: \" + cfg['generic']['project_namespace'])\n\n # Read overrides\n override_file_data = {}\n if os.path.isfile(self.override_file):\n with open(self.override_file, 'r') as stream:\n try:\n override_file_data = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Created merged data\n self.config_data = cfg\n # print(\"Applying override_file_data: \" + str(override_file_data))\n if override_file_data is not None:\n self.config_data = merge(self.config_data, override_file_data)\n\n # Ensure parent directory for merged file exists\n directory = Path(self.merged_file).parent\n if not os.path.exists(directory):\n os.makedirs(directory)\n # Created merged file\n with open(self.merged_file, 'w') as out_file:\n yaml.dump(self.config_data, out_file)",
"def build(self) -> Optional[Bundle]:\n # Prepare STIX2 bundle objects with author.\n bundle_objects = [self.author]\n\n # Add object marking definitions to bundle.\n bundle_objects.extend(self.object_markings)\n\n # Create intrusion sets and add to bundle.\n intrusion_sets = self._create_intrusion_sets()\n bundle_objects.extend(intrusion_sets)\n\n # Create sectors and add to bundle.\n sectors = self._create_sectors()\n bundle_objects.extend(sectors)\n\n # Intrusion sets target sectors and add to bundle.\n intrusion_sets_target_sectors = self._create_targets_relationships(\n intrusion_sets, sectors\n )\n bundle_objects.extend(intrusion_sets_target_sectors)\n\n # Create locations and add to bundle.\n locations = self._create_locations()\n bundle_objects.extend(locations)\n\n # Intrusion sets target locations and add to bundle.\n intrusion_sets_target_locations = self._create_targets_relationships(\n intrusion_sets, locations\n )\n bundle_objects.extend(intrusion_sets_target_locations)\n\n # Create observations.\n observations = self._create_ioc_observations()\n\n # Get observables and add to bundle.\n observables = [o.observable for o in observations if o.observable is not None]\n bundle_objects.extend(observables)\n\n # Get indicators, create YARA indicators and to bundle.\n indicators = [o.indicator for o in observations if o.indicator is not None]\n indicators.extend(self._create_yara_indicators())\n bundle_objects.extend(indicators)\n\n # Get observation relationships and add to bundle.\n indicators_based_on_observables = [\n o.relationship for o in observations if o.relationship is not None\n ]\n bundle_objects.extend(indicators_based_on_observables)\n\n # Indicator indicates entities, add to bundle.\n indicator_indicates = intrusion_sets\n\n indicator_indicates_entities = self._create_indicates_relationships(\n indicators, indicator_indicates\n )\n bundle_objects.extend(indicator_indicates_entities)\n\n # Create object references for the report.\n object_refs = create_object_refs(\n intrusion_sets,\n sectors,\n intrusion_sets_target_sectors,\n locations,\n intrusion_sets_target_locations,\n observables,\n indicators,\n indicators_based_on_observables,\n indicator_indicates_entities,\n )\n\n # TODO: Ignore reports without any references or not?\n # Hack, the report must have at least on object reference.\n if not object_refs:\n dummy_object = self._create_dummy_object()\n\n bundle_objects.append(dummy_object)\n object_refs.append(dummy_object)\n\n # Create report and add to bundle.\n report = self._create_report(object_refs)\n bundle_objects.append(report)\n\n # XXX: Without allow_custom=True the observable with the custom property\n # will cause an unexpected property (x_opencti_score) error.\n return Bundle(objects=bundle_objects, allow_custom=True)",
"def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()",
"def merge_spec(self):\n from django_swagger_utils.spec_client.merge_spec import MergeSpec\n merge_spec = MergeSpec(self.paths['api_spec_dir'], self.paths['base_dir'])\n merge_spec.merge()",
"def merge(self, ref, *args):\n return self.cmd('merge', ref, *args)",
"def merge(self, obj):\n pass",
"def resourceManager(*args, nameFilter: AnyStr=\"\", saveAs: List[AnyStr, AnyStr]=None,\n **kwargs)->None:\n pass",
"def resources(self, resources):\n self._resources = resources",
"def _resolve_duplicates(self) -> None:\n resource_ids_resources: DefaultDict[str, List[Resource]] = defaultdict(list)\n for resource in self.resources:\n resource_ids_resources[resource.resource_id].append(resource)\n merged_resources: List[Resource] = []\n for resource_id, resources in resource_ids_resources.items():\n if len(resources) > 1:\n merged_resource = ResourceSpec.merge_resources(\n resource_id=resource_id, resources=resources\n )\n merged_resources.append(merged_resource)\n for merged_resource in merged_resources:\n self.resources = [\n resource\n for resource in self.resources\n if resource.resource_id != merged_resource.resource_id\n ]\n self.resources.append(merged_resource)",
"def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )",
"def _merge(self):\n raise NotImplementedError",
"def _build_resources_template(self, output_filename=\"{}_r.json\"):\n\n template = self._base_troposphere_template()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_resources_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_resources_template(template)\n\n template = utils.fix_troposphere_references(template)\n\n if template and template.resources:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json())",
"def merge(self):\n rdr = Reader(self.config)\n rdr.read_string(utils.paste(single_line=False))\n if len(rdr.get_entry_collection().entries) == 0:\n self.visual.error(\"Zero items extracted from the collection to merge.\")\n return\n eids = []\n for entry in rdr.get_entry_collection().entries.values():\n self.entry_collection.add_new_entry(entry)\n eids.append(entry.ID)\n self.selector.update_reference(self.reference_entry_id_list)\n # select them\n res = self.selector.select_by_id(eids)\n if res is None:\n self.visual.error(\"Failed to select merged entry!\")\n self.visual.log(\"Merged new entr{}:\".format(\"y\" if len(res) == 1 else \"ies\"))\n self.show_entries()",
"def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)",
"def __init__(self):\r\n self.label = \"mergeAreas\"\r\n self.description = \"Merges dark targets feature classes into single \\\r\n acquisition swathes by day.\"\r\n self.canRunInBackground = False",
"def is_merged(self):\r\n url = '{0}/merge'.format(self.get_url())\r\n\r\n return http.Request('GET', url), resource.parse_boolean",
"def resource_map(self):",
"def getResourceReplacers(self):\n replacers = {}\n replacerDir = os.path.join(self.dir,'Replacers')\n if not os.path.exists(replacerDir):\n return replacers\n if 'mosh.resourceReplacer.applied' not in settings:\n settings['mosh.resourceReplacer.applied'] = []\n for name in os.listdir(replacerDir):\n path = os.path.join(replacerDir,name)\n if os.path.isdir(path):\n replacers[name] = ResourceReplacer(replacerDir,name)\n return replacers",
"def merge():\n click.echo(\"Not implemented yet. In the future, this command will be used for merging models.\")\n sys.exit(-2)",
"def get_resources(self):\n res = set()\n res.update(self.get_inputs())\n res.update(self.get_outputs())\n return res",
"def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1",
"def merge(self, filename = None, format = 'srt'):\n \n return self.download(filename, format, True)",
"def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)"
] | [
"0.5547208",
"0.54064465",
"0.5403034",
"0.53563666",
"0.53249174",
"0.5320973",
"0.53159565",
"0.5253915",
"0.5152775",
"0.51470643",
"0.5145946",
"0.5095782",
"0.50935775",
"0.5056793",
"0.501863",
"0.50170356",
"0.49941415",
"0.4964399",
"0.49395525",
"0.4936977",
"0.49245661",
"0.49073938",
"0.48995975",
"0.48994577",
"0.48935193",
"0.48932666",
"0.4886419",
"0.48360097",
"0.48348394",
"0.48240554"
] | 0.82576424 | 0 |
! Create a dictionary based on the mapping of PLWN onto the SUMO ontology file. The dictionary format and the PLWN-to-SUMO ontology mapping file format are presented below. | def get_plwn2sumo_dict(self):
if not os.path.exists(self.resources().mapping_sumo_file()):
raise IOError(
"%s file not found!" % \
self.resources().mapping_sumo_file()
)
plwn2sumo_dict = defaultdict(set)
with open(self.resources().mapping_sumo_file()) as sumofile:
next(sumofile)
for line in sumofile:
synset_id = int(line.strip().split(';')[0])
sumo = line.strip().split(';')[-2]
plwn2sumo_dict[sumo].add(synset_id)
return plwn2sumo_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mapping_stratum(download_files =True):\r\n # get code description _index \r\n ix_= AGSO_PROPERTIES['props_codes'].index('name')\r\n def mfunc_(d): \r\n \"\"\" Set individual layer in dict of properties \"\"\"\r\n _p= {c: k.lower() if c not in ('code', 'label', 'name') else k \r\n for c, k in zip(AGSO_PROPERTIES['props_codes'], d) }\r\n id_= d[ix_].replace('/', '_').replace(\r\n ' ', '_').replace('\"', '').replace(\"'\", '').lower()\r\n return id_, _p \r\n rock_and_structural_props =list()\r\n for agso_data in tuple(set_agso_properties(download_files)): \r\n # remove the header of the property file\r\n rock_and_structural_props.append(\r\n dict(map( lambda x: mfunc_(x), agso_data[1:])))\r\n \r\n return tuple(rock_and_structural_props)",
"def map_rule4(self):\n odml.terminology.terminologies['map'] = parse(\"\"\"\n S1[T1]\n - P2\n S2[T2]\n - P1\n S3[T3]\n - P1\n - P2\n - P3\n \"\"\")",
"def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n 
currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = {}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n 
loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n 
currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n 
currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))",
"def generate_antonym_pairs(config: SettingConfig) -> dict:\n print(f\"Generating initial antonym pairs from RoWordNet @ {datetime.now()}\")\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get the outbound relations of type antonym from\n outbound_relations = filter(lambda x: x[1] == 'near_antonym', wn.outbound_relations(synset_id))\n\n # Iterate outbound relations\n for relation in outbound_relations:\n # Get the synset corresponding to the target of the outbound relation\n target_synset = wn.synset(relation[0])\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_cross_synset_pairs(synset, target_synset)\n\n # Add the current set of pairs\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n # Return the whole dictionary\n print(f\"Successfully generated antonym paris @ {datetime.now()}\")\n return pairs",
"def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'",
"def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''",
"def osp2():\n return dict(\n kloc= range(75,125),\n docu = [3,4], ltex = [2,5],\n sced = [2,3,4], Pmat = [4,5],\n Prec = [3,4, 5],\n Resl = [4], Team = [3],\n acap = [4], aexp = [4],\n cplx = [4], data = [4],\n Flex = [3], pcap = [3],\n pcon = [3], pexp = [4],\n pvol = [3], rely = [5],\n ruse = [4], site = [6],\n stor = [3], time = [3],\n tool = [5])",
"def _output_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n fileout = os.path.normpath('{}/{}-{}.xml'.\\\n format(self.MapCreator, self.Source, self.ddnCurProject.get()))\n linesout = ['<?xml version=\"1.0\" encoding=\"UTF-8\"?>', \\\n '<DictionarySet xmlns:mc=\"urn:fmosoft-map-creator\" xmlns=\"urn:fmosoft-map-creator\" Version=\"1\">', \\\n ' <Dictionary SourceLanguage=\"{}\" SourceLanguageIsPredefined=\"true\" TargetLanguage=\"{}\" TargetLanguageIsPredefined=\"false\">'.\\\n format(self.Source, self.ddnCurProject.get()), \\\n ]\n for child in self.tree.get_children('approved'):\n vv = self.tree.item(child)['values']\n linesout.append(' <Translation Source=\"{}\" Target=\"{}\"/>'.format(vv[0], vv[1]))\n linesout.append(' </Dictionary>')\n linesout.append('</DictionarySet>')\n linesout.append('')\n\n if os.path.exists(fileout):\n os.remove(fileout)\n\n if fileout:\n output = codecs.open(fileout, mode='w', encoding='utf-8')\n output.write('\\n'.join(linesout))\n output.close()\n pass",
"def parse_pl(pl_file_name):\n with open(pl_file_name, 'r') as f:\n # read lines without blank lines\n lines = [l for l in (line.strip() for line in f) if l]\n\n # Skip the first line: UCLA nodes ...\n lines_iter = iter(lines[1:])\n \n pl_dict = dict()\n for l in lines_iter:\n if l.startswith('#'): continue\n\n tokens = l.split()\n assert len(tokens) >= 5\n\n name, x, y, orient = \\\n tokens[0], float(tokens[1]), float(tokens[2]), tokens[4]\n\n # for ICCAD\n orient = 'N'\n\n pl_dict[name] = (x, y, orient)\n\n return pl_dict",
"def generate_synonym_pairs(config: SettingConfig) -> dict:\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_synset_pairs(synset)\n\n # Append all pairs from the current PoS to the global set\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n return pairs",
"def _create_dnp3_object_map(self):\n\n feeders = self.file_dict.get(\"feeders\", [])\n measurements = list()\n capacitors = list()\n regulators = list()\n switches = list()\n solarpanels = list()\n batteries = list()\n fuses = list()\n breakers = list()\n reclosers = list()\n energyconsumers = list()\n for x in feeders:\n measurements = x.get(\"measurements\", [])\n capacitors = x.get(\"capacitors\", [])\n regulators = x.get(\"regulators\", [])\n switches = x.get(\"switches\", [])\n solarpanels = x.get(\"solarpanels\", [])\n batteries = x.get(\"batteries\", [])\n fuses = x.get(\"fuses\", [])\n breakers = x.get(\"breakers\", [])\n reclosers = x.get(\"reclosers\", [])\n energyconsumers = x.get(\"energyconsumers\", [])\n\n # Unique grouping of measurements - GroupBy Name, Type and Connectivity node\n groupByNameTypeConNode = defaultdict(list) \n for m in measurements:\n groupByNameTypeConNode[m['name']+m.get(\"measurementType\")+m.get(\"ConnectivityNode\")].append(m)\n\n # Create Net Phase DNP3 Points\n for grpM in groupByNameTypeConNode.values():\n\n if grpM[0]['MeasurementClass'] == \"Analog\" and grpM[0].get(\"measurementType\") == \"VA\":\n measurement_type = grpM[0].get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n \n\n name1 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VAR-value'\n name2 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-Watts-value'\n name3 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VA-value'\n\n description1 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VAR\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description2 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-Watts\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description3 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VA\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n # Create Each Phase DNP3 Points\n for m in measurements:\n attribute = attribute_map['regulators']['attribute']\n measurement_type = m.get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n name= m['name'] + '-' + m['phases']\n description = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + measurement_type + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name, description, measurement_type, measurement_id)\n self.c_ai += 1\n\n if m.get(\"measurementType\") == \"VA\":\n measurement_id = m.get(\"mRID\")\n name1 = m['name'] + '-' + m['phases'] + '-VAR-value'\n name2 = m['name'] + '-' + m['phases'] + '-Watts-value'\n name3 = m['name'] + '-' + m['phases'] + '-angle'\n\n description1 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"VAR\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description2 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"Watt\" + \",ConnectivityNode:\" 
+ m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description3 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"angle\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") + \",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n\n elif m['MeasurementClass'] == \"Discrete\" and measurement_type == \"Pos\":\n if \"RatioTapChanger\" in m['name'] or \"reg\" in m[\"SimObject\"]:\n # TODO: Do we need step?\n for r in range(5, 7): # [r==4]: Step, [r==5]: LineDropR, [r==6]:LineDropX \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, attribute[r])\n self.c_ao += 1\n else:\n self.assign_val_a(\"DI\", 1, 2, self.c_di, name, description, measurement_type, measurement_id)\n self.c_di += 1\n\n for m in capacitors:\n measurement_id = m.get(\"mRID\")\n cap_attribute = attribute_map['capacitors']['attribute'] # type: List[str]\n\n for l in range(0, 4):\n # publishing attribute value for capacitors as Bianry/Analog Input points based on phase attribute\n name = m['name']\n description = \"Name:\" + m['name'] + \"ConductingEquipment_type:LinearShuntCompensator\" + \",Attribute:\" + cap_attribute[l] + \",Phase:\" + m['phases']\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, cap_attribute[l])\n self.c_ao += 1\n for p in range(0, len(m['phases'])):\n name = m['name'] + m['phases'][p]\n description = \"Name:\" + m['name'] + \",ConductingEquipment_type:LinearShuntCompensator\" + \",controlAttribute:\" + cap_attribute[p] + \",Phase:\" + m['phases'][p]\n # description = \"Capacitor, \" + m['name'] + \",\" + \"phase -\" + m['phases'][p] + \", and attribute is - \" + cap_attribute[4]\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, cap_attribute[4])\n self.c_do += 1\n\n for m in regulators:\n reg_attribute = attribute_map['regulators']['attribute']\n # bank_phase = list(m['bankPhases'])\n for n in range(0, 4):\n measurement_id = m.get(\"mRID\")\n name = m['bankName'] + '-' + m['bankPhases']\n description = \"Name:\" + m['bankName'] + \",ConductingEquipment_type:RatioTapChanger_Reg\" +\",Phase:\" + m['bankPhases'] + \",Attribute:\" + reg_attribute[n]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id[0], reg_attribute[n])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id[0], reg_attribute[n])\n self.c_ai += 1\n for i in range(5, 7):\n for j in range(0, len(m['bankPhases'])):\n measurement_id = m.get(\"mRID\")[j]\n name = m['tankName'][j] + '-' + m['bankPhases'][j]\n description = \"Name:\" + m['tankName'][j] + \",ConductingEquipment_type:RatioTapChanger_Reg\"+ \",Phase:\" + m['bankPhases'][j] + \",controlAttribute:\" + reg_attribute[i]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id,reg_attribute[i])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id,reg_attribute[i])\n self.c_ai += 1\n \n for m in solarpanels:\n for k in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = \"Solar\" + m['name'] + '-' + m['phases'][k] + 
'-Watts-value'\n description = \"Solarpanel:\" + m['name'] + \",Phase:\" + m['phases'] + \",measurementID:\" + measurement_id\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n \n name1 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name2 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name2, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name3 = \"Solar\"+ m['name'] + '-' + m['phases'][k] + '-Watts-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name3, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n\t\t\t\n for m in batteries:\n for l in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = m['name'] + '-' + m['phases'][l] + '-Watts-value'\n description = \"Battery, \" + m['name'][l] + \",Phase: \" + m['phases'] + \",ConductingEquipment_type:PowerElectronicConnections\"\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description,measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n name1 = m['name'] + '-' + m['phases'][l] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description,measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n for m in switches:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][k]\n description = \"Name:\" + m[\"name\"] + \",ConductingEquipment_type:LoadBreakSwitch\" + \"Phase:\" + phase_value[k] +\",controlAttribute:\"+switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in fuses:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for l in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][l]\n description = \"Name:\" + m[\"name\"] + \",Phase:\" + phase_value[l] + \",Attribute:\" + switch_attribute + \",mRID\" + measurement_id\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in breakers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for n in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][n]\n description = \"Name: \" + m[\"name\"] + \",Phase:\" + phase_value[n] + \",ConductingEquipment_type:Breaker\" + \",controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n \n for m in reclosers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for i in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][i]\n description = \"Recloser, \" + m[\"name\"] + \"Phase: - \" + phase_value[i] + \",ConductingEquipment_type:Recloser\"+\"controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in 
energyconsumers:\n measurement_id = m.get(\"mRID\")\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name']+\"phase:\" + m['phases'][k]\n description = \"EnergyConsumer, \" + m[\"name\"] + \"Phase: \" + phase_value[k] \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"EnergyConsumer.p\")\n self.c_ao += 1\n \n name1 = m['name']+\"phase:\" + m['phases'][k] + \"control\"\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name1, description, measurement_id, \"EnergyConsumer.p\")\n self.c_do += 1\n\n return self.out_json",
"def make_sol_dict():\n file_names = [\"FORMAT3_Copy of KommuneMTPLforTriangle.xls\",\n \"C Triangulations analysis R2017 GC20161109.xls\",\n \"EVOLUTION 2017 _ M+F - Triangles cat nat brut net.xls\",\n \"Bsp8 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"Analysis MTPL MOD.xls\",\n \"Bsp6 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"FORMAT6_sinistres.xls\",\n \"FORMAT1_LOSSES-MTPL-OVER-500-GROUP-2005_modified.xls\"]\n solutions_dict = dict()\n raw_dict = dict()\n for file_name in file_names:\n sr_list, file_name = ExcelLoader.load_excel(pdir.RESOURCES_DIR + \"/raw_test_files/\" + file_name)\n dh = DataHolder()\n for sr in sr_list:\n dh.add_sheet(sr.sheet_name, pd.DataFrame(columns=sr.headers, data=sr.row_vals),\n pd.DataFrame(columns=sr.headers, data=sr.xls_types), orig_sheet_name=sr.sheet_name)\n\n dh = SheetPreProcessor.separate_components(dh)\n raw_dict[file_name] = dh.encode()\n dh = HorizontalMerger.horizontal_merge(dh)\n #temp_path = pdir.RESOURCES_DIR + \"/temp/\"\n #dh.write_excel(temp_path + file_name)\n solutions_dict[file_name] = dh\n solutions_dict = MergePararametersOptimizer.make_ind_col_dict(solutions_dict)\n with open(pdir.RESOURCES_DIR + \"/test/merge_solutions.obj\", \"wb\") as temp_file:\n pickle.dump(solutions_dict, temp_file)\n with open(pdir.RESOURCES_DIR + \"/test/raw_test.obj\", \"wb\") as temp_file:\n pickle.dump(raw_dict, temp_file)",
"def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()",
"def read_pronunciation(pronunciation_file):\n # file = open('dictionary.txt', 'r')\n #\n # for line in file:\n # print line\n\n ################# https://m.reddit.com/r/CompSciPortfolio/comments/303fyo/assignment_3_poetry_reader/\n\n pronunciation_dictionary = {}\n line = pronunciation_file.readline()\n while line.startswith(';;;'):\n line = pronunciation_file.readline()\n while line != '':\n stripped_line = line.strip()\n separation = stripped_line.find(' ')\n pronunciation_dictionary[stripped_line[:separation]] = stripped_line[(separation + 2):].split()\n line = pronunciation_file.readline()\n return pronunciation_dictionary\n\n\n\n # my_list = {}\n # for line in pronunciation_file.readlines():\n # line = line.strip()\n # if line and \";;;\" not in line:\n # r = line.split()\n # word = r[0]\n # phonemes = r[1:]\n # my_list[word] = phonemes\n # return my_list",
"def process_pathway_ontology(self) -> None:\n # Load pathway ontology from file\n pw = PathwayOntology(name=\"PW\",\n filename=self.pathway_ontology_file)\n pw.load_from_file()\n\n pw_dict = dict()\n\n for cl in pw.owl_classes:\n synonyms, annotations = pw.get_synonyms(cl)\n pw_dict[cl] = {\n 'name': pw.get_label(cl),\n 'aliases': pw.get_all_labels(cl) + synonyms,\n 'synonyms': annotations,\n 'definition': pw.get_definition(cl),\n 'subClassOf': pw.get_subClassOf(cl),\n 'part_of': pw.get_part_of(cl)\n }\n\n with open(self.pw_json_file, 'w') as outf:\n json.dump(pw_dict, outf, indent=4, sort_keys=True)",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CCLB').get('abstractTypes')\n exolinks = globalMap.get('CCLB').get('exolinks')\n\n # Class AtomLabel\n currentMap = {}\n abstractTypes['AtomLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'] = currentMap\n loadMaps['CCLB.AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'atomLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AtomLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AtomLabel.isotopeCode\n currentMap = {}\n contentMap['isotopeCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'] = currentMap\n loadMaps['CCLB.AtomLabel.isotopeCode'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.isotopeCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'\n currentMap['name'] = 'isotopeCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'] = currentMap\n loadMaps['CCLB.AtomLabel.name'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.subType\n currentMap = {}\n contentMap['subType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'] = currentMap\n loadMaps['CCLB.AtomLabel.subType'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.subType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'\n currentMap['name'] = 'subType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AtomLabel.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'] = currentMap\n loadMaps['CCLB.AtomLabel.weight'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role AtomLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AtomLabel\n\n currentMap = abstractTypes.get('AtomLabel')\n aList = ['isotopeCode', 'name', 'subType', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n 
currentMap['cplxAttrs'] = aList\n\n # Class ChemCompLabel\n currentMap = {}\n abstractTypes['ChemCompLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'] = currentMap\n loadMaps['CCLB.ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemCompLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemCompLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemCompLabel.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'] = currentMap\n loadMaps['CCLB.ChemCompLabel.ccpCode'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ChemCompLabel.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'] = currentMap\n loadMaps['CCLB.ChemCompLabel.molType'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ChemCompLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemCompLabel.isotopomers\n currentMap = {}\n contentMap['isotopomers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'] = currentMap\n loadMaps['CCLB.ChemCompLabel.isotopomers'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.isotopomers'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'\n currentMap['name'] = 'isotopomers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of ChemCompLabel\n\n currentMap = abstractTypes.get('ChemCompLabel')\n aList = ['ccpCode', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['isotopomers', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopomers']\n currentMap['children'] = aList\n\n # Class Isotopomer\n currentMap = {}\n abstractTypes['Isotopomer'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'] = currentMap\n loadMaps['CCLB.Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopomers'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotopomer.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute 
Isotopomer.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'] = currentMap\n loadMaps['CCLB.Isotopomer.serial'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotopomer.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'] = currentMap\n loadMaps['CCLB.Isotopomer.weight'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role Isotopomer.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Isotopomer.atomLabels\n currentMap = {}\n contentMap['atomLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'] = currentMap\n loadMaps['CCLB.Isotopomer.atomLabels'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.atomLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'\n currentMap['name'] = 'atomLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of Isotopomer\n\n currentMap = abstractTypes.get('Isotopomer')\n aList = ['serial', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['atomLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['atomLabels']\n currentMap['children'] = aList\n\n # Class LabelingScheme\n currentMap = {}\n abstractTypes['LabelingScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'] = currentMap\n loadMaps['CCLB.LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'labelingSchemes'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute LabelingScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute LabelingScheme.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'] = currentMap\n loadMaps['CCLB.LabelingScheme.details'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n 
currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute LabelingScheme.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute LabelingScheme.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'] = currentMap\n loadMaps['CCLB.LabelingScheme.longName'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute LabelingScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'] = currentMap\n loadMaps['CCLB.LabelingScheme.name'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role LabelingScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role LabelingScheme.chemCompLabels\n currentMap = {}\n contentMap['chemCompLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'] = currentMap\n loadMaps['CCLB.LabelingScheme.chemCompLabels'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.chemCompLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'\n currentMap['name'] = 'chemCompLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of LabelingScheme\n\n currentMap = abstractTypes.get('LabelingScheme')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy', 'name']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'longName']\n currentMap['simpleAttrs'] = aList\n aList = ['chemCompLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemCompLabels']\n currentMap['children'] = aList\n\n # Out-of-package link to AtomLabel\n currentMap = {}\n exolinks['AtomLabel'] = currentMap\n loadMaps['CCLB.exo-AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-AtomLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['name'] = 'AtomLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ChemCompLabel\n currentMap = {}\n exolinks['ChemCompLabel'] = currentMap\n loadMaps['CCLB.exo-ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-ChemCompLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['name'] = 'ChemCompLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n\n # Out-of-package link to Isotopomer\n currentMap = {}\n exolinks['Isotopomer'] = currentMap\n loadMaps['CCLB.exo-Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.exo-Isotopomer'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['name'] = 'Isotopomer'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to LabelingScheme\n currentMap = {}\n exolinks['LabelingScheme'] = currentMap\n loadMaps['CCLB.exo-LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.exo-LabelingScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['name'] = 'LabelingScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))",
"def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')",
"def store_wn_lookup():\n syns = list( wn.all_synsets() )\n #syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\"), syns)\n syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\").strip('\"'), syns)\n #offsets_list = [(\"n%08d\" % s.offset, s) for s in syns]\n olist = map(lambda a, b: (\"n%08d\" % a.offset, b), syns, syn_str)\n offset_dict = dict(olist)\n pickle.dump(offset_dict, open('/Users/xlx/Documents/proj/imgnet-flickr/db3/wn_offset_dict.pickle', 'wb'))",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CLAS').get('abstractTypes')\n exolinks = globalMap.get('CLAS').get('exolinks')\n\n # Class AbstractCategory\n currentMap = {}\n abstractTypes['AbstractCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'\n currentMap['eType'] = 'cplx'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.AbstractCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractCategory.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'] = currentMap\n loadMaps['CLAS.AbstractCategory.details'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute AbstractCategory.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'] = currentMap\n loadMaps['CLAS.AbstractCategory.name'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AbstractCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AbstractCategory\n\n currentMap = abstractTypes.get('AbstractCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Classification\n currentMap = {}\n abstractTypes['Classification'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'] = currentMap\n loadMaps['CLAS.Classification'] = currentMap\n currentMap['tag'] = 'CLAS.Classification'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'classifications'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'namingSystem'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Classification.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Classification.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.guid\n contentMap['guid'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute Classification.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.namingSystem\n currentMap = {}\n contentMap['namingSystem'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'] = currentMap\n loadMaps['CLAS.Classification.namingSystem'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.namingSystem'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'\n currentMap['name'] = 'namingSystem'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role Classification.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Classification.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'] = currentMap\n loadMaps['CLAS.Classification.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.experimentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'] = currentMap\n loadMaps['CLAS.Classification.hazardPhrases'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.hazardPhrases'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.holderCategorys\n currentMap = {}\n contentMap['holderCategorys'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'] = currentMap\n loadMaps['CLAS.Classification.holderCategorys'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.holderCategorys'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'\n currentMap['name'] = 'holderCategorys'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'] = currentMap\n loadMaps['CLAS.Classification.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.instrumentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'] = currentMap\n loadMaps['CLAS.Classification.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleCategories'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleComponentCategory\n currentMap = {}\n contentMap['sampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'] = currentMap\n loadMaps['CLAS.Classification.sampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleComponentCategory'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'\n currentMap['name'] = 'sampleComponentCategory'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'] = currentMap\n loadMaps['CLAS.Classification.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetScoreboards'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'] = currentMap\n loadMaps['CLAS.Classification.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetStatus'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n # End of Classification\n\n currentMap = abstractTypes.get('Classification')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['namingSystem']\n currentMap['simpleAttrs'] = aList\n aList = ['targetStatus', 'targetScoreboards', 'sampleComponentCategory', 'sampleCategories', 'instrumentTypes', 'holderCategorys', 'hazardPhrases', 'experimentTypes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['experimentTypes', 'hazardPhrases', 'holderCategorys', 'instrumentTypes', 'sampleCategories', 'sampleComponentCategory', 'targetScoreboards', 'targetStatus']\n currentMap['children'] = aList\n\n # Class 
SampleComponentCategory\n currentMap = {}\n abstractTypes['SampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'] = currentMap\n loadMaps['CLAS.SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleComponentCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponentCategory'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponentCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponentCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleComponentCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleComponentCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleComponentCategory\n\n currentMap = abstractTypes.get('SampleComponentCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ExperimentType\n currentMap = {}\n abstractTypes['ExperimentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'] = currentMap\n loadMaps['CLAS.ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'experimentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ExperimentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ExperimentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute ExperimentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role ExperimentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ExperimentType.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'] = currentMap\n loadMaps['CLAS.ExperimentType.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.instrumentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role ExperimentType.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'] = currentMap\n loadMaps['CLAS.ExperimentType.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.sampleCategories'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n 
currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of ExperimentType\n\n currentMap = abstractTypes.get('ExperimentType')\n aList = ['details', 'name', 'instrumentTypes', 'sampleCategories']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetScoreboard\n currentMap = {}\n abstractTypes['TargetScoreboard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'] = currentMap\n loadMaps['CLAS.TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetScoreboards'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetScoreboard.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetScoreboard.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetScoreboard.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetScoreboard.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetScoreboard.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'] = currentMap\n loadMaps['CLAS.TargetScoreboard.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard.targetStatus'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetScoreboard\n\n currentMap = abstractTypes.get('TargetScoreboard')\n aList = ['details', 'name', 'targetStatus']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class HolderCategory\n currentMap = {}\n abstractTypes['HolderCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'] = currentMap\n loadMaps['CLAS.HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.HolderCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'holderCategorys'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HolderCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HolderCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HolderCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role HolderCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HolderCategory\n\n currentMap = abstractTypes.get('HolderCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # 
Class HazardPhrase\n currentMap = {}\n abstractTypes['HazardPhrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'] = currentMap\n loadMaps['CLAS.HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'hazardPhrases'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HazardPhrase.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HazardPhrase.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HazardPhrase.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Attribute HazardPhrase.phrase\n currentMap = {}\n contentMap['phrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'] = currentMap\n loadMaps['CLAS.HazardPhrase.phrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase.phrase'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'\n currentMap['name'] = 'phrase'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role HazardPhrase.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HazardPhrase\n\n currentMap = abstractTypes.get('HazardPhrase')\n aList = ['details', 'name', 'phrase']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class InstrumentType\n currentMap = {}\n abstractTypes['InstrumentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'] = currentMap\n loadMaps['CLAS.InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'instrumentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute InstrumentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute InstrumentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute InstrumentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role InstrumentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role InstrumentType.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'] = currentMap\n loadMaps['CLAS.InstrumentType.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of 
InstrumentType\n\n currentMap = abstractTypes.get('InstrumentType')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleCategory\n currentMap = {}\n abstractTypes['SampleCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'] = currentMap\n loadMaps['CLAS.SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleCategories'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleCategory.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'] = currentMap\n loadMaps['CLAS.SampleCategory.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of SampleCategory\n\n currentMap = abstractTypes.get('SampleCategory')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetStatus\n currentMap = {}\n abstractTypes['TargetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'] = currentMap\n loadMaps['CLAS.TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetStatus'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetStatus.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetStatus.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetStatus.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetStatus.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetStatus.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'] = currentMap\n loadMaps['CLAS.TargetStatus.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus.targetScoreboards'\n currentMap['type'] = 'link'\n 
currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetStatus\n\n currentMap = abstractTypes.get('TargetStatus')\n aList = ['details', 'name', 'targetScoreboards']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to Classification\n currentMap = {}\n exolinks['Classification'] = currentMap\n loadMaps['CLAS.exo-Classification'] = currentMap\n currentMap['tag'] = 'CLAS.exo-Classification'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['name'] = 'Classification'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to SampleComponentCategory\n currentMap = {}\n exolinks['SampleComponentCategory'] = currentMap\n loadMaps['CLAS.exo-SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleComponentCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['name'] = 'SampleComponentCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to ExperimentType\n currentMap = {}\n exolinks['ExperimentType'] = currentMap\n loadMaps['CLAS.exo-ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-ExperimentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['name'] = 'ExperimentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetScoreboard\n currentMap = {}\n exolinks['TargetScoreboard'] = currentMap\n loadMaps['CLAS.exo-TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetScoreboard'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['name'] = 'TargetScoreboard'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HolderCategory\n currentMap = {}\n exolinks['HolderCategory'] = currentMap\n loadMaps['CLAS.exo-HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HolderCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['name'] = 'HolderCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n aList = list()\n currentMap['keyMaps'] = aList\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HazardPhrase\n currentMap = {}\n exolinks['HazardPhrase'] = currentMap\n loadMaps['CLAS.exo-HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HazardPhrase'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['name'] = 'HazardPhrase'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to InstrumentType\n currentMap = {}\n exolinks['InstrumentType'] = currentMap\n loadMaps['CLAS.exo-InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-InstrumentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['name'] = 'InstrumentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleCategory\n currentMap = {}\n exolinks['SampleCategory'] = currentMap\n loadMaps['CLAS.exo-SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['name'] = 'SampleCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetStatus\n currentMap = {}\n exolinks['TargetStatus'] = currentMap\n loadMaps['CLAS.exo-TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetStatus'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['name'] = 'TargetStatus'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))",
"def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()",
"def build_article_map(f='./wikispeedia_paths-and-graph/articles.tsv'):\n out_dict = {}\n count = 0\n with open(f, 'r') as r:\n for _ in xrange(12):\n next(r)\n for line in r:\n out_dict[line.strip('\\n')] = count\n count += 1\n return out_dict",
"def get_dictionary(filename):\n asop_dict = {}\n # Defaults for standard observational data\n if 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc' in filename or \\\n 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc' in filename:\n asop_dict['infile'] = filename\n asop_dict['name'] = ''\n asop_dict['dt'] = 10800\n asop_dict['dx'] = 27\n asop_dict['dy'] = 27\n asop_dict['constraint'] = 'precipitation'\n asop_dict['scale_factor'] = 8.0\n asop_dict['legend_name'] = ''\n asop_dict['region'] = [-10,10,60,90]\n asop_dict['box_size'] = 1680\n asop_dict['color'] = 'red'\n asop_dict['region_size'] = 7\n asop_dict['lag_length'] = 6\n asop_dict['grid_type'] = 'native'\n asop_dict['time_type'] = '3hr'\n asop_dict['grid_desc'] = 'native'\n asop_dict['time_desc'] = '3-hourly'\n asop_dict['autocorr_length'] = 60*60*24\n else:\n asop_dict=build_asop_dict(filename)\n return(asop_dict)",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CHEL').get('abstractTypes')\n exolinks = globalMap.get('CHEL').get('exolinks')\n\n # DataType HalfLifeType\n currentMap = {}\n abstractTypes['HalfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'] = currentMap\n loadMaps['CHEL.HalfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.HalfLifeType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class ChemElement\n currentMap = {}\n abstractTypes['ChemElement'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'] = currentMap\n loadMaps['CHEL.ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElements'\n currentMap['objkey'] = 'symbol'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElement.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElement.atomNumber\n currentMap = {}\n contentMap['atomNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'] = currentMap\n loadMaps['CHEL.ChemElement.atomNumber'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'\n currentMap['name'] = 'atomNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute ChemElement.atomicRadius\n currentMap = {}\n contentMap['atomicRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'] = currentMap\n loadMaps['CHEL.ChemElement.atomicRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomicRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'\n currentMap['name'] = 'atomicRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.covalentRadius\n currentMap = {}\n contentMap['covalentRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'] = currentMap\n loadMaps['CHEL.ChemElement.covalentRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.covalentRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'\n currentMap['name'] = 'covalentRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'] = currentMap\n loadMaps['CHEL.ChemElement.mass'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'\n 
currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'] = currentMap\n loadMaps['CHEL.ChemElement.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Attribute ChemElement.symbol\n currentMap = {}\n contentMap['symbol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'] = currentMap\n loadMaps['CHEL.ChemElement.symbol'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.symbol'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'\n currentMap['name'] = 'symbol'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Role ChemElement.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElement.isotopes\n currentMap = {}\n contentMap['isotopes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'] = currentMap\n loadMaps['CHEL.ChemElement.isotopes'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.isotopes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'\n currentMap['name'] = 'isotopes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElement\n\n currentMap = abstractTypes.get('ChemElement')\n aList = ['atomNumber', 'atomicRadius', 'covalentRadius', 'mass', 'name', 'symbol']\n currentMap['headerAttrs'] = aList\n aList = ['isotopes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopes']\n currentMap['children'] = aList\n\n # Class ChemElementStore\n currentMap = {}\n abstractTypes['ChemElementStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'] = currentMap\n loadMaps['CHEL.ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElementStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElementStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElementStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute ChemElementStore.isModifiable\n contentMap['isModifiable'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'] = currentMap\n loadMaps['CHEL.ChemElementStore.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ChemElementStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElementStore.chemElements\n currentMap = {}\n contentMap['chemElements'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'] = currentMap\n loadMaps['CHEL.ChemElementStore.chemElements'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.chemElements'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'\n currentMap['name'] = 'chemElements'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElementStore\n\n currentMap = abstractTypes.get('ChemElementStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['chemElements', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemElements']\n currentMap['children'] = aList\n\n # Class Isotope\n currentMap = {}\n abstractTypes['Isotope'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'] = currentMap\n loadMaps['CHEL.Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopes'\n currentMap['objkey'] = 'massNumber'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotope.abundance\n currentMap = {}\n contentMap['abundance'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'] = currentMap\n loadMaps['CHEL.Isotope.abundance'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.abundance'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'\n currentMap['name'] = 'abundance'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00058')\n\n # Attribute Isotope.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotope.gyroMagneticRatio\n currentMap = {}\n contentMap['gyroMagneticRatio'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'] = currentMap\n 
loadMaps['CHEL.Isotope.gyroMagneticRatio'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.gyroMagneticRatio'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'\n currentMap['name'] = 'gyroMagneticRatio'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.halfLife\n currentMap = {}\n contentMap['halfLife'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'] = currentMap\n loadMaps['CHEL.Isotope.halfLife'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLife'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'\n currentMap['name'] = 'halfLife'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeError\n currentMap = {}\n contentMap['halfLifeError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeError'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'\n currentMap['name'] = 'halfLifeError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeType\n currentMap = {}\n contentMap['halfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'\n currentMap['name'] = 'halfLifeType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'unknown'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002')\n\n # Attribute Isotope.magneticMoment\n currentMap = {}\n contentMap['magneticMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'] = currentMap\n loadMaps['CHEL.Isotope.magneticMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.magneticMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'\n currentMap['name'] = 'magneticMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'] = currentMap\n loadMaps['CHEL.Isotope.mass'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.massNumber\n currentMap = {}\n contentMap['massNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'] = currentMap\n loadMaps['CHEL.Isotope.massNumber'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.massNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'\n currentMap['name'] = 
'massNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotope.quadrupoleMoment\n currentMap = {}\n contentMap['quadrupoleMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'] = currentMap\n loadMaps['CHEL.Isotope.quadrupoleMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.quadrupoleMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'\n currentMap['name'] = 'quadrupoleMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.receptivity\n currentMap = {}\n contentMap['receptivity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'] = currentMap\n loadMaps['CHEL.Isotope.receptivity'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.receptivity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'\n currentMap['name'] = 'receptivity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.spin\n currentMap = {}\n contentMap['spin'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'] = currentMap\n loadMaps['CHEL.Isotope.spin'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.spin'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'\n currentMap['name'] = 'spin'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Isotope.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Isotope\n\n currentMap = abstractTypes.get('Isotope')\n aList = ['abundance', 'gyroMagneticRatio', 'halfLife', 'halfLifeError', 'halfLifeType', 'magneticMoment', 'mass', 'massNumber', 'quadrupoleMoment', 'receptivity', 'spin']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to ChemElement\n currentMap = {}\n exolinks['ChemElement'] = currentMap\n loadMaps['CHEL.exo-ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElement'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['name'] = 'ChemElement'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n\n # Out-of-package link to ChemElementStore\n currentMap = {}\n exolinks['ChemElementStore'] = currentMap\n loadMaps['CHEL.exo-ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElementStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['name'] = 'ChemElementStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to Isotope\n currentMap 
= {}\n exolinks['Isotope'] = currentMap\n loadMaps['CHEL.exo-Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.exo-Isotope'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['name'] = 'Isotope'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))",
"def file_to_dictionary():\n\n return;",
"def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()",
"def get_wiki_synonyms_mapping() -> Dict[str, str]:\n with open(Config.Path.synonym_mapping, \"r\") as f:\n return json.load(f)",
"def label_maps_from_file(path_to_summary, label_mapping_code, separate_apostrophe_embedding, saved_dict=False):\n with open(path_to_summary, 'r') as mapping_file:\n mapping_list = mapping_file.readlines()\n\n apostrophe_options = find_apostrophe_options(mapping_list)\n\n label_map = {}\n for line in mapping_list:\n split_line = line.split()\n # Design the mapping such that the mapping code can be used to index the list\n # code: [code, phonetic English, native characters]\n code = split_line[1]\n code_options = [code, split_line[-1], split_line[0]]\n label_map[code] = code_options[label_mapping_code]\n if not separate_apostrophe_embedding:\n label_map[code + APOSTROPHE_TOKEN] = code_options[label_mapping_code] + apostrophe_options[label_mapping_code]\n # Add special characters sp and sil\n label_map['sp'] = 'sp'\n label_map['sil'] = 'sil'\n # Add special characters for the two hesitation characters\n label_map['G00'] = 'G00'\n label_map['G01'] = 'G01'\n return label_map",
"def create_mapping_file(options):\n\n mapping_file = open(os.path.splitext(options.bco)[0] + \"mapping.txt\", 'w')\n mapping_file.writelines(\n\"\"\"# Use this file to provide mapping values for a bco.\n# MISSING PROPERTIES/FIELDS lists properties/fields that are missing from bco\n# NONALLOWED PROPERTIES/FIELDS shows properties that are not allowed\n# Syntax for specifying values\n# To delete a value\n# PATH --> FIELD: DELETE\n# To add a value\n# PATH --> FIELD: ADD-value_to_add\n# To rename a field name\n# PATH --> FIELD: RENAME-new_field_name\n# To swap a field name with another current field name\n# PATH --> FIELD: SWAP-other_field_name\n# Blank values will be skipped. Data does not need to be double represented\n# For example, \n# if <bco_id> needs renamed to <object_id>, either\n# ['object_id'] --> object_id: \n# SWAP-bco_id\n# OR \n# ['bco_id'] --> bco_id: RENAME:object_id \n# will work. No need to fill out both values.\n\"\"\"\n)\n validate_bco(options)\n\n missing_reg = r'(.*?) is a required property' # missing required property\n additional_reg = r'Additional properties are not allowed (.*?)' # unalloewd extra property\n\n attribute_reg = r\"'(.*?)'\" # getting an attribute (field surronded by single quotes)\n index_reg = r\"On instance(.*?)\" # getting key path\n\n failed_validation_reg = r'Failed validating (.*?)' # invalid type\n\n missing = []\n additional = []\n invalid = []\n\n path = {}\n\n with open('error.log') as errors:\n for line in errors:\n if re.match(missing_reg, line): # if there is a missing property\n to_add = re.findall(attribute_reg, line)\n for match in to_add:\n missing.append(match)\n elif re.match(additional_reg, line): # if there is an additional property\n to_add = re.findall(attribute_reg, line)\n for match in to_add:\n additional.append(match)\n elif re.match(failed_validation_reg, line): # if a property is invalid\n # additional and required properties are already represnted by the above regexes,\n # so skip\n if line.__contains__(\"'additionalProperties'\") is False \\\n and line.__contains__(\"'required'\") is False:\n to_add = [line.split(\"schema\")[1].split(\"['\")[-1].strip(\"']:\\n\")]\n invalid.append(to_add[0])\n\n # field contains an index for some attribute\n # this attribute will be the last attribute found the above regexes, and is stored in\n # to_add\n if re.match(index_reg, line):\n keys = \"\"\n index_path = line.removeprefix(\"On instance\").removesuffix(\":\\n\")\n if index_path is not None:\n keys = str(index_path)\n if len(to_add) > 0: # if there are any attributes to add\n for item in to_add:\n add_or_update_list_HELPER(path, str(item), keys + \"['\" + str(item) +\n \"']\")\n to_add = [] # reset to_add\n mapping_file.write(\"====MISSING PROPERTIES/FIELDS====\\n\")\n for attribute in missing:\n mapping_file.write(str(path[attribute][0]) + \"-->\" + str(attribute) + \":\\n\")\n path[attribute].pop(0)\n\n mapping_file.write(\"====NONALLOWED PROPERTIES/FIELDS====\\n\")\n for attribute in additional:\n mapping_file.write(str(path[attribute][0]) + \"-->\" + str(attribute) + \":\\n\")\n path[attribute].pop(0)\n for attribute in invalid:\n mapping_file.write(str(path[attribute][0]).split(\"]\")[0]\n + \"]-->\" + str(attribute) + \":\\n\")\n path[attribute].pop(0)\n\n return mapping_file.name",
"def make_lex_dict(self):\n lex_dict = {}\n for line in self.lexicon_full_filepath.split('\\n'):\n sp = line.strip().split('\\t')\n if(len(sp) > 1):\n (word, measure) = line.strip().split('\\t')[0:2]\n lex_dict[word] = float(measure)\n return lex_dict"
] | [
"0.61954165",
"0.5963542",
"0.5888741",
"0.5863957",
"0.58637106",
"0.58131486",
"0.5805316",
"0.57554185",
"0.5731265",
"0.57232267",
"0.5722358",
"0.5715003",
"0.5702462",
"0.5668924",
"0.56644607",
"0.5656563",
"0.56421024",
"0.56056917",
"0.55921763",
"0.558435",
"0.5583649",
"0.55582",
"0.55463654",
"0.5545056",
"0.55318254",
"0.55279696",
"0.5525093",
"0.5518909",
"0.5515069",
"0.55044395"
] | 0.6826442 | 0 |
! Merge two given graphs, namely the synsets graph and the SUMO graph. The final graph contains one type of node, namely synset nodes. Each synset node has an attribute named "synset", | def merge(self, g1, g2):
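        # Work on a copy of the synset graph g1; edges derived from the SUMO graph g2 are added to it below.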
logger = logging.getLogger(__name__)
g = BaseGraph()
g.copy_graph_from(g1)
plwn2sumo_dict = defaultdict(set)
plwn2sumo_dict = self.get_plwn2sumo_dict()
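        # Index the synset nodes of the copied graph by synset ID so that SUMO edges can be mapped onto them.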
synset_on_vertex_dict = {}
for node in g.all_nodes():
synset_id = node.synset.synset_id
if synset_id in synset_on_vertex_dict:
logger.warning("ID of some synset is not unique.")
continue
synset_on_vertex_dict[synset_id] = node
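        # Translate every SUMO edge into edges between the corresponding synset nodes via the SUMO-to-synset mapping.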
num_of_edge = 0
for edge in g2.all_edges():
num_of_edge += 1
logger.info("%d/%d", num_of_edge, g2.num_edges())
parent_sumo_concept = edge.source().sumo
child_sumo_concept = edge.target().sumo
if parent_sumo_concept not in plwn2sumo_dict:
logger.warning("The mapping file doesn't contain sumo concept '%s'.", parent_sumo_concept)
continue
if child_sumo_concept not in plwn2sumo_dict:
logger.warning("The mapping file doesn't contain sumo concept '%s'.", child_sumo_concept)
continue
for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:
if parent_syn_id not in synset_on_vertex_dict:
logger.warning("The mapping file contains synset '%d' that is not in the graph.", parent_syn_id)
continue
p_node = synset_on_vertex_dict[parent_syn_id]
for child_syn_id in plwn2sumo_dict[child_sumo_concept]:
if child_syn_id not in synset_on_vertex_dict:
logger.warning("The mapping file contains synset '%d' that is not in the graph.", child_syn_id)
continue
ch_node = synset_on_vertex_dict[child_syn_id]
g.add_edge(p_node,
ch_node,
[("rel", edge.rel)],
simply=True)
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)",
"def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)",
"def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result",
"def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g",
"def merge_graphs(graphs, G=nx.Graph(), contig=None, coords=None):\n for graph in graphs:\n G = append_graph(G, graph, contig=contig, coords=coords)\n return G",
"def sub_graph_merging(self):",
"def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1",
"def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))",
"def concatenate_graphs(G1, G2):\n V = G1.V + G2.V\n edges = np.vstack((G1.edges, G1.V + G2.edges))\n weights = np.hstack((G1.weights, G2.weights))\n G = WeightedGraph(V, edges, weights)\n return G",
"def merge(self, ASGgraph ):\r\n \r\n self.mergedASG.append(ASGgraph)\t\t\t\t\t# add the graph to the list of merged graphs\r\n for nodeType in ASGgraph.listNodes.keys():\r\n if not nodeType in self.listNodes.keys():\t\t\t# node type was not known\r\n self.listNodes[nodeType] = ASGgraph.listNodes[nodeType]\r\n self.nodeTypes.append(nodeType)\r\n else: \t# node type existed...\r\n for node in ASGgraph.listNodes[nodeType]:\t\t\t# add each node of merged graph to actual graph\r\n self.listNodes[nodeType].append(node)\r\n \r\n # copy also the model's attribute\r\n errors = []\r\n for attr in ASGgraph.generatedAttributes.keys():\r\n if attr in self.generatedAttributes.keys(): # Attribute is present!\r\n #print \"Attribute collision for \", attr, \"<-- New attribute value ignored\" \r\n errors.append(attr)\r\n if( not self.__collidedAttributeTracker.has_key( attr ) ):\r\n self.__collidedAttributeTracker[ attr ] = 1\r\n else:\r\n self.__collidedAttributeTracker[ attr ] += 1\r\n continue\r\n self.generatedAttributes[attr] = ASGgraph.generatedAttributes[attr]\r\n # now create the attribute!\r\n self.setAttrValue(attr, ASGgraph.getAttrValue(attr).clone())\r\n if( errors ):\r\n print 'Attribute name collisions occured during load (could affect '\\\r\n + 'old formalisms)\\nThe following attributes collided: '\\\r\n + str(errors) \r\n ## print 'In fact, these messages are slated for removal, as this ' \\\r\n ## 'attribute system is being bypassed to fix this problem'\r",
"def merge_pores(self, n1, n2, setcategory='union', radius=None, center=None, check_throats=True, inner_category='inner', verbose=False):\n\n if not self.graph.has_node(n1) or not self.graph.has_node(n2):\n warn(\"Nodes {} or {} does not exist. Cannot merge them\".format(u, v))\n return\n elif verbose:\n print(\"Merging pore {} and {}\".format(n1, n2))\n\n if center is not None:\n self.graph.nodes[n1]['center'] = center\n\n if radius is not None:\n self.graph.nodes[n1]['radius'] = radius\n\n category = self.graph.nodes[n2]['category']\n\n if setcategory == 'union':\n self.graph.nodes[n1]['category'] = self.graph.nodes[n1]['category'].union(\n category)\n if len(self.graph.nodes[n1]['category']) > 1 and 'inner' in self.graph.nodes[n1]['category']:\n self.graph.nodes[n1]['category'] = self.graph.nodes[n1]['category'].difference(\n set(['inner']))\n\n # if not G.has_edge(u,v):\n # warn(\"Nodes {} and {} will be merged but they are not adjacent\".format(u,v))\n\n # Warning : here we just copy the old edge attributes to the new one, so that the attributes are already defined.\n # The values must however be checked !\n new_edges = [(n1, n3, d)\n for _, n3, d in self.graph.edges(n2, data=True)\n if (n3 != n1 and n3 != n2)]\n try:\n self.graph.add_edges_from(new_edges)\n except:\n warn(\n 'Error trying to create new edges when merging pores {} and {}'.format(n1, n2))\n warn('Edges list {}'.format(new_edges))\n\n self.graph.remove_node(n2)\n\n if check_throats:\n for n3 in self.graph[n1]:\n self._compute_auto_throat_length(n1, n3)\n self._compute_auto_throat_radius(n1, n3)",
"def merge_networks_in_series(n1, n2):\n new_l_size = n1.l_size + n2.l_size + 1 # One additional vertex in between.\n new_u_size = n1.u_size + n2.u_size\n\n # Connect the 0-pole and the inf-pole in the result network.\n new_link_edge = n1.zero_pole.insert_before()\n new_link_edge_opp = n2.inf_pole.insert_after()\n new_link_edge.opposite = new_link_edge_opp\n new_link_edge_opp.opposite = new_link_edge\n\n # Merge the 0-pole of n1 with the inf-pole of n2.\n n1.inf_pole.insert_all_after(n2.zero_pole)\n\n # Remove the link edges in n1 and n2 if they are not real.\n if not n1.is_linked:\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n if not n2.is_linked:\n n2.zero_pole.remove()\n n2.inf_pole.remove()\n\n # After a serial merge the poles are never linked.\n res = Network(new_link_edge, is_linked=False, l_size=new_l_size, u_size=new_u_size)\n res.type = 'S'\n return res\n\n # # Extract the poles from both networks.\n # first_net_zero_pole_edge = n1.zero_pole\n # first_net_inf_pole_edge = n1.inf_pole\n #\n # second_net_zero_pole_edge = n2.zero_pole\n # second_net_inf_pole_edge = n2.inf_pole\n #\n # # Create a new half edges for connecting the poles of the network. The\n # # edge will not be part from the edges list.\n # new_root_half_edge = first_net_zero_pole_edge.insert_after()\n # new_root_opposite = second_net_inf_pole_edge.insert_after()\n #\n # new_root_half_edge.opposite = new_root_opposite\n # new_root_opposite.opposite = new_root_half_edge\n #\n # # Get the half edges from both networks for merging\n # first_net_inf_pole_prior = first_net_inf_pole_edge.prior\n # second_net_zero_pole_edge_prior = second_net_zero_pole_edge.prior\n #\n # # Merge the both networks so that the inf-pole from the first network is\n # # identified with the zero-pole from the second one. Handling different\n # # while merging the two networks.\n # first_net_inf_pole_edge.prior = second_net_zero_pole_edge_prior\n # second_net_zero_pole_edge_prior.next = first_net_inf_pole_edge\n #\n # first_net_inf_pole_prior.next = second_net_zero_pole_edge\n # second_net_zero_pole_edge.prior = first_net_inf_pole_prior\n #\n # # Update the node numbers in the second network zero-pole edges\n # half_edge_walker = first_net_inf_pole_prior.next\n # while half_edge_walker != first_net_inf_pole_prior:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Check whether the original poles of the network that are merged are\n # # linked or not. If they are not linked then the corresponding half\n # # edges between them have to be removed.\n # if not n1.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # first_net_zero_pole_edge.remove()\n # first_net_inf_pole_edge.remove()\n #\n # if not n2.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # second_net_zero_pole_edge.remove()\n # second_net_inf_pole_edge.remove()\n #\n # # After a serial merge the poles are never linked.\n # res = Network(new_root_half_edge, is_linked=False,\n # l_size=new_l_size, u_size=new_u_size)\n # res.type = 'S'\n # return res",
"def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val",
"def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes",
"def union(*graphs):\n from sets import Set\n out = {}\n for G in graphs:\n for v in G:\n out.setdefault(v,Set()).update(list(G[v]))\n return out",
"def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1",
"def merge(self, a, b):\n old_id, target_id = sorted((self.node_id[a], self.node_id[b]), key = lambda id: len(self.groups[id]))\n for node in self.groups[old_id]:\n self.node_id[node] = target_id\n self.groups[target_id] |= self.groups[old_id]\n del self.groups[old_id]",
"def combine_graphs(\n graphs: List[dgl.DGLGraph],\n atom_map_number: List[List[int]],\n bond_map_number: List[List[int]],\n) -> dgl.DGLGraph:\n\n # Batch graph structure for each relation graph\n\n relations = graphs[0].canonical_etypes\n ntypes = graphs[0].ntypes\n\n edges_dict = defaultdict(list)\n num_nodes_dict = defaultdict(int)\n\n # reorder atom nodes\n for i, g in enumerate(graphs):\n for rel in relations:\n srctype, etype, dsttype = rel\n u, v, eid = g.edges(form=\"all\", order=\"eid\", etype=rel)\n\n # deal with nodes (i.e. atom and optionally global)\n if srctype == \"atom\":\n src = [atom_map_number[i][j] for j in u]\n else:\n # global nodes\n src = u + num_nodes_dict[srctype]\n src = src.numpy().tolist()\n\n if dsttype == \"atom\":\n dst = [atom_map_number[i][j] for j in v]\n else:\n # global nodes\n dst = v + num_nodes_dict[dsttype]\n dst = dst.numpy().tolist()\n\n edges_dict[rel].extend([(s, d) for s, d in zip(src, dst)])\n\n for ntype in ntypes:\n num_nodes_dict[ntype] += g.number_of_nodes(ntype)\n\n # reorder bond edges (bond edges)\n bond_map_number_list = []\n for i in itertools.chain.from_iterable(bond_map_number):\n bond_map_number_list.extend([2 * i, 2 * i + 1])\n bond_reorder = [\n bond_map_number_list.index(i) for i in range(len(bond_map_number_list))\n ]\n\n rel = (\"atom\", \"bond\", \"atom\")\n a2a_edges = edges_dict.pop(rel)\n a2a_edges = [a2a_edges[i] for i in bond_reorder]\n\n edges_dict[rel] = a2a_edges\n\n # create graph\n new_g = dgl.heterograph(edges_dict, num_nodes_dict=num_nodes_dict)\n\n # Batch features\n\n # reorder node features (atom and global)\n atom_map_number_list = list(itertools.chain.from_iterable(atom_map_number))\n atom_reorder = [\n atom_map_number_list.index(i) for i in range(len(atom_map_number_list))\n ]\n\n for ntype in graphs[0].ntypes:\n feat_dicts = [g.nodes[ntype].data for g in graphs]\n\n # concatenate features\n keys = feat_dicts[0].keys()\n new_feats = {k: torch.cat([fd[k] for fd in feat_dicts], 0) for k in keys}\n\n # reorder atom features\n if ntype == \"atom\":\n new_feats = {k: v[atom_reorder] for k, v in new_feats.items()}\n\n new_g.nodes[ntype].data.update(new_feats)\n\n # reorder edge features (bond)\n\n for etype in graphs[0].etypes:\n feat_dicts = [g.edges[etype].data for g in graphs]\n\n # concatenate features\n keys = feat_dicts[0].keys()\n new_feats = {k: torch.cat([fd[k] for fd in feat_dicts], 0) for k in keys}\n\n if etype == \"bond\":\n new_feats = {k: v[bond_reorder] for k, v in new_feats.items()}\n\n new_g.edges[etype].data.update(new_feats)\n\n # add _ID to atom feature\n new_g.nodes[\"atom\"].data[\"_ID\"] = torch.arange(new_g.num_nodes(\"atom\"))\n\n return new_g",
"def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)",
"def synsets_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n # Compute similarity\n if len(synsets_sentence_1) != 0 and len(synsets_sentence_2) != 0:\n similarity = 1 - jaccard_distance(set(synsets_sentence_1), set(synsets_sentence_2))\n return similarity\n else:\n return 0",
"def coco_union(dsets):\n merged = ub.odict([\n ('categories', []),\n ('licenses', []),\n ('info', []),\n ('images', []),\n ('annotations', []),\n ])\n\n merged_cat_name_to_id = {}\n\n def update_ifnotin(d1, d2):\n \"\"\" copies keys from d2 that doent exist in d1 into d1 \"\"\"\n for k, v in d2.items():\n if k not in d1:\n d1[k] = v\n return d1\n\n for key, old_dset in dsets.items():\n # hack: in our case the key is the subdir\n subdir = key\n\n # Create temporary indexes to map from old to new\n cat_id_map = {}\n img_id_map = {}\n\n # Add the licenses / info into the merged dataset\n # Licenses / info are unused in our datas, so this might not be correct\n merged['licenses'].extend(old_dset['licenses'])\n merged['info'].extend(old_dset['info'])\n\n # Add the categories into the merged dataset\n for old_cat in old_dset['categories']:\n new_id = merged_cat_name_to_id.get(old_cat['name'], None)\n if new_id is None:\n # The same category might exist in different datasets.\n new_id = len(merged_cat_name_to_id) + 1\n merged_cat_name_to_id[old_cat['name']] = new_id\n\n new_cat = ub.odict([\n ('id', new_id),\n ('name', old_cat['name']),\n ('supercategory', old_cat['supercategory']),\n ])\n update_ifnotin(new_cat, old_cat)\n cat_id_map[old_cat['id']] = new_cat['id']\n merged['categories'].append(new_cat)\n\n # Add the images into the merged dataset\n for old_img in old_dset['images']:\n new_img = ub.odict([\n ('id', len(merged['images']) + 1),\n ('file_name', join(subdir, old_img['file_name'])),\n ])\n # copy over other metadata\n update_ifnotin(new_img, old_img)\n img_id_map[old_img['id']] = new_img['id']\n merged['images'].append(new_img)\n\n # Add the annotations into the merged dataset\n for old_annot in old_dset['annotations']:\n old_cat_id = old_annot['category_id']\n old_img_id = old_annot['image_id']\n new_cat_id = cat_id_map.get(old_cat_id, None)\n new_img_id = img_id_map.get(old_img_id, None)\n if new_cat_id is None:\n continue\n print('annot {} in {} has bad category-id {}'.format(old_annot['id'], key, old_cat_id))\n if new_img_id is None:\n continue\n print('annot {} in {} has bad image-id {}'.format(old_annot['id'], key, old_img_id))\n new_annot = ub.odict([\n ('id', len(merged['annotations']) + 1),\n ('image_id', new_img_id),\n ('category_id', new_cat_id),\n ])\n update_ifnotin(new_annot, old_annot)\n merged['annotations'].append(new_annot)\n return merged",
"def union(set1, set2):",
"def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})",
"def graph_union(g1, g2, intersection=None, props=None, include=False,\n internal_props=False):\n pnames = None\n if props is None:\n props = []\n if internal_props:\n pnames = []\n for (k, name), p1 in g1.properties.items():\n if k == 'g':\n continue\n p2 = g2.properties.get((k, name), None)\n props.append((p1, p2))\n pnames.append(name)\n for (k, name), p2 in g2.properties.items():\n if k == 'g' or (k, name) in g1.properties:\n continue\n props.append((None, p2))\n pnames.append(name)\n gprops = [[(name, g1.properties[('g', name)]) for name in g1.graph_properties.keys()],\n [(name, g2.properties[('g', name)]) for name in g2.graph_properties.keys()]]\n if not include:\n g1 = GraphView(g1, skip_properties=True)\n p1s = []\n for i, (p1, p2) in enumerate(props):\n if p1 is None:\n continue\n if p1.key_type() == \"v\":\n g1.vp[str(i)] = p1\n elif p1.key_type() == \"e\":\n g1.ep[str(i)] = p1\n\n g1 = Graph(g1, prune=True)\n\n for i, (p1, p2) in enumerate(props):\n if p1 is None:\n continue\n if str(i) in g1.vp:\n props[i] = (g1.vp[str(i)], p2)\n del g1.vp[str(i)]\n else:\n props[i] = (g1.ep[str(i)], p2)\n del g1.ep[str(i)]\n else:\n emask, emask_flip = g1.get_edge_filter()\n emask_flipped = False\n if emask is not None and not emask_flip:\n emask.a = numpy.logical_not(emask.a)\n emask_flipped = True\n g1.set_edge_filter(emask, True)\n\n vmask, vmask_flip = g1.get_vertex_filter()\n vmask_flipped = False\n if vmask is not None and not vmask_flip:\n vmask.a = not vmask.a\n g1.set_vertex_filter(vmask, True)\n vmask_flipped = True\n\n if intersection is None:\n intersection = g2.new_vertex_property(\"int64_t\", -1)\n else:\n intersection = intersection.copy(\"int64_t\")\n\n u1 = GraphView(g1, directed=True, skip_properties=True)\n u2 = GraphView(g2, directed=True, skip_properties=True)\n\n vmap, emap = libgraph_tool_generation.graph_union(u1._Graph__graph,\n u2._Graph__graph,\n _prop(\"v\", g1,\n intersection))\n\n if include:\n emask, emask_flip = g1.get_edge_filter()\n if emask is not None and emask_flipped:\n emask.a = numpy.logical_not(emask.a)\n g1.set_edge_filter(emask, False)\n\n vmask, vmask_flip = g1.get_vertex_filter()\n if vmask is not None and vmask_flipped:\n vmask.a = numpy.logical_not(vmask.a)\n g1.set_vertex_filter(vmask, False)\n\n n_props = []\n for p1, p2 in props:\n if p1 is None:\n p1 = g1.new_property(p2.key_type(), p2.value_type())\n if p2 is None:\n p2 = g2.new_property(p1.key_type(), p1.value_type())\n if not include:\n p1 = g1.copy_property(p1)\n if p2.value_type() != p1.value_type():\n p2 = g2.copy_property(p2, value_type=p1.value_type())\n if p1.key_type() == 'v':\n libgraph_tool_generation.\\\n vertex_property_union(u1._Graph__graph, u2._Graph__graph,\n vmap, emap,\n _prop(p1.key_type(), g1, p1),\n _prop(p2.key_type(), g2, p2))\n else:\n libgraph_tool_generation.\\\n edge_property_union(u1._Graph__graph, u2._Graph__graph,\n vmap, emap,\n _prop(p1.key_type(), g1, p1),\n _prop(p2.key_type(), g2, p2))\n n_props.append(p1)\n\n if pnames is not None:\n for name, p in zip(pnames, n_props):\n g1.properties[(p.key_type(), name)] = p\n if not include:\n for name, p in gprops[0]:\n g1.graph_properties[name] = p.copy()\n for name, p in gprops[1]:\n if name not in g1.graph_properties:\n g1.graph_properties[name] = p.copy()\n n_props = []\n\n if len(n_props) > 0:\n return g1, n_props\n else:\n return g1",
"def merge_graphs(\n graph: DiGraph,\n ) -> Tuple[list[str], GraphAccess, Generator[Tuple[str, GraphAccess], None, None]]:\n\n # Find merge nodes: all nodes that are marked as merge node -> all children (merge roots) should be merged.\n # This method returns all merge roots as key, with the respective predecessor nodes as value.\n def merge_roots() -> dict[str, set[str]]:\n graph_root = GraphAccess.root_id(graph)\n merge_nodes = [node_id for node_id, data in graph.nodes(data=True) if data.get(\"merge\", False)]\n assert len(merge_nodes) > 0, \"No merge nodes provided in the graph. Mark at least one node with merge=true!\"\n result: dict[str, set[str]] = {}\n for node in merge_nodes:\n # compute the shortest path from root to here and sort out all successors that are also predecessors\n pres: set[str] = reduce(lambda res, p: res | set(p), all_shortest_paths(graph, graph_root, node), set())\n for a in graph.successors(node):\n if a not in pres:\n result[a] = pres\n return result\n\n # Walk the graph from given starting node and return all successors.\n # A successor which is also a predecessor is not followed.\n def sub_graph_nodes(from_node: str, parent_ids: set[str]) -> set[str]:\n to_visit = [from_node]\n visited: set[str] = {from_node}\n\n def successors(node: str) -> list[str]:\n return [a for a in graph.successors(node) if a not in visited and a not in parent_ids]\n\n while to_visit:\n to_visit = reduce(lambda li, node: li + successors(node), to_visit, [])\n visited.update(to_visit)\n return visited\n\n # Create a generator for all given merge roots by:\n # - creating the set of all successors\n # - creating a subgraph which contains all predecessors and all succors\n # - all predecessors are marked as visited\n # - all predecessor edges are marked as visited\n # This way it is possible to have nodes in the graph that will not be touched by the update\n # while edges will be created from successors of the merge node to predecessors of the merge node.\n def merge_sub_graphs(\n root_nodes: dict[str, set[str]], parent_nodes: set[str], parent_edges: set[Tuple[str, str, str]]\n ) -> Generator[Tuple[str, GraphAccess], None, None]:\n all_successors: Set[str] = set()\n for root, predecessors in root_nodes.items():\n successors: set[str] = sub_graph_nodes(root, predecessors)\n # make sure nodes are not \"mixed\" between different merge nodes\n overlap = successors & all_successors\n if overlap:\n raise AttributeError(f\"Nodes are referenced in more than one merge node: {overlap}\")\n all_successors |= successors\n # create subgraph with all successors and all parents, where all parents are already marked as visited\n sub = GraphAccess(graph.subgraph(successors | parent_nodes), root, parent_nodes, parent_edges)\n yield root, sub\n\n roots = merge_roots()\n parents: set[str] = reduce(lambda res, ps: res | ps, roots.values(), set())\n parent_graph = graph.subgraph(parents)\n graphs = merge_sub_graphs(roots, parents, set(parent_graph.edges(data=\"edge_type\")))\n return list(roots.keys()), GraphAccess(parent_graph, GraphAccess.root_id(graph)), graphs",
"def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2",
"def merge_synset(wn, synsets, reason, lexfile, ssid=None, change_list=None):\n pos = synsets[0].part_of_speech.value\n if not ssid:\n ssid = new_id(wn, pos, synsets[0].definitions[0].text)\n ss = Synset(ssid, \"in\",\n PartOfSpeech(pos), lexfile)\n ss.definitions = [d for s in synsets for d in s.definitions]\n ss.examples = [x for s in synsets for x in s.examples]\n members = {}\n wn.add_synset(ss)\n\n for s in synsets:\n # Add all relations\n for r in s.synset_relations:\n if not any(r == r2 for r2 in ss.synset_relations):\n add_relation(\n wn, ss, wn.synset_by_id(\n r.target), r.rel_type, change_list)\n # Add members\n for m in wn.members_by_id(s.id):\n if m not in members:\n members[m] = add_entry(wn, ss, m, change_list)\n add_entry(wn, ss, m, change_list)\n e = [e for e in [wn.entry_by_id(e2) for e2 in wn.entry_by_lemma(m)]\n if e.lemma.part_of_speech.value == pos][0]\n for f in e.forms:\n if not any(f2 == f for f in members[m].forms):\n members[m].add_form(f)\n # syn behaviours - probably fix manually for the moment\n if change_list:\n change_list.change_synset(ss)\n return ss",
"def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self",
"def testMergeNoEdges():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=2, z=3)\n\n assert n1.z == 4\n\n n1.merge_with(n2)\n\n assert n1.z == 3",
"def union(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n\n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n\t\t for v in chain(G.successors(u), H.successors(u)))\n deg = make_deg(n_nodes, edges) \n edges = ((u, v) for u in G.nodes()\n\t\t for v in chain(G.successors(u), H.successors(u)))\n GC = make(n_nodes, G.size() + H.size(), edges, deg)\n return GC"
] | [
"0.6905344",
"0.6667315",
"0.66233647",
"0.654952",
"0.6426134",
"0.6303256",
"0.63004285",
"0.6278962",
"0.6188941",
"0.6159831",
"0.61385214",
"0.60857415",
"0.60827035",
"0.59966093",
"0.5975047",
"0.58856934",
"0.5844211",
"0.58405745",
"0.58178586",
"0.57996076",
"0.579917",
"0.5797398",
"0.57669127",
"0.5750537",
"0.57353175",
"0.57105595",
"0.57101643",
"0.56960154",
"0.5684606",
"0.5668889"
] | 0.77939695 | 0 |
Builds and returns (in the form returned by decoderawtransaction) a transaction that spends the given utxo, pays CHI to some output | def build_tx (self, utxo, chiOut, name, nameAddr, value):
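    # Spend the current name output together with the given utxo; pay 0.01 CHI
    # to nameAddr to carry the updated name and add the requested CHI outputs.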
nameData = self.nodes[0].name_show (name)
inputs = [nameData, utxo]
outputs = {nameAddr: Decimal ('0.01')}
outputs.update (chiOut)
tx = self.nodes[0].createrawtransaction (inputs, outputs)
nameOp = {
"op": "name_update",
"name": name,
"value": value,
}
tx = self.nodes[0].namerawtransaction (tx, 0, nameOp)
res = self.nodes[0].decoderawtransaction (tx["hex"])
res["hex"] = tx["hex"]
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sochain_utxo_to_xchain_utxo(utxo):\n hash = utxo['txid']\n index = utxo['output_no']\n \n value = round(float(utxo['value']) * 10 ** 8)\n script = bytearray.fromhex(utxo['script_hex']) #utxo['script_hex']\n witness_utxo = Witness_UTXO(value, script)\n return UTXO(hash, index, witness_utxo)",
"def select_utxo_and_create_tx(transaction_input: TransactionInput) -> (TransactionOutput, str):\n\ttry:\n\t\tunspent = Utxo.get_unspent_outputs(transaction_input.source_address)\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to get unspent outputs: {e}\")\n\t\treturn None, \"There was a problem trying to get unspent outputs\"\n\n\ttotal_unspent = sum([u['value'] for u in unspent])\n\n\tbest_selected = SelectedInfo(total_unspent, \"\", list(unspent), dict(transaction_input.outputs))\n\t# It checks which selector gives the best results in terms of lower fees\n\tfor selector in [BiggerFirst(), SmallerFirst(), FirstFit(), BestFit()]:\n\t\toutputs = dict(transaction_input.outputs)\n\t\ttotal_outputs = sum([u for u in outputs.values()])\n\n\t\tselected, err = create_transaction_with_change(\n\t\t\tselector, outputs, total_outputs, unspent, total_unspent,\n\t\t\ttransaction_input.source_address, transaction_input.fee_kb)\n\n\t\tif err is not None:\n\t\t\treturn None, err\n\n\t\t# Case it's found a smaller fee or less UTXO are used or less no change is necessary\n\t\tbest_selected = min(best_selected, selected)\n\n\tif len(best_selected.selected) == 0:\n\t\treturn None, \"It was unable the select the UTXO for creating the transaction\"\n\n\tresp = TransactionOutput(best_selected.raw, [])\n\tfor utxo in best_selected.selected:\n\t\tresp.inputs += [Utxo.to_tx_output_item(utxo)]\n\n\treturn resp, None",
"def create_tx(self, coin, account, to, amount):\n if coin is ETH:\n gasEstimate = self.w3.eth.estimateGas(\n {\"from\": account.address, \"to\": to, \"value\": amount}\n )\n return {\n \"from\": account.address,\n \"to\": to,\n \"value\": self.w3.toWei(amount, 'ether'),\n \"gasPrice\": self.w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": self.w3.eth.getTransactionCount(account.address),\n }\n elif coin is BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(to, amount, BTC)])\n elif coin is BTC:\n return PrivateKey.prepare_transaction(account.address, [(to, amount, BTC)])\n else:\n return None",
"def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)",
"def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):\n if not type(inputs) is list:\n inputs = [inputs]\n\n tx = CTransaction()\n for i in inputs:\n tx.vin.append(CTxIn(COutPoint(i[\"txid\"], i[\"vout\"]), b\"\", 0xffffffff))\n for addr, amount in outputs.items():\n if addr == \"data\":\n tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))\n else:\n tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))\n tx.rehash()\n return hexlify(tx.serialize()).decode(\"utf-8\")",
"def create_tx(coin, account, recipient, amount):\n if coin ==ETH:\n gasEstimate = w3.eth.estimateGas(\n {\"from\": account.address, \"to\": recipient, \"value\": amount})\n return{\n \"to\": recipient,\n \"from\": account.address,\n \"value\": amount,\n \"gasPrice\": w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": w3.eth.getTransactionCount(account.address)\n }\n if coin == BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(recipient, amount, BTC)])",
"def send_tx(args):\n kwargs = {\n '--privkey': args.privkey,\n '--to': AMEND_ADDR,\n '--code': args.code,\n '--value': str(args.value),\n }\n args = functools.reduce(\n lambda lst, kv: lst + list(kv),\n kwargs.items(),\n [],\n )\n print(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'send_tx.py'])\n with open('../output/transaction/hash') as fobj:\n return fobj.read().strip()",
"def transact(self, args):\n private_key = os.environ.get('private_key')\n if private_key:\n set_gas_prices(self.w3, args)\n tx = send(self.w3, private_key, args)\n return self.w3.toHex(tx)",
"def mk_simple_transaction(self, from_addr, to_addr, send_value):\n transaction = dict(\n nonce=self.web3.eth.get_transaction_count(from_addr),\n gasPrice=self.web3.eth.gasPrice,\n # there must be an automated way to automatically set the gas price\n # based off of the gas strategy\n gas=100000,\n to=to_addr,\n value=self.web3.toWei(send_value, 'wei')\n )\n return transaction",
"def __create_transaction(self):\n log.debug(\"Displaying __create_transaction\")\n # Make the admin select an user\n user = self.__user_select()\n # Allow the cancellation of the operation\n if isinstance(user, CancelSignal):\n return\n # Create an inline keyboard with a single cancel button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"),\n callback_data=\"cmd_cancel\")]])\n # Request from the user the amount of money to be credited manually\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_credit\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(-? ?[0-9]{1,3}(?:[.,][0-9]{1,2})?)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Convert the reply to a price object\n price = self.Price(reply)\n # Ask the user for notes\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_transaction_notes\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(.*)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Create a new transaction\n transaction = db.Transaction(user=user,\n value=int(price),\n provider=\"Manual\",\n notes=reply)\n self.session.add(transaction)\n # Change the user credit\n user.recalculate_credit()\n # Commit the changes\n self.session.commit()\n # Notify the user of the credit/debit\n self.bot.send_message(user.user_id,\n self.loc.get(\"notification_transaction_created\",\n transaction=transaction.text(w=self)))\n # Notify the admin of the success\n self.bot.send_message(self.chat.id, self.loc.get(\"success_transaction_created\",\n transaction=transaction.text(w=self)))",
"def call_contract(w3, account, func):\n tx = func.buildTransaction({\n 'nonce': w3.eth.getTransactionCount(account.address),\n 'gas': func.estimateGas()\n })\n signed_tx = w3.eth.account.signTransaction(tx, account.privateKey)\n tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n return tx_hash",
"def send_tx(coin, account, recipient, amount):\n if coin =='eth':\n txn = create_tx(coin, account, recipient, amount)\n signed_txn = w3.eth.account.signTransaction(txn)\n result = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n print(result.hex())\n return result.hex()\n\n else:\n tx_btctest= create_tx(coin, account, recipient, amount)\n sign_tx_btctest = account.sign_transaction(tx_btctest)\n from bit.network import NetworkAPI\n NetworkAPI.broadcast_tx_testnet(sign_tx_btctest) \n return sign_tx_btctest",
"def create_transaction(inputs: list, outputs: dict) -> ((str, int), str):\n\ttry:\n\t\tc = Bitcoin(testnet=bitcoin_is_testnet)\n\t\touts = []\n\t\tfor outk, outv in outputs.items():\n\t\t\touts += [{'value': outv, 'address': outk}]\n\t\ttx = c.mktx(inputs, outs)\n\t\ttx_serialize = serialize(tx)\n\n\t\t# Signing each input to predict the transaction size\n\t\tpriv = sha256('a big long brainwallet password')\n\t\ttx_signed = tx.copy()\n\t\tfor i in range(len(inputs)):\n\t\t\ttx_signed = c.sign(tx_signed, i, priv)\n\n\t\t# The serialization uses one char per nibble so in order the get the number of bytes it's necessary to\n\t\t# divide the size of the string serialization by 2\n\t\treturn (str(tx_serialize), len(str(serialize(tx_signed))) // 2), None\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to create the transaction: {e}\")\n\t\treturn (None, None), \"There was a problem trying to create the transaction\"",
"def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200",
"def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}",
"def add_tx(self, txid, tx):\n outputs = tx.outputs()\n so = outputs and outputs[0][1]\n # Note: ScriptOutput here is the subclass defined in this file, not\n # address.ScriptOutput\n if not isinstance(so, ScriptOutput):\n return\n transaction_type = so.message.transaction_type\n try:\n if transaction_type == \"GENESIS\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"MINT\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"SEND\":\n self._add_send_tx(so, outputs, txid, tx)\n elif transaction_type == \"COMMIT\":\n return # ignore COMMIT, they don't produce any tokens\n else:\n raise InvalidOutputMessage(\"Bad transaction type\")\n except (AssertionError, ValueError, KeyError, TypeError, IndexError) as e:\n self.print_error(f\"ERROR: tx {txid}; exc =\", repr(e))",
"def testnet_receive_coin(self):\n try:\n datas = get_transaction_details(tx_hash=self.tx_hash,\n coin_symbol='bcy')\n except:\n raise ValidationError('Hash da transacao invalido ou nao '\n 'identificado.')\n if datas.get('error'):\n raise ValidationError('Transacao nao encontrada.')\n vals = {'name': datas.get('hash')}\n if datas.get('confirmations') >= 2:\n vals.update({'confirmation': datas.get('confirmations'),\n 'date_time': str(datas.get('confirmed')),\n 'state': 'D',\n 'satoshi': datas.get('outputs')[0].get('value')})\n self.write(vals)\n return datas.get('hash')",
"def CreateTx(self, request, context):\n channel_name = request.channel or conf.LOOPCHAIN_DEFAULT_CHANNEL\n utils.logger.info(f\"peer_outer_service::CreateTx request({request.data}), channel({channel_name})\")\n\n channel_stub = StubCollection().channel_stubs[channel_name]\n result_hash = asyncio.run_coroutine_threadsafe(\n channel_stub.async_task().create_tx(request.data),\n self.peer_service.inner_service.loop\n ).result()\n\n return loopchain_pb2.CreateTxReply(\n response_code=message_code.Response.success,\n tx_hash=result_hash,\n more_info='')",
"def makeTx(self):\n new_tx = transaction.Tx(self.simulation.tick, self.id, self.id_bag.getNextId(), [])\n self.simulation.all_tx.append(new_tx)\n return new_tx",
"def createrawtransaction(\n self,\n outpoints: List[Dict[str, Any]],\n send_to: Dict[str, float],\n locktime: Optional[int] = None,\n ) -> str:\n assert type(outpoints) == list\n assert type(send_to) == dict\n assert locktime is None or type(locktime) == int\n return self.rpc_call(\"createrawtransaction\", outpoints, send_to, locktime)",
"async def new_tx(request: Request) -> dict:\n peer = request.client.host\n tx = await request.json()\n tx = Transaction(**tx)\n chain.mempool.put_nowait(tx)\n return {\"sender\": peer, \"receipt\": tx.receipt()}",
"def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)",
"def create_raw_transaction(amount, network_fee, from_address, to_address):\n tx_total = amount + network_fee\n tx_inputs = []\n input_total = 0\n unspent = list_unspent(from_address)\n\n # Are there enough funds in one block to cover the amount\n for block in unspent:\n if float(block[\"amount\"]) >= tx_total:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total = float(block[\"amount\"])\n tx_inputs.append(tx_input)\n break\n # If tx_inputs is empty that means we have to\n # build the transaction from multiple blocks\n if not tx_inputs:\n for block in unspent:\n if input_total >= tx_total:\n break\n else:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total += float(block[\"amount\"])\n tx_inputs.append(tx_input)\n\n # Amount left over after amount to send and network fees are subtracted\n # from input_total. Change is sent back to sender\n change = round((input_total - amount) - network_fee, 8)\n \n if change < dust:\n tx_output = {to_address: amount}\n else:\n tx_output = {to_address: amount, from_address: change}\n \n try:\n tx_hex_string = subprocess.check_output([\"litecoin-cli\", \"createrawtransaction\", json.dumps(tx_inputs), json.dumps(tx_output)])\n except:\n sys.exit(1)\n\n return tx_hex_string.strip()",
"def get_utxo_in_wallet(env):\n command = build_command(env.cardano_cli, \"query\", \"utxo\", \"--address\", \"$(cat \" + env.wallet_payment_addr + \")\",\n \"--testnet-magic\", env.magic)\n success, message = run_command(command)\n lines = message.split(\"\\\\n\")\n if len(lines) <= 2:\n raise Exception(\"Could not find utxo with \" + command + \" in\\n \" + format_shell_error(command, message))\n\n # TODO: smarter selection than just using the first one\n # parse while handling multiple whitespaces\n first_utxo_line = re.sub(\" +\", \" \", lines[2]).split(\" \")\n utxo = first_utxo_line[0] + \"#\" + first_utxo_line[1]\n return utxo",
"def create_god_transaction(to_pk):\n\n god_pk, god_sk = signature.generate_keys()\n tx = Transaction(god_pk, to_pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n return tx",
"def send_unsigned_transaction(self, tx: Dict[str, Any], private_key: Optional[str] = None,\n public_key: Optional[str] = None, retry: bool = False,\n block_identifier: Optional[str] = 'pending') -> bytes:\n if private_key:\n address = self.private_key_to_address(private_key)\n elif public_key:\n address = public_key\n else:\n logger.error('No ethereum account provided. Need a public_key or private_key')\n raise ValueError('Ethereum account was not configured or unlocked in the node')\n\n if tx.get('nonce') is None:\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n\n number_errors = 5\n while number_errors >= 0:\n try:\n if private_key:\n signed_tx = self.w3.eth.account.sign_transaction(tx, private_key=private_key)\n logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])\n try:\n return self.send_raw_transaction(signed_tx.rawTransaction)\n except TransactionAlreadyImported as e:\n # Sometimes Parity 2.2.11 fails with Transaction already imported, even if it's not, but it's\n # processed\n tx_hash = signed_tx.hash\n logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))\n return tx_hash\n elif public_key:\n tx['from'] = address\n return self.send_transaction(tx)\n except ReplacementTransactionUnderpriced as e:\n if not retry or not number_errors:\n raise e\n current_nonce = tx['nonce']\n tx['nonce'] = max(current_nonce + 1, self.get_nonce_for_account(address,\n block_identifier=block_identifier))\n logger.error('Tx with nonce=%d was already sent for address=%s, retrying with nonce=%s',\n current_nonce, address, tx['nonce'])\n except InvalidNonce as e:\n if not retry or not number_errors:\n raise e\n logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again',\n address, tx['nonce'])\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)\n number_errors -= 1",
"def generate_privacy_transaction(self):\n\n # recharge transaction sending wallet\n self.recharge_account()\n\n # recharge transaction receiving wallet, (recharging because we need to use this wallet in otaRefund)\n self.privacy_transaction_receiving_wallet = Recharge()\n self.privacy_transaction_receiving_wallet.recharge_account()\n\n # wait for some time, recharge takes time to reflect.\n time.sleep(commonUtil.default_wait_after_recharge)\n\n child = pexpect.spawn('node privacyTransaction', cwd='../src/')\n if commonUtil.show_logs:\n child.logfile = sys.stdout\n\n commonUtil.check_expect(\"Input file name\", child, test_name, \"'Input file name' prompt not found\")\n child.sendline(self.get_file_name())\n\n commonUtil.check_expect(\"Input password\", child, test_name, \"'Input password' prompt not found\")\n child.sendline(self.get_password())\n\n commonUtil.check_expect(\"wallet has been unlocked\", child, test_name,\n \"'wallet has been unlocked' message not found\")\n child.sendline(\"Y\")\n\n commonUtil.check_expect(\"Input receiver\\'s waddress\", child, test_name,\n \"'Input receiver\\'s waddress' prompt not found\")\n child.sendline(self.privacy_transaction_receiving_wallet.get_wan_address());\n\n commonUtil.check_expect(\"Input\", child, test_name, \"Input eth address prompt not found\")\n child.sendline(commonUtil.default_eth_privacy_transaction)\n\n child.expect(commonUtil.default_eth_privacy_transaction)\n\n result = child.read()\n\n if result.find(\"value: \" + commonUtil.default_stamp_value) == -1:\n commonUtil.exit_test(\"'value: \" + commonUtil.default_stamp_value +\n \"' not found in summary\", test_name, child)\n\n if result.find(\"otaDestAddress\") == -1:\n commonUtil.exit_test(\"'otaDestAddress' text not found \", test_name, child)\n\n ota_address_start = result.find('0x', result.find(\"otaDestAddress\"))\n if ota_address_start == -1:\n commonUtil.exit_test(\"'otaDestAddress' value not found\", test_name, child)\n\n self.ota_address = result[ota_address_start + 2:ota_address_start + 135]\n\n if result.find(commonUtil.default_stamp_value) == -1:\n commonUtil.exit_test(\"stamp value \" + commonUtil.default_stamp_value + \" not found\", test_name, child)\n\n child.expect(pexpect.EOF)",
"def tx(self):\n cmd = Command()\n cmd.set_num(0x1C)\n cmd.set_subcmd_num(0x00)\n cmd.set_data([0x01])\n self.connection.send_cmd(cmd.render())",
"def getrawtransaction(self, txid, verbose=True):\n if verbose:\n return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))\n return self.proxy.getrawtransaction(txid, 0)",
"def decoderawtransaction_asm_sighashtype(self):\n\n self.log.info(\"- various mainnet txs\")\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])\n\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc\n # verify that we have not altered scriptPubKey decoding.\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])\n assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])\n assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n txSave = tx_from_hex(tx)\n\n self.log.info(\"- tx not passing DER signature checks\")\n # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type\n tx = 
'01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])\n\n self.log.info(\"- tx passing DER signature checks\")\n # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n\n # some more full transaction tests of varying specific scriptSigs. 
used instead of\n # tests in decodescript_script_sig because the decodescript RPC is specifically\n # for working on scriptPubKeys (argh!).\n push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]\n signature = push_signature[2:]\n der_signature = signature[:-2]\n signature_sighash_decoded = der_signature + '[ALL]'\n signature_2 = der_signature + '82'\n push_signature_2 = '48' + signature_2\n signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'\n\n self.log.info(\"- P2PK scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n # make sure that the sighash decodes come out correctly for a more complex / lesser used case.\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- multisig scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- scriptSig that contains more than push operations\")\n # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.\n txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])"
] | [
"0.65796894",
"0.630716",
"0.62986237",
"0.6173014",
"0.60841966",
"0.5914946",
"0.5743247",
"0.5731016",
"0.5521235",
"0.55083513",
"0.5494718",
"0.5486861",
"0.54759413",
"0.54701596",
"0.54108584",
"0.54077893",
"0.53893286",
"0.5363596",
"0.5303798",
"0.52935517",
"0.52907497",
"0.52741724",
"0.52684134",
"0.5263301",
"0.5262662",
"0.5258399",
"0.52453554",
"0.5237799",
"0.52267975",
"0.52017426"
] | 0.70295274 | 0 |
Signs a transaction (in the format of build_tx) with the given node, and returns the decoderawtransaction-type result again. | def sign (self, node, tx):
signed = node.signrawtransactionwithwallet (tx["hex"])
res = node.decoderawtransaction (signed["hex"])
res.update (signed)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign_transaction():\n data = request.get_json()\n\n try:\n tx = Transaction.from_dict(data)\n except TypeError:\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n signature = tx.sign(node.wallet.private_key_rsa)\n response = dict(signature=signature)\n return jsonify(response), 200",
"def sign_transaction(self, transaction, prvkey):\n return self.web3.eth.account.sign_transaction(transaction, prvkey)",
"def signrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to signrawtransaction.\")\n\n transaction = CMutableTransaction.deserialize(given_bytes)\n transaction_hex = b2x(transaction.serialize())\n return {\"hex\": transaction_hex}",
"def sign_transaction(self, transaction):\n try:\n address = transaction.from_address\n private_key = self.addresses[address]['private_key']\n transaction.sign_transaction(private_key)\n except Exception as ex:\n print(\"Error signing transaction from address: \" + address + \" \" + str(ex))",
"def sign_transaction(self):\n private_key=RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer=PKCS1_v1_5.new(private_key)\n h=SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')",
"def sign_tx(self, network, txn, inputs, change, use_ae_signatures=False):\n # 1st message contains txn and number of inputs we are going to send.\n # Reply ok if that corresponds to the expected number of inputs (n).\n base_id = 100 * random.randint(1000, 9999)\n params = {'network': network,\n 'txn': txn,\n 'num_inputs': len(inputs),\n 'use_ae_signatures': use_ae_signatures,\n 'change': change}\n\n reply = self._jadeRpc('sign_tx', params, str(base_id))\n assert reply\n\n # Send inputs and receive signatures\n return self._send_tx_inputs(base_id, inputs, use_ae_signatures)",
"def sign_raw_transaction(hexstring):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"signrawtransaction\", hexstring])\n signed_tx = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return signed_tx",
"def _sign_ledger_tx(self, tx_message: TransactionMessage) -> Any:\n if tx_message.ledger_id == OFF_CHAIN:\n crypto_object = self.wallet.crypto_objects.get(\"ethereum\")\n # TODO: replace with default_ledger when recover_hash function is available for FETCHAI\n else:\n crypto_object = self.wallet.crypto_objects.get(tx_message.ledger_id)\n tx = tx_message.signing_payload.get(\"tx\")\n tx_signed = crypto_object.sign_transaction(tx)\n return tx_signed",
"def sign_tx(self, tx):\n if self.privkey:\n log.info('signing tx', tx=tx, account=self)\n tx.sign(self.privkey)\n else:\n raise ValueError('Locked account cannot sign tx')",
"def send_transaction(self, signd_txn):\n return self.web3.eth.send_raw_transaction(signd_txn.rawTransaction).hex()",
"def sign_tx(self, tx: payloads.Transaction, password: str, magic: Optional[int] = None) -> None:\n if magic is None:\n magic = settings.network.magic\n\n self._validate_tx(tx)\n\n message = magic.to_bytes(4, byteorder=\"little\", signed=False) + tx.hash().to_array()\n signature = self.sign(message, password)\n\n invocation_script = vm.ScriptBuilder().emit_push(signature).to_array()\n # mypy can't infer that the is_watchonly check ensures public_key has a value\n verification_script = contracts.Contract.create_signature_redeemscript(self.public_key) # type: ignore\n tx.witnesses.insert(0, payloads.Witness(invocation_script, verification_script))",
"def getrawtransaction(self, txid, verbose=True):\n if verbose:\n return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))\n return self.proxy.getrawtransaction(txid, 0)",
"def sign_transaction(self, private_key):\n\n to_be_hashed = (str(self.timestamp) +\n str(self.sender_address) +\n str(self.recipient_address) +\n str(self.amount) +\n # str(self.transaction_inputs) +\n # str(self.transaction_outputs) +\n str(self.transaction_id))\n\n # Create a hash value of the whole message\n sha_hash = SHA256.new(to_be_hashed.encode())\n\n # Import private key\n key = RSA.importKey(private_key)\n\n # print(sha_hash)\n\n # Construct an instance of the crypto object\n cipher = PKCS1_v1_5.new(key)\n\n # Create and return the signature\n self.transaction_signature = cipher.sign(sha_hash)",
"def submit_transaction():\n data = request.get_json()\n\n # Create candidate transaction object\n try:\n tx = Transaction.from_dict(data['transaction'])\n except (KeyError, TypeError):\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n statuses = []\n # Broadcast if needed and turn off broadcasting for other nodes\n if request.args.get('broadcast', type=int, default=0):\n for node_ in node.network:\n if not node_['id'] == node.node_id:\n response = requests.post(\n node_['ip'] + '/transactions/submit?broadcast=0',\n json=dict(\n transaction=data['transaction'],\n signature=data['signature']\n )\n )\n statuses.append(response.status_code)\n\n if not response.status_code == 200:\n response = dict(message='Transaction rejected by the network.')\n return jsonify(response), 202\n\n # Validate transaction as-is\n val_result = validate_transaction_document(tx)\n if isinstance(val_result, str):\n response = dict(message=val_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Verify signature\n # defined in backend/utils\n sign_result = verify_signature(tx, data['signature'])\n if isinstance(sign_result, str):\n response = dict(message=sign_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Add transaction to local blockchain\n node.blkchain.add_transaction(tx)\n myurl = node.network[node.node_id]['ip']\n url = myurl + '/blockchain/mine_block'\n mine_resp = requests.get(url=url)\n if mine_resp.status_code == 200:\n block_dict = mine_resp.json()\n add_resp = requests.post(url=myurl + '/blockchain/add_block?\\\n broadcast=1', json=block_dict)\n # run consensus \n requests.get(url=myurl+'/blockchain/consensus')\n\n response = dict(message='Transaction added.')\n\n return jsonify(response), 200",
"def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)",
"def decoderawtransaction_asm_sighashtype(self):\n\n self.log.info(\"- various mainnet txs\")\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])\n\n # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.\n # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc\n # verify that we have not altered scriptPubKey decoding.\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])\n assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])\n assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n txSave = tx_from_hex(tx)\n\n self.log.info(\"- tx not passing DER signature checks\")\n # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type\n tx = 
'01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])\n\n self.log.info(\"- tx passing DER signature checks\")\n # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks\n tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'\n rpc_result = self.nodes[0].decoderawtransaction(tx)\n assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])\n assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])\n\n # some more full transaction tests of varying specific scriptSigs. 
used instead of\n # tests in decodescript_script_sig because the decodescript RPC is specifically\n # for working on scriptPubKeys (argh!).\n push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]\n signature = push_signature[2:]\n der_signature = signature[:-2]\n signature_sighash_decoded = der_signature + '[ALL]'\n signature_2 = der_signature + '82'\n push_signature_2 = '48' + signature_2\n signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'\n\n self.log.info(\"- P2PK scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n # make sure that the sighash decodes come out correctly for a more complex / lesser used case.\n txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- multisig scriptSig\")\n txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])\n\n self.log.info(\"- scriptSig that contains more than push operations\")\n # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.\n txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')\n rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())\n assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])",
"def send_tx(coin, account, recipient, amount):\n if coin =='eth':\n txn = create_tx(coin, account, recipient, amount)\n signed_txn = w3.eth.account.signTransaction(txn)\n result = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n print(result.hex())\n return result.hex()\n\n else:\n tx_btctest= create_tx(coin, account, recipient, amount)\n sign_tx_btctest = account.sign_transaction(tx_btctest)\n from bit.network import NetworkAPI\n NetworkAPI.broadcast_tx_testnet(sign_tx_btctest) \n return sign_tx_btctest",
"def send_raw_transaction(signed_tx):\n try:\n txid = subprocess.check_output([\"litecoin-cli\", \"sendrawtransaction\", signed_tx])\n except:\n sys.exit(1)\n return txid.strip()",
"def sign_with(self, account: Account) -> CosignatureSignedTransaction:\n\n transaction_info = self.transaction.transaction_info\n if transaction_info is None:\n raise ValueError('Transaction info not present.')\n parent_hash = typing.cast(TransactionInfo, transaction_info).hash\n if parent_hash is None:\n raise ValueError('Transaction info to cosign has no hash.')\n\n signature = util.hexlify(account.sign_data(parent_hash))\n signer = account.public_key\n return CosignatureSignedTransaction(parent_hash, signature, signer)",
"def signed_transaction(self) -> CustomSignedTransaction:\n enforce(\n self.is_set(\"signed_transaction\"),\n \"'signed_transaction' content is not set.\",\n )\n return cast(CustomSignedTransaction, self.get(\"signed_transaction\"))",
"def sendrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to sendrawtransaction.\")\n transaction = CMutableTransaction.deserialize(given_bytes)\n return b2lx(transaction.GetHash())",
"def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None):\n return dict(self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys))",
"def submit_and_store_transaction(self, signed_transaction_data):\n return self._call_account_method(\n 'submitAndStoreTransaction', {\n 'signedTransactionData': signed_transaction_data\n }\n )",
"def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)",
"def test_new_transaction_return_type(self):\n transaction = self.blockchain.new_transaction(self.sender, self.recipient, self.amount)\n self.assertIsInstance(transaction, int)",
"def sign_liquid_tx(self, network, txn, inputs, commitments, change, use_ae_signatures=False,\n asset_info=None):\n # 1st message contains txn and number of inputs we are going to send.\n # Reply ok if that corresponds to the expected number of inputs (n).\n base_id = 100 * random.randint(1000, 9999)\n params = {'network': network,\n 'txn': txn,\n 'num_inputs': len(inputs),\n 'trusted_commitments': commitments,\n 'use_ae_signatures': use_ae_signatures,\n 'change': change,\n 'asset_info': asset_info}\n\n reply = self._jadeRpc('sign_liquid_tx', params, str(base_id))\n assert reply\n\n # Send inputs and receive signatures\n return self._send_tx_inputs(base_id, inputs, use_ae_signatures)",
"def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200",
"def sign(self, payload):\n raise NotImplementedError",
"def sign_trx(self, signture):\n self.trx_signature = signture",
"def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")"
] | [
"0.6919308",
"0.6778266",
"0.6380237",
"0.627283",
"0.61029476",
"0.6083491",
"0.5987487",
"0.58167666",
"0.57365465",
"0.5712428",
"0.56981504",
"0.56660604",
"0.56095326",
"0.55846405",
"0.55624753",
"0.5552044",
"0.54918426",
"0.5468147",
"0.54531074",
"0.54221904",
"0.5362187",
"0.534654",
"0.53127396",
"0.5284009",
"0.52756524",
"0.527408",
"0.5249939",
"0.5228976",
"0.5205368",
"0.5204117"
] | 0.77226585 | 0 |
Pick a section of a signal | def pick_section(signal, section=None):
len_noise = signal.shape[-1]
if section is None:
len_sig = len_noise
ii = 0
elif isinstance(section, int):
len_sig = section
ii = np.random.randint(0, len_noise - len_sig)
else:
len_sig = np.asarray(section).shape[-1]
ii = np.random.randint(0, len_noise - len_sig)
return signal[..., ii:ii + len_sig] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def phasing_signal(self, phasing, r, c):\r\n def signal():\r\n value = phasing.currentIndex()\r\n if value >= 0 and value <= 2:\r\n globals.sections[r, c] = value\r\n return signal",
"def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)",
"def selection_fn(self, trace, points, selector):\n self.segment = self.fig.layout[\"sliders\"][0].active\n seg = self.segment\n\n xrange = selector.xrange\n wave = self.wave[seg]\n mask = self.mask[seg]\n\n # Choose pixels and value depending on selected type\n if self.mask_type == \"good\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 0)\n elif self.mask_type == \"bad\":\n value = 0\n idx = (wave > xrange[0]) & (wave < xrange[1])\n elif self.mask_type == \"line\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask != 0)\n print(np.count_nonzero(idx))\n elif self.mask_type == \"cont\":\n value = 2\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 1)\n else:\n return\n\n # Apply changes if any\n if np.count_nonzero(idx) != 0:\n self.mask[seg][idx] = value\n\n with self.fig.batch_update():\n # Update Line Mask\n m = self.line_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 1\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y\n\n # Update Cont Mask\n m = self.cont_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 2\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y",
"def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)",
"def slicewhere(condition):\n regions = ndimage.find_objects(ndimage.label(condition)[0])\n return [region[0] for region in regions]",
"def get_sample_mask(self):",
"def segment(data):",
"def _pickFull(self, context):\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n rayObject = rayObject[:, :3]\n\n data = self.getData(copy=False)\n bins = utils.segmentVolumeIntersect(\n rayObject, numpy.array(data.shape) - 1)\n if bins is None:\n return None\n\n # gather bin data\n offsets = [(i, j, k) for i in (0, 1) for j in (0, 1) for k in (0, 1)]\n indices = bins[:, numpy.newaxis, :] + offsets\n binsData = data[indices[:, :, 0], indices[:, :, 1], indices[:, :, 2]]\n # binsData.shape = nbins, 8\n # TODO up-to this point everything can be done once for all isosurfaces\n\n # check bin candidates\n level = self.getLevel()\n mask = numpy.logical_and(numpy.nanmin(binsData, axis=1) <= level,\n level <= numpy.nanmax(binsData, axis=1))\n bins = bins[mask]\n binsData = binsData[mask]\n\n if len(bins) == 0:\n return None # No bin candidate\n\n # do picking on candidates\n intersections = []\n depths = []\n for currentBin, data in zip(bins, binsData):\n mc = MarchingCubes(data.reshape(2, 2, 2), isolevel=level)\n points = mc.get_vertices() + currentBin\n triangles = points[mc.get_indices()]\n t = glu.segmentTrianglesIntersection(rayObject, triangles)[1]\n t = numpy.unique(t) # Duplicates happen on triangle edges\n if len(t) != 0:\n # Compute intersection points and get closest data point\n points = t.reshape(-1, 1) * (rayObject[1] - rayObject[0]) + rayObject[0]\n # Get closest data points by rounding to int\n intersections.extend(points)\n depths.extend(t)\n\n if len(intersections) == 0:\n return None # No intersected triangles\n\n intersections = numpy.array(intersections)[numpy.argsort(depths)]\n indices = numpy.transpose(numpy.round(intersections).astype(numpy.int64))\n return PickingResult(self, positions=intersections, indices=indices)",
"def selectData(self, rubberBandRect, fromScenePoint, toScenePoint):\n if fromScenePoint == toScenePoint:\n return\n\n if QApplication.keyboardModifiers() != Qt.ShiftModifier and QApplication.keyboardModifiers() != Qt.ControlModifier:\n # unselect all currently selected items\n for h in self.highlightedItems:\n h.highlighted = False\n self.highlightedItems.clear()\n self.highlightedRings.clear()\n\n sel = self.items(rubberBandRect)\n for s in sel:\n if type(s) == PlotLine:\n parent = s.parentItem()\n siblings = parent.childItems()\n\n if QApplication.keyboardModifiers() == Qt.ControlModifier:\n for sib in siblings:\n if sib in self.highlightedItems:\n sib.highlighted = False\n self.highlightedItems.remove(sib)\n if parent in self.highlightedRings:\n self.highlightedRings.remove(parent)\n else:\n for sib in siblings:\n sib.highlighted = True\n self.highlightedItems.add(sib)\n self.highlightedRings.add(parent)\n\n self.__selectionUpdateTimer.start(self.selectionUpdateDelay)",
"def take(self, condition):\n full_data = self._data.copy()\n series_data = full_data['@1'].copy()\n slicer, _ = get_logic_index(series_data, condition, full_data)\n return slicer",
"def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()",
"def pick(self,i):\n x_i = self.all[i,:]\n return x_i",
"def cbSelectSignal( BoardNum, Direction, Signal, Connection, Polarity ):\n CHK( cbw.cbSelectSignal( BoardNum, Direction, Signal, Connection, Polarity ) )",
"def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)",
"def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose",
"def randselwave(sample, minlen=0, maxlen=None, nosilence=True):\n if nosilence:\n sig = rmsilence(sample)\n else:\n sig = sample.signal\n\n sigsize = len(sig)\n minoffset = int(minlen * sample.samplerate)\n maxoffset = min(int(maxlen*sample.samplerate),\n sigsize) if maxlen else sigsize\n\n assert (minoffset < maxoffset) and (minoffset <= sigsize), \\\n f\"\"\"BAD: siglen={sigsize}, minlen={minoffset}, maxlen={maxoffset}\"\"\"\n\n # Select begin sample\n ns = randrange(max(1, sigsize-minoffset))\n ne = randrange(ns+minoffset, min(ns+maxoffset, sigsize+1))\n\n return sig[ns:ne]",
"def on_select_clip_slot(self, clip_slot):\n pass",
"def onpick(cls, event):\n event_len = len(event.ind)\n if not event_len:\n return True\n value = event.ind[-1] + FigureControl.minPossibleGenNumber\n vis_now = FigureControl.isVisible(value)\n FigureControl.makeGenVisible(value, not vis_now, \"dist\")",
"def randselphon(sample, phonfunc=None):\n (ns, ne), ph = sample.phonemeseq[randrange(len(sample.phonemeseq))]\n if phonfunc is not None:\n while not phonfunc(ph):\n (ns, ne), ph = sample.phonemeseq[randrange(len(sample.phonemeseq))]\n\n return sample.signal[ns:ne], ph",
"def _on_pick(self, event):\n pix_id = event.ind[-1]\n xx, yy, aa = u.Quantity(self.geom.pix_x[pix_id]).value, \\\n u.Quantity(self.geom.pix_y[pix_id]).value, \\\n u.Quantity(np.array(self.geom.pix_area)[pix_id])\n if self.geom.pix_type.startswith(\"hex\"):\n self._active_pixel.xy = (xx, yy)\n else:\n rr = sqrt(aa)\n self._active_pixel.xy = (xx - rr / 2., yy - rr / 2.)\n self._active_pixel.set_visible(True)\n self._active_pixel_label.set_x(xx)\n self._active_pixel_label.set_y(yy)\n self._active_pixel_label.set_text(f\"{pix_id:003d}\")\n self._active_pixel_label.set_visible(True)\n self._update()\n self.on_pixel_clicked(pix_id) # call user-function",
"def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling",
"def sample(self, seg_logit, seg_label):",
"def _pickFull(self, context):\n rayObject = context.getPickingSegment(frame=self._getScenePrimitive())\n if rayObject is None:\n return None\n\n points = utils.segmentPlaneIntersect(\n rayObject[0, :3],\n rayObject[1, :3],\n planeNorm=self.getNormal(),\n planePt=self.getPoint())\n\n if len(points) == 1: # Single intersection\n if numpy.any(points[0] < 0.):\n return None # Outside volume\n z, y, x = int(points[0][2]), int(points[0][1]), int(points[0][0])\n\n data = self.getData(copy=False)\n if data is None:\n return None # No dataset\n\n depth, height, width = data.shape\n if z < depth and y < height and x < width:\n return PickingResult(self,\n positions=[points[0]],\n indices=([z], [y], [x]))\n else:\n return None # Outside image\n else: # Either no intersection or segment and image are coplanar\n return None",
"def getSplitDetectorSignal(self):\r\n\t\treturn self.splitData",
"def selectregion(self, group=None):\n points = pylab.ginput(n=2, timeout=0)\n bounds = [int(point[not self.waveaxis]) for point in points]\n bounds = self._validateregion(bounds)\n try:\n self.regions.append({'min': bounds[0], 'max': bounds[1],\n 'group': group})\n except TypeError:\n pass",
"def nearest_test_pulse(self):",
"def RecursiveLowPassFast(signal, coeff, self):\n # Creates running mean value of the input\n ml = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signal) \n # Plot Running threshold value at the current plot\n self.p1.plot(self.t, ml, pen=pg.mkPen(color=(246, 178, 255), width=3))\n\n # Creates running square deviation from the mean\n vl = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signal - ml))\n # Creates \"threshold line\". If current value < sl[i] -> i belongs to event. \n sl = ml - coeff['S'] * np.sqrt(vl)\n self.p1.plot(self.t, sl, pen=pg.mkPen(color=(173, 27, 183), width=3))\n # Finds the length of the initial signal\n Ni = len(signal)\n # Finds those points where signal less than \"threshold line\"\n points = np.array(np.where(signal<=sl)[0])\n to_pop=np.array([]) # Empty supplementary array for finding adjacent points \n # For loop for finding adjacent points \n for i in range(1,len(points)):\n if points[i] - points[i - 1] == 1:\n to_pop=np.append(to_pop, i)\n # Points contain only border points of events\n points = np.delete(points, to_pop)\n # Empty list for Event location storage\n RoughEventLocations = []\n NumberOfEvents=0 #Number of events\n\n # For Loop for finding separating edges of different events and satisfying Event length limits\n for i in points:\n if NumberOfEvents is not 0:\n if i >= RoughEventLocations[NumberOfEvents-1][0] and i <= RoughEventLocations[NumberOfEvents-1][1]:\n continue\n NumberOfEvents += 1\n start = i\n El = ml[i] - coeff['E'] * np.sqrt(vl[i])\n Mm = ml[i]\n Vv = vl[i]\n duration = 0\n while signal[i + 1] < El and i < (Ni - 2) and duration < coeff['eventlengthLimit']:\n duration += 1\n i += 1\n if duration >= coeff['eventlengthLimit'] or i > (Ni - 10):\n NumberOfEvents -= 1\n else:\n k = start\n while signal[k] < Mm and k > 1:\n k -= 1\n start = k - 1\n k2 = i + 1\n while signal[k2] > Mm:\n k2 -= 1\n endp = k2\n if start<0:\n start=0\n RoughEventLocations.append((start, endp, ml[start], vl[start]))\n\n return np.array(RoughEventLocations)",
"def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)",
"def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def selected(self, point):\n local_point = (point[0] - self.x, point[1] - self.y)\n self.remove(self.slide.rect)\n self.slide.update(local_point)\n self.insert(1, self.slide.rect)\n self.slide.rect.fill = self.slide_color\n self.title.text = f\"{self.name}:{int(self.slide.value)}\""
] | [
"0.6373907",
"0.6050952",
"0.5974559",
"0.54288924",
"0.54075724",
"0.5390018",
"0.5377656",
"0.5331255",
"0.5308175",
"0.5240958",
"0.5233869",
"0.51971555",
"0.5189088",
"0.5158308",
"0.5147491",
"0.5136177",
"0.51356614",
"0.50236404",
"0.49819353",
"0.49715742",
"0.4927835",
"0.49169132",
"0.49138194",
"0.4899514",
"0.48878258",
"0.48869595",
"0.48764083",
"0.48672202",
"0.48371017",
"0.48254293"
] | 0.6796413 | 0 |
Calculate the average level across all sentences. The levels are calculated according to the toolbox's reference level. Returns the mean and standard deviation of the levels. | def average_level(self):
spl = [utils.dbspl(x) for x in self.load_files()]
return np.mean(spl), np.std(spl) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rouge_l_sentence_level(eval_sentences, ref_sentences):\n\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n m = float(len(ref_sentence))\n n = float(len(eval_sentence))\n lcs = _len_lcs(eval_sentence, ref_sentence)\n f1_scores.append(_f_lcs(lcs, m, n))\n return np.mean(f1_scores, dtype=np.float32)",
"def _averageOfLevels(self, root):\n level = [ root ]\n averages = []\n while len(level) != 0:\n averages.append(float(sum(l.val for l in level)) / len(level))\n level = [kid for node in level for kid in (node.left, node.right) if kid]\n return averages",
"def showAverageBetUsed(self) :\n averageBetUsed = 0\n for level in self.level_history :\n averageBetUsed += level.bet\n averageBetUsed = averageBetUsed/len(self.level_history)\n Scenario.messageGetAverageBetUsed(averageBetUsed)",
"def showAverageGainWon(self) :\n averageGainWon = 0\n for level in self.level_history :\n averageGainWon += level.profit\n averageGainWon = averageGainWon/len(self.level_history)\n Scenario.messageGetAverageGainWon(averageGainWon)",
"def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average",
"def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average",
"def get_avg_sentence_length(self):\n sentences = self.blob.sentences\n average_sentence_length = np.mean(np.array([len(sentence.words) for sentence in sentences]))\n return average_sentence_length",
"def _eed_compute(sentence_level_scores: List[Tensor]) ->Tensor:\n if len(sentence_level_scores) == 0:\n return tensor(0.0)\n average = sum(sentence_level_scores) / tensor(len(sentence_level_scores))\n return average",
"def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average",
"def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average",
"def calculate_avg_cholesterol(self):\n total = 0\n no_of_valid_patients = 0\n for patient in self._patient_list:\n try:\n total += patient.get_cholesterol_data()[0]\n no_of_valid_patients += 1\n except AttributeError:\n continue\n except TypeError:\n continue\n if no_of_valid_patients == 0:\n return 0\n average = total/no_of_valid_patients\n self.average_cholesterol_level = average\n return average",
"def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_hit = accuracy * max_hit / 2\n exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE\n ticks = experience / exp_per_hit * ticks_per_attack\n return ticks",
"def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))",
"def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])",
"def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)",
"def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)",
"def showAverageNbAttemptsByLevels(self) :\n level_current = 1\n while level_current <= len(self.list_level) :\n self.showAverageNbAttemptsByLevel(level_current)\n level_current += 1",
"def rouge_l_sentence_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n reference_words = _split_into_words(reference_sentences)\n evaluated_words = _split_into_words(evaluated_sentences)\n m = len(reference_words)\n n = len(evaluated_words)\n lcs = _len_lcs(evaluated_words, reference_words)\n return _f_p_r_lcs(lcs, m, n)",
"def accuracy_text_level(dataset, beta):\r\n X, gs, gl = dataset_as_arrays(dataset)\r\n \r\n #value of the sigmoid function for all embeddings in X, given beta \r\n sentence_predictions = ghf.sigmoid_over_array(X, beta)\r\n \r\n text_predictions = []\r\n for index in range(gs.shape[0]):\r\n frm = numpy.sum(gl[0:index])\r\n to = frm + gl[index]\r\n text_prediction = numpy.average(sentence_predictions[frm:to])\r\n text_predictions.append(0 if text_prediction < 0.5 else 1)\r\n \r\n prediction_right = 0\r\n for index, prediction in enumerate(text_predictions): \r\n if prediction == gs[index]:\r\n prediction_right += 1\r\n \r\n return prediction_right/gs.shape[0]",
"def avg_text(mukey, layers):\n #read appropriate soils.in content to a python list\n mukey = str(mukey)\n soil_path = \"/data/paustian/ernie/SSURGO_master_script/soil_test2/\"\n soil_fpath = soil_path+mukey[:-3]+\"/\"+mukey+\".in\"\n cont = [[]]\n data_input = open(soil_fpath, 'r')\n for line in data_input:\n cont.append(line.split())\n del cont[0]\n\n #convert all entries in the 2D list to float format where possible, or zero in the case\n #of very small numbers recorded in scientific notation\n for k in range(len(cont)):\n for l in range(len(cont[k])):\n cont[k][l] = float(cont[k][l])\n\n #loop through list and compute the depth-weighted fraction of each texture component\n sand_tot = 0\n silt_tot = 0\n clay_tot = 0\n for i in range(len(cont)):\n if i+1 <= layers:\n depth = float(cont[i][1]) - float(cont[i][0])\n sand = float(cont[i][7])\n clay = float(cont[i][8])\n silt = round(1-sand-clay, 2)\n sand_tot += sand * depth\n silt_tot += silt * depth\n clay_tot += clay * depth\n final_depth = float(cont[i][1])\n\n if layers > len(cont):\n print \"NOTE: specified layer limit exceeds number of layers found in soils.in file\"\n\n # normalize by total depth\n sand_avg = sand_tot/final_depth\n silt_avg = silt_tot/final_depth\n clay_avg = clay_tot/final_depth\n\n return sand_avg, silt_avg, clay_avg, final_depth",
"def rouge_l_sentence_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n reference_words = _split_into_words(reference_sentences)\n evaluated_words = _split_into_words(evaluated_sentences)\n m = len(reference_words)\n n = len(evaluated_words)\n lcs = _len_lcs(evaluated_words, reference_words)\n return _f_p_r_lcs(lcs, m, n)",
"def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity",
"def find_shrunken_averages(tuple_input):\n #The categorical level.\n level = tuple_input[0]\n # The labels list (y varaibale) from a map function.\n labels = tuple_input[1]\n # The total number of level occurances in the frame (ie count)\n level_n = len(labels)\n level_mean = sum(labels) / level_n\n\n # Determine if there enough occurances of a level. If NOT return overall_mean\n if level_n >= threshold:\n return(level,level_mean)\n else:\n return(level, ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean) )",
"def getLevels():",
"def accuracy_sentence_level(dataset, beta, d2v_model):\r\n \r\n prediction_right = 0\r\n \r\n for row in dataset:\r\n sentence_vec = numpy.reshape(d2v_model.infer_vector(row[0].split(), alpha=0.1, steps=20), newshape=(200,-1))\r\n prediction = (1 / (1 + numpy.exp(-numpy.dot(numpy.transpose(sentence_vec), beta))))\r\n prediction = 1 if prediction>0.5 else 0\r\n if int(row[1]) == prediction:\r\n prediction_right += 1\r\n \r\n return prediction_right / len(dataset)",
"def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm",
"def score(self, sentence):\n # count each incremented word\n for word in sentence:\n if word not in self.unigramCounts:\n self.zeroCount += 1\n\n # apply laplace smoothing to unigram model\n score = 0.0\n for word in sentence:\n count = self.unigramCounts[word]\n score += math.log(count + 1)\n score -= math.log(self.totalCount + self.zeroCount)\n return score",
"def leafScore(self) :\n return 0",
"def score(self, sentence):\n s = 0;\n\n #for every word\n for i in xrange(len(sentence)):\n score = self.getBackOff(tuple(sentence[:i+1]));\n if(score != 0):\n s += math.log(score);\n\n return s",
"def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score"
] | [
"0.69772524",
"0.61759144",
"0.61583227",
"0.61080706",
"0.6031079",
"0.59640235",
"0.57921195",
"0.5750104",
"0.57479537",
"0.5734042",
"0.5672754",
"0.5670753",
"0.566068",
"0.5644736",
"0.5632639",
"0.5631451",
"0.5594872",
"0.559045",
"0.5566701",
"0.55663264",
"0.5546832",
"0.5533839",
"0.55271715",
"0.54641473",
"0.5463559",
"0.54608595",
"0.538545",
"0.5376117",
"0.5370085",
"0.53462446"
] | 0.6812981 | 1 |
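A minimal sketch of the idea behind the `average_level` entry above, assuming a plain RMS-based dB computation in place of the toolbox's `utils.dbspl` and `load_files` (both names and the full-scale reference are assumptions, not the toolbox's real API):

```python
# Sketch only: average per-sentence levels in dB, using an RMS level
# referenced to full scale (the real toolbox applies its own reference level).
import numpy as np

def dbspl(x, ref=1.0):
    # RMS of the signal expressed in dB relative to `ref` (assumed reference)
    rms = np.sqrt(np.mean(np.square(x)))
    return 20.0 * np.log10(rms / ref)

# Three dummy "sentences" at different gains stand in for load_files().
sentences = [g * np.random.randn(16000) for g in (0.05, 0.1, 0.2)]
levels = [dbspl(s) for s in sentences]
print(np.mean(levels), np.std(levels))
```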
Instantiate a new TypeDefer | def __init__(self, raw_defer: Dict):
self.kind = raw_defer.get("kind")
self.name = raw_defer.get("name")
self.of_type: TypeDefer = TypeDefer(raw_defer.get("ofType")) if raw_defer.get("ofType") is not None else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def instantiate():\n d = defer.Deferred()",
"def __init__(self, type_):\n\n self.type = type_",
"def Instance(self) -> TypeManager:",
"def __call__(self, *args):\n return TypeCall(self, args)",
"def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t",
"def deferredInit(deferredName):\n def _deferredInit(func):\n def __deferredInit(self, *args, **kwargs):\n initDeferred = None\n if(hasattr(self, deferredName)):\n initDeferred = getattr(self, deferredName)\n if(initDeferred.called):\n return defer.maybeDeferred(func, self, *args, **kwargs)\n else:\n raise RuntimeError(\"%s doesn't define the Deferred attribute `%s`.\" % (self.__class__.__name__, deferredName))\n \n def _finish(result):\n return func(self, *args, **kwargs)\n \n def _finish_error(failure):\n print '_finish_err: %s' % failure\n \n resultDeferred = defer.Deferred()\n resultDeferred.addCallbacks(_finish, _finish_error)\n \n initDeferred.chainDeferred(resultDeferred)\n \n return resultDeferred\n return __deferredInit\n \n # if it's a callable, that means there's no arguments\n # so we use the defaultname for the instance's deferred\n if(callable(deferredName)):\n func = deferredName\n deferredName = 'initDeferred'\n return _deferredInit(func)\n \n return _deferredInit",
"def __constructor__(self):\n return type(self)",
"def __init__(self, type=np.float64):\n self._inst = None\n self._type = type",
"def type_instance(typedef):\n if subclassof(typedef, Type):\n # Type class passed, create no-arg instance\n typedef = typedef()\n return typedef",
"def observe(self) -> \"defer.Deferred[_T]\":\n ...",
"def register(dmm, typecls):\n def wraps(fn):\n dmm.register(typecls, fn)\n return fn\n\n return wraps",
"def __init__(self):\n self.instantiable = {self: self}\n self.is_generic = False",
"def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n self.fast_validate = (12, aType)",
"def get_declaration(self, type_):\n return self.__apply_sequence(type_)",
"def __init__(self, field: FT):\n self.field: Final[FT] = field",
"def Reference(cls):\n return type(cls.__name__, (Typed, ), {\"type\": cls})",
"def define(**names):\n module = initialize(2)\n __deferred_definitions__ = module.__deferred_definitions__\n for name, specifier in names.items():\n __deferred_definitions__[name] = Deferred(name, specifier)",
"def defer(self, *args, **kwargs):\n return DeferredRoutineCall(self, *args, **kwargs)",
"def __init__(self, data_type=None):\n self.type = data_type",
"def _instantiate(cls, **kwargs):\n return cls(**kwargs)",
"def __init__(self, type, name, verbose):\n self._name = name\n self._verbose = verbose\n self.__fdev_id = _cantera.flowdev_new(type)",
"def factory(type_or_name: str | type, singleton: bool = False) -> Callable[[T], T]:\n\n def _decorator(original: T) -> T:\n setattr(original, 'factory_provider', (type_or_name, singleton))\n return original\n\n return _decorator",
"def __init_subclass__(cls, type_: CalibrationTargetType):\n cls._types[type_] = cls",
"def __init__(\n self,\n type_: Type[T],\n *,\n type_is_generic_self: bool = False,\n coerce: bool = False,\n compcoef: Optional[float] = None,\n inheritable: bool = True,\n simpledelta: bool = True,\n merge_fn: MergeFunction = default_field_merge,\n ephemeral: bool = False,\n weak_ref: bool = False,\n allow_ddl_set: bool = False,\n describe_visibility: DescribeVisibilityPolicy = (\n DescribeVisibilityPolicy.SHOW_IF_EXPLICIT),\n ddl_identity: bool = False,\n aux_cmd_data: bool = False,\n special_ddl_syntax: bool = False,\n reflection_method: ReflectionMethod = ReflectionMethod.REGULAR,\n reflection_proxy: Optional[Tuple[str, str]] = None,\n name: Optional[str] = None,\n reflection_name: Optional[str] = None,\n patch_level: int = -1,\n **kwargs: Any,\n ) -> None:\n if not isinstance(type_, type):\n raise ValueError(f'{type_!r} is not a type')\n\n self.type = type_\n self.type_is_generic_self = type_is_generic_self\n self.coerce = coerce\n self.allow_ddl_set = allow_ddl_set\n self.ddl_identity = ddl_identity\n self.aux_cmd_data = aux_cmd_data\n self.special_ddl_syntax = special_ddl_syntax\n self.describe_visibility = describe_visibility\n\n self.compcoef = compcoef\n self.inheritable = inheritable\n self.simpledelta = simpledelta\n self.weak_ref = weak_ref\n self.reflection_method = reflection_method\n self.reflection_proxy = reflection_proxy\n self.is_reducible = issubclass(type_, s_abc.Reducible)\n self.patch_level = patch_level\n\n if name is not None:\n self.name = name\n if reflection_name is not None:\n self.sname = reflection_name\n\n if (\n merge_fn is default_field_merge\n and callable(\n type_merge_fn := getattr(self.type, 'merge_values', None)\n )\n ):\n self.merge_fn = type_merge_fn\n else:\n self.merge_fn = merge_fn\n\n self.ephemeral = ephemeral",
"def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type",
"def wrapped_unit(cls) -> MyType:\n MyType.clear_interning_cache()\n return MyType.decorate(MyUnit)",
"def __init__(self, line, context):\n match = Ftype_type_decl.type_match(line)\n if match is None:\n raise ParseSyntaxError(\"type declaration\", token=line, context=context)\n else:\n self._match_len = len(match.group(0))\n self._class = match.group(1)\n self._typestr = match.group(2)\n self._kind = self.typestr\n # End if",
"def type(cls):",
"def fountToDeferred(fount):\n d = Deferred(fount.stopFlow)\n fount.flowTo(_DeferredAggregatingDrain(d))\n return d",
"def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n try:\n self.fast_validate = CoercableTypes[aType]\n except:\n self.fast_validate = (11, aType)"
] | [
"0.64138234",
"0.54614604",
"0.54489744",
"0.54139173",
"0.5383102",
"0.5300838",
"0.52134734",
"0.5191931",
"0.5120031",
"0.50648534",
"0.49831468",
"0.49792293",
"0.49551898",
"0.49469346",
"0.49264267",
"0.49214688",
"0.48970965",
"0.48778772",
"0.48720497",
"0.48700586",
"0.4867235",
"0.48640287",
"0.48631537",
"0.48529202",
"0.4852579",
"0.48455602",
"0.48381415",
"0.4833245",
"0.4824423",
"0.48197848"
] | 0.7597686 | 0 |
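A hypothetical usage of the `TypeDefer` wrapper defined in the record above, fed a hand-written introspection fragment (the nested `ofType` encoding of a GraphQL `[String!]` type); the unwrapping loop is an assumption about how callers typically walk the chain:

```python
# Hand-written introspection fragment for a GraphQL "[String!]" type.
raw = {
    "kind": "LIST",
    "name": None,
    "ofType": {
        "kind": "NON_NULL",
        "name": None,
        "ofType": {"kind": "SCALAR", "name": "String", "ofType": None},
    },
}

defer = TypeDefer(raw)  # TypeDefer as defined in the record above

# Walk the ofType chain down to the innermost named type.
inner = defer
while inner.of_type is not None:
    inner = inner.of_type

print(defer.kind, inner.name)  # -> LIST String
```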
Create a new Schema instance. First, the schema is loaded synchronously from the endpoint and stored as raw JSON for further processing. Then the request types ("Query", "Mutation" and "Subscription") are parsed, followed by the schema types and directives. | def __init__(self, endpoint: str, transporter: Transporter, settings: Settings, cache: Optional[Cache]):
self.endpoint = endpoint
self.transport = transporter
self.settings = settings
self.cache = cache
if self.cache is not None:
schema_introspection = self.cache.retrieve(self.endpoint, SCHEMA_KEY)
if schema_introspection is None:
schema_introspection = self.introspect_schema(endpoint, transporter)
self.cache.store(self.endpoint, SCHEMA_KEY, schema_introspection)
else:
schema_introspection = self.introspect_schema(endpoint, transporter)
# graphql schema properties
self.raw_schema = schema_introspection.get(self.settings.default_response_key, {}).get("__schema", {})
self.query_type: str = self.parse_query_type(self.raw_schema)
self.mutation_type: str = self.parse_mutation_type(self.raw_schema)
self.subscription_type: str = self.parse_subscription_type(self.raw_schema)
self.types: Dict[str, SchemaType] = self.parse_types(self.raw_schema.get("types", []))
self.directives: Dict[str, Directive] = self.parse_directives(self.raw_schema.get("directives", []))
# custom schema properties
self.queries: Tuple[Operation] = self.parse_operations(self.query_type)
self.mutations: Tuple[Operation] = self.parse_operations(self.mutation_type)
self.subscriptions: Tuple[Operation] = self.parse_operations(self.subscription_type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")",
"def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)",
"def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema",
"def introspect_schema(cls, endpoint: str, transport: Transporter) -> Dict:\n return request_schema(endpoint, transport.session)",
"async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema",
"def get_schema_cls() -> t.Any:\n return SignupRequestSchema",
"def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})",
"def _get_schema(self):\n self._pick()\n return Schema()",
"def __init__(\n self,\n name,\n namespace,\n fields=None,\n make_fields=None,\n names=None,\n record_type=RECORD,\n doc=None,\n other_props=None\n ):\n if record_type == REQUEST:\n # Protocol requests are not named:\n super(RecordSchema, self).__init__(\n data_type=REQUEST,\n other_props=other_props,\n )\n elif record_type in [RECORD, ERROR]:\n # Register this record name in the tracker:\n super(RecordSchema, self).__init__(\n data_type=record_type,\n name=name,\n namespace=namespace,\n names=names,\n other_props=other_props,\n )\n else:\n raise SchemaParseException(\n 'Invalid record type: %r.' % record_type)\n\n if record_type in [RECORD, ERROR]:\n avro_name = names.get_name(name=name, namespace=namespace)\n nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)\n elif record_type == REQUEST:\n # Protocol request has no name: no need to change default namespace:\n nested_names = names\n\n if fields is None:\n fields = make_fields(names=nested_names)\n else:\n assert make_fields is None\n self._fields = tuple(fields)\n\n self._field_map = RecordSchema._make_field_map(self._fields)\n\n self._props['fields'] = fields\n if doc is not None:\n self._props['doc'] = doc",
"def _CreateSchema(\n self,\n cls: Optional[TypeHinter],\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n if cls is None:\n raise ValueError(\"Trying to extract schema of None.\")\n\n if (inspect.isclass(cls) and issubclass(cls, rdf_structs.RDFProtoStruct)):\n cls = cls.protobuf.DESCRIPTOR\n\n type_name = _GetTypeName(cls)\n # \"Primitive\" types should be already present in `self.schema_objs`.\n if type_name in self.schema_objs:\n return\n\n if type_name in visiting:\n # Dependency cycle.\n return\n\n if isinstance(cls, FieldDescriptor):\n if _IsMapField(cls):\n self._CreateMapFieldSchema(cls, visiting)\n return\n\n descriptor = cls.message_type or cls.enum_type\n if descriptor:\n self._CreateSchema(descriptor, visiting)\n # else, this field is of a primitive type whose schema is already created.\n\n return\n\n if isinstance(cls, Descriptor):\n self._CreateMessageSchema(cls, visiting)\n return\n\n if isinstance(cls, EnumDescriptor):\n self._CreateEnumSchema(cls)\n return\n\n raise TypeError(f\"Don't know how to handle type \\\"{type_name}\\\" \"\n f\"which is not a protobuf message Descriptor, \"\n f\"nor an EnumDescriptor, nor a primitive type.\")",
"def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema",
"def __init__(self, schema=None):\n self.schema = schema or {}",
"def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema",
"def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema",
"def schema(self):\n # NOTE This is exactly the same as the other thing.\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }",
"def create_schema(self, schema: str):\n return",
"def __init__(self, endpoint: str, ws_endpoint: str = None, transporter=None, settings=None, cache=None):\n if not endpoint:\n raise ValueError(\"No Endpoint specified.\")\n self.endpoint = endpoint\n\n if not ws_endpoint:\n ws_endpoint = adapt_websocket_endpoint(endpoint)\n self.ws_endpoint = ws_endpoint\n\n self.transporter: Transporter = transporter or Transporter()\n self.settings: Settings = settings or Settings()\n self.cache: Optional[Cache] = cache\n\n self._query_services: Optional[QueryServiceProxy] = None\n self._mutation_services: Optional[MutationServiceProxy] = None\n self._subscription_services: Optional[SubscriptionServiceProxy] = None\n\n self.schema = Schema(self.endpoint, self.transporter, self.settings, self.cache)",
"def endpoint_schema(endpoint, extra_definitions={}):\n # load common schema template and update metadata\n schema = common.load_json(\"./templates/provider/endpoint.json\")\n schema[\"$id\"] = schema[\"$id\"].replace(\"endpoint.json\", f\"{endpoint}.json\")\n schema[\"title\"] = schema[\"title\"].replace(\"endpoint\", endpoint)\n\n # merge custom definitions with relevant common definitions\n definitions = common.load_definitions(\n \"string\",\n \"timestamp\",\n \"uuid\",\n \"version\",\n common.MDS_FEATURE_POINT\n )\n definitions.update(common.point_definition())\n definitions.update(extra_definitions)\n\n endpoint_schema = common.load_json(f\"./templates/provider/{endpoint}.json\")\n\n # for all but stops, merge standard vehicle info with items schema\n if endpoint not in [\"stops\"]:\n items = endpoint_schema[endpoint][\"items\"]\n vehicle = common.vehicle_definition()\n items[\"required\"] = vehicle[\"required\"] + items[\"required\"]\n items[\"properties\"] = { **vehicle[\"properties\"], **items[\"properties\"] }\n definitions.update(common.load_definitions(\"propulsion_type\", \"propulsion_types\", \"vehicle_type\"))\n\n # merge endpoint schema into the endpoint template\n data_schema = schema[\"properties\"][\"data\"]\n data_schema[\"required\"] = [endpoint]\n data_schema[\"properties\"] = endpoint_schema\n\n # insert definitions\n schema[\"definitions\"].update(definitions)\n\n return schema",
"def get_schema_structure(self) -> SchemaStructure:\n constructors: List[CombinatorData] = list(\n self._combinator_map.values()\n )\n methods: List[FunctionData] = list(\n self._function_map.values()\n )\n\n return SchemaStructure(constructors=constructors, methods=methods)",
"def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))",
"def _CreateMessageSchema(\n self,\n descriptor: Descriptor,\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n type_name = _GetTypeName(descriptor)\n\n properties = dict()\n visiting.add(type_name)\n\n # Create schemas for the fields' types.\n for field_descriptor in descriptor.fields:\n self._CreateSchema(field_descriptor, visiting)\n field_name = casing.SnakeToCamel(field_descriptor.name)\n\n properties[field_name] = self._GetDescribedSchema(field_descriptor)\n\n visiting.remove(type_name)\n\n self.schema_objs[type_name] = cast(MessageSchema, {\n \"type\": \"object\",\n \"properties\": properties,\n })",
"def getSchema(cls):\n pass",
"def __init__(self, schema=None):\n self._dict = {}\n self.schema = schema",
"def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass",
"def from_schema(cls, tag, schema):\n cls.tag = tag\n cls.schema = schema\n cls._parser = generate_parser(tag, schema)\n return cls",
"def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))",
"def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n self._duration = None\n self._messages = None\n self._tags = None",
"def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n self._disabled = None\n self._id = None",
"def __new__(cls, **kwargs):\n schema = type(\"Schema\", (cls,), {\"__doc__\": cls.__doc__})\n schema.__class_attrs__ = OrderedDict()\n schema.__attrs__ = OrderedDict()\n for name, attr in kwargs.items():\n if not hasattr(attr, \"name\"):\n attr.name = name\n schema.__class_attrs__[attr.name] = attr\n schema.__attrs__[attr.name] = attr\n return schema",
"def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))"
] | [
"0.62221414",
"0.6016679",
"0.5968359",
"0.5946103",
"0.5910788",
"0.59025574",
"0.5863402",
"0.58183753",
"0.5814539",
"0.57973915",
"0.57765687",
"0.5719145",
"0.57047343",
"0.5694497",
"0.565712",
"0.56565285",
"0.56380844",
"0.56073123",
"0.55723673",
"0.5570385",
"0.55048895",
"0.54854393",
"0.5479786",
"0.54777753",
"0.54689896",
"0.5460747",
"0.5459226",
"0.5449624",
"0.5425599",
"0.5399636"
] | 0.67564994 | 0 |
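The constructor in the record above only calls `retrieve` and `store` on its optional cache; a toy in-memory stand-in with that interface might look as follows (the method names mirror the calls in the record, everything else is an assumption rather than the library's real `Cache` class):

```python
from typing import Any, Dict, Optional, Tuple

class InMemoryCache:
    """Toy cache keyed by (endpoint, key); not the library's actual Cache implementation."""

    def __init__(self) -> None:
        self._store: Dict[Tuple[str, str], Any] = {}

    def retrieve(self, endpoint: str, key: str) -> Optional[Any]:
        # Return the cached introspection result, or None on a miss.
        return self._store.get((endpoint, key))

    def store(self, endpoint: str, key: str, value: Any) -> None:
        self._store[(endpoint, key)] = value
```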
Parse the query type from the root schema. This returns either a string or None; the latter when the endpoint does not support queries. | def parse_query_type(raw_schema: Dict) -> Union[str, None]:
return Schema.parse_operation_type(raw_schema, "queryType") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")",
"def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema",
"def _schema_type(self) -> Optional[type]:\n return None",
"def parse_query_spec(self, query_spec):\n try:\n return self.QUERY_TYPE_MAP[query_spec['type']](query_spec)\n except KeyError:\n raise exceptions.QueryError('invalid query spec')\n except TypeError:\n raise exceptions.QueryError('Query must be a dictionary specifyng type and value of the query')",
"def _schema_type(self) -> Optional[type]:\n pass",
"def query(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"query\")",
"def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")",
"def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']",
"def _schema_type(self) -> Optional[type]:\n return EpisodeSchema",
"def typ(self) -> Optional[str]:\n return self.get(\"/Type\")",
"def _schema_type(self) -> Optional[type]:\n return IndexSchema",
"def parse_query(self, query_dict):\n if query_dict is None:\n return xapian.Query('') # Match everything\n elif query_dict == {}:\n return xapian.Query() # Match nothing\n\n query_tree = self.build_query_tree(query_dict)\n\n return query_tree.to_query(self.schema, self.database)",
"def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")",
"def _schema_type(self) -> Optional[type]:\n return MovieSchema",
"def result_type(self) -> Optional[str]:\n if hasattr(self, \"_result_type\"):\n return self._result_type\n _args: list[Arg] = []\n _ctx = self._select(\"resultType\", _args)\n return _ctx.execute_sync(Optional[str])",
"def base_query(self) -> Optional[str]:\n return pulumi.get(self, \"base_query\")",
"def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")",
"def _schema_type(self) -> Optional[type]:\n return SeriesSchema",
"def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")",
"def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")",
"def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, response_data, Operation.Query.value.default_code)",
"def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")",
"def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")",
"def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))",
"def query_schema(self, name, param):\n\n alias, name, need_list = self.parse_entry(name)\n\n if not name:\n result = self.process_multiple_query(need_list, param)\n else:\n result = self.process_single_query(name, need_list, param)\n return alias, result",
"def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]",
"def simd_type(self):\n for node in self.query_nodes:\n return node.get('infos').get('system_configurations').get('simd_type')\n raise Exception(\"No query node found\")",
"def infer_value_type(self, value):\n if isinstance(value, str):\n if self.TIMESTAMP_MATCHER.match(value):\n return 'TIMESTAMP'\n elif self.DATE_MATCHER.match(value):\n return 'DATE'\n elif self.TIME_MATCHER.match(value):\n return 'TIME'\n elif not self.quoted_values_are_strings:\n # Implement the same type inference algorithm as 'bq load' for\n # quoted values that look like ints, floats or bools.\n if self.INTEGER_MATCHER.match(value):\n if (int(value) < self.INTEGER_MIN_VALUE\n or self.INTEGER_MAX_VALUE < int(value)):\n return 'QFLOAT' # quoted float\n else:\n return 'QINTEGER' # quoted integer\n elif self.FLOAT_MATCHER.match(value):\n return 'QFLOAT' # quoted float\n elif value.lower() in ['true', 'false']:\n return 'QBOOLEAN' # quoted boolean\n else:\n return 'STRING'\n else:\n return 'STRING'\n # Python 'bool' is a subclass of 'int' so we must check it first\n elif isinstance(value, bool):\n return 'BOOLEAN'\n elif isinstance(value, int):\n if value < self.INTEGER_MIN_VALUE or self.INTEGER_MAX_VALUE < value:\n return 'FLOAT'\n else:\n return 'INTEGER'\n elif isinstance(value, float):\n return 'FLOAT'\n elif value is None:\n return '__null__'\n elif isinstance(value, dict):\n if value:\n return 'RECORD'\n else:\n return '__empty_record__'\n elif isinstance(value, list):\n if value:\n return '__array__'\n else:\n return '__empty_array__'\n else:\n raise Exception(\n f'Unsupported node type: {type(value)} (should not happen)'\n )",
"def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None",
"def wql_istype_query(self, node, nodetype):\n self.tr_id = get_tr_id()\n if isinstance(node, Literal) or isinstance(type, Literal):\n return None # No literals allowed here\n xml_msg = self._create_wql_istype_msg(self.tr_id,\n node, type)\n self.conn.connect()\n self.conn.send(xml_msg)\n response = self.conn.receive()\n self._check_error(response)\n if \"results\" in response:\n if response[\"results\"] == \"TRUE\":\n return True\n else:\n return False\n else:\n raise SIBError(M3_SIB_ERROR)"
] | [
"0.70593387",
"0.5939255",
"0.5924762",
"0.5911261",
"0.5840816",
"0.57200265",
"0.57065624",
"0.5640995",
"0.5613772",
"0.5587588",
"0.55517954",
"0.54139596",
"0.53744495",
"0.5370533",
"0.53518116",
"0.5295712",
"0.52832526",
"0.5279426",
"0.52663785",
"0.52663785",
"0.52650476",
"0.52557695",
"0.52557695",
"0.52380216",
"0.5224146",
"0.5205046",
"0.518808",
"0.51812875",
"0.5163579",
"0.51416224"
] | 0.8196511 | 0 |
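A quick sketch of the expected behaviour of `parse_query_type` from the record above on hand-written root-schema fragments (the fragments are made up for illustration, and `Schema` is assumed to expose the helper as a static method):

```python
# Query support present: the root schema names its query type.
print(Schema.parse_query_type({"queryType": {"name": "Query"}}))  # -> "Query"

# No query support: the endpoint reports no queryType, so None comes back.
print(Schema.parse_query_type({"queryType": None}))               # -> None
```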
Parse the mutation type from the root schema. This returns either a string or None; the latter when the endpoint does not support mutations. | def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:
return Schema.parse_operation_type(raw_schema, "mutationType") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")",
"def set_mutation_type(self, mut_type=''):\n if mut_type:\n # specified mutation type\n self.mutation_type = mut_type\n else:\n # interpret mutation type from attributes\n if not self.is_valid:\n # does not correctly fall into a category\n self.mutation_type = 'not valid'\n elif self.unknown_effect:\n self.mutation_type = 'unknown effect'\n elif self.is_missing_info:\n self.mutation_type = 'missing'\n elif self.is_substitution:\n self.mutation_type = 'substitution'\n elif self.is_deletion:\n self.mutation_type = 'deletion'\n elif self.is_insertion:\n self.mutation_type = 'insertion'\n\n # check if mutation at splice site\n self.__set_splice_mutation()",
"def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")",
"def _is_mutation_type(data):\n try:\n QuiverMutationType(data)\n return True\n except Exception:\n return False",
"def _mutation_type_error(data):\n if data[2] is None:\n del data[2]\n return_str = str(data) + ' is not a valid quiver mutation type'\n return_str += '\\n Finite types have the form [ \\'?\\', n ] for type ? and rank n'\n return_str += '\\n Affine type A has the form [ \\'A\\', [ i, j ], 1 ] for rank i+j'\n return_str += '\\n Affine type ? has the form [ \\'?\\', k, \\pm 1 ] for rank k+1'\n return_str += '\\n Elliptic type ? has the form [ \\'?\\', k, [i, j] ] (1 <= i,j <= 3) for rank k+2'\n return_str += '\\n For correct syntax in other types, please consult the documentation.'\n\n raise ValueError(return_str)",
"def get_random_mutation_type(self):\n return self.random_state.choice(\n self.mutation_types, p=self.mutation_probabilities)",
"def _schema_type(self) -> Optional[type]:\n return None",
"def _schema_type(self) -> Optional[type]:\n pass",
"def _schema_type(self) -> Optional[type]:\n return EpisodeSchema",
"def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))",
"def token_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"token_type\")",
"def _schema_type(self) -> Optional[type]:\n return MoviePanelMetaSchema",
"def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")",
"def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")",
"def Type(self):\n if self.currtok[1].name in {\"INT\", \"FLOAT\", \"BOOLEAN\"}:\n type = self.currtok[0]\n self.currtok = next(self.tg)\n return type\n raise SLUCSyntaxError(\"ERROR: Unexpected token {0} on line {1}\".\n format(self.currtok[1], str(self.currtok[2] - 1)))",
"def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']",
"def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")",
"def _schema_type(self) -> Optional[type]:\n return MovieSchema",
"def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None",
"def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")",
"def typ(self) -> Optional[str]:\n return self.get(\"/Type\")",
"def mutations_node(self):\n return self._mutations_node",
"def _repr_(self):\n return \"QuiverMutationType\"",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value",
"def action_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_type\")",
"def event_data_content_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"event_data_content_type\")"
] | [
"0.6133898",
"0.5771993",
"0.55856854",
"0.55424494",
"0.55416554",
"0.5471137",
"0.52651036",
"0.52617633",
"0.5190931",
"0.5140541",
"0.50858825",
"0.5052965",
"0.5041517",
"0.5041517",
"0.50238866",
"0.50197643",
"0.49824637",
"0.4972244",
"0.4961925",
"0.49438107",
"0.49402195",
"0.49166182",
"0.48954162",
"0.48913866",
"0.48913866",
"0.48913866",
"0.48913866",
"0.48912144",
"0.48700345",
"0.48580256"
] | 0.80457693 | 0 |
Parse the subscription type from the root schema. This returns either a string or None; the latter when the endpoint does not support subscriptions. | def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:
return Schema.parse_operation_type(raw_schema, "subscriptionType") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subscription_type(self) -> str:\n return pulumi.get(self, \"subscription_type\")",
"def typ(self) -> Optional[str]:\n return self.get(\"/Type\")",
"def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None",
"def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")",
"def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema",
"def _schema_type(self) -> Optional[type]:\n return None",
"def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]",
"def get_xsd_type(self, item):\n if not self.xsd_types or isinstance(self.xsd_types, AbstractSchemaProxy):\n return\n elif isinstance(item, str):\n xsd_type = self.xsd_types.get(item)\n elif isinstance(item, AttributeNode):\n xsd_type = self.xsd_types.get(item[0])\n else:\n xsd_type = self.xsd_types.get(item.tag)\n\n if not xsd_type:\n return\n elif not isinstance(xsd_type, list):\n return xsd_type\n elif isinstance(item, AttributeNode):\n for x in xsd_type:\n if x.is_valid(item[1]):\n return x\n elif not isinstance(item, str):\n for x in xsd_type:\n if x.is_simple():\n if x.is_valid(item.text):\n return x\n elif x.is_valid(item):\n return x\n\n return xsd_type[0]",
"def _schema_type(self) -> Optional[type]:\n pass",
"def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))",
"def prefix_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix_type\")",
"def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']",
"def get_resource_type(self):\n category = self.get_first_category(DATA_KIND_SCHEME)\n if category is not None:\n return category.label\n else:\n return None",
"def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")",
"def _get_resource_type(self, resource_path):\n remove_query = resource_path.split('?')[0] # remove query parameters\n remove_slashes = remove_query.strip('/') # strip leading and trailing slashes\n return remove_slashes.rstrip('s') # remove trailing 's'",
"def _schema_type(self) -> Optional[type]:\n return EpisodeSchema",
"def _schema_type(self) -> Optional[type]:\n return SeriesSchema",
"def discover_schema(node):\n xmlns = node.get('__xmlns__', None)\n\n if xmlns:\n node['Schema'] = 'Unknown'\n if xmlns.startswith('smpte_stereo'):\n node['Schema'] = 'SMPTE Stereoscopic'\n elif xmlns.startswith('smpte'):\n node['Schema'] = 'SMPTE'\n elif xmlns.startswith('interop'):\n node['Schema'] = 'Interop'\n elif xmlns.startswith('atmos'):\n node['Schema'] = 'Atmos'",
"def get_unsubscription_type(self):\n unsubscription_types = dict(UNSUBSCRIPTION_TYPE_CHOICES)\n return unsubscription_types.get(self.unsubscription_type, \"N/A\")",
"def service_type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_type_name\")",
"def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")",
"def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")",
"def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")",
"def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")",
"def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> Optional[str]:\n return pulumi.get(self, \"type\")"
] | [
"0.660987",
"0.5427683",
"0.5366075",
"0.5344622",
"0.531378",
"0.5235341",
"0.52084655",
"0.514429",
"0.514082",
"0.5107135",
"0.5022559",
"0.4995361",
"0.4984606",
"0.49678668",
"0.49628568",
"0.49607036",
"0.49584213",
"0.4956243",
"0.49261507",
"0.49177787",
"0.49068826",
"0.49068826",
"0.48645335",
"0.48645335",
"0.48605448",
"0.48605448",
"0.48605448",
"0.48605448",
"0.48605448",
"0.48605448"
] | 0.8100664 | 0 |
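Because the parser in the record above returns None when the endpoint exposes no `subscriptionType`, a caller can branch on subscription support directly; a small sketch with a made-up fragment:

```python
sub_type = Schema.parse_subscription_type({"subscriptionType": None})
if sub_type is None:
    print("endpoint does not support subscriptions")
else:
    print(f"subscriptions are served by the {sub_type} type")
```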
Parse an operation type from the root schema. This returns either a string or None; the latter when the endpoint does not support the given operation. | def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:
query_type = raw_schema.get(op_type, {})
if not query_type:
return None
return query_type.get("name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")",
"def get_operation_type(self, operation_name):\n # type: (Optional[str]) -> Optional[str]\n operations_map = self.operations_map\n if not operation_name and len(operations_map) == 1:\n return next(iter(operations_map.values()))\n return operations_map.get(operation_name)",
"def parse_query_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"queryType\")",
"def parse_operations(self, operation_type: str) -> Tuple[Operation]:\n if operation_type is None:\n return tuple()\n query_type: SchemaType = self.types.get(operation_type)\n if query_type is None:\n return tuple()\n return tuple([Operation(f, self.settings) for f in query_type.fields])",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")",
"def get_op_type(self):\n return self.op_type",
"def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")",
"def _get_singa_op_type(cls, op):\n return type(op).__name__",
"def typ(self) -> Optional[str]:\n return self.get(\"/Type\")",
"def unaryop_type(cls, op):\n return None",
"def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown operation\")",
"def parse_subscription_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"subscriptionType\")",
"def get_op(self, op_complete_url):\n url_parsed = urlsplit(op_complete_url)\n op_url = url_parsed.path\n\n conf, op = self.best_match(op_url)\n if op is not None:\n return Operation(\n op_complete_url,\n op,\n conf[\"conf\"][op],\n conf[\"tp\"],\n conf[\"sparql_http_method\"],\n conf[\"addon\"],\n )\n else:\n sc = 404\n return (\n sc,\n \"HTTP status code %s: the operation requested does not exist\" % sc,\n \"text/plain\",\n )",
"def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None",
"def operation_id(self) -> Optional[str]:\n return pulumi.get(self, \"operation_id\")",
"def get_operation_id(self):\n operation_id = self.yaml_parser.object.get('operationId', None)\n if not operation_id:\n operation_id = self.method + \"-\" + self.path.strip(\"/\").replace(\"/\", \"-\")\n\n return operation_id",
"def get_schema_type(arg_schema: Dict[str, Any]) -> str:\n return arg_schema['schema']['type']",
"def _schema_type(self) -> Optional[type]:\n return None",
"def _OpTypeByName(op_name):\n op_name_to_type = {\n 'REPLACE': common.OpType.REPLACE,\n 'REPLACE_BZ': common.OpType.REPLACE_BZ,\n 'MOVE': common.OpType.MOVE,\n 'BSDIFF': common.OpType.BSDIFF,\n 'SOURCE_COPY': common.OpType.SOURCE_COPY,\n 'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,\n 'ZERO': common.OpType.ZERO,\n 'DISCARD': common.OpType.DISCARD,\n 'REPLACE_XZ': common.OpType.REPLACE_XZ,\n 'PUFFDIFF': common.OpType.PUFFDIFF,\n 'BROTLI_BSDIFF': common.OpType.BROTLI_BSDIFF,\n }\n return op_name_to_type[op_name]",
"def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)",
"def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")",
"def endpoint_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_type\")",
"def operation2string(self, operation):\n op = \"Custom\"\n if operation == QNetworkAccessManager.HeadOperation:\n op = \"HEAD\"\n elif operation == QNetworkAccessManager.GetOperation:\n op = \"GET\"\n elif operation == QNetworkAccessManager.PutOperation:\n op = \"PUT\"\n elif operation == QNetworkAccessManager.PostOperation:\n op = \"POST\"\n elif operation == QNetworkAccessManager.DeleteOperation:\n op = \"DELETE\"\n return op",
"def _schema_type(self) -> Optional[type]:\n pass",
"def get_operation(operation):\n if operation == 'query':\n return banking_pb2.QUERY\n if operation == 'deposit':\n return banking_pb2.DEPOSIT\n if operation == 'withdraw':\n return banking_pb2.WITHDRAW",
"def deserialize(cls, payload):\n return operations_pb2.Operation.FromString(payload)",
"def action_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_type\")"
] | [
"0.67735726",
"0.6728999",
"0.6351823",
"0.63367105",
"0.59829354",
"0.59829354",
"0.59829354",
"0.59829354",
"0.5836189",
"0.5831232",
"0.5829081",
"0.5590442",
"0.5587668",
"0.55367893",
"0.5521317",
"0.55096656",
"0.5479498",
"0.5452105",
"0.5383543",
"0.5341075",
"0.530389",
"0.5299449",
"0.5298689",
"0.52676916",
"0.52676916",
"0.52222395",
"0.52138656",
"0.520446",
"0.520236",
"0.51969385"
] | 0.79709095 | 0 |
Parse all operations for a given operation type. | def parse_operations(self, operation_type: str) -> Tuple[Operation]:
if operation_type is None:
return tuple()
query_type: SchemaType = self.types.get(operation_type)
if query_type is None:
return tuple()
return tuple([Operation(f, self.settings) for f in query_type.fields]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown operation\")",
"def __operations(self, conf):\n result = \"\"\"## Operations [back to top](#toc)\nThe operations that this API implements are:\n\"\"\"\n ops = \"\\n\"\n\n for op in conf[\"conf_json\"][1:]:\n params = []\n for p in findall(PARAM_NAME, op[\"url\"]):\n p_type = \"str\"\n p_shape = \".+\"\n if p in op:\n p_type, p_shape = findall(\"^\\s*([^\\(]+)\\((.+)\\)\\s*$\", op[p])[0]\n\n params.append(\n \"<em>%s</em>: type <em>%s</em>, regular expression shape <code>%s</code>\"\n % (p, p_type, p_shape)\n )\n result += \"\\n* [%s](#%s): %s\" % (\n op[\"url\"],\n op[\"url\"],\n op[\"description\"].split(\"\\n\")[0],\n )\n ops += \"\"\"<div id=\"%s\">\n<h3>%s <a href=\"#operations\">back to operations</a></h3>\n\n%s\n\n<p class=\"attr\"><strong>Accepted HTTP method(s)</strong> <span class=\"attr_val method\">%s</span></p>\n<p class=\"attr params\"><strong>Parameter(s)</strong> <span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Result fields type</strong><span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Example</strong><span class=\"attr_val\"><a target=\"_blank\" href=\"%s\">%s</a></span></p>\n<p class=\"ex attr\"><strong>Exemplar output (in JSON)</strong></p>\n<pre><code>%s</code></pre></div>\"\"\" % (\n op[\"url\"],\n op[\"url\"],\n markdown(op[\"description\"]),\n \", \".join(split(\"\\s+\", op[\"method\"].strip())),\n \"</li><li>\".join(params),\n \", \".join(\n [\n \"%s <em>(%s)</em>\" % (f, t)\n for t, f in findall(FIELD_TYPE_RE, op[\"field_type\"])\n ]\n ),\n conf[\"website\"] + conf[\"base_url\"] + op[\"call\"],\n op[\"call\"],\n op[\"output_json\"],\n )\n return markdown(result) + ops",
"def get_op_types(self):\n return self.cur_config['ops']",
"def _pull_argops(op_dict):\n import inspect\n out = []\n keys = op_dict.keys()\n keys.sort() # Not necessary, but makes scanning the printout easier\n for k in keys:\n # Create a dictionary that will be used to fill the 'code' template\n d = {}\n d[\"enum_name\"] = enum_name = op_dict[k][3:] # <NAME>\n d[\"funct_name\"] = \"%s\" % enum_name.lower() # <name>\n class_name = \"%s4args\" % enum_name\n klass = getattr(_type, class_name, None)\n if klass is None:\n # This operation takes no arguments\n d[\"funct_args\"] = d[\"create_args\"] = d[\"set_args\"] = \"\"\n else:\n if type(klass) is dict:\n arg_list = \"enum_value\"\n d[\"create_args\"] = \"args = enum_value\"\n else:\n arg_list = \", \".join(inspect.getargspec(klass.__init__)[0][1:])\n d[\"create_args\"] = \"args = _type.%s(%s)\" % (class_name, arg_list)\n d[\"funct_args\"] = arg_list\n if enum_name.startswith(\"CB_\"):\n d[\"set_args\"] = \"opcb%s=args\" % enum_name.lower()[3:]\n else:\n d[\"set_args\"] = \"op%s=args\" % enum_name.lower()\n if enum_name.startswith(\"CB_\"):\n d[\"argop\"] = \"nfs_cb_argop4\"\n else:\n d[\"argop\"] = \"nfs_argop4\"\n out.append(d)\n return out",
"def operations_map(self):\n # type: () -> Dict[Union[str, None], str]\n document_ast = self.document_ast\n operations = {} # type: Dict[Union[str, None], str]\n for definition in document_ast.definitions:\n if isinstance(definition, ast.OperationDefinition):\n if definition.name:\n operations[definition.name.value] = definition.operation\n else:\n operations[None] = definition.operation\n\n return operations",
"def list_operations():",
"def _extract_operation(self, words):\n operation = self.client\n\n for word in words:\n attr = getattr(operation, word, None)\n if attr is None:\n return operation, words[-1]\n\n operation = attr\n\n return operation, \"\"",
"def parse(self, data):\n val = data.get(self.name, missing)\n if not isinstance(val, dict):\n return (self.operators['$eq'], self.field.deserialize(val)),\n\n return tuple(\n (\n self.operators[op],\n (self.field.deserialize(val)) if op not in self.list_ops else [\n self.field.deserialize(v) for v in val])\n for (op, val) in val.items() if op in self.operators\n )",
"def split_terms(self, operation):\n return [self.format_number(t) for t in operation.split('/')]",
"def find_operations(self, span_kind: str, service: str) -> List[Operation]:\n match_query = [{\"process.serviceName\": service}]\n if span_kind != \"\":\n tag_query = {\"tags\": {\"$elemMatch\": {\"key\": SPAN_KIND_NAME, \"vStr\": span_kind}}}\n match_query.append(tag_query)\n match_stage = {\"$and\": match_query}\n aggregation = [\n {\"$match\": match_stage},\n {\"$unwind\": {\"path\": \"$tags\"}},\n {\"$match\": {\"tags.key\": \"span.kind\"}},\n {\"$group\": {\"_id\": {\"operationName\": \"$operationName\", \"tags\": \"$tags\"}}},\n {\"$replaceRoot\": {\"newRoot\": \"$_id\"}},\n ]\n results = self.collection.aggregate(aggregation)\n return [\n Operation(name=result[\"operationName\"], span_kind=result[\"tags\"][\"vStr\"])\n for result in results\n ]",
"def parse_command(self, command):\n \n #chcek operation type\n mod_type = re.findall('.*(rotate|translate|zoom|make|time).*',command)[0]\n \n #for each operation type recover necessary parameters\n if mod_type == 'rotate':\n angle = int(re.findall('.*rotate by (\\d+).*', command)[0])\n axis = list(map(int,re.findall('.*around \\((\\d+)\\,(\\d+)\\,(\\d+).*', command)[0]))\n\n #if the rotation angle is large split it into 3 to ensure the rotation is accomplished fully\n if angle >= 180:\n new_q = self.q.create_from_axis_angle(angle/3*2*np.pi/360, axis[0], axis[1], axis[2], degrees=False)\n result = [(mod_type, new_q),(mod_type, new_q),(mod_type, new_q)]\n else:\n new_q = self.q.create_from_axis_angle(angle*2*np.pi/360, axis[0], axis[1], axis[2], degrees=False)\n result = (mod_type, new_q)\n\n elif mod_type == 'zoom':\n factor = float(re.findall('.*factor of (\\d*\\.*\\d+).*', command)[0])\n result = (mod_type, factor)\n\n elif mod_type == 'translate':\n translate = np.array(list(map(int,re.findall('.*by \\((\\-*\\d+)\\,(\\-*\\d+)\\,(\\-*\\d+).*', command)[0])))\n result = (mod_type, translate)\n\n elif mod_type == 'make':\n layer = int(re.findall('.*make layer (\\d+).*', command)[0])\n vis_status = command.split()[-1]\n if vis_status == 'invisible':\n result = ('vis', layer, False)\n else:\n result = ('vis', layer, True)\n \n elif mod_type == 'time':\n time_shift = int(re.findall('.*by (\\-*\\d+).*', command)[0])\n result = (mod_type, time_shift)\n return result",
"def parse(seq):\n\tdef eval_expr(z, list):\n\t\treturn reduce(lambda s, (f, x): f(s, x), list, z)\n\tunarg = lambda f: lambda x: f(*x)\n\tconst = lambda x: lambda _: x # like ^^^ in Scala\n\n\ttokval = lambda x: x.value # returns the value of a token\n\top = lambda s: a(Token('Op', s)) >> tokval # return the value if token is Op\n\top_ = lambda s: skip(op(s)) # checks if token is Op and ignores it\n\ttoktype = lambda t: some(lambda x: x.type == t) >> tokval # checks type of token\n\tdef lst(h,t):\n\t\treturn [h,] + t\n\tcall = lambda x: Call(x[0], x[1])\n\n\tmakeop = lambda s, f: op(s) >> const(f)\n\n\tadd = makeop('+', Plus)\n\tsub = makeop('-', Minus)\n\tmul = makeop('*', Times)\n\tdiv = makeop('/', Div)\n\n\tdef make_const(i):\n\t\treturn const(int(i))\n\n\tnumber = toktype('Number') >> Const\n\n\tmul_op = mul | div\n\tadd_op = add | sub\n\n\tfactor = with_forward_decls(lambda:\n\t\tnumber | op_('(') + exp + op_(')') | call)\n\tterm = factor + many(mul_op + factor) >> unarg(eval_expr)\n\texp = term + many(add_op + term) >> unarg(eval_expr)\n\texp_lst = with_forward_decls(lambda:\n\t\texp + many(op_(',') + exp) >> unarg(lst))\n\tcall = toktype('Name') + op_('(') + exp_lst + op_(')') >> call\n\n\treturn exp.parse(seq)",
"def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser",
"def find_node_by_op_type(self, op_type: str) -> List[Operator]:\n return list(self.__op_type_list[op_type])",
"def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")",
"def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")",
"def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")",
"def operations(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"operations\")",
"def parse(self, commands):\n raise NotImplementedError()",
"def _parse_op_node(self, topological_index, node_proto):\n name = node_proto.name.split('/')[-1]\n node_id = name.split('op')[-1]\n name = f'{node_proto.op_type}-op{node_id}'\n node_name = Node.create_node_name(node_proto.scope, name)\n\n if node_proto.full_name and node_proto.op_type != NodeTypeEnum.LOAD.value:\n node_name = node_proto.full_name\n\n if node_proto.full_name and any(\n node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum):\n node_name = Node.create_node_name(scope=node_proto.scope,\n base_name=f'{node_proto.op_type}-op{node_proto.name}')\n\n # The Graphviz plug-in that the UI USES can't handle these special characters.\n check_invalid_character(node_name)\n\n node = Node(name=node_name, node_id=node_id, topological_index=topological_index)\n node.full_name = node_proto.full_name\n node.type = node_proto.op_type\n if getattr(node_proto, 'source_address', None):\n node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address)\n self._parse_attributes(node_proto.attribute, node)\n self._parse_inputs(node_proto.input, node)\n\n node.output_i = node_proto.output_i\n node.scope = node_proto.scope\n node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node)\n\n self._cache_node(node)",
"def group_operations(text: str) -> list:\n \n parts = text_to_parts(text)\n \n def modify_list_group_by(operators):\n i = 0\n len_parts = len(parts)\n while i < len_parts:\n part = parts[i]\n if part[0] in operators:\n part0 = parts.pop(i-1)\n operation = parts.pop(i-1)\n part1 = parts.pop(i-1)\n parts.insert(i-1, \"(\" + part0 + operation + part1 + \")\")\n i -= 1\n len_parts -= 2\n i += 1\n \n for i, part in enumerate(parts):\n if part[0] == \"(\":\n parts[i] = \"\".join(group_operations(part[1:-1]))\n \n modify_list_group_by(\"*/\")\n modify_list_group_by(\"+-\")\n\n return parts",
"def parse(self, ins):\n if type(ins)!=Instr:\n raise Exception(\"You are parsing object that isn't a instruction\")\n self.type = ins.instr\n if ins.instr in control_instructions:\n self.parse_control(ins)\n elif ins.instr in loadstore_instructions:\n self.parse_ls(ins) \n elif ins.instr in intarithm_instructions :\n self.parse_int(ins)\n elif ins.instr in floatarithm_instructions:\n self.parse_float(ins)\n elif ins.instr in misc_instructions:\n self.parse_misc(ins)\n else:\n self.parse_unknown(ins)",
"def parse_operand(binary, module, kind):\n if kind == 'Id':\n return [parse_id(binary, module)]\n elif kind == 'LiteralNumber':\n return [binary.get_next_word()]\n elif kind == 'LiteralString':\n return [parse_literal_string(binary)]\n elif kind == 'OptionalLiteralString':\n word = binary.get_next_word(peek=True, accept_eol=True)\n if word is None:\n return []\n return [parse_literal_string(binary)]\n elif kind == 'VariableLiteralNumber' or kind == 'OptionalLiteralNumber':\n operands = []\n while True:\n word = binary.get_next_word(accept_eol=True)\n if word is None:\n return operands\n operands.append(word)\n elif kind in ['VariableId', 'OptionalId']:\n operands = []\n while True:\n tmp_id = parse_id(binary, module, accept_eol=True)\n if tmp_id is None:\n return operands\n operands.append(tmp_id)\n elif kind == 'VariableIdLiteralPair':\n operands = []\n while True:\n tmp_id = parse_id(binary, module, accept_eol=True)\n if tmp_id is None:\n return operands\n operands.append(tmp_id)\n word = binary.get_next_word()\n operands.append(word)\n elif kind == 'VariableLiteralIdPair':\n operands = []\n while True:\n word = binary.get_next_word(accept_eol=True)\n if word is None:\n return operands\n operands.append(word)\n tmp_id = parse_id(binary, module)\n operands.append(tmp_id)\n elif kind == 'OptionalMemoryAccessMask':\n val = binary.get_next_word(accept_eol=True)\n if val is None:\n return []\n result = expand_mask(kind[8:], val)\n try:\n aligned_idx = result.index('Aligned')\n except ValueError:\n pass\n else:\n result[aligned_idx] = (\n 'Aligned', binary.get_next_word(accept_eol=False))\n return [result]\n\n elif kind[:8] == 'Optional' and kind[-4:] == 'Mask':\n val = binary.get_next_word(accept_eol=True)\n if val is None:\n return []\n return [expand_mask(kind[8:], val)]\n elif kind in ir.MASKS:\n val = binary.get_next_word()\n return [expand_mask(kind, val)]\n elif kind in spirv.spv:\n val = binary.get_next_word()\n constants = spirv.spv[kind]\n for name in constants:\n if constants[name] == val:\n return [name]\n raise ParseError('Unknown \"' + kind + '\" value' + str(val))\n\n raise ParseError('Unknown kind \"' + kind + '\"')",
"def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def _create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]",
"def extract_operators(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n\n elif isinstance(e, Mul):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n return list(set(ops))",
"def print_operation(operations):\n for operation in operations:\n print ' ',\n change_color_by_tag(operation)\n if operation['ExtAttributes']:\n print_extattributes_of_member(operation['ExtAttributes'])\n print operation['Type'],\n if operation['Arguments']:\n print operation['Name'],\n print_argument(operation['Arguments'])\n else:\n print operation['Name']",
"def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")",
"def operation_list(self):\n return OPERATION_LIST",
"def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)",
"def _op(op):\n def _process(self, ty, args=None, result=None, **metadata):\n if args is None:\n args = []\n assert ty is not None\n assert isinstance(args, list), args\n assert not any(arg is None for arg in flatten(args)), args\n result = Op(op, ty, args, result)\n if metadata:\n result.add_metadata(metadata)\n self._insert_op(result)\n return result\n\n def _process_void(self, *args, **kwds):\n result = kwds.pop('result', None)\n op = _process(self, types.Void, list(args), result)\n if kwds:\n op.add_metadata(kwds)\n return op\n\n if ops.is_void(op):\n build_op = _process_void\n else:\n build_op = _process\n\n if config.op_verify:\n build_op = op_verifier(build_op)\n\n return build_op"
] | [
"0.6407225",
"0.5742777",
"0.56923646",
"0.5603622",
"0.55325913",
"0.54617524",
"0.5408668",
"0.5370978",
"0.5358051",
"0.5347028",
"0.5345967",
"0.53198713",
"0.52810514",
"0.5279077",
"0.5233153",
"0.5233153",
"0.5233153",
"0.5233153",
"0.52281266",
"0.52163815",
"0.520678",
"0.5190903",
"0.5181302",
"0.513552",
"0.512595",
"0.5117479",
"0.51017326",
"0.50932896",
"0.5056284",
"0.50495887"
] | 0.7862727 | 0 |
Parse a list of arguments into a dictionary where the key is the name of the argument and the argument itself is the value. | def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':
if not args:
return {}
result = {}
for a in args:
if not a:
continue
arg = Argument(a)
result[arg.name] = arg
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def arglist2dict(args):\n arg_dict = {}\n\n if len(args) == 0:\n return arg_dict\n\n if not args[0].startswith('--'):\n raise ValueError(f\"Positional keywords are not supported: {args[0]}\")\n\n i = 0\n while i < len(args):\n arg = args[i]\n i = i + 1\n if arg.startswith('--'):\n dest = arg[2:]\n j, arglist = Parser.get_args(args[i:])\n i = i + j\n Parser.update_arg_dict(arg_dict, dest, arglist)\n return arg_dict",
"def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result",
"def parse(args: list, keyword_set: set) -> dict:\n parsed_dict = {'': []}\n while args:\n keyword = get_keyword(arg=args[0], keyword_set=keyword_set)\n\n if keyword is not None:\n args.pop(0)\n keyword_name = keyword.keyword_name\n\n if keyword_name in parsed_dict:\n raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)\n\n if keyword.param_for is not None:\n parsed_dict[keyword_name] = [keyword.keyword]\n else:\n parsed_dict[keyword_name] = []\n num_args_pulled = 0\n while num_args_pulled < keyword.num_args:\n if not args:\n raise necrobot.exception.NumParametersException(\n keyword=keyword,\n num_expected=keyword.num_args,\n num_given=num_args_pulled\n )\n else:\n num_args_pulled += 1\n parsed_dict[keyword_name].append(args[0])\n args.pop(0)\n else:\n parsed_dict[''].append(args[0])\n args.pop(0)\n\n return parsed_dict",
"def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary",
"def parse_kwargs(kwargs_list: List[str]) -> Dict[str, Any]:\n\n kwargs_dict = {}\n\n for kwarg in kwargs_list:\n key = kwarg[2:].split('=')[0]\n value = '='.join(kwarg.split('=')[1:])\n\n try:\n if re.match(r'^(-)?[0-9]+$', value):\n value = int(value)\n\n elif re.match(r'^(-)?[0-9]*.[0-9]+$', value) or re.match(r'^(-)?[0-9]*(\\.)?[0-9]+e(-|\\+)[0-9]+$', value):\n value = float(value)\n\n elif re.match(r'^\\[.*]$', value) or re.match(r'^\\{.*}$', value):\n value = json.loads(value)\n\n elif value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n\n elif value.lower() == 'none':\n value = None\n\n except:\n logging.warning(f'Could not automatically parse argument \"{key}.\" Its type will remain string.')\n\n kwargs_dict[key] = value\n\n return kwargs_dict",
"def args_to_dictionaty(args):\n\tres_args = {}\n\tfor i, arg in enumerate(args[1:]):\n\t\tif i % 2 == 0:\n\t\t\tkey = arg\n\t\telse:\n\t\t\tres_args[key] = arg\n\treturn res_args",
"def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict",
"def parse_launch_arguments(launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:\n parsed_launch_arguments = OrderedDict() # type: ignore\n for argument in launch_arguments:\n count = argument.count(':=')\n if count == 0 or argument.startswith(':=') or (count == 1 and argument.endswith(':=')):\n raise RuntimeError(\n \"malformed launch argument '{}', expected format '<name>:=<value>'\"\n .format(argument))\n name, value = argument.split(':=', maxsplit=1)\n parsed_launch_arguments[name] = value # last one wins is intentional\n return parsed_launch_arguments.items()",
"def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list",
"def parse_args_dict(args=None):\n return vars(parse_args(args))",
"def parse_arguments(args):",
"def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d",
"def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict",
"def crude_arg_parser(args=sys.argv):\n args_dict = {}\n key = None\n for e in args[1:]:\n if e[:2] == '--':\n if key:\n args_dict[key] = True # Switch arg\n key = e[2:]\n elif key:\n args_dict[key] = e\n key = None\n\n return args_dict",
"def parse_arguments(\n input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None\n) -> dict:\n if argument_parser is None:\n argument_parser = argparse.ArgumentParser()\n\n argument_parser.add_argument(\n \"--\" + FLAG_DOCKER_IMAGE_PREFIX.replace(\"_\", \"-\"),\n help=\"Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. When leaving blank, the default Dockerhub Repository is used.\",\n required=False,\n default=\"\",\n )\n\n return build_utils.parse_arguments(\n input_args=input_args, argument_parser=argument_parser\n )",
"def args2dict(args, dict_args={}):\n \n for arg in args:\n #this_entry = re.findall(r'[^\"\\s]\\S*|\".+?\"', arg)\n p_arg = arg.split('=')\n if len(p_arg) > 1:\n dict_args[p_arg[0]] = False if p_arg[1].lower() == 'false' else \\\n True if p_arg[1].lower() == 'true' else \\\n None if p_arg[1].lower() == 'none' else \\\n '='.join(p_arg[1:]) if len(p_arg) > 2 else \\\n p_arg[1]\n \n return(dict_args)",
"def _parse_args(self, args : dict):\n result = {}\n for key, value in args.items():\n if key in self._subparsers:\n # if it's a list, it is because it's a preset\n if isinstance(value, list):\n result[key] = value[0]\n else:\n result[key] = self._subparsers[key]._parse_args(value)\n elif key in self._actions:\n result[key] = self._actions[key](value)\n else:\n raise ValueError(f\"Unknown argument {key}\")\n\n return result",
"def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")",
"def parseCommandLine(argv):\n parameters = {}\n for p in argv[1:]: # skip 0th element (module name)\n pair = split(p, '=', 1)\n if (2 != len(pair)):\n print 'bad parameter: %s (had no equals sign for pairing)' % p\n sys.exit()\n else:\n parameters[pair[0]] = pair[1]\n return parameters",
"def parse_args(argv: t.Iterable[str] = None):\n if argv is None:\n argv = sys.argv[1:]\n\n args: t.List[str] = []\n kwargs: t.MutableMapping[str, t.Any] = {}\n\n key = None\n for arg in argv:\n if arg.startswith('--'):\n if arg == '--help':\n print(USAGE)\n raise SystemExit\n if key is not None:\n kwargs[key] = True\n key = arg[2:]\n continue\n\n match = re.match('^(\\\\w+)=(.*)$', arg)\n if match:\n if key is not None:\n kwargs[key] = True\n key = None\n kwargs[match.group(1)] = match.group(2)\n continue\n\n if key is not None:\n kwargs[key] = arg\n key = None\n continue\n\n args.append(arg)\n\n if key is not None:\n kwargs[key] = True\n\n return (tuple(args), kwargs)",
"def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value",
"def parse(self, arg_list):\n\n if self._meta.ignore_unknown_arguments is True:\n args, unknown = self.parse_known_args(arg_list)\n self.parsed_args = args\n self.unknown_args = unknown\n else:\n args = self.parse_args(arg_list)\n self.parsed_args = args\n return self.parsed_args",
"def parse_unknown_args(args):\n retval = {}\n preceded_by_key = False\n for arg in args:\n if arg.startswith('--'):\n if '=' in arg:\n key = arg.split('=')[0][2:]\n value = arg.split('=')[1]\n retval[key] = value\n else:\n key = arg[2:]\n preceded_by_key = True\n elif preceded_by_key:\n retval[key] = arg\n preceded_by_key = False\n\n return retval",
"def args_to_params(args: list) -> dict:\n found = {}\n\n # Setup the dictionary identifying the parameters\n found['sensor'] = args.sensor\n found['filename'] = args.filename\n found['working_space'] = args.working_space\n if args.userid:\n found['userid'] = args.userid\n\n # Note: Return an empty dict if we're missing mandatory parameters\n return found",
"def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)",
"def process_cli_config_args(config_args:List[str]) -> Dict:\n # assert len(config_args) % 3 == 0, \\\n # \"You should pass config args in [--config.arg_name arg_value arg_type] format\"\n assert len(config_args) % 2 == 0, \\\n \"You should pass config args in [--config.arg_name arg_value] format\"\n arg_names = [config_args[i] for i in range(0, len(config_args), 2)]\n arg_values = [config_args[i] for i in range(1, len(config_args), 2)]\n\n result = {}\n\n for name, value in zip(arg_names, arg_values):\n assert name.startswith(CONFIG_ARG_PREFIX), \\\n f\"Argument {name} is unkown and does not start with `config.` prefix. Cannot parse it.\"\n\n result[name[len(CONFIG_ARG_PREFIX):]] = infer_type_and_convert(value)\n\n return result",
"def parse_args(args):\n if len(args) == 1:\n return {}",
"def parse_args(argparser_args):\n return {k: v for k, v in vars(argparser_args).items() if v is not None}",
"def parse_args():\n # Argument objects\n argument_objects = [\n FindInterfaceArg(),\n InterfaceArg(),\n NaughtyCountArg(),\n FirewallArg(),\n ModelTypeArg(),\n LogArg(),\n ]\n\n # Create the parser and parse the args\n parser = create_parser(argument_objects)\n parsed_args = parser.parse_args()\n options = {}\n\n # Parse all of the options\n for obj in argument_objects:\n if not obj.process_argument(parsed_args, options):\n parser.print_usage()\n exit()\n\n return options",
"def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)"
] | [
"0.75726336",
"0.7397223",
"0.7325528",
"0.71652824",
"0.7037274",
"0.6999478",
"0.6991102",
"0.69558775",
"0.69498295",
"0.69380295",
"0.6924944",
"0.69120455",
"0.6848294",
"0.6756323",
"0.6723195",
"0.66975105",
"0.6656867",
"0.6617072",
"0.6584447",
"0.6567549",
"0.65363634",
"0.6525429",
"0.6501903",
"0.6501504",
"0.6482272",
"0.6439208",
"0.64390683",
"0.64336765",
"0.6341237",
"0.6329849"
] | 0.85187536 | 0 |
Parse a list of directives into a dictionary where the key is the name of the directive and the value is the directive itself. | def parse_directives(schema_directives: List[Dict]) -> Dict[str, Directive]:
result = {}
for schema_directive in schema_directives:
new_directive = Directive(schema_directive)
result[new_directive.name] = new_directive
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def directives():\n cmd = \"{} -L\".format(_detect_os())\n ret = {}\n out = __salt__[\"cmd.run\"](cmd)\n out = out.replace(\"\\n\\t\", \"\\t\")\n for line in out.splitlines():\n if not line:\n continue\n comps = line.split(\"\\t\")\n desc = \"\\n\".join(comps[1:])\n ret[comps[0]] = desc\n return ret",
"def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()",
"def directives(self, directive):\n signature_regex = compile(\"^\\w+:[\\w\\.]+:\\d+:[\\w\\.]+:[\\w/]+$\")\n\n if directive is None:\n raise ValueError(\"A directive name must be given.\")\n if not isinstance(directive, dict):\n raise TypeError(\"The directive name must be a dictionary, not %s.\" % (type(directive)))\n if 'signature' not in directive.keys():\n raise ValueError(\"A directive is expected to have a 'signature'.\")\n if not isinstance(directive['signature'], str):\n raise TypeError(\"The signature is expected as a string, not %s.\" % (type(directive['signature'])))\n if not signature_regex.match(directive['signature']):\n raise ValueError(\"A signature must have the following format: 'alias:ip:port:server_name:location'\")\n\n if directive not in self._directives:\n self._directives.append(directive)\n\n self._build()",
"def parse_directive(line):\n composite = list()\n pointer = line.find(\"#\")\n composite.append(line[0: pointer])\n composite.append(line[pointer + 1: len(line) - 1])\n return composite",
"def get_definitions(wlist):\n ddict = {}\n for word in wlist:\n text = get_def_page(word)\n defs = extract_defs(text)\n ddict[word] = defs\n return ddict",
"def directives(self):\n return self._directives",
"def directives(self):\n return self._directives",
"def parse_cache_control(\n header_value: str) -> dict[str, str | int | bool | None]:\n directives: dict[str, str | int | bool | None] = {}\n\n for segment in parse_list(header_value):\n name, sep, value = segment.partition('=')\n if sep != '=':\n directives[name] = None\n elif sep and value:\n value = _dequote(value.strip())\n try:\n directives[name] = int(value)\n except ValueError:\n directives[name] = value\n # NB ``name='' is never valid and is ignored!\n\n # convert parameterless boolean directives\n for name in _CACHE_CONTROL_BOOL_DIRECTIVES:\n if directives.get(name, '') is None:\n directives[name] = True\n\n return directives",
"def _convert_tags_to_dict(text_list_tags):\n return OrderedDict([re.findall(r\"\"\"\\s*_(\\w+)\\s+(.+?)\\s*$\"\"\", row)[0] for row in text_list_tags])",
"def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg",
"def parse_vars(items):\n return dict((parse_var(item) for item in items))",
"def find_regions(directives):\n regions = {}\n for directive in directives:\n if directive.startswith(\"sequence-region\"):\n try:\n _, accession, start, end = directive.split(\" \")\n regions[accession] = (int(start), int(end))\n except ValueError:\n # likely sequence-region without coordinates\n pass\n return regions",
"def parse_list(constant_list):\n\n values = dict()\n descriptions = dict()\n for (key, value, desc) in constant_list:\n values[key] = value\n descriptions[value] = desc\n return (values, descriptions)",
"def export_commentary_text_as_dictionary(commentary_parts_list):\n verse_string = str(commentary_parts_list[0])\n header_string = str(commentary_parts_list[1])\n \n verse = re.search(r\"\\[(\\d+)\\]\", verse_string).group(1)\n header = re.search(r'\\<u\\>\\s*\"(.+)\"\\s*\\<\\/u\\>', header_string).group(1)\n\n commentary_text = commentary_parts_list[2].replace(\": \", \"\")\n key = verse + \"__\" + header\n \n return key, commentary_text.strip()",
"def GetDirective(item,directive):\n pat=re.compile(' '+directive + '[\\s= ]*([\\S, ]*)\\n')\n m=pat.search(item)\n if m:\n return m.group(1)",
"def to_dict(tags: list):\n result = {}\n for tag in tags:\n result[tag.name] = tag.get_text()\n return result",
"def makeGcauCfgDictFromAgc(lineList): \r\n diction = {}\r\n withinCfgData = False\r\n for eachString in lineList:\r\n if re.match(RE_COMPILED_CFG_START, eachString):\r\n withinCfgData = True\r\n elif re.match(RE_COMPILED_CFG_END, eachString):\r\n withinCfgData = False\r\n elif withinCfgData:\r\n p = re.match(RE_COMPILED_CFG_ITEM, eachString)\r\n if p:\r\n obj = p.groups()[0]\r\n attr = p.groups()[1]\r\n val = p.groups()[2]\r\n if obj not in diction:\r\n diction[obj] = {}\r\n diction[obj][attr] = val\r\n return diction",
"def _process_directives(self, db):\n term = Term('_directive')\n directive_node = db.find(term)\n if directive_node is not None:\n directives = db.get_node(directive_node).children\n\n gp = LogicFormula()\n while directives:\n current = directives.pop(0)\n self.execute(current, database=db, context=self.create_context((), define=None),\n target=gp)\n return True",
"def _parse_tags(tags: str):\n return dict(item.split(\":\") for item in shlex.split(tags)) # type: ignore",
"def crm2dict(conf_list=None):\n if conf_list is None:\n conf_list=configure_parse()\n conf_dict=dict(conf_list)\n results={}\n groupkeys = getkeys(conf_dict, 'group')\n primitivekeys = getkeys(conf_dict, 'primitive')\n for gk in groupkeys:\n results.setdefault(gk.split()[1], {})\n locationkeys = getkeys(conf_dict, 'location')\n for key in conf_dict.keys():\n conf_type, tag = key.split()\n if conf_type == 'group':\n members=[x for x in conf_dict[key] if not (x.startswith('target-role') or x == 'meta')]\n results[tag].update({'members' : members })\n elif conf_type == 'location':\n service_name, loc=parse_tag(tag)\n balancer = conf_dict[key][2]\n if service_name not in results.keys():\n results.setdefault(service_name, {'loadbalancers' : {loc:balancer}})\n elif 'loadbalancers' not in results[service_name].keys():\n results[service_name].update({'loadbalancers' : {loc:balancer}})\n else:\n results[service_name]['loadbalancers'].update({loc:balancer})\n elif conf_type == 'primitive':\n service_name, service_type = parse_tag(tag)\n if service_type == 'ld':\n results[service_name].update({'type' : 'ldirectord'})\n elif service_type[:2] == 'ip':\n params = conf_dict[key]\n parsed_params={}\n for param in params:\n if param[:3] == 'ip=':\n parsed_params.setdefault('ip', param[4:-1])\n elif param[:13] == 'cidr_netmask=':\n parsed_params.setdefault('cidr_netmask', param[14:-1])\n elif param[:4] == 'nic=':\n parsed_params.setdefault('nic', param[5:-1])\n if 'ips' not in results[service_name].keys():\n results[service_name].update({'ips' : [haipstr(parsed_params)]})\n else:\n results[service_name]['ips'].append(haipstr(parsed_params))\n return results",
"def _parse_comments(self, tokens: TokenIterator):\n metadata = {}\n while tokens.peek().type == 'COMMENT':\n comment = tokens.next().text\n while comment:\n comment, found, meta = comment.rpartition('::')\n if found:\n key, _, value = meta.partition(' ')\n metadata[key] = value.rstrip()\n return metadata",
"def produce_parse_duckduckgo(self, value_list:list) -> dict:\n value_html, value_link = value_list\n value_dict = {'search': value_link, 'names': [], 'links': []}\n if len(value_html) < 1000:\n return value_dict\n soup = BeautifulSoup(value_html, 'html.parser')\n soup = soup.find('div', id='links')\n soup = soup.find_all('h2')\n value_name = [f.text.strip() for f in soup]\n value_link = [f.find('a').get('href', '') for f in soup]\n if '//duckduckgo.com/l/?uddg=' in value_link[0]:\n value_link = [urllib.parse.urlparse(f) for f in value_link]\n value_link = [urllib.parse.parse_qs(f.query).get('uddg', '')[0] for f in value_link]\n value_dict['names'] = value_name\n value_dict['links'] = value_link\n return value_dict",
"def potcar_str2dict(potcar_list: Optional[str]) -> dict:\n if potcar_list is None:\n return {}\n elif isinstance(potcar_list, str):\n potcar_list = potcar_list.split()\\\n\n d = {}\n for p in potcar_list:\n element = p.split(\"_\")[0]\n d[element] = p\n return d",
"def list(file_path):\n output = utils.run_process(['mdls', file_path])\n # get metadata into list, allowing for nested attributes\n md = [[y.strip()\n for y in line.split('=')]\n for line in output]\n # iterate over list to deal with nested attributes\n # then build dictionary\n listed_item, md_dict = [], {}\n for item in md:\n # item is pair\n if len(item) == 2:\n k, v = item\n # if second item is parens, then first is key\n if v == '(':\n listed_key = utils.clean_attribute(k)\n # else, it's a simple `key: value` pair\n else:\n # attempt to convert to `int`\n try:\n val = int(v)\n except (ValueError, TypeError):\n val = v.replace('\"', '')\n # convert shell nulls to Python `None`\n if val in ('\"\"', '(null)'):\n val = None\n key = utils.clean_attribute(k)\n md_dict[key] = val\n # single item is part of a nested attribute\n elif len(item) == 1 and item[0] != ')':\n value = item[0].replace('\"', '')\n listed_item.append(value)\n # single item marks end of a nested attribute\n elif len(item) == 1 and item[0] == ')':\n md_dict[listed_key] = listed_item\n listed_item = []\n return md_dict",
"def register_based_directives():\n if not BASED_LIB_RST:\n return\n\n if \"directives\" in BASED_LIB_RST:\n for dir_name, dir_cls_str in BASED_LIB_RST[\"directives\"].items():\n class_ = import_string(dir_cls_str)\n directives.register_directive(dir_name, class_)",
"def parse(self):\n try:\n self.match_value(Operator, \"#\")\n\n # Check for a match against known directives\n candidates = [self.define, self.undef, self.include, self.ifdef,\n self.ifndef, self.if_, self.elif_, self.else_, self.endif, self.pragma]\n for f in candidates:\n try:\n directive = f()\n if not self.eol():\n log.warning(\"Additional tokens at end of preprocessor directive\")\n return directive\n except ParseError:\n pass\n\n # Any other line beginning with '#' is a preprocessor\n # directive, we just don't handle it (yet). Suppress\n # warnings for common directives that shouldn't impact\n # correctness.\n common_unhandled = [\"line\", \"warning\", \"error\"]\n if len(self.tokens) > 2 and str(self.tokens[1]) not in common_unhandled:\n log.warning(\"Unrecognized directive\")\n return UnrecognizedDirectiveNode(self.tokens)\n except ParseError:\n raise ParseError(\"Not a directive.\")",
"def get_commands_dict() -> dict:\n commands_dict = {}\n f = open(f\"data/metadata/commands.dict.txt\", \"r\", encoding=\"utf-8\").read()\n for command in f.split(\"\\n\"):\n commands_dict[command.split(\":\")[0]] = command.split(\":\")[1]\n return commands_dict",
"def process(path, name):\n d = {}\n path = path / name\n with open(path.as_posix()) as fd:\n file_contents = fd.read()\n module = ast.parse(file_contents)\n docstring = ast.get_docstring(module)\n docstring_line = get_value(docstring)\n d['name'] = name\n if docstring_line:\n d['docstring'] = docstring_line\n else:\n d['docstring'] = 'No docstring provided.'\n return d",
"def _parse_single_definition(unparsedDefinition):\r\n parsed = {'definition': unparsedDefinition['difino']}\r\n parsed['subdefinitions'] = [\r\n _parse_subdefinitions(subdefinition)\r\n for subdefinition in unparsedDefinition['pludifinoj']\r\n ]\r\n \r\n parsed['examples'] = [\r\n {'example': example['ekzemplo']}\r\n for example in unparsedDefinition['ekzemploj']\r\n ]\r\n return parsed",
"def parse(self, content):\n self._sections = {}\n self._filters = []\n section = None\n\n def error(msg):\n print('autodl.cfg: line {}: {}'.format(i + 1, msg))\n # log('autodl.cfg: line {}: {}'.format(i + 1, msg))\n\n first_prog = re.compile(ur'^\\[\\s*([\\w\\-]+)\\s*(?:([^\\]]+))?\\s*]$')\n second_prog = re.compile(ur'^([\\w\\-]+)\\s*=(.*)$')\n lines = content['data'].split('\\n')\n for line in lines:\n i = 0\n line = line.strip()\n if line == '':\n continue\n\n first_array = first_prog.match(line)\n second_array = second_prog.match(line)\n if line[0] == '#':\n if section:\n section.add_comment(line)\n elif first_array:\n _type = first_array.group(1).strip().lower()\n try:\n _name = first_array.group(2).strip().lower()\n except AttributeError:\n _name = None\n section = self.get_section(_type, _name)\n elif second_array:\n if section is None:\n error('Missing a [section]')\n else:\n _option = second_array.group(1).strip().lower()\n _value = second_array.group(2).strip().lower()\n section.add_option(_option, _value)\n else:\n error('Ignoring line')\n i += 1"
] | [
"0.6590887",
"0.60470355",
"0.60470355",
"0.58491236",
"0.5815651",
"0.57697064",
"0.57697064",
"0.5671237",
"0.5338338",
"0.52868664",
"0.52477413",
"0.5196038",
"0.51685214",
"0.51405686",
"0.5023584",
"0.49965042",
"0.4984144",
"0.49639156",
"0.4882775",
"0.4877682",
"0.48603687",
"0.48333287",
"0.48227295",
"0.48154616",
"0.4812607",
"0.47794577",
"0.47505942",
"0.47324657",
"0.4712543",
"0.47108516"
] | 0.77479357 | 0 |
Given a Unity Environment and a QNetwork, this method will generate a buffer of Experiences obtained by running the Environment with the Policy derived from the QNetwork. | def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
  # trajectories for each Agent
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
  # Create a Mapping from AgentId to the last action taken by the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
    # Add some exploration noise, scaled by epsilon, to the action values
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
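    # Pick the best action using argmax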
actions = np.argmax(actions_values, axis=1)
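    # Reshape into a (num_agents, 1) column: one discrete action per requesting agent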
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect_experiences(self):\n for i in range(self.num_frames_per_proc):\n # Do one agent-environment interaction\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n \n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n \n model_results0 = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n \n dist0 = model_results0['dist'] ### NOTE\n value0 = model_results0['value']\n memory0 = model_results0['memory']\n msg0 = model_results0['message']\n dists_speaker0 = model_results0['dists_speaker']\n extra_predictions0 = model_results0['extra_predictions']\n #self.rng_states0[i] = model_results0['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states0[i] = model_results0['cuda_rng_states']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n model_results1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(msg0.transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1)) ### NOTE\n \n dist1 = model_results1['dist']\n value1 = model_results1['value']\n memory1 = model_results1['memory']\n msg1 = model_results1['message']\n dists_speaker1 = model_results1['dists_speaker']\n extra_predictions1 = model_results1['extra_predictions']\n #self.rng_states1[i] = model_results1['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states1[i] = model_results1['cuda_rng_states']\n \n #state = torch.get_rng_state()\n action0 = dist0.sample()\n \n #torch.set_rng_state(state)\n action1 = dist1.sample()\n\n obs0, reward0, done0, env_info0 = self.env0.step(action0.cpu().numpy())\n \n obs1, reward1, done1, env_info1 = self.env1.step(action1.cpu().numpy())\n \n # mask any rewards based on (previous) been_done\n rewardos0 = [0] * self.num_procs\n rewardos1 = [0] * self.num_procs\n for j in range(self.num_procs):\n rewardos0[j] = reward0[j] * (1 - self.been_done0[j].item())\n rewardos1[j] = reward1[j] * (1 - self.been_done1[j].item())\n \n reward0 = tuple(rewardos0)\n reward1 = tuple(rewardos1)\n \n #reward0 = tuple(0.5*r0 + 0.5*r1 for r0, r1 in zip(reward0, reward1)) ### NOTE\n #reward1 = reward0\n \n # reward sender agent (0) equally for success of receiver agent (1) ### NOTE\n reward0 = reward1\n \n self.been_done0 = (1 - (1 - self.been_done0) * (1 - torch.tensor(done0, device=self.device, dtype=torch.float)))\n self.been_done1 = (1 - (1 - self.been_done1) * (1 - torch.tensor(done1, device=self.device, dtype=torch.float)))\n both_done = self.been_done0 * self.been_done1\n \n # reset if receiver agent (1) is done ### NOTE\n both_done = self.been_done1\n \n obs0 = self.env0.sync_reset(both_done, obs0)\n obs1 = self.env1.sync_reset(both_done, obs1)\n \n if self.aux_info:\n env_info0 = self.aux_info_collector0.process(env_info0)\n # env_info0 = self.process_aux_info0(env_info0)\n \n env_info1 = self.aux_info_collector1.process(env_info1)\n # env_info1 = self.process_aux_info1(env_info1)\n\n # Update experiences values\n\n self.obss0[i] = self.obs0\n self.obs0 = obs0\n \n self.obss1[i] = self.obs1\n self.obs1 = obs1\n\n self.memories0[i] = self.memory0\n self.memory0 = memory0\n \n self.memories1[i] = self.memory1\n self.memory1 = memory1\n \n self.msgs0[i] = self.msg0\n self.msg0 = msg0\n \n self.msgs1[i] = self.msg1\n self.msg1 = msg1\n \n self.msgs_out0[i] = msg0\n \n self.msgs_out1[i] = msg1\n\n self.masks0[i] = self.mask0\n #self.mask0 = 1 - torch.tensor(done0, device=self.device, dtype=torch.float)\n self.mask0 = 1 - both_done\n 
self.actions0[i] = action0\n self.values0[i] = value0\n if self.reshape_reward is not None:\n self.rewards0[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs0, action0, reward0, done0)\n ], device=self.device)\n else:\n self.rewards0[i] = torch.tensor(reward0, device=self.device)\n self.log_probs0[i] = dist0.log_prob(action0)\n self.speaker_log_probs0[i] = self.acmodel0.speaker_log_prob(dists_speaker0, msg0)\n \n self.masks1[i] = self.mask1\n #self.mask1 = 1 - torch.tensor(done1, device=self.device, dtype=torch.float)\n self.mask1 = 1 - both_done\n self.actions1[i] = action1\n self.values1[i] = value1\n if self.reshape_reward is not None:\n self.rewards1[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs1, action1, reward1, done1)\n ], device=self.device)\n else:\n self.rewards1[i] = torch.tensor(reward1, device=self.device)\n self.log_probs1[i] = dist1.log_prob(action1)\n self.speaker_log_probs1[i] = self.acmodel1.speaker_log_prob(dists_speaker1, msg1)\n\n if self.aux_info:\n self.aux_info_collector0.fill_dictionaries(i, env_info0, extra_predictions0)\n \n self.aux_info_collector1.fill_dictionaries(i, env_info1, extra_predictions1)\n\n # Update log values\n\n self.log_episode_return0 += torch.tensor(reward0, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return0 += self.rewards0[i]\n \n self.log_episode_return1 += torch.tensor(reward1, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return1 += self.rewards1[i]\n \n self.log_episode_num_frames0 += torch.ones(self.num_procs, device=self.device)\n self.log_episode_num_frames1 += torch.ones(self.num_procs, device=self.device)\n \n #for i, done_ in enumerate(done0):\n for i in range(self.num_procs):\n #if done_:\n if both_done[i]:\n self.log_done_counter0 += 1\n self.log_return0.append(self.log_episode_return0[i].item())\n self.log_reshaped_return0.append(self.log_episode_reshaped_return0[i].item())\n self.log_num_frames0.append(self.log_episode_num_frames0[i].item())\n \n #for i, done_ in enumerate(done1):\n #if done_:\n self.log_done_counter1 += 1\n self.log_return1.append(self.log_episode_return1[i].item())\n self.log_reshaped_return1.append(self.log_episode_reshaped_return1[i].item())\n self.log_num_frames1.append(self.log_episode_num_frames1[i].item())\n\n # if both are done, reset both to not done\n self.been_done0 *= (1 - both_done)\n self.been_done1 *= (1 - both_done)\n\n self.log_episode_return0 *= self.mask0\n self.log_episode_reshaped_return0 *= self.mask0\n self.log_episode_num_frames0 *= self.mask0\n\n self.log_episode_return1 *= self.mask1\n self.log_episode_reshaped_return1 *= self.mask1\n self.log_episode_num_frames1 *= self.mask1\n\n # Add advantage and return to experiences\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n tmp = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n next_value0 = tmp['value']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n next_value1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(tmp['message'].transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1))['value'] ### NOTE\n\n for i in reversed(range(self.num_frames_per_proc)):\n next_mask0 = self.masks0[i+1] if i < self.num_frames_per_proc - 1 else 
self.mask0\n next_value0 = self.values0[i+1] if i < self.num_frames_per_proc - 1 else next_value0\n next_advantage0 = self.advantages0[i+1] if i < self.num_frames_per_proc - 1 else 0\n \n next_mask1 = self.masks1[i+1] if i < self.num_frames_per_proc - 1 else self.mask1\n next_value1 = self.values1[i+1] if i < self.num_frames_per_proc - 1 else next_value1\n next_advantage1 = self.advantages1[i+1] if i < self.num_frames_per_proc - 1 else 0\n\n delta0 = self.rewards0[i] + self.discount * next_value0 * next_mask0 - self.values0[i]\n self.advantages0[i] = delta0 + self.discount * self.gae_lambda * next_advantage0 * next_mask0\n \n delta1 = self.rewards1[i] + self.discount * next_value1 * next_mask1 - self.values1[i]\n self.advantages1[i] = delta1 + self.discount * self.gae_lambda * next_advantage1 * next_mask1\n\n # Flatten the data correctly, making sure that\n # each episode's data is a continuous chunk\n\n exps0 = DictList()\n exps0.obs = [self.obss0[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n exps1 = DictList()\n exps1.obs = [self.obss1[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n # In commments below T is self.num_frames_per_proc, P is self.num_procs,\n # D is the dimensionality\n\n # T x P x D -> P x T x D -> (P * T) x D\n exps0.memory = self.memories0.transpose(0, 1).reshape(-1, *self.memories0.shape[2:])\n \n exps1.memory = self.memories1.transpose(0, 1).reshape(-1, *self.memories1.shape[2:])\n \n exps0.message = self.msgs0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message = self.msgs1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n exps0.message_out = self.msgs_out0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message_out = self.msgs_out1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n #exps0.rng_states = self.rng_states0.transpose(0, 1).reshape(-1, *self.rng_states0.shape[2:])\n #if torch.cuda.is_available():\n # exps0.cuda_rng_states = self.cuda_rng_states0.transpose(0, 1).reshape(-1, *self.cuda_rng_states0.shape[2:])\n \n #exps1.rng_states = self.rng_states1.transpose(0, 1).reshape(-1, *self.rng_states1.shape[2:])\n #if torch.cuda.is_available():\n # exps1.cuda_rng_states = self.cuda_rng_states1.transpose(0, 1).reshape(-1, *self.cuda_rng_states1.shape[2:])\n \n # T x P -> P x T -> (P * T) x 1\n exps0.mask = self.masks0.transpose(0, 1).reshape(-1).unsqueeze(1)\n \n exps1.mask = self.masks1.transpose(0, 1).reshape(-1).unsqueeze(1)\n\n # for all tensors below, T x P -> P x T -> P * T\n exps0.action = self.actions0.transpose(0, 1).reshape(-1)\n exps0.value = self.values0.transpose(0, 1).reshape(-1)\n exps0.reward = self.rewards0.transpose(0, 1).reshape(-1)\n exps0.advantage = self.advantages0.transpose(0, 1).reshape(-1)\n exps0.returnn = exps0.value + exps0.advantage\n exps0.log_prob = self.log_probs0.transpose(0, 1).reshape(-1)\n exps0.speaker_log_prob = self.speaker_log_probs0.transpose(0, 1).reshape(-1)\n \n exps1.action = self.actions1.transpose(0, 1).reshape(-1)\n exps1.value = self.values1.transpose(0, 1).reshape(-1)\n exps1.reward = self.rewards1.transpose(0, 1).reshape(-1)\n exps1.advantage = self.advantages1.transpose(0, 1).reshape(-1)\n exps1.returnn = exps1.value + exps1.advantage\n exps1.log_prob = self.log_probs1.transpose(0, 1).reshape(-1)\n 
exps1.speaker_log_prob = self.speaker_log_probs1.transpose(0, 1).reshape(-1)\n\n if self.aux_info:\n exps0 = self.aux_info_collector0.end_collection(exps0)\n \n exps1 = self.aux_info_collector1.end_collection(exps1)\n\n # Preprocess experiences\n\n exps0.obs = self.preprocess_obss(exps0.obs, device=self.device)\n\n exps1.obs = self.preprocess_obss(exps1.obs, device=self.device)\n\n # Log some values\n\n keep0 = max(self.log_done_counter0, self.num_procs)\n\n keep1 = max(self.log_done_counter1, self.num_procs)\n\n log0 = {\n \"return_per_episode\": self.log_return0[-keep0:],\n \"reshaped_return_per_episode\": self.log_reshaped_return0[-keep0:],\n \"num_frames_per_episode\": self.log_num_frames0[-keep0:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter0,\n }\n\n log1 = {\n \"return_per_episode\": self.log_return1[-keep1:],\n \"reshaped_return_per_episode\": self.log_reshaped_return1[-keep1:],\n \"num_frames_per_episode\": self.log_num_frames1[-keep1:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter1,\n }\n\n self.log_done_counter0 = 0\n self.log_return0 = self.log_return0[-self.num_procs:]\n self.log_reshaped_return0 = self.log_reshaped_return0[-self.num_procs:]\n self.log_num_frames0 = self.log_num_frames0[-self.num_procs:]\n\n self.log_done_counter1 = 0\n self.log_return1 = self.log_return1[-self.num_procs:]\n self.log_reshaped_return1 = self.log_reshaped_return1[-self.num_procs:]\n self.log_num_frames1 = self.log_num_frames1[-self.num_procs:]\n\n return exps0, log0, exps1, log1",
"def collect_experience(env_, agent_, size):\n env_.reset()\n state, reward, done, _ = env_.step(env_.action_space.sample())\n for data in range(size):\n action = env_.action_space.sample()\n next_state, reward, done, _ = env_.step(action)\n # penalize reward based on the position of the cart\n reward = max(0, reward * (1 - abs(next_state[0]/2.4)))\n if done:\n next_state = np.zeros(state.shape)\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n env_.reset()\n state, reward, done, _ = env_.step(env.action_space.sample())\n else:\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n state = next_state",
"def append(self, experience: Experience) -> None:\n self.buffer.append(experience)",
"def SendExperiences(self, request_iterator, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def explorer(global_rb, queue, trained_steps, n_transition,\n is_training_done, lock, env_fn, policy_fn,\n buffer_size=1024, max_transition=None,\n episode_max_steps=1000):\n env = env_fn()\n policy = policy_fn(env, \"Explorer\", global_rb.get_buffer_size())\n local_rb = ReplayBuffer(obs_shape=env.observation_space.shape,\n act_dim=env.action_space.low.size,\n size=buffer_size)\n\n s = env.reset()\n episode_steps = 0\n total_reward = 0.\n total_rewards = []\n start = time.time()\n sample_at_start = 0\n\n while not is_training_done.is_set():\n # Periodically copy weights of explorer\n if not queue.empty():\n actor_weights, critic_weights, critic_target_weights = queue.get()\n update_target_variables(policy.actor.weights, actor_weights, tau=1.)\n update_target_variables(policy.critic.weights, critic_weights, tau=1.)\n update_target_variables(policy.critic_target.weights, critic_target_weights, tau=1.)\n\n n_transition.value += 1\n episode_steps += 1\n a = policy.get_action(s)\n s_, r, done, _ = env.step(a)\n done_flag = done\n if episode_steps == env._max_episode_steps:\n done_flag = False\n total_reward += r\n local_rb.add(s, a, r, s_, done_flag)\n\n s = s_\n if done or episode_steps == episode_max_steps:\n s = env.reset()\n total_rewards.append(total_reward)\n total_reward = 0\n episode_steps = 0\n\n # Add collected experiences to global replay buffer\n if local_rb.get_stored_size() == buffer_size - 1:\n temp_n_transition = n_transition.value\n samples = local_rb.sample(local_rb.get_stored_size())\n states, next_states, actions, rewards, done = samples[\"obs\"], samples[\"next_obs\"], samples[\"act\"], samples[\"rew\"], samples[\"done\"]\n done = np.array(done, dtype=np.float64)\n td_errors = policy.compute_td_error(\n states, actions, next_states, rewards, done)\n print(\"Grad: {0: 6d}\\tSamples: {1: 7d}\\tTDErr: {2:.5f}\\tAveEpiRew: {3:.3f}\\tFPS: {4:.2f}\".format(\n trained_steps.value, n_transition.value, np.average(np.abs(td_errors).flatten()),\n sum(total_rewards) / len(total_rewards), (temp_n_transition - sample_at_start) / (time.time() - start)))\n total_rewards = []\n lock.acquire()\n global_rb.add(\n states, actions, rewards, next_states, done,\n priorities=np.abs(td_errors)+1e-6)\n lock.release()\n local_rb.clear()\n start = time.time()\n sample_at_start = n_transition.value\n\n if max_transition is not None and n_transition.value >= max_transition:\n is_training_done.set()",
"def accumulate_experience(teacher, exp_replay: Supervised_ExperienceReplay, config=student_config):\n\n env = gym.make(\"PongNoFrameskip-v4\")\n env = wrap_deepmind(env, frame_stack=True)\n steps = 0\n while 1:\n state = env.reset()\n state = np.asarray(state)\n done = False\n while not done:\n steps += 1\n teacher_q_value = teacher.get_q(state=np.reshape(state, (1, state.shape[0], state.shape[1], state.shape[2])))\n action = teacher.select_action(teacher_q_value)\n next_state, reward, done, _ = env.step(action + 1)\n next_state = np.asarray(next_state)\n exp_replay.add_memory(state, teacher_q_value, action) # feeding the experience replay\n state = next_state\n if steps > config.OBSERVE: # we have OBSERVE number of exp in exp_replay\n try:\n del env\n except ImportError:\n pass\n break",
"def sample(self):\n experiences = random.sample(self.memory, k=self.batch_size)\n \n states = torch.from_numpy(np.vstack([exp.state for exp in experiences if exp is not None])).float()#.to(deepQAgent.device)\n\n actions = torch.from_numpy(np.vstack([exp.action for exp in experiences if exp is not None])).long()#.to(deepQAgent.device)\n\n rewards = torch.from_numpy(np.vstack([exp.reward for exp in experiences if exp is not None])).float()#.to(deepQAgent.device)\n\n next_states = torch.from_numpy(np.vstack([exp.next_state for exp in experiences if exp is not None])).float()#.to(deepQAgent.device)\n\n\n return (states, actions, rewards, next_states)",
"def sample_trajectory(self, env, animate_this_episode, is_evaluation):\n # Using current task with meta inside\n env.reset_task(is_evaluation=is_evaluation)\n stats = []\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n ep_steps = 0\n steps = 0\n\n num_samples = max(self.history, self.max_path_length + 1)\n meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))\n rewards = []\n\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n\n if ep_steps == 0:\n ob = env.reset()\n # first meta ob has only the observation\n # set a, r, d to zero, construct first meta observation in meta_obs\n # YOUR CODE HERE\n ac = np.zeros(self.ac_dim); rew = np.zeros(self.reward_dim); done = np.zeros(self.terminal_dim)\n meta_obs[steps, :] = np.concatenate((ob, ac, rew, done))\n steps += 1\n\n # index into the meta_obs array to get the window that ends with the current timestep\n # please name the windowed observation `in_` for compatibilty with the code that adds to the replay buffer (lines 418, 420)\n # YOUR CODE HERE\n # padding for input obs size\n sample_action_in_ = meta_obs[steps-self.history:steps, :] if steps>=self.history else np.squeeze(np.concatenate(([meta_obs[0,:], ] * (self.history - steps), meta_obs[:steps, :]), axis=0))\n # need to clear hidden size, in order to avoid previous hidden state as it may be generated by the other totally different task (env setting may be changed)\n hidden = np.zeros((1, self.gru_size), dtype=np.float32)\n\n # get action from the policy\n # YOUR CODE HERE\n # Tensor(\"ob:0\", shape=(?, 1, 10), dtype=float32)\n # print(self.sy_ob_no)\n # Tensor(\"hidden:0\", shape=(?, 32), dtype=float32)\n # print(self.sy_hidden)\n ac = self.sess.run(self.sy_sampled_ac, feed_dict={\n self.sy_ob_no: sample_action_in_.reshape(-1, self.history, self.meta_ob_dim),\n self.sy_hidden: hidden,\n })\n assert len(ac) == 1\n ac = ac[0]\n\n # step the environment\n # YOUR CODE HERE\n ob, rew, done, _= env.step(ac)\n\n ep_steps += 1\n\n done = bool(done) or ep_steps == self.max_path_length\n # construct the meta-observation and add it to meta_obs\n # YOUR CODE HERE\n meta_obs[steps, :] = np.concatenate((ob, ac, [rew], [done]))\n\n rewards.append(rew)\n steps += 1\n\n in_ = meta_obs[steps, :]\n # add sample to replay buffer\n if is_evaluation:\n self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n else:\n self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n\n # start new episode\n if done:\n # compute stats over trajectory\n s = dict()\n s['rewards']= rewards[-ep_steps:]\n s['ep_len'] = ep_steps\n stats.append(s)\n ep_steps = 0\n\n if steps >= num_samples:\n break\n\n return steps, stats",
"def __init__(self, gym_env: gym.Env) -> None:\n super().__init__()\n self._queue: Queue = Queue()\n self._action_counter: int = 0\n self.gym_address = str(GYM_CONNECTION_PUBLIC_ID)\n self._agent = ProxyAgent(\n name=\"proxy\", gym_env=gym_env, proxy_env_queue=self._queue\n )\n self._agent_thread = Thread(target=self._agent.start)\n self._active_dialogue = None # type: Optional[GymDialogue]\n self.gym_skill = \"fetchai/gym:0.1.0\"\n self.gym_dialogues = GymDialogues(self.gym_skill, role_from_first_message)",
"def Collecting_experiences(self)-> None:\n for epoch_no in range(self.epochs):\n print(\"EPOCH %d\", epoch_no + 1)\n \n #beam_dqn = self.beam_min + int(self.beam_max * epoch_no/self.epochs)\n #egreed = self.egreed_max*(1 - epoch_no/(1.1*self.epochs))\n #self.gamma = self.gamma_max*(1 - epoch_no/(2*self.epochs))\n\n beam_dqn = 1\n egreed = 0.5\n #self.gamma = self.gamma_max\n self.gamma = 0.6\n\n self.tb_writer.add_scalar(\"parameters/beam_dqn\",\n beam_dqn, epoch_no)\n self.tb_writer.add_scalar(\"parameters/egreed\",\n egreed, epoch_no)\n self.tb_writer.add_scalar(\"parameters/gamma\",\n self.gamma, epoch_no)\n if beam_dqn > self.actions_size:\n print(\"The beam_dqn cannot exceed the action size!\")\n print(\"then the beam_dqn = action size\")\n beam_dqn = self.actions_size\n\n print(' beam_dqn, egreed, gamma: ', beam_dqn, egreed, self.gamma)\n for _, data_set in self.data_to_train_dqn.items():\n \n valid_iter = make_data_iter(\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\n shuffle=False, train=False)\n #valid_sources_raw = data_set.src\n # disable dropout\n #self.model.eval()\n\n i_sample = 0\n for valid_batch in iter(valid_iter):\n freeze_model(self.model)\n batch = Batch(valid_batch\n , self.pad_index, use_cuda=self.use_cuda)\n \n encoder_output, encoder_hidden = self.model.encode(\n batch.src, batch.src_lengths,\n batch.src_mask)\n # if maximum output length is not globally specified, adapt to src len\n \n if self.max_output_length is None:\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\n \n batch_size = batch.src_mask.size(0)\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\n dtype=torch.long)\n output = []\n hidden = self.model.decoder._init_hidden(encoder_hidden)\n prev_att_vector = None\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\n\n # print(\"Source_raw: \", batch.src)\n # print(\"Target_raw: \", batch.trg_input)\n # print(\"y0: \", prev_y)\n \n \n \n exp_list = []\n # pylint: disable=unused-variable\n for t in range(self.max_output_length):\n if t != 0:\n if self.state_type == 'hidden':\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n if t == 0:\n state = hidden[0].squeeze(1).detach().cpu().numpy()[0]\n else:\n state = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n \n # decode one single step\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=batch.src_mask,\n trg_embed=self.model.trg_embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # logits: batch x time=1 x vocab (logits)\n if t != 0:\n if self.state_type == 'hidden':\n state_ = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n state_ = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n # if t == 0:\n # print('states0: ', state, state_)\n\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\n \n if random.uniform(0, 1) < egreed:\n i_ran = random.randint(0,beam_dqn-1)\n next_word = torch.argsort(logits, descending=True)[:, :, i_ran]\n else:\n next_word = torch.argmax(logits, dim=-1) # batch x time=1\n # if t != 0:\n a = prev_y.squeeze(1).detach().cpu().numpy()[0]\n #a = next_word.squeeze(1).detach().cpu().numpy()[0]\n \n # print(\"state \",t,\" : \", state )\n # print(\"state_ \",t,\" : \", state_ )\n # print(\"action \",t,\" : \", a )\n # 
print(\"__________________________________________\")\n\n output.append(next_word.squeeze(1).detach().cpu().numpy())\n\n #tup = (self.memory_counter, state, a, state_)\n \n \n prev_y = next_word\n # check if previous symbol was <eos>\n is_eos = torch.eq(next_word, self.eos_index)\n finished += is_eos\n if t != 0:\n self.memory_counter += 1\n tup = (self.memory_counter, state, a, state_, 1)\n exp_list.append(tup)\n \n #print(t)\n # stop predicting if <eos> reached for all elements in batch\n if (finished >= 1).sum() == batch_size:\n a = next_word.squeeze(1).detach().cpu().numpy()[0]\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, np.zeros([self.state_size]), 0)\n exp_list.append(tup)\n #print('break')\n break\n if t == self.max_output_length-1:\n #print(\"reach the max output\")\n a = 0\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, -1*np.ones([self.state_size]), 1)\n exp_list.append(tup)\n \n \n \n \n #Collecting rewards\n hyp = np.stack(output, axis=1) # batch, time\n\n if epoch_no == 0:\n if i_sample == 0 or i_sample == 3 or i_sample == 6:\n #print(i_sample)\n r = self.Reward(batch.trg, hyp, show=True) # 1 , time-1 \n else:\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n else:\n #print(\"aaaa - \",i_sample)\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n \n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\n # print(\"\\n Sample Collected: \", i_sample, \"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\")\n # print(\"Target: \", batch.trg, decoded_valid_out_trg)\n # print(\"Eval : \", stacked_output, decoded_valid_out)\n # print(\"Reward: \", r, \"\\n\")\n \n i_sample += 1\n self.store_transition(exp_list, r)\n \n #Learning.....\n if self.memory_counter > self.mem_cap - self.max_output_length:\n self.learn()\n \n self.tb_writer.close()",
"def add(self, experience):\n self.buffer.append(experience)",
"def eval(self) -> None:\n\n config = self.config.clone()\n\n if len(self.config.VIDEO_OPTION) > 0:\n config.defrost()\n config.NUM_ENVIRONMENTS = 1\n config.freeze()\n\n logger.info(f\"env config: {config}\")\n with construct_envs(config, get_env_class(config.ENV_NAME)) as envs:\n observations = envs.reset()\n batch = batch_obs(observations, device=self.device)\n\n current_episode_reward = torch.zeros(\n envs.num_envs, 1, device=self.device\n )\n ppo_cfg = self.config.RL.PPO\n test_recurrent_hidden_states = torch.zeros(\n config.NUM_ENVIRONMENTS,\n self.actor_critic.net.num_recurrent_layers,\n ppo_cfg.hidden_size,\n device=self.device,\n )\n prev_actions = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.long,\n )\n not_done_masks = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.bool,\n )\n\n rgb_frames = [\n [] for _ in range(self.config.NUM_ENVIRONMENTS)\n ] # type: List[List[np.ndarray]]\n\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n self.actor_critic.eval()\n\n for _i in range(config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS):\n current_episodes = envs.current_episodes()\n\n with torch.no_grad():\n (\n _,\n actions,\n _,\n test_recurrent_hidden_states,\n ) = self.actor_critic.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n\n observations, rewards, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n batch = batch_obs(observations, device=self.device)\n\n not_done_masks = torch.tensor(\n [[not done] for done in dones],\n dtype=torch.bool,\n device=\"cpu\",\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=self.device\n ).unsqueeze(1)\n\n current_episode_reward += rewards\n\n # episode ended\n if not not_done_masks[0].item():\n generate_video(\n video_option=self.config.VIDEO_OPTION,\n video_dir=self.config.VIDEO_DIR,\n images=rgb_frames[0],\n episode_id=current_episodes[0].episode_id,\n checkpoint_idx=0,\n metrics=self._extract_scalars_from_info(infos[0]),\n tb_writer=None,\n )\n\n print(\"Evaluation Finished.\")\n print(\"Success: {}\".format(infos[0][\"episode_success\"]))\n print(\n \"Reward: {}\".format(current_episode_reward[0].item())\n )\n print(\n \"Distance To Goal: {}\".format(\n infos[0][\"object_to_goal_distance\"]\n )\n )\n\n return\n\n # episode continues\n elif len(self.config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[0], infos[0])\n rgb_frames[0].append(frame)\n\n not_done_masks = not_done_masks.to(device=self.device)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]",
"def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)",
"def append(self, experience):\n self.buffer.append(experience)\n self.number += 1",
"def render_single(env, policy, render = False, n_episodes=100):\n total_rewards = 0\n for _ in range(n_episodes):\n ob = env.reset() # initialize the episode\n done = False\n while not done:\n if render:\n env.render() # render the game\n ############################\n # YOUR IMPLEMENTATION HERE #\n #env.step(np.where(policy[0]==1)[0].tolist()[0])\n agent_next_step=env.step(np.argmax(policy[ob,:]))\n ob=agent_next_step[0]\n reward= agent_next_step[1]\n done= agent_next_step[2]\n total_rewards+=reward\n if done:\n break\n return total_rewards",
"def __init__(self, env: CityLearnEnv, **kwargs: Any):\n\n super().__init__(env, **kwargs)\n\n # internally defined\n self.normalized = [False for _ in self.action_space]\n self.soft_q_criterion = nn.SmoothL1Loss()\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.replay_buffer = [ReplayBuffer(int(self.replay_buffer_capacity)) for _ in self.action_space]\n self.soft_q_net1 = [None for _ in self.action_space]\n self.soft_q_net2 = [None for _ in self.action_space]\n self.target_soft_q_net1 = [None for _ in self.action_space]\n self.target_soft_q_net2 = [None for _ in self.action_space]\n self.policy_net = [None for _ in self.action_space]\n self.soft_q_optimizer1 = [None for _ in self.action_space]\n self.soft_q_optimizer2 = [None for _ in self.action_space]\n self.policy_optimizer = [None for _ in self.action_space]\n self.target_entropy = [None for _ in self.action_space]\n self.norm_mean = [None for _ in self.action_space]\n self.norm_std = [None for _ in self.action_space]\n self.r_norm_mean = [None for _ in self.action_space]\n self.r_norm_std = [None for _ in self.action_space]\n self.set_networks()",
"def __init__(self, agent, make_env=lambda:gym.make(\"SpaceInvaders-v0\"), n_games=1, max_size=None,\n preprocess_observation = lambda obs:obs,agent_step=None):\n if not isinstance(make_env, function):\n env_name = make_env\n make_env = lambda: gym.make(env_name)\n\n #create atari games\n self.make_env = make_env\n self.envs = [self.make_env() for _ in range(n_games)]\n self.preprocess_observation = preprocess_observation\n\n\n #initial observations\n self.prev_observations = [self.preprocess_observation(make_env.reset()) for make_env in self.envs]\n\n #agent memory variables (if you use recurrent networks\n self.prev_memory_states = [np.zeros((n_games,)+tuple(mem.output_shape[1:]),\n dtype=get_layer_dtype(mem))\n for mem in agent.agent_states]\n\n #save agent\n self.agent = agent\n self.agent_step = agent_step or agent.get_react_function()\n\n # Create experience replay environment\n self.experience_replay = SessionPoolEnvironment(observations=agent.observation_layers,\n actions=agent.action_layers,\n agent_memories=agent.agent_states)\n self.max_size = max_size\n\n #whether particular session has just been terminated and needs restarting\n self.just_ended = [False] * len(self.envs)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')",
"def add(self, experience: []):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:1] = []\n self.buffer.append(experience)",
"def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):\n\n utils.EzPickle.__init__(self, game, obs_type)\n assert obs_type in ('ram', 'image')\n\n self.game_path = atari_py.get_game_path(game)\n if not os.path.exists(self.game_path):\n raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))\n self._obs_type = obs_type\n self.frameskip = frameskip\n self.ale = ALEInterface()\n self.viewer = None\n\n # Tune (or disable) ALE's action repeat:\n # https://github.com/openai/gym/issues/349\n assert isinstance(repeat_action_probability, (float, int)), \"Invalid repeat_action_probability: {!r}\".format(repeat_action_probability)\n self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)\n\n self._seed()\n\n (screen_width, screen_height) = self.ale.getScreenDims()\n self._buffer = np.empty((screen_height, screen_width, 3), dtype=np.uint8)\n\n self._action_set = self.ale.getMinimalActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n\n (screen_width,screen_height) = self.ale.getScreenDims()\n if self._obs_type == 'ram':\n self.observation_space = spaces.Box(low=np.zeros(128), high=np.zeros(128)+255)\n elif self._obs_type == 'image':\n self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3))\n else:\n raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))",
"def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')",
"def test_cloned_policy(env, cloned_policy, num_episodes=50, render=True):\n total_rewards = []\n\n for i in range(num_episodes):\n print('Starting episode {}'.format(i))\n total_reward = 0\n state = env.reset()\n if render:\n env.render()\n time.sleep(.01)\n is_done = False\n while not is_done:\n action = np.argmax(\n cloned_policy.predict_on_batch(state[np.newaxis, ...])[0])\n state, reward, is_done, _ = env.step(action)\n total_reward += reward\n if render:\n env.render()\n time.sleep(.1)\n print(\n 'Total reward: {}'.format(total_reward))\n total_rewards.append(total_reward)\n\n mean = np.mean(total_rewards)\n std = np.std(total_rewards)\n print('Average total reward: {} (std: {})'.format(\n mean, std))\n\n return total_rewards",
"def __init__(self, environment):\n self.env = environment\n self.cumreward = 0 # tracking cumulative reward\n self.samples = 0 # tracking the number of samples\n\n self.sensor_limits = None\n self.actor_limits = None\n self.clipping = True\n\n self.current_action = 0 # Saving current action\n self.prev_action = -1 # Saving previous action",
"def store(self, experience):\n\n self.memory.store(experience)",
"def __init__(self, capacity):\n self.experiences = RingBuf(capacity)",
"def collect_episode(environment, policy, num_episodes, replay_buffer_observer):\n initial_time_step = environment.reset()\n\n driver = py_driver.PyDriver(\n environment,\n py_tf_eager_policy.PyTFEagerPolicy(policy, use_tf_function=True),\n [replay_buffer_observer],\n max_episodes=num_episodes,\n )\n initial_time_step = environment.reset()\n driver.run(initial_time_step)",
"def new_gen(self,agents,probs,p_c,p_mut_div,p_mut_fit,tour_size,elite):\n new_agents = []\n n_layers = len(agents[0].get_weights())\n # carrying over elite agent\n new_agents.append(AtariNet(\n self.obs_shape,\n self.action_shape,\n self.net_conf))\n new_agents[-1].set_weights(agents[elite].get_weights())\n exploration_size = 0\n for _ in range(len(agents)-1):\n n_parent = np.random.choice([1,2],1,p=[1-p_c,p_c])[0] # selecting whether to use crossover\n exploration_size += int(2-n_parent) # counting members of exploration population\n parent = self._tournament(probs,n_parent,tour_size)\n p_mut = self._calc_p_mut(parent,p_mut_div,p_mut_fit)\n offspring = self._create_offspring(agents,parent,n_layers,p_mut)\n new_agents.append(offspring)\n return new_agents, exploration_size",
"def render_episode(env, policy):\n\n episode_reward = 0\n ob = env.reset()\n for t in range(100):\n env.render()\n time.sleep(0.5) \n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n episode_reward += rew\n if done:\n break\n assert done\n env.render()\n print(\"Episode reward: %f\" % episode_reward)",
"def retrieve_capabilities(self, url, urlchain=[], pool=None, identity=None):\n\n # detect loops in capability links\n if url in urlchain:\n return\n\n if not self._default_url:\n self.set_default_url(url)\n\n if isinstance(url, str):\n url = urllib3.util.parse_url(url)\n\n if identity is None:\n identity = self._tls_state.extract_peer_identity(url)\n\n if pool is None:\n if url.host is not None:\n pool = self._tls_state.pool_for(url.scheme, url.host, url.port)\n else:\n raise ValueError(\"HttpInitiatorClient capability retrieval missing connection pool\")\n\n if url.path is not None:\n path = url.path\n else:\n path = \"/\"\n res = pool.request('GET', path)\n\n if res.status == 200:\n ctype = res.getheader(\"Content-Type\")\n if ctype == \"application/x-mplane+json\":\n\n # Probably an envelope. Process the message.\n self.handle_message(\n mplane.model.parse_json(res.data.decode(\"utf-8\")), identity)\n elif ctype == \"text/html\":\n # Treat as a list of links to capability messages.\n parser = CrawlParser()\n parser.feed(res.data.decode(\"utf-8\"))\n parser.close()\n for capurl in parser.urls:\n self.retrieve_capabilities(url=capurl,\n urlchain=urlchain + [url],\n pool=pool, identity=identity)"
] | [
"0.5506977",
"0.5496875",
"0.514291",
"0.5113934",
"0.49886903",
"0.4943921",
"0.49165386",
"0.49162146",
"0.48796564",
"0.48758185",
"0.48588508",
"0.48554084",
"0.4835768",
"0.48339865",
"0.4816758",
"0.48112303",
"0.47756678",
"0.47557953",
"0.47492647",
"0.4732167",
"0.47211218",
"0.47182503",
"0.47160736",
"0.47087047",
"0.46735516",
"0.46719927",
"0.4670823",
"0.46672094",
"0.4664535",
"0.46585825"
] | 0.63224953 | 0 |
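A minimal, self-contained sketch of the reversed GAE recursion that appears in the advantage-computation snippet above; the tiny 1-D tensors and the `bootstrap_value`/`final_mask` names are illustrative assumptions standing in for the per-process rollout buffers, not part of the original code.

```python
# Minimal sketch of the reversed GAE recursion from the snippet above.
# The toy tensors, "bootstrap_value" and "final_mask" are assumptions.
import torch

discount, gae_lambda = 0.99, 0.95
rewards = torch.tensor([1.0, 0.0, 0.0, 1.0])
values = torch.tensor([0.5, 0.4, 0.3, 0.2])
masks = torch.tensor([1.0, 1.0, 1.0, 1.0])   # 0.0 would mark an episode reset
bootstrap_value = torch.tensor(0.1)          # V(s_T) from one extra forward pass
final_mask = torch.tensor(1.0)

T = len(rewards)
advantages = torch.zeros(T)
for i in reversed(range(T)):
    next_mask = masks[i + 1] if i < T - 1 else final_mask
    next_value = values[i + 1] if i < T - 1 else bootstrap_value
    next_advantage = advantages[i + 1] if i < T - 1 else 0.0
    delta = rewards[i] + discount * next_value * next_mask - values[i]
    advantages[i] = delta + discount * gae_lambda * next_advantage * next_mask

returns = values + advantages   # the "returnn = value + advantage" field above
print(advantages)
print(returns)
```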
Performs an update of the QNetwork using the provided optimizer and buffer | def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bellman equation to update the Q-Network
target = (
reward
+ (1.0 - done)
* GAMMA
* torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
)
mask = torch.zeros((len(batch), action_size))
mask.scatter_(1, action, 1)
      prediction = torch.sum(q_net(obs) * mask, dim=1, keepdim=True)
criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
# Perform the backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_optimizer(self, context, optimizer, host):\n pass",
"def learn(self):\n ## obtain sample batch using priority based sampling.\n states, actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)",
"def _refresh_buffers(self) -> None:",
"def reload(self,offline_buffer):\n #loading online buffer from offline buffer by sampling (online_buffer.buffer_size) samples \n self.buffer = SumTree(self.buffer_size)\n names, idxs = offline_buffer.sample_batch(self.buffer_size)\n self.offline_idxs = idxs\n state , action , reward, done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[0])\n #loop on names and load in the online buffer\n for i in range(len(names)-1):\n next_state , next_action , next_reward , done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[i+1])\n #done = 0\n self.memorize(state, action, reward, done, next_state, error=[1])\n state , action , reward = next_state , next_action , next_reward",
"def update(src):",
"def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)",
"def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()",
"def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())",
"def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()",
"def updateRPC(loc,weight): #status: Done, not tested\r\n pass",
"def _update_optimizer(self, hyperparameters, score, fit=True):\n if self.do_maximize:\n score = -score\n self.optimizer_result = self.optimizer.tell(hyperparameters, score, fit=fit)",
"def update(self, params):",
"def update_parameters(parameters, grads, learning_rate):\n pass",
"def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()",
"def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()",
"def run_optimization(self):\n # Get batch\n (obs, action, old_logp, old_value, return_, advantage) = self.buffer.eject()\n\n # Train pi\n print(\"-\" * 20 + \"\\nPi Update\" + \"\\n\" + \"-\" * 20)\n (policy_loss, entropy,\n kl_divergence, clipping_fraction, steps) = self.update_actor(obs, action, old_logp, advantage)\n\n # Train value function\n print(\"-\" * 20 + \"\\nValue Function Update\" + \"\\n\" + \"-\" * 20)\n (value_loss,\n explained_variance) = self.update_critic(obs, old_value, return_)\n\n # Logging\n self.update_counter += 1\n self.log_update(policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps)\n\n # Update learning rate\n self.decay_lr()\n\n # Save current weights (overwrites previous weights)\n self.save_weights()\n\n # Empty scenario counter\n self.scenario_counter = dict.fromkeys(self.scenario_counter, 0)",
"def __init__(self, state_size, action_size, fc1_units, fc2_units, buffer_size, batch_size, alpha, gamma, tau,\n local_update_every, target_update_every, seed, a, b, b_increase, b_end, dbl_dqn=False, priority_rpl=False, duel_dqn=False):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Hyperparameters\n self.alpha = alpha # Learning rate\n self.gamma = gamma # Discount parameter\n self.tau = tau # Interpolation parameter\n self.local_update_every = local_update_every # Number of actions to take before updating local net weights\n self.target_update_every = target_update_every # Number of actions to take before updating target net weights\n self.batch_size = batch_size # Number of experiences to sample during learning\n self.buffer_size = buffer_size # Size of memory buffer\n self.a = a # Sampling probability (0=random | 1=priority)\n self.b = b # Influence of importance sampling weights over learning\n self.b_increase = b_increase # Amount to increase b by every learning step\n self.b_end = b_end # Maximum value for b\n\n # Agent modifications\n self.dbl_dqn = dbl_dqn # Double Q Learning\n self.priority_rpl = priority_rpl # Prioritised Experience Replay\n self.duel_dqn = duel_dqn # Duelling Q Networks\n\n # Q-Network\n if self.duel_dqn:\n self.qnetwork_local = DuellingQNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.qnetwork_target = DuellingQNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n else:\n self.qnetwork_local = QNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.alpha)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, buffer_size, batch_size, seed, priority_rpl)\n # Initialize time step (for updating every local_update_every/target_update_every steps)\n self.t_step = 0",
"def update_params(self, learning_rate=0.1):\n\n self.params['W'] = self.params['W'] - learning_rate * self.dW # update weights\n self.params['b'] = self.params['b'] - learning_rate * self.db # update bias(es)",
"def updateGraphs(self):\n # first update all three buffers\n tuiBufferName = self.dataClient.recv() # receive 'error'\n while tuiBufferName != 'end buffers':\n tuiData = self.dataClient.recv()\n self.logger.debug(f'Appending {tuiData} to buffer {tuiBufferName}')\n\n if(tuiBufferName == 'error'):\n self.model.errorBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output'):\n self.model.outputBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'reference'):\n self.model.referenceBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output-error'):\n self.model.errorPercentage = tuiData.flat[0]\n\n tuiBufferName = self.dataClient.recv()",
"def update(self, ex):\r\n if not self.optimizer:\r\n raise RuntimeError('No optimizer set.')\r\n\r\n # Train mode\r\n self.network.train()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n # Run forward\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n # Compute loss and accuracies\r\n loss = self.criterion(score, label)\r\n\r\n if self.args.gradient_accumulation_steps > 1:\r\n loss = loss / self.args.gradient_accumulation_steps\r\n\r\n if self.args.fp16:\r\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n\r\n if (self.updates + 1) % self.args.gradient_accumulation_steps == 0:\r\n if self.args.fp16:\r\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.grad_clipping)\r\n else:\r\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\r\n\r\n self.optimizer.step()\r\n self.scheduler.step() # Update learning rate schedule\r\n self.optimizer.zero_grad()\r\n\r\n self.updates += 1\r\n\r\n return loss.item()",
"def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target",
"def _update(self, nbrs, nbrs_y, query, query_y):\n\n # Set up the graph for our shared memory variables\n new_K, new_A, new_V = self.K, self.A, self.V\n\n # Condition (1): First returned neighbour shares the same query label\n correct_query = T.eq(nbrs_y[:, 0], query_y).nonzero()[0]\n correct_mem = nbrs[correct_query, 0] # Idx to memory keys\n\n normed_keys = tensor_norm(query[correct_query] + new_K[correct_mem])\n new_K = T.set_subtensor(new_K[correct_mem], normed_keys)\n new_A = T.set_subtensor(new_A[correct_mem], 0.)\n\n # Condition (2): First returned neighbour does not share query label.\n # Add the key and label from query to memory\n incorrect_mask = T.neq(nbrs_y[:, 0], query_y)\n incorrect_query = incorrect_mask.nonzero()[0]\n\n # We need to find len(incorrect_query) locations in memory to write to.\n # Noise is added to randomize selection.\n age_mask = T.ge(new_A, T.max(new_A) - self.C) #1d\n oldest_idx = tensor_choose_k(age_mask, self.rng,\n k=T.sum(incorrect_mask),\n random=True).flatten()\n\n new_K = T.set_subtensor(new_K[oldest_idx], query[incorrect_query])\n new_V = T.set_subtensor(new_V[oldest_idx], query_y[incorrect_query])\n new_A = T.set_subtensor(new_A[oldest_idx], 0.)\n\n # Increment the age of all non-updated indices by 1\n new_A = new_A + 1.\n new_A = T.inc_subtensor(new_A[correct_mem], -1.)\n new_A = T.inc_subtensor(new_A[oldest_idx], -1.)\n\n return OrderedDict({(self.K, new_K), (self.V, new_V), (self.A, new_A)})",
"def update(self, niter, expert_gen, pq_buffer, batch_size, num_grad_steps):\n self.train()\n pqb_gen = pq_buffer.data_gen_infinite(min(batch_size, len(pq_buffer)))\n\n if niter <= self.warmup:\n num_grad_steps *= (self.warmup + 1 - niter)\n\n loss_val = 0\n n = 0\n for _ in range(num_grad_steps):\n\n expert_batch = next(expert_gen)\n pqb_batch = next(pqb_gen)\n\n expert_state = expert_batch[0]\n pqb_state = pqb_batch[0]\n\n pqb_out = self.tower(pqb_state)\n expert_out = self.tower(expert_state)\n\n reward_bias = - torch.clamp(pqb_out, max=0).mean(0) - torch.clamp(expert_out, max=0).mean(0)\n loss = pqb_out.mean(0) - expert_out.mean(0) + 2*reward_bias\n\n loss_val += loss.item()\n n += 1\n\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.parameters(), max_norm=10.)\n self.optimizer.step()\n\n # weight clamping to enforce the Lipchitz constraint\n for p in self.parameters():\n p.data.clamp_(-self.clip, self.clip)\n\n return loss_val / n",
"def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()",
"def update(self, sess, batch, *args, **kwargs):\n # Calculated target Q values using target estimator\n assert \"state\" in batch and \"action\" in batch and \\\n \"reward\" in batch and \"next_state\" in batch and \\\n \"episode_done\" in batch\n target_q_val = self._target_estimator.estimate(\n batch[\"state\"], batch[\"action\"], batch[\"reward\"],\n batch[\"next_state\"], batch[\"episode_done\"])\n\n # Prepare data and fit Q network\n feed_dict = {self._input_target_q: target_q_val,\n self._input_action: batch[\"action\"]}\n if \"_weight\" in batch:\n feed_dict[self._input_sample_weight] = batch[\"_weight\"]\n feed_dict.update(self._q.input_dict(batch[\"state\"]))\n fetch_dict = {\n \"action\": batch[\"action\"], \"reward\": batch[\"reward\"],\n \"done\": batch[\"episode_done\"],\n \"q\": self.selected_q, \"target_q\": target_q_val,\n \"optimizer_loss\": self._sym_loss,\n \"td\": self._op_td,\n \"td_losses\": self._op_losses,\n \"td_losses_weighted\": self._op_losses_weighted}\n update_run = network.UpdateRun(feed_dict=feed_dict, fetch_dict=fetch_dict)\n\n return update_run",
"def update(self):\n # pull all available chunks\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t\n\n # update graph handles\n if self.gbuffer.any():\n for k in range(0, self.channel_count):\n self.handles[k].setData(self.gtimes,\n self.gbuffer[k::self.channel_count])",
"def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here",
"def update_send(self, parameters, loss): #parameters为训练网络的参数\n # Increase the clock value\n self.clock += 1\n\n # Serve the new parameters\n state = {'clock': self.clock, 'loss': loss}\n # 在rx线程中保存此时的loss和模型参数等\n self.rx.set_current_state(state, parameters)\n\n self.fetching = True\n self.tx.fetch_send()",
"def update(self):\n self.arest.update()",
"def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()"
] | [
"0.6433153",
"0.5484779",
"0.5369425",
"0.53328884",
"0.52981716",
"0.52732706",
"0.5266272",
"0.521479",
"0.5200708",
"0.51710343",
"0.5088552",
"0.50860375",
"0.50787526",
"0.507192",
"0.507192",
"0.50715756",
"0.505829",
"0.5000706",
"0.4999721",
"0.4979605",
"0.496521",
"0.49646664",
"0.49576077",
"0.49535808",
"0.49422154",
"0.49336103",
"0.49218482",
"0.49211517",
"0.49178496",
"0.49150893"
] | 0.62525785 | 1 |
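A small standalone illustration of the Bellman-target and action-mask construction used in `update_q_net` above; the `nn.Linear` stand-in network and the toy batch are assumptions for demonstration only, not the original `VisualQNetwork`.

```python
# Toy illustration of the target/mask math in update_q_net above.
# "q_net" is a plain nn.Linear stand-in (an assumption).
import torch

GAMMA = 0.9
ACTION_SIZE = 3
q_net = torch.nn.Linear(4, ACTION_SIZE)

obs = torch.randn(5, 4)                         # batch of 5 observations
next_obs = torch.randn(5, 4)
reward = torch.rand(5, 1)
done = torch.zeros(5, 1)                        # 1.0 would mark terminal steps
action = torch.randint(0, ACTION_SIZE, (5, 1))  # one discrete action per row

# Bellman target: r + gamma * max_a' Q(s', a'), zeroed out on terminal steps
target = reward + (1.0 - done) * GAMMA * torch.max(
    q_net(next_obs).detach(), dim=1, keepdim=True
).values

# One-hot mask selects the Q-value of the action actually taken
mask = torch.zeros(5, ACTION_SIZE)
mask.scatter_(1, action, 1)
prediction = torch.sum(q_net(obs) * mask, dim=1, keepdim=True)

criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
print(loss.item())
```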
Search for the loop-back index; self._in_loop becomes true in the second state of the loop | def _get_lback_index(self, model, last) -> int:
assert last > 0
# last state cannot be loop-back.
assert model.get_value(self.totime(self._in_loop, last)).is_true()
assert model.get_value(self.totime(self._in_loop, 0)).is_false()
idx = last - 1
while model.get_value(self.totime(self._in_loop, idx)).is_true():
idx -= 1
assert idx >= 0
assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true()
assert model.get_value(self.totime(self._in_loop, idx)).is_false()
assert model.get_value(self.totime(self.start_loop, idx)).is_true()
return idx | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect_loop(self):\n tortoise = self.head\n hare = self.head\n while hare:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return True\n return False",
"def bookkeep(self) :\n\t\tself.loopiter += 1",
"def KeepAdvancingSolutionLoop(self):\n return self.step < self.nsteps",
"def step_back_while(cur_index, condition):\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index",
"def loop():\n global loop_idx\n sys.stdout.write('loop index %d/%d\\r\\n' % (loop_idx, _LOOPS))\n time.sleep(0.5)\n loop_idx += 1\n return loop_idx > _LOOPS",
"def end_loop(self):\n # if (not self.tape.current_cell()):\n # Jump to the start of the loop\n self.instruction_pointer = (self.jump_map[self.instruction_pointer]-1)\n #else:\n # pass",
"def backward_step():\n #print 'a step backward'\n maze.turn_left()\n maze.turn_left()\n if maze.found():\n return maze.found()\n maze.go()\n maze.turn_left()\n maze.turn_left()",
"def has_loop(self) -> bool:\n try:\n list(self)\n return False\n except ContainsLoopError:\n return True",
"def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass",
"def goingToBreak(self):\n \n if (\n (self.current_loc == 0 and not self.direction_forward) or\n (self.current_loc == len(self.destinations)-1 and self.direction_forward)\n ):\n return True\n return False",
"def endloop(self):\n try:\n n, start = self._loop_stack[-1]\n except IndexError:\n print(\"No loops remaining.\")\n return\n if n == 1:\n self._loop_stack.pop()\n else:\n self._loop_stack[-1][0] -= 1\n self._pc = start",
"async def checkNewLoop(self):\n pass",
"def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n #print(self.data_handler.get_latest_bar_datetime(self.symbol_list[0]))\n else:\n break\n while self.backtest:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n try:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n except EquityError:\n print('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n try:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n except EquityError:\n print ('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)",
"def _is_at_end(self, binvect):\n last = max(k for (k, v) in enumerate(binvect) if v == 1)\n n_step = len(self.pas)\n steps_between = np.arange(last + 1, n_step)\n if 0 <= len(steps_between) <= self._n_to_end:\n self._set_label(binvect, still)\n for k in steps_between:\n self.labels[k] = still\n return True\n else:\n return False",
"def _is_at_end(self):\n return self.current >= len(self.source)",
"def one_step_back(self):\n if (self.row -1<0):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row - 1][self.column] == False):\n return False\n else:\n self.row -= 1\n self.battery -= 1\n return True",
"def can_go_back(self):\n return self._pointer >= 1",
"def recurrent(self):\n return False",
"def train_loop_pre(self, current_step):\r\n pass",
"def _do_iteration(self):\n return True",
"def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)",
"def has_previous(self):\n if self.idx < len(self.nodes):\n return True\n else:\n return False",
"def detectIfListHasLoop (self):\n\t\tslow = self.head\n\t\tif slow is None:\n\t\t\treturn False\n\t\tfast = self.head\n\t\tlength = 0\n\t\twhile (fast is not None) and ((slow != fast) or (length == 0)):\n\t\t\tslow = slow.getNext()\n\t\t\tfast = fast.getNext()\n\t\t\tif fast is None:\n\t\t\t\tbreak\n\t\t\tfast = fast.getNext()\n\t\t\tlength += 1\n\t\tif (slow == fast) and (length > 0):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def is_up(self):\n self.loop = file_to_loop(self.loopFile)\n if len(self.loop) == 0:\n return False\n return True",
"def backtrack(self):\n last_intersection = self.intersection.pop()\n retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)\n print retrace\n print \"Moving back...\"\n self.current = retrace.pop(0)\n if self.current in self.intersection:\n self.intersection.remove(self.current)\n while retrace:\n position = retrace.pop(0)\n self.move_to_position(position)\n if position in self.intersection:\n self.intersection.remove(position)",
"def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret",
"def algorithm_loop(self):",
"def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n while True:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)",
"def loop(self):\n while not rospy.is_shutdown():\n\n rospy.logdebug(\"Loop\")\n state = self.move_base.get_state()\n\n self.counter +=1\n if(self.counter>6 or state==3):\n rospy.logdebug(\"-------------------------\")\n rospy.logdebug(\"Recalculate Frontriers ! \")\n rospy.logdebug(\"-------------------------\")\n\n self.counter = 0\n frontiers_num = self.update()\n\n #break condition\n if frontiers_num==0 :\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"NO FRONTIERS FOUND EXPLORATION COMPLETE\")\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"---------------------------------------\")\n break\n\n\n\n rate.sleep()",
"def sliding(self):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i] # current pos\n X, Y = self.tilePOS[self.tiles[i]] # target pos\n if x != X or y != Y:\n return True"
] | [
"0.6223293",
"0.6218441",
"0.6189063",
"0.6179153",
"0.608254",
"0.59855604",
"0.5839952",
"0.58170766",
"0.57790136",
"0.57635117",
"0.5739355",
"0.57139474",
"0.57138425",
"0.5695078",
"0.56928277",
"0.56292295",
"0.5568077",
"0.55579966",
"0.55414546",
"0.55264825",
"0.5519991",
"0.5516658",
"0.5503106",
"0.5499639",
"0.5484257",
"0.54798913",
"0.54762626",
"0.54580015",
"0.5457094",
"0.54530954"
] | 0.684357 | 0 |
Stores in a random location in the Linked list | def add(self, item):
if self.count == 0:
random_location = 0
else:
random_location = random.randint(0, self.count - 1)
self.insert(Node(item), random_location) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_location(self):\n return random.choice(self.locations_list)",
"def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()",
"def randVacantPoint(L):\n pliste = vacantPoint(L)\n\n return pliste[random.randint(0, len(pliste)-1)]",
"def getRandom(self):\n index = random.randrange(0, self.length)\n node = self.head\n while index:\n node = node.next\n index -= 1\n return node.val",
"def get_random_link(self):\n return tuple([random.randint(0, d-1) for d in self.link_idxs])",
"def getRandomFromList(self, l):\n if (len(l) == 0):\n return -1\n return l[randint(0, len(l) - 1)]",
"def random_insert_seq(lst, seq):\n insert_locations = random.sample(range(len(lst) + len(seq)), len(seq))\n inserts = dict(zip(insert_locations, seq))\n iter_lst = iter(lst)\n lst[:] = [\n inserts[pos]\n if pos in inserts else next(iter_lst)\n for pos in range(len(lst) + len(seq))]",
"def totem_random():\n random_head()\n random_head()\n random_head()",
"def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)",
"def random_position():\n pos = np.random.randn(3)\n pos[2] = 0\n return pos",
"def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))",
"def randLoc(this):\n from temp_aber import randperc, trapch\n\n if randperc() > 50:\n this.locId = -5\n else:\n this.locId = -183\n\n trapch(this.locId)",
"def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))",
"def topology_random_connect(self, probability):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tif not (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability:\n\t\t\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\t\t\tself.sites[j].neighbors.append(self.sites[i])",
"def random_link(self, state):\n raise NotImplementedError('missing data mixin')",
"def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice",
"def selectRandomFromList(ldata):\n\treturn ldata[randint(0, len(ldata)-1)]",
"def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations",
"def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed",
"def randPlace(self):\r\n random.seed(self.seed)\r\n \r\n # Start placement on Partition A\r\n partA = True\r\n for node in self.G.nodes():\r\n \r\n randSite = random.randint(0,int(self.sitesNum/2)-1)\r\n \r\n if partA:\r\n partSite = self.sitesA\r\n self.G.node[node][\"part\"] = 'A'\r\n \r\n else:\r\n partSite = self.sitesB\r\n self.G.node[node][\"part\"] = 'B'\r\n \r\n while (partSite[randSite].isOcp()):\r\n randSite = random.randint(0,int(self.sitesNum/2)-1) \r\n\r\n partSite[randSite].setCell(node)\r\n self.G.node[node][\"site\"] = partSite[randSite]\r\n \r\n # Toggle partition for next placement\r\n partA = not partA",
"def auto_play_random(self, player=None):\r\n if player is None:\r\n player = self.get_player()\r\n legal_list = self.get_legal_list()\r\n next_move = legal_list.rand_obj()\r\n self.new_edge(next_move)",
"def getRandom(self) -> int:\n index = random.randint(0, len(self.lst) - 1)\n # self.lst[index], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[index]\n # val = self.lst.pop()\n # self.dic.pop(val)\n return self.lst[index]",
"def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt",
"def add_unique_node(node_list, current_company):\n selected_company = int(random.random() * COMPANY_COUNT)\n while selected_company in node_list or current_company == selected_company:\n selected_company = int(random.random() * COMPANY_COUNT)\n node_list.append(selected_company)",
"def copy_list(node):\n curr = node\n map = OrderedDict()\n while curr is not None:\n if not map.get(curr, None):\n map[curr] = Node(curr.val)\n if curr.next and not map.get(curr.next, None):\n map[curr.next] = Node(curr.next.val)\n map[curr].next = map[curr.next]\n if curr.random and not map.get(curr.random, None):\n map[curr.random] = Node(curr.next.random)\n map[curr].random = map[curr.random]\n curr = curr.next\n display(node, next(iter(map)))",
"def getRandom(self):\n ans = self.head\n index = 1\n node = ans.next\n while node:\n value = random.randrange(0, index + 1)\n if value == 0:\n ans = node\n index += 1\n node = node.next\n return ans.val",
"def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))",
"def getRandom(self) -> int:\n count = 0\n temp = self.head\n while temp:\n if random.randint(0,count)==0:\n res = temp.val\n temp = temp.next\n count+=1\n return res",
"def getRandom(self) -> int:\n return random.choice(self.store_list)",
"def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n random_node = self.Node(self.end.x, self.end.y)\n return random_node"
] | [
"0.6553104",
"0.6297904",
"0.6075966",
"0.5918239",
"0.58364403",
"0.5792438",
"0.57132536",
"0.57105196",
"0.56871873",
"0.56744456",
"0.56678665",
"0.56591904",
"0.5640225",
"0.5628301",
"0.5613032",
"0.5598745",
"0.5593619",
"0.5581534",
"0.5567704",
"0.55652964",
"0.55643505",
"0.5558386",
"0.5538791",
"0.5530747",
"0.5529713",
"0.5520587",
"0.54846376",
"0.5480163",
"0.54785717",
"0.54776603"
] | 0.7000383 | 0 |
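The `add` method in the row above relies on a `Node` class, an `insert(node, index)` helper and a `count` attribute that are not shown in the record. A minimal, runnable sketch of those assumed pieces is below; every name here (`Node`, `RandomInsertList`, `insert`, `items`) is a hypothetical fill-in chosen for illustration, not the dataset's actual classes.

```python
import random


class Node:
    def __init__(self, item):
        self.item = item
        self.next = None


class RandomInsertList:
    """Minimal singly linked list whose add() mirrors the snippet above."""

    def __init__(self):
        self.head = None
        self.count = 0

    def insert(self, node, index):
        # Splice `node` in so that it ends up at position `index`.
        if index == 0 or self.head is None:
            node.next = self.head
            self.head = node
        else:
            prev = self.head
            for _ in range(index - 1):
                if prev.next is None:
                    break
                prev = prev.next
            node.next = prev.next
            prev.next = node
        self.count += 1

    def add(self, item):
        # Same logic as the row above: pick a random existing slot.
        if self.count == 0:
            random_location = 0
        else:
            random_location = random.randint(0, self.count - 1)
        self.insert(Node(item), random_location)

    def items(self):
        cur = self.head
        while cur is not None:
            yield cur.item
            cur = cur.next


if __name__ == "__main__":
    lst = RandomInsertList()
    for value in range(5):
        lst.add(value)
    print(list(lst.items()))  # e.g. [3, 1, 4, 0, 2] -- order is random
```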
Returns true if c is a printable character. We do this by checking for ord value above 32 (space), as well as CR (\r), LF (\n) and tab (\t) | def is_printable(c):
return ord(c)>=32 or c in ['\r','\n', '\t'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True",
"def is_printable(b):\n return b in e(string.printable)",
"def is_p4d_printable(c):\n if ord(c) < 0x20:\n return False\n if ord(c) == 0x7F:\n return False\n return True",
"def ascii_printable(s: str) -> bool:\n return frozenset(s).issubset(_ascii_pa)",
"def _is_control(char):\n if char == '\\t' or char == '\\n' or char == '\\r':\n return False\n cat = unicodedata.category(char)\n if cat.startswith('C'):\n return True\n return False",
"def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False",
"def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False",
"def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False # pragma: no cover\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True # pragma: no cover\n return False",
"def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False",
"def isPrintableKey(event_string):\n\n if event_string == \"space\":\n reply = True\n else:\n unicodeString = event_string.decode(\"UTF-8\")\n reply = (len(unicodeString) == 1) \\\n and (unicodeString.isalnum() or unicodeString.isspace()\n or unicodedata.category(unicodeString)[0] in ('P', 'S'))\n debug.println(debug.LEVEL_FINEST,\n \"orca.isPrintableKey: returning: %s\" % reply)\n return reply",
"def is_ascii(token):\n\n printable = set(string.printable)\n\n for char in token:\n if char not in printable:\n return False\n\n return True",
"def isascii(s):\n return len(s) == len(s.encode())",
"def isascii(s):\n return len(s) == len(s.encode())",
"def is_ascii_chars(text):\n is_ascii = True\n try:\n text.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n is_ascii = False\n return is_ascii",
"def is_string_printable(string_):\n return set(string_) - set(string.printable)",
"def _is_whitespace(char):\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True # pragma: no cover\n return False",
"def ishex(char: chr) -> bool:\n return char.isdigit() or char in \"abcdef\"",
"def _is_whitespace(char):\n if char == ' ' or char == '\\t' or char == '\\n' or char == '\\r':\n return True\n cat = unicodedata.category(char)\n if cat == 'Zs':\n return True\n return False",
"def _is_whitespace(char):\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False",
"def __contains_nonascii_characters(string):\n for c in string:\n if not ord(c) < 128:\n return True\n return False",
"def _is_whitespace(char):\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False",
"def isChar(ch):\n ret = libxml2mod.xmlIsChar(ch)\n return ret",
"def ascii_hexchar(s: str) -> bool:\n return frozenset(s).issubset(_ascii_h)",
"def _isascii(string):\n try:\n return len(string) == len(string.encode())\n except UnicodeDecodeError:\n return False\n except UnicodeEncodeError:\n return False",
"def _has_non_ascii_characters(data_string):\r\n try:\r\n data_string.encode('ascii')\r\n except UnicodeEncodeError:\r\n return True\r\n\r\n return False",
"def is_valid_char(src):\n\n return src.isalnum()",
"def isAlphanum(c):\r\n return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or\r\n (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\\\' or (c is not None and ord(c) > 126));",
"def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)",
"def is_character_key(self, p_event):\n p = rffi.cast(RSDL.KeyboardEventPtr, p_event)\n keycode = rffi.getintfield(p.c_keysym, 'c_sym')\n return RSDL.K_BACKSPACE <= keycode <= RSDL.K_z \\\n or RSDL.K_WORLD_0 <= keycode <= RSDL.K_KP_EQUALS \\\n or keycode == RSDL.K_EURO # whoever came up with this being beyond the modifier keys etc...",
"def __valid_char(self, char: str) -> bool:\r\n if char.isdigit():\r\n raise ValueError('Characters can\\'t be numbers')\r\n\r\n return char.isalpha() or char.isspace()"
] | [
"0.77798957",
"0.7536381",
"0.7419245",
"0.7221991",
"0.718354",
"0.7084998",
"0.7084998",
"0.70768076",
"0.70703983",
"0.6941782",
"0.6763601",
"0.66158307",
"0.66158307",
"0.6564988",
"0.6561958",
"0.6391701",
"0.6390051",
"0.6385484",
"0.63393587",
"0.63295555",
"0.6323699",
"0.6310147",
"0.6301625",
"0.63009757",
"0.62561953",
"0.61797297",
"0.6173338",
"0.6128911",
"0.6028381",
"0.6020126"
] | 0.89795405 | 0 |
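A short demo of the `is_printable` predicate from the row above; the sample string is invented. Note that the predicate alone only drops the individual control bytes (BEL, ESC, ...) — the printable body of an escape sequence survives, which is why the `sanitize` helper in the next row strips whole sequences first.

```python
def is_printable(c):
    # Printable if at or above the ASCII space, or one of CR / LF / tab.
    return ord(c) >= 32 or c in ('\r', '\n', '\t')


if __name__ == "__main__":
    sample = "abc\x07def\tghi\x1b[0m\n"
    cleaned = "".join(ch for ch in sample if is_printable(ch))
    # 'abcdef\tghi[0m\n' -- BEL and ESC are gone, but the printable
    # tail of the escape sequence ("[0m") remains.
    print(repr(cleaned))
```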
Filter control characters out of the string buf, given a list of control codes that represent backspaces, and a regex of escape sequences. backspaces are characters emitted when the user hits backspace. This will probably vary from terminal to terminal, and this list should grow as new terminals are encountered. escape_regex is a Regex filter to capture all escape sequences. | def sanitize(buf,
backspaces=['\x08\x1b[K', '\x08 \x08'],
escape_regex=re.compile(r'\x1b(\[|\]|\(|\))[;?0-9]*[0-9A-Za-z](.*\x07)?')):
# Filter out control characters
# First, handle the backspaces.
for backspace in backspaces:
try:
while True:
ind = buf.index(backspace)
buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))
except:
pass
strip_escapes = escape_regex.sub('',buf)
# strip non-printable ASCII characters
clean = ''.join([x for x in strip_escapes if is_printable(x)])
return clean | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def escape(self, text, escape_chars):\n _bs = \"\\\\\"\n # backslash is always escaped\n text = text.replace(_bs, _bs * 2)\n for _el in escape_chars:\n assert _el != _bs, \"Backslash has been already escaped\"\n text = text.replace(_el, _bs + _el)\n return text",
"def escapedSeq(term):\n\tfor char in term:\n\t\tif char in escapeRules.keys():\n\t\t\tyield escapeRules[char]\n\t\telse:\n\t\t\tyield char",
"def loop_escaped(val, c):\n if not val:\n val = ''\n val = as_unicode(val)\n rc = re.compile(r'([^%s\\\\]|\\\\.)*' % re.escape(c))\n pos = 0\n while pos < len(val):\n if val[pos] == c:\n pos += 1\n continue\n m = rc.match(val, pos)\n if not m:\n raise Exception('rx bug')\n pos = m.end()\n yield unescape(m.group(0))",
"def strip_from_ansi_esc_sequences(text):\n # esc[ + values + control character\n # h, l, p commands are complicated, let's ignore them\n seq_regex = r\"\\x1b\\[[0-9;]*[mKJusDCBAfH]\"\n regex = re.compile(seq_regex)\n start = 0\n response = \"\"\n for match in regex.finditer(text):\n end = match.start()\n response += text[start:end]\n\n start = match.end()\n response += text[start:len(text)]\n return response",
"def _get_escape_translation_table(cls) -> List[str]:\n _escape_table = [chr(x) for x in range(128)]\n _escape_table[0] = \"\\\\0\"\n _escape_table[ord(\"\\\\\")] = \"\\\\\\\\\"\n _escape_table[ord(\"\\n\")] = \"\\\\n\"\n _escape_table[ord(\"\\r\")] = \"\\\\r\"\n _escape_table[ord(\"\\032\")] = \"\\\\Z\"\n _escape_table[ord('\"')] = '\\\\\"'\n _escape_table[ord(\"'\")] = \"\\\\'\"\n return _escape_table",
"def dummyOutEscapeCharacters(self, text):\n \n return re.sub(\"\\\\\\\\.\", \"\\$\", text)\n \n #escape = False\n #escapedText = text\n \n #for i in range(len(text)):\n #if escape:\n #escapedText = escapedText[:i] + self.DUMMY_CHAR + escapedText[i+1:]\n #escape = False\n #elif text[i] == \"\\\\\":\n #escape = True\n #return escapedText",
"def _escaped_text_from_text(text, escapes=\"eol\"):\n #TODO:\n # - Add 'c-string' style.\n # - Add _escaped_html_from_text() with a similar call sig.\n import re\n\n if isinstance(escapes, base_string_type):\n if escapes == \"eol\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\"}\n elif escapes == \"whitespace\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\",\n '\\t': \"\\\\t\", ' ': \".\"}\n elif escapes == \"eol-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\"}\n elif escapes == \"whitespace-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\", '\\t': \"\\\\t\", ' ': '.'}\n else:\n raise ValueError(\"unknown text escape style: %r\" % escapes)\n\n # Sort longer replacements first to allow, e.g. '\\r\\n' to beat '\\r' and\n # '\\n'.\n escapes_keys = list(escapes.keys())\n try:\n escapes_keys.sort(key=lambda a: len(a), reverse=True)\n except TypeError:\n # Python 2.3 support: sort() takes no keyword arguments\n escapes_keys.sort(lambda a,b: cmp(len(a), len(b)))\n escapes_keys.reverse()\n def repl(match):\n val = escapes[match.group(0)]\n return val\n escaped = re.sub(\"(%s)\" % '|'.join([re.escape(k) for k in escapes_keys]),\n repl,\n text)\n\n return escaped",
"def escape_control_characters(text: str, keep_spacing=True) -> str:\n if not isinstance(text, str):\n raise ValueError(\"text type must be unicode but is {}\".format(type(text).__name__))\n\n trans = _control_char_trans_newline if keep_spacing else _control_char_trans\n return text.translate(trans)",
"def _create_char_spinner():\r\n while True:\r\n for c in '|/-\\\\':\r\n yield c",
"def _terminal_command_regexes(self):\n patterns = {}\n for intent, keys in self.keywords.get(\"terminal\").items():\n if keys:\n patterns[intent] = re.compile(r'\\b' + r'\\b|\\b'.join(keys) + r'\\b')\n return patterns",
"def remove_control_chars(json_string):\n return re.sub('[\\x00-\\x1f]', '',json_string)",
"def ScanRE(self, exp):\n # Make sure the expression is not empty\n assert type(exp) is str \n assert exp\n \n self.NotedRE = list()\n\n i = 0\n while i < len(exp):\n if exp[i] == ' ':\n i += 1\n elif exp[i] == '\\\\':\n ch = exp[i:i + 2]\n i += 2\n else:\n ch = exp[i]\n i += 1\n \n self.NotedRE.append(rule.CheckCharType(ch))",
"def remove_ansi_escape_sequence(self, text):\n\n # By default no string returned\n output = \"\"\n\n # By default no escape sequence found\n esc_found = 0\n\n # Read char by char a string\n for i in text:\n\n # Display char\n # log.info(f\"{str(i).encode('ascii')}\")\n\n # No escape previously found?\n if esc_found == 0:\n\n # No escape sequence currently found\n\n # Escape?\n if i == \"\\x1b\":\n\n # Yes\n log.info(\"Esc!\")\n\n # Escape found\n esc_found = 1\n\n else:\n\n # No\n\n # Then the current char can be saved\n output += i\n\n # Escape previously found?\n elif esc_found == 1:\n\n # Yes\n\n # Then check if this is a CSI sequence\n if i == \"[\":\n\n # Beginning of CSI sequence\n log.info(\"CSI sequence\")\n\n # CSI sequence\n esc_found = 2\n\n else:\n\n # Another Escape sequence\n\n # Keep the escape sequence in the string\n output += \"\\x1b\" + i\n\n # No escape sequence next\n esc_found = 0\n\n else:\n\n # Char between 'a' and 'z' or 'A' and 'Z'?\n if (i >= \"a\" and i <= \"z\") or (i >= \"A\" and i <= \"Z\"):\n\n # Yes\n\n # Then it is the end of CSI escape sequence\n log.info(\"End of escape sequence\")\n\n # No escape sequence next\n esc_found = 0\n\n # Return a string without ANSI escape sequence\n return output",
"def CLEAN(text):\n return _control_char_re.sub('', text)",
"def _zap_esc_map(sub, _epat = re.compile(r'(\\[\\anrfbtv])')):\n for craw, cmap in [(r'\\n', '\\n'), (r'\\\\', '\\\\'), (r'\\r', '\\r'),\n (r'\\t', '\\t'), (r'\\f', '\\f'), (r'\\a', '\\a'),\n (r'\\b', '\\b'), (r'\\v', '\\v')]:\n if _epat.search(sub) is None:\n return sub\n sub = re.sub(craw, cmap, sub)\n return sub",
"def escape(raw_string): \n return ''.join(\n [_caret_escapes_for_unprintables.get(c, c) for c in raw_string])",
"def _escape(strings):\n ret = []\n for string in strings:\n if string == '[' or string == ']' or string == \"\\\"\":\n string = '\\\\' + string\n ret.append(string)\n return \"\".join(ret)",
"def compile_regex(self, paths):\r\n if isinstance(paths, list):\r\n ret = []\r\n for regex in paths:\r\n ret.append(re.compile(regex, re.I))\r\n return ret\r\n else:\r\n return re.compile(paths, re.I)",
"def remove_escape_characters(text):\n text_removed_escape = list(map(lambda x: x.replace(\"\\\\\", \"\").replace(\"'\", \"\").strip().lower(), re.split(r\"(?<=\\\\)[a-z]{1}\", repr(text))))\n text_removed_extra_spaces = list(filter(lambda x: x != \"\", text_removed_escape))\n return \" \".join(text_removed_extra_spaces)",
"def remove_ansi_escape_sequences(input_string):\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n result = ansi_escape.sub('',input_string)\n return result",
"def stripEscapes(s):\r\n result = ''\r\n show = 1\r\n i = 0\r\n L = len(s)\r\n while i < L:\r\n if show == 0 and s[i] in ANSI_TERMINATORS:\r\n show = 1\r\n elif show:\r\n n = s.find(ANSI_ESCAPE_BEGIN, i)\r\n if n == -1:\r\n return result + s[i:]\r\n else:\r\n result = result + s[i:n]\r\n i = n\r\n show = 0\r\n i += 1\r\n return result",
"def _escapeSpecialCharacters(text):\n text.replace('\\\\', '\\\\\\\\')\n escape = ['~', '#', '&', '%', '_']\n for c in escape:\n text = text.replace(c, '\\\\' + c )\n return text",
"def escape_special_characters_for_regex(expression):\n spec_char_escaper = re.compile(r\"[^a-zA-Z0-9]\", re.IGNORECASE)\n expression = re.sub(spec_char_escaper, r'\\1', expression)\n return expression",
"def __create_regex(self):\n self.lexer_regex = \"|\".join(self.tokens)\n logger.debug(f\"Generated tokenizer regex {self.lexer_regex}\")",
"def strip_ansi(content):\n return ANSI_ESCAPES_REGEX.sub('', content)",
"def ansi_escape(text: object) -> str:\n return str(text).replace(\"\\x1b\", \"?\").replace(\"\\b\", \"?\")",
"def test_escape(self):\n bad_str = '''`~!@#$%^&*()_+-={}[]|\\\\;:'\",./<>?\\n\\r\\t '''\n self.run_escape_case(bad_str)",
"def escape_character_in_string(self, a, text):\n logging.debug(\"in escape character \" + text)\n #self.just_read_char()\n self.read_char()\n self.produce(STRING, text)",
"def strip_ansi_escape(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n return re.sub(r\"\\x1b[^m]*m\", \"\", data)",
"def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)"
] | [
"0.49353287",
"0.49150157",
"0.49105355",
"0.46912605",
"0.4635989",
"0.4623271",
"0.4604767",
"0.4595183",
"0.45459825",
"0.4540293",
"0.45323464",
"0.450759",
"0.44997773",
"0.44368735",
"0.44090384",
"0.44047463",
"0.43948525",
"0.43837532",
"0.43768844",
"0.43508714",
"0.43080154",
"0.43022498",
"0.4266281",
"0.42568347",
"0.42557907",
"0.4253599",
"0.42483586",
"0.42445487",
"0.42350715",
"0.4199649"
] | 0.6139948 | 0 |
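A hedged usage sketch for the `sanitize` helper in the row above. It is a condensed restatement, not the original: the `try`/`except` loop is replaced by a membership test with the same effect, the default arguments are tuples, and the sample terminal capture (a user typing `ls -al`, backspacing, retyping, with a color-reset escape) is invented.

```python
import re


def is_printable(c):
    return ord(c) >= 32 or c in ('\r', '\n', '\t')


def sanitize(buf,
             backspaces=('\x08\x1b[K', '\x08 \x08'),
             escape_regex=re.compile(
                 r'\x1b(\[|\]|\(|\))[;?0-9]*[0-9A-Za-z](.*\x07)?')):
    # Remove each backspace sequence together with the character it erased.
    for backspace in backspaces:
        while backspace in buf:
            ind = buf.index(backspace)
            buf = buf[:ind - 1] + buf[ind + len(backspace):]
    # Strip ANSI escape sequences, then any remaining non-printable bytes.
    stripped = escape_regex.sub('', buf)
    return ''.join(ch for ch in stripped if is_printable(ch))


if __name__ == "__main__":
    raw = 'ls -al\x08 \x08l\x1b[0m\r\n'
    print(repr(sanitize(raw)))  # 'ls -al\r\n'
```

The output keeps the retyped command but loses both the backspace edit and the `\x1b[0m` reset sequence, which is the behaviour the docstring describes.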
Tells the child process to resize its window | def resize_child_window(self):
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)
fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")",
"def __window_resizeTo(self, iWidth, iHeight):\n pass",
"def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)",
"def signal_handler(self,sig,data):\n self.resize_child_window()",
"def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)",
"def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()",
"def resize_to(self, width, height):\n\n self.driver.resize_window_to(self.handle, width, height)",
"def resize(self, *args):\n if self.parent is None: # when deleted\n return\n if self.parent.render_window is None: # BasePlotter\n return\n\n if self._prior_window_size != self.parent.window_size:\n self._prior_window_size = self.parent.window_size\n\n actor = self._actors['background']\n image_data = actor.GetInput()\n origin = image_data.GetOrigin()\n extent = image_data.GetExtent()\n spacing = image_data.GetSpacing()\n xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]\n yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]\n yd = (extent[3] - extent[2] + 1) * spacing[1]\n dist = self.camera.distance\n\n # make the longest dimensions match the plotting window\n img_dim = np.array(image_data.dimensions[:2])\n self.camera.focus = np.array([xc, yc, 0.0])\n self.camera.position = np.array([xc, yc, dist])\n\n ratio = img_dim / np.array(self.parent.window_size)\n scale_value = 1\n if ratio.max() > 1:\n # images are not scaled if larger than the window\n scale_value = ratio.max()\n\n if self._scale is not None:\n scale_value /= self._scale\n\n self.camera.parallel_scale = 0.5 * yd / self._scale",
"def setWindowSize(self, width, height, windowHandle='current'):\n cmdId = self.executeCommand(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height), \n \"windowHandle\": windowHandle})\n return cmdId",
"def setWindowSize(width,height):\n dislin.winsiz(width,height)",
"def resize(self, yx=None):\n if yx == None:\n yx = self.screen.getmaxyx()\n self.screen.clear()\n curses.resizeterm(yx[0], yx[1])\n self.setup_windows(resize = True)\n self.screen.refresh()",
"def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()",
"def ev_windowsizechanged(self, event: WindowResized) -> None:",
"def setwinsize(self, rows, cols):",
"def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))",
"def resizeEvent(self, *args, **kwargs):\n self.windowMoved.emit()",
"def resize(self, width, height):\n\n\t\tself._window.resize(width, height)",
"def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)",
"def ev_windowresized(self, event: WindowResized) -> None:",
"def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()",
"def __window_resizeBy(self, xDelta, yDelta):\n pass",
"def setWindowGeometry(x,y,width,height):\n dislin.window(x,y,width,height)",
"def resize(self):\r\n del self.win\r\n self.__create_win()",
"def resize(self, win, width:int, height:int):\r\n\r\n\t\tglViewport(0, 0, width, height)",
"def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:",
"def on_resize(self, width, height):\n self.gamestatemanager.peek().on_resize(width, height)",
"def set_resolution(self, width, height):\n self.driver.set_window_size(width, height, self.driver.window_handles[0])",
"def size_with_window(self, size_with_window):\n\n self.container['size_with_window'] = size_with_window",
"def resizeEvent(self, event):\n super().resizeEvent(event)\n self.resized.emit()",
"def set_igv_window_size(self, width=800, height=600):\n self.set_igv_window_width(width)\n self.set_igv_window_height(height)"
] | [
"0.6948274",
"0.6908833",
"0.67389023",
"0.6696905",
"0.6490529",
"0.6470183",
"0.6419227",
"0.64030665",
"0.6388671",
"0.63874537",
"0.6313409",
"0.6285529",
"0.6282814",
"0.62690467",
"0.62603426",
"0.62542343",
"0.6253225",
"0.6249994",
"0.62148416",
"0.61682737",
"0.61676556",
"0.6089843",
"0.60795474",
"0.6074318",
"0.6050025",
"0.6035615",
"0.60288364",
"0.6027586",
"0.6000829",
"0.59838"
] | 0.7933913 | 0 |
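A Unix-only sketch of the `TIOCGWINSZ` / `TIOCSWINSZ` round trip that `resize_child_window` performs: read the controlling terminal's window size, then push the same packed struct to another descriptor (for example a pty master). The function names and the `isatty` guard are additions for illustration, not part of the record above.

```python
import fcntl
import struct
import sys
import termios


def get_winsize(fd):
    """Return (rows, cols) for the terminal attached to fd."""
    packed = fcntl.ioctl(fd, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0))
    rows, cols, _xpix, _ypix = struct.unpack('HHHH', packed)
    return rows, cols


def copy_winsize(src_fd, dst_fd):
    """Propagate the window size of src_fd to dst_fd (e.g. a pty master fd)."""
    packed = fcntl.ioctl(src_fd, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0))
    fcntl.ioctl(dst_fd, termios.TIOCSWINSZ, packed)


if __name__ == "__main__":
    if sys.stdin.isatty():
        print(get_winsize(sys.stdin.fileno()))  # e.g. (40, 120)
    else:
        print("stdin is not a terminal")
```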
Launch the appropriate shell as a login shell It will be either bash or tcsh depending on what the user is currently running. It checks the SHELL variable to figure it out. | def run_shell():
shell = get_shell()
if shell not in ['bash','tcsh']:
raise ValueError, "Unsupported shell (only works with bash and tcsh)"
os.execvp(shell,(shell,"-l")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loginShell(self, shell=None):\n\n\t\tif shell is None:\n\t\t\traise exceptions.BadArgumentError(\n\t\t\t\t_(u'You must specify a shell'))\n\n\t\tif shell not in LMC.configuration.users.shells:\n\t\t\t\traise exceptions.BadArgumentError(_(u'Invalid shell \"{0}\". '\n\t\t\t\t\t'Valid shells are {1}.').format(stylize(ST_BAD, shell),\n\t\t\t\t\t', '.join(stylize(ST_COMMENT, shell)\n\t\t\t\t\t\tfor shell in LMC.configuration.users.shells)))\n\n\t\twith self.lock:\n\t\t\tself.__loginShell = shell\n\t\t\tself.serialize()\n\n\t\t\tLicornEvent('user_loginShell_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tlogging.notice(_(u'Changed user {0} shell to {1}.').format(\n\t\t\t\tstylize(ST_NAME, self.__login), stylize(ST_COMMENT, shell)))",
"def shell():\n # Provides:\n # shell\n if salt.utils.platform.is_windows():\n env_var = \"COMSPEC\"\n default = r\"C:\\Windows\\system32\\cmd.exe\"\n else:\n env_var = \"SHELL\"\n default = \"/bin/sh\"\n\n return {\"shell\": os.environ.get(env_var, default)}",
"def shell(app, shell_name, shell_path, shell_args): # no cov\n app.ensure_environment_plugin_dependencies()\n\n if app.env == app.env_active:\n app.abort(f'Already in environment: {app.env}')\n\n if app.env in app.project.config.matrices:\n app.display_error(f'Environment `{app.env}` defines a matrix, choose one of the following instead:\\n')\n for env_name in app.project.config.matrices[app.env]['envs']:\n app.display_error(env_name)\n\n app.abort()\n\n if not shell_name:\n shell_name = app.config.shell.name\n if not shell_path:\n shell_path = app.config.shell.path\n if not shell_args:\n shell_args = app.config.shell.args\n\n if not shell_path:\n import shellingham\n\n try:\n shell_name, command = shellingham.detect_shell()\n except shellingham.ShellDetectionFailure:\n from hatch.utils.fs import Path\n\n shell_path = app.platform.default_shell\n shell_name = Path(shell_path).stem\n else:\n if app.platform.windows:\n shell_path = command\n else:\n shell_path, *shell_args = app.platform.modules.shlex.split(command)\n\n with app.project.location.as_cwd():\n environment = app.get_environment()\n app.prepare_environment(environment)\n\n first_run_indicator = app.cache_dir / 'shell' / 'first_run'\n if not first_run_indicator.is_file():\n app.display_waiting(\n 'You are about to enter a new shell, exit as you usually would e.g. '\n 'by typing `exit` or pressing `ctrl+d`...'\n )\n first_run_indicator.parent.ensure_dir_exists()\n first_run_indicator.touch()\n\n environment.enter_shell(shell_name, shell_path, shell_args)",
"def get_shell_type():\n if sys.platform.startswith(\"win\"):\n parent_proc = os.getppid()\n parent_name = Process(parent_proc).name()\n\n if bool(re.match(\"pwsh*|pwsh.exe|powershell.exe\", parent_name)):\n return Shell.POWER_SHELL\n\n return Shell.WINDOWS_COMMAND_PROMPT\n\n return Shell.LINUX",
"def start_shell(self):\n cmd = 'shell'\n end_strs = ['>']\n self.run_with_output(cmd, end_strs)\n return True",
"def djshell():\n if '@' in env.host_string:\n env.shell_host_string = env.host_string\n else:\n env.shell_host_string = '%(user)s@%(host_string)s' % env\n env.shell_default_dir = env.shell_default_dir_template % env\n env.shell_interactive_djshell_str = env.shell_interactive_djshell % env\n if env.is_local:\n cmd = '%(shell_interactive_djshell_str)s' % env\n else:\n cmd = 'ssh -t -i %(key_filename)s %(shell_host_string)s \"%(shell_interactive_djshell_str)s\"' % env\n #print cmd\n os.system(cmd)",
"def test_shell_run_SHELL(tmp_home, tmp_prefix, tmp_env_name, use_prefix, tmp_path):\n skip_if_shell_incompat(\"bash\")\n\n script_path = tmp_path / \"fakeshell.sh\"\n script_path.write_text(\"#!/bin/sh\\nexit 42\")\n script_path.chmod(0o777)\n\n if use_prefix:\n cmd = [helpers.get_umamba(), \"shell\", \"-p\", tmp_prefix]\n else:\n cmd = [helpers.get_umamba(), \"shell\", \"-n\", tmp_env_name]\n\n ret = subprocess.run(cmd, env={**os.environ, \"SHELL\": script_path})\n assert ret.returncode == 42",
"def use_shell(self):\n return self._shell",
"def shell(gui=0, dryrun=0):\n render_remote_paths()\n print 'env.remote_app_dir:',env.remote_app_dir\n env.SITE = env.SITE or env.default_site\n env.shell_x_opt = '-X' if int(gui) else ''\n if '@' in env.host_string:\n env.shell_host_string = env.host_string\n else:\n env.shell_host_string = '%(user)s@%(host_string)s' % env\n env.shell_default_dir = env.shell_default_dir_template % env\n env.shell_interactive_shell_str = env.shell_interactive_shell % env\n if env.is_local:\n cmd = '%(shell_interactive_shell_str)s' % env\n else:\n cmd = 'ssh -t %(shell_x_opt)s -i %(key_filename)s %(shell_host_string)s \"%(shell_interactive_shell_str)s\"' % env\n print cmd\n if int(dryrun):\n return\n os.system(cmd)",
"def shell(self):\r\n channel = self._ssh_client.invoke_shell()\r\n interactive_shell(channel)",
"def detect_shell() -> Optional[str]:\n shell_var = os.environ.get('SHELL')\n if shell_var:\n return os.path.basename(shell_var)\n return None",
"def run_shell(kit):\n context = {\n 'kit': kit,\n }\n try:\n import IPython\n except ImportError:\n interact(local=context)\n else:\n interactive_shell = IPython.frontend.terminal.embed.InteractiveShellEmbed()\n interactive_shell(local_ns=context)",
"def command_shell(\n session_name,\n window_name,\n socket_name,\n socket_path,\n command,\n shell,\n use_pythonrc,\n use_vi_mode,\n):\n server = Server(socket_name=socket_name, socket_path=socket_path)\n\n util.raise_if_tmux_not_running(server=server)\n\n current_pane = util.get_current_pane(server=server)\n\n session = util.get_session(\n server=server, session_name=session_name, current_pane=current_pane\n )\n\n window = util.get_window(\n session=session, window_name=window_name, current_pane=current_pane\n )\n\n pane = util.get_pane(window=window, current_pane=current_pane) # NOQA: F841\n\n if command is not None:\n exec(command)\n else:\n if shell == \"pdb\" or (os.getenv(\"PYTHONBREAKPOINT\") and PY3 and PYMINOR >= 7):\n from tmuxp._compat import breakpoint as tmuxp_breakpoint\n\n tmuxp_breakpoint()\n return\n else:\n from ..shell import launch\n\n launch(\n shell=shell,\n use_pythonrc=use_pythonrc, # shell: code\n use_vi_mode=use_vi_mode, # shell: ptpython, ptipython\n # tmux environment / libtmux variables\n server=server,\n session=session,\n window=window,\n pane=pane,\n )",
"def launch_shell(*, cwd: Optional[pathlib.Path] = None) -> None:\n with emit.pause():\n subprocess.run([\"bash\"], check=False, cwd=cwd)",
"def shell(console):\n return create_shell(\n MANAGE_DICT.get(\"shell\", {}).get(\"console\", console), MANAGE_DICT\n )",
"def get_shell(self, shell):",
"def get_shell(name='bash'):\n if name.startswith('/'):\n return [name]\n return ['/usr/bin/env', name]",
"def get_shell(cls):\n tvars = cls._get_thread_vars()\n if len(tvars['shell_stack']) == 0:\n raise RuntimeError(\"No currently active shell\")\n return tvars['shell_stack'][-1]",
"def __get_adb_shell(self):\n shell = self.command + [\"shell\"]\n if self.root_adb == \"root_adb\":\n # Root adb-specific things\n pass\n elif self.root_adb == \"root_shell\":\n # Root shell-specific things\n shell.extend([\"su\", \"-c\"])\n elif self.root_adb == \"not_root\":\n # Non root-specific things\n pass\n return shell",
"def test_shell_run_activated(tmp_home, tmp_prefix):\n skip_if_shell_incompat(\"bash\")\n stdout = subprocess.check_output(\n [helpers.get_umamba(), \"shell\", \"-p\", tmp_prefix],\n input=\"echo $PATH\",\n text=True,\n )\n assert str(tmp_prefix) in stdout.split(os.pathsep)[0]",
"def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell",
"def _channel_invoke_shell(self) -> None:\n self._shell = True\n self.channel.shell()",
"def use_shell(self, shell):\n return ShellContext(self, shell)",
"def shell():\n\n from IPython.terminal.ipapp import TerminalIPythonApp\n import app.model as m\n from trex.support import quantum\n\n context = dict(\n app = app,\n quantum = quantum,\n m = m,\n )\n\n rc_file = os.path.normpath(os.path.join(app.root_path, os.pardir, 'shell.rc'))\n if os.access(rc_file, os.R_OK):\n execfile(rc_file, context, dict(context=context))\n\n shell = TerminalIPythonApp.instance(\n display_banner = False,\n quick = True,\n user_ns = context,\n )\n shell.initialize(argv=[])\n shell.shell.confirm_exit = False\n\n context = app.test_request_context('__shell__')\n context.push()\n shell.start()\n context.pop()",
"def login_aashell(self):\n flag = 0\n login_aashell = 'telnet 192.168.255.1 15007'\n aashell_prompt = 'AaShell>'\n\n self._current.write(login_aashell)\n self._current.read_until_regexp(aashell_prompt)\n flag = 1\n\n return flag",
"def get_shell(self, shell):\n if shell not in self._shells:\n raise Exception(\n 'Unknown shell \"{}\"'.format(shell)\n )\n return self._shells[shell]",
"def open_shell_and_run_su_user(driver):\n global sudo_results\n cmd = 'sudo ls /var/lib/sudo'\n sudo_results = ssh_sudo(cmd, host, 'ericbsd', 'testing')",
"def shell():\n pass",
"def execute_shell(self, cmd):\n try:\n return common.execute_shell(cmd, False)\n except Exception, e:\n raise exception.TermSaverException(help=_(\n\"\"\"Could not execute the command [%(cmd)s] properly.\n%(message)s \\nError details: %(error)s\"\"\") % {\n \"cmd\": \" \".join(cmd),\n \"message\": \"Make sure you have figlet installed!\",\n \"error\": str(e)\n }\n )",
"def run_shell(cmd: str):\n print_color(f\"** RUNNING: {cmd}\")\n os.system(cmd)"
] | [
"0.6784456",
"0.6614977",
"0.6381293",
"0.6362046",
"0.6199895",
"0.6197317",
"0.6101484",
"0.60623527",
"0.60271186",
"0.59255606",
"0.58501774",
"0.5833172",
"0.5826648",
"0.5798579",
"0.57554454",
"0.56577605",
"0.5635898",
"0.56230944",
"0.55727273",
"0.55245143",
"0.5498706",
"0.54953814",
"0.54935485",
"0.5481472",
"0.547323",
"0.54400617",
"0.5421676",
"0.5295253",
"0.5289749",
"0.52891004"
] | 0.69821674 | 0 |
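The `run_shell` in the row above uses Python 2 `raise` syntax and depends on a `get_shell()` helper that the record does not show. Below is a hedged Python 3 sketch; `get_shell` here is an assumed implementation that simply reads `$SHELL` (as the docstring describes), and the bash fallback is an invented default.

```python
import os


def get_shell():
    """Guess the user's current shell from $SHELL; fall back to bash."""
    return os.path.basename(os.environ.get('SHELL', '/bin/bash'))


def run_shell():
    shell = get_shell()
    if shell not in ('bash', 'tcsh'):
        raise ValueError("Unsupported shell (only works with bash and tcsh)")
    # Replace the current process with a login shell ("-l").
    os.execvp(shell, (shell, "-l"))


if __name__ == "__main__":
    print(get_shell())
    # run_shell()  # uncomment to replace this process with a login shell
```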
Retrieve the name of the directory that will store the logfiles. If the SHELLLOGGERDIR environment variable is set, use that. Otherwise, default to ~/.shelllogger | def get_log_dir():
env_var = "SHELLLOGGERDIR"
if os.environ.has_key(env_var):
return os.environ[env_var]
else:
return os.path.expanduser('~/.shelllogger') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')",
"def log_dir():\r\n if LogOptions._LOG_DIR is None:\r\n LogOptions._LOG_DIR = app.get_options().twitter_common_log_log_dir\r\n return LogOptions._LOG_DIR",
"def get_logging_dir(self):\n return self.logging_dir",
"def logdir(self) -> str:\n return self._logdir",
"def log_directory(self):\n\n return self.get_raw(\"log_directory\")",
"def get_logdir(self):\n return self.event_writer.get_logdir()",
"def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"",
"def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir",
"def get_log_path():\n forch_log_dir = os.getenv('FORCH_LOG_DIR')\n if not forch_log_dir:\n return None\n return os.path.join(forch_log_dir, 'forch.log')",
"def logdir(self):\n return osp.join('runs/', self.net_name, '')",
"def get_log_path():\n return LOG_PATH",
"def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())",
"def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)",
"def getLogFile(self):\r\n return LOG.getLogFile().name",
"def log_filename():\n # use the env variable if set, or fallback to default\n return os.environ.get('NBAUTOEVAL_LOG') \\\n or os.path.join(os.getenv(\"HOME\"), \".nbautoeval\")",
"def logdir(self) -> Path:\n assert (\n self._logdir\n ), \"Log provider has not been tied to a SummaryWriter yet\"\n return self._logdir",
"def get_trial_dir() -> str:\n return logging.root._log_dir # type: ignore",
"def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file",
"def new_custom_log_dir(self) -> str:",
"def get_system_logfile():\n return \"system\" + get_day() + \".log\"",
"def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)",
"def log_path(self):\n return os.path.join(self._sandbox, 'log')",
"def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')",
"def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE",
"def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"",
"def pytest_logger_logsdir(self, config):",
"def logs_directory(self):",
"def get_log_folder(cls, test_suite_name):\n if not test_suite_name:\n test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]\n sdk_path = cls.get_sdk_path()\n log_folder = os.path.join(sdk_path, \"TEST_LOGS\",\n test_suite_name +\n time.strftime(\"_%m%d_%H_%M_%S\", time.localtime(LOG_FOLDER_TIMESTAMP)))\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n return log_folder",
"def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname",
"def get_console_log_filename(self):\n return"
] | [
"0.7954862",
"0.7615036",
"0.75734735",
"0.7549197",
"0.7508237",
"0.74224484",
"0.7221798",
"0.71972513",
"0.7154093",
"0.7087117",
"0.706262",
"0.6988383",
"0.6986975",
"0.69751024",
"0.68904305",
"0.6860191",
"0.6823196",
"0.67315704",
"0.6727358",
"0.6695155",
"0.6671013",
"0.6646318",
"0.6557566",
"0.64734256",
"0.64678323",
"0.64372915",
"0.6411352",
"0.63782746",
"0.633779",
"0.6335772"
] | 0.88646066 | 0 |
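`dict.has_key` in the row above was removed in Python 3, so a present-day equivalent of the same lookup might look like the sketch below; the `env_var` default parameter is an addition for illustration.

```python
import os


def get_log_dir(env_var="SHELLLOGGERDIR"):
    """Use the env var if set, otherwise default to ~/.shelllogger."""
    if env_var in os.environ:
        return os.environ[env_var]
    return os.path.expanduser('~/.shelllogger')


if __name__ == "__main__":
    print(get_log_dir())
```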
Convert the .raw file, with illegal characters and escape keys, to a proper XML version. Returns the name of the XML file | def raw_to_xml(self):
xmlfilename = self.logfilename.replace('.raw','.xml')
fout = codecs.open(xmlfilename, encoding="utf-8", mode="w")
for line in codecs.open(self.logfilename,encoding="utf-8"):
fout.write(sanitize(line))
fout.close()
return xmlfilename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sanitizeXML(filename):\n #we have to remove all illegal characters from crossref xml\n full_path = os.path.abspath(filename)\n path, filename = os.path.split(full_path)\n with open(full_path, 'r') as in_file:\n with open(os.path.join(path,\"tmp\"+filename), 'w') as out_file:\n for line in in_file:\n out_file.write(line.replace(r'&', r'&'))\n os.remove(full_path)\n os.rename(os.path.join(path, \"tmp\"+filename), os.path.join(path, filename))\n \n return full_path",
"def beautify_xml(XML):\n # convert XML file to modifiable string to beautify it\n text_string = ET.tostring(XML, encoding='UTF-8', method='xml')\n \n # insert line breaks before end of file tag\n file_string = text_string.replace('</aardvark>', '\\n\\n</aardvark>')\n \n # insert double new line before comments to create\n # blocks for each command\n file_string = file_string.replace('<!', '\\n\\n<!')\n \n # insert new line between each set of XML tags\n file_string = file_string.replace('><', '>\\n\\t<')\n \n # remove header\n # file_string = file_string.replace('<?xml version=\\'1.0\\' encoding=\\'utf8\\'?>\\n', '') \n \n return file_string",
"def _make_string(self, filename):\n\n if not os.path.isfile(filename):\n str = \"ERROR: Could not find specified XML file %s.\" % filename\n PRINT.info(str)\n raise OSError(str)\n\n return open(filename).read()",
"def _clean_xml(raw: str) -> str:\n a = raw.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n no_encoding = _strip_encoding(a)\n no_ns = _strip_namespace(no_encoding)\n return no_ns",
"def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))",
"def source_xml_file(tmpdir):\n xml_input = tmpdir.mkdir('sub').join('trades_raw.xml')\n xml_input.write(\n '''\n <Trades>\n <Trade CorrelationId=\"701\" NumberOfTrades=\"1\" Limit=\"1000\" TradeID=\"A1\">700</Trade>\n <Trade CorrelationId=\"002\" NumberOfTrades=\"1\" Limit=\"1000\" TradeID=\"B2\">1170</Trade>\n <Trade CorrelationId=\"103\" NumberOfTrades=\"2\" Limit=\"500\" TradeID=\"C3\">200</Trade>\n </Trades>\n '''\n )\n return str(xml_input)",
"def example_xml40(example_xml_file40):\n return etree.fromstring(example_xml_file40.encode('utf-8'))",
"def fix_xml_encoding(self, file_path):\n\n with open(file_path + self.infile, 'rb') as original:\n with open(file_path + \"Temp File.txt\", 'wb') as temp:\n [temp.write(row.replace(\"utf-16\", \"utf-8\")) for row in original]\n\n os.remove(file_path + self.infile)\n\n with open(file_path + \"Temp File.txt\", 'rb') as temp:\n with open(file_path + self.infile, 'wb') as new:\n [new.write(row) for row in temp]\n\n os.remove(file_path + \"Temp File.txt\")",
"def meta2xml(meta, filename):\n\n # this is stupid, just use dict2xml\n xml = dict2xml(meta)\n with open(filename, 'w+') as output:\n output.write(xml)",
"def test_utf8_xml_from_xml_file(self):\n # 'Россия' is 'Russia' in Cyrillic, not that it matters.\n xml = u\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <country>Россия</country>\"\"\"\n with tempfile.NamedTemporaryFile(suffix=\".xml\") as xmlfile:\n xmlfile.write(xml.encode('utf-8'))\n xmlfile.flush()\n\n j2k = glymur.Jp2k(self.j2kfile)\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as jfile:\n jp2 = j2k.wrap(jfile.name)\n xmlbox = glymur.jp2box.XMLBox(filename=xmlfile.name)\n jp2.append(xmlbox)\n\n box_xml = jp2.box[-1].xml.getroot()\n box_xml_str = ET.tostring(box_xml,\n encoding='utf-8').decode('utf-8')\n self.assertEqual(box_xml_str,\n u'<country>Россия</country>')",
"def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))",
"def creer_fichier(nom_file):\n fichier = open(nom_file, 'w')\n fichier.write(\"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\\n\")\n fichier.close()",
"def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()",
"def ler_arquivo_xml(self, diretorio):\r\n with open(diretorio, 'r') as fxml:\r\n\t strfx = fxml.readlines()\r\n\t string = \"\".join(strfx).replace(\"&\",\" e \")\r\n return string",
"def example_xml42(example_xml_file41):\n return etree.fromstring(example_xml_file42.encode('utf-8'))",
"def example_xml(example_xml_file):\n return etree.fromstring(example_xml_file.encode('utf-8'))",
"def cleanXMLfromSpecialChars(self,line):\n return str(line).replace(\"&\", \"&\").replace(\"\\\"\",\""\").replace(\"<\",\"<\").replace(\">\",\">\").replace(\"'\",\"'\")",
"def recipe12_3():\n from xml.sax.handler import ContentHandler\n import xml.sax\n\n class textHandler(ContentHandler):\n def characters(self,ch):\n sys.stdout.write(ch.encode(\"Latin-1\"))\n parser=xml.sax.make_parser()\n handler=textHandler()\n parser.setContentHandler(handler)\n parser.parse(\"sample.xml\")",
"def example_xml41(example_xml_file41):\n return etree.fromstring(example_xml_file41.encode('utf-8'))",
"def file_to_bow(filename,\n raw=True,\n exc_start=None,\n exc_end=None,\n no_http=False):\n with open(filename, 'r') as f:\n file_text = read_doc(f)\n file_text = file_text.lower()\n if not raw:\n new_text = ''\n # Do word-by-word processing of the text.\n # maybe add html stripping\n word_list = file_text.split()\n\n for word in word_list:\n word = word.strip()\n word = word.replace('\\xef\\xbb\\xbf', '')\n word = hparser.unescape(word)\n # word = unicodedata.normalize('NFKD', unicode(word))\n # Check for optional exclude delineators.\n if exc_start and exc_end:\n # word = word.encode('utf-8')\n if word.startswith(exc_start) and \\\n word.endswith(exc_end):\n continue\n\n word = word.replace(\"'s\", \"\")\n # Check if we're excluding http:// addresses\n if no_http and word.startswith('http://'):\n continue\n # Now strip punctuation\n word = word.strip(string.punctuation)\n if word == '' or \\\n word.isdigit():\n continue\n new_text += word\n new_text += ' '\n file_text = new_text\n return file_text",
"def getrawxml(fp,fn):\n print(\"starting to get the NRE XML Data from historical file\")\n infile = open(fp+fn,\"r\",encoding=\"utf-8\")\n xml_file = infile.read()\n return xml_file",
"def make_xml_filename(file_dir, mri, suffix=\"attributes\"):\n return os.path.join(file_dir, f\"{mri.replace(':', '_')}-{suffix}.xml\")",
"def recipe12_4():\n import codecs,encodings\n \"\"\" Caller will hand this library a buffer string, and ask us to convert\n the buffer, or autodetect what codec the buffer probably uses. \"\"\"\n # 'None' stands for a potentially variable byte (\"##\" in the XML spec...)\n autodetect_dict={ # bytepattern : (\"name\",\n (0x00, 0x00, 0xFE, 0xFF) : (\"ucs4_be\"),\n (0xFF, 0xFE, 0x00, 0x00) : (\"ucs4_le\"),\n (0xFE, 0xFF, None, None) : (\"utf_16_be\"),\n (0xFF, 0xFE, None, None) : (\"utf_16_le\"),\n (0x00, 0x3C, 0x00, 0x3F) : (\"utf_16_be\"),\n (0x3C, 0x00, 0x3F, 0x00) : (\"utf_16_le\"),\n (0x3C, 0x3F, 0x78, 0x6D) : (\"utf_8\"),\n (0x4C, 0x6F, 0xA7, 0x94) : (\"EBCDIC\"),\n }\n def autoDetectXMLEncoding(buffer):\n \"\"\"buffer -> encoding_\n The buffer string should be at least four bytes long.\n Returns None if encoding cannot be detected.\n Note than encoding_name might not have an installed\n decoder (e.g., EBCDIC)\n \"\"\"\n # A more efficient implementation would not decode the whole\n # buffer at once, but then we'd have to decode a character at\n # a time looking for the quote character, and that's a pain\n encoding=\"utf_8\" # According to the XML spec, this is the default\n # This code successively tries to refine the default:\n # Whenever it fails to refine, it falls back to\n # the last place encoding was set\n bytes=byte1, byte2, byte3, byte4=map(ord,buffer[0:4])\n enc_info=autodetect_dict.get(bytes,None)\n if not enc_info: # Try autodetection again, removing potentially\n # variable bytes\n bytes=byte1,byte2,None,None\n enc_info=autodetect_dict.get(bytes)\n if enc_info:\n encoding=enc_info # We have a guess...these are\n # the new defaults\n # Try to fidn a more precise encoding using XML declaration\n secret_decoder_ring=codecs.lookup(encoding)[1]\n decoded, length=secret_decoder_ring(buffer)\n first_line=decoded.split(\"\\n\",1)[0]\n if first_line and first_line.startswith(u\"<?xml\"):\n encoding_pos=first_line.find(u\"encoding\")\n if encoding_pos!=-1:\n # Look for double quotes\n quote_pos=first_line.find('\"', encoding_pos)\n if quote_pos==-1: #Look for single quote\n quote_pos=first_line.find(\"'\", encoding_pos)\n if quote_pos>-1:\n quote_char=first_line[quote_pos]\n rest=first_line[quote_pos+1]\n encoding=rest[:rest.find(quote_char)]\n return encoding",
"def _normalize_malformed_xml(xml):\n xml = xml.strip()\n if not xml.startswith('<TXT>'):\n xml = '<TXT>' + xml\n if not xml.endswith('</TXT>'):\n xml = xml + '</TXT>'\n return xml",
"def _readXMLfragment(self) -> str:\n\t\tself._check()\n\t\tself._raven.reset_output_buffer()\n\t\tself._raven.reset_input_buffer()\n\t\tfirst_char = self._raven.read()\n\t\twhile (first_char == 0 or first_char == b' '):\n\t\t\tfirst_char = self._raven.read()\n\t\tif (first_char != b'<'):\n\t\t\tself._clear()\n\t\t\traise InvalidFormat('INVALID CHAR: {}'.format(first_char))\n\t\tstart_tag = first_char\n\t\tresult = ''\n\t\twhile (start_tag[-1] != 62):\n\t\t\tchar = self._raven.read()\n\t\t\tif len(char) == 0:\n\t\t\t\traise Exception('No data')\n\t\t\tstart_tag += char\n\t\tstart_tag = start_tag.decode('ascii').strip('\\x00')\n\t# Debugging\n\t#\tprint(start_tag)\n\t\tif \"/\" in start_tag:\n\t\t\traise InvalidFormat('\"/\" detected in XML starting tag')\n\t\tresult = start_tag\n\t\tend_tag = start_tag[0] + '/' + start_tag[1:] \n\t\tend_tag_len = len(end_tag)\n\t\twhile (result[-end_tag_len:] != end_tag):\n\t\t\tchar = self._raven.read()\n\t\t\tif len(char) == 0:\n\t\t\t\traise Exception('No data')\n\t\t\tresult += char.decode('ascii')\n\t\ttry:\n\t\t\treturn ET.fromstring(result)\n\t\texcept:\n\t\t\traise InvalidFormat",
"def prepare_xml(original_xml, mangled_xml):\n in_handle = open(original_xml)\n footer = \" </BlastOutput_iterations>\\n</BlastOutput>\\n\"\n header = \"\"\n while True:\n line = in_handle.readline()\n if not line:\n #No hits?\n stop_err(\"Problem with XML file?\")\n if line.strip() == \"<Iteration>\":\n break\n header += line\n\n if \"<BlastOutput_program>blastx</BlastOutput_program>\" in header:\n print \"BLASTX output identified\"\n elif \"<BlastOutput_program>blastp</BlastOutput_program>\" in header:\n print \"BLASTP output identified\"\n else:\n in_handle.close()\n stop_err(\"Expect BLASTP or BLASTX output\")\n\n out_handle = open(mangled_xml, \"w\")\n out_handle.write(header)\n out_handle.write(line)\n count = 1\n while True:\n line = in_handle.readline()\n if not line:\n break\n elif line.strip() == \"<Iteration>\":\n #Insert footer/header\n out_handle.write(footer)\n out_handle.write(header)\n count += 1\n out_handle.write(line)\n\n out_handle.close()\n in_handle.close()\n print \"Input has %i queries\" % count",
"def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )",
"def sanitize_characters(raw_input_file, clean_output_file):\n input_file = codecs.open(raw_input_file, 'r', encoding='ascii', errors='ignore')\n output_file = open(clean_output_file, 'w', encoding='ascii', errors='ignore')\n\n for line in input_file:\n # removes extra newline\n line = line.rstrip('\\n')\n output_file.write(line)",
"def get_filename(self) -> str:\n\t\treturn self.xml_name",
"def ConvertFileName(cls,infile,band):\r\n try:\r\n import os\r\n except:\r\n raise ImportError(\"Can not find module os\")\r\n try:\r\n base = str.split(infile,\"_metadata.xml\")[0]\r\n print base\r\n ext=\"_band\"+str(band)+\".ntf\"\r\n outfile=base+ext\r\n return outfile\r\n except:\r\n raise ImportError(\"Can not covert file names\")"
] | [
"0.63195187",
"0.59571916",
"0.590457",
"0.5790034",
"0.56660265",
"0.5458497",
"0.53709275",
"0.5369985",
"0.5312219",
"0.5296952",
"0.5284404",
"0.52644795",
"0.52433074",
"0.5239629",
"0.5220292",
"0.5200755",
"0.5199187",
"0.51793045",
"0.5175041",
"0.51562613",
"0.5145448",
"0.5133448",
"0.51329625",
"0.50987995",
"0.506291",
"0.50606483",
"0.5055475",
"0.50390977",
"0.50006276",
"0.4998994"
] | 0.76060027 | 0 |
get the report template | def _report_template():
current_dir = Path(__file__).parent
with open(current_dir / "report_template.html", "r") as f:
template = f.read()
template = re.sub(r"\s{2,}", " ", template)
template = re.sub(r"\n", "", template)
template = re.sub(r"> <", "><", template)
return template | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_template(self):\n try:\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_code = Path(f'{template_path}/{self._get_template_filename()}').read_text()\n # substitute template parts\n template_code = self._substitute_template_parts(template_code)\n except Exception as err: # noqa: B902; just logging\n current_app.logger.error(err)\n raise err\n return template_code",
"def get_template(self):\n return self.template",
"def _get_template_filename(self):\n file_name = ReportMeta.reports[self._report_key]['fileName']\n return '{}.html'.format(file_name)",
"def get_template(self):\n template_string = self.remgr.render_template(self)\n return self.provider.format_template(template_string)",
"def template(self):\n return self._template",
"def template(self):\n return self._template",
"def template(self):\n return self._template",
"def get_template(self):\n if self.get_website:\n return self.get_website.get_template()\n else:\n return default_entity.get_website.get_template()",
"def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... and return the first match\n return choices.pop(0)",
"def template(self):\n return self.conf.get(\"template\", None)",
"def GetTemplate(self, _page_data):\n return self.template",
"def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)",
"def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None",
"def get_template(self, templateType, blogid=1):\n return self.execute(\"metaWeblog.getTemplate\", self.appkey, blogid, self.username, self.password, templateType)",
"def get_html_report(self) -> str:\n template_contents = dict(\n vendor_bundle_js=self.vendor_bundle,\n app_bundle_js=self.app_bundle,\n # results\n results=self.results,\n # account metadata\n account_id=self.account_id,\n account_name=self.account_name,\n report_generated_time=str(self.report_generated_time),\n cloudsplaining_version=__version__,\n )\n template_path = os.path.dirname(__file__)\n env = Environment(loader=FileSystemLoader(template_path)) # nosec\n template = env.get_template(\"template.html\")\n return template.render(t=template_contents)",
"def template(self):\n with open(self.compute.submission_template, \"r\") as f:\n return f.read()",
"def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]",
"def get_template(self):\n model = self.get_object()\n template_name = self.model_template_name or 'template'\n try:\n template_string = getattr(model, template_name)\n except AttributeError as e:\n raise ImproperlyConfigured(\n \"%(model)s is missing a template. Define \"\n \"%(model)s.template, %(cls)s.model_template_name \"\n \"or override %(cls)s.get_template().\" % {\n 'model': model.__class__.__name__,\n 'cls': self.__class__.__name__\n }\n )\n return template_string",
"def get_notification_template(self):\n if self.db_config_file.key_exists(\"notification_template_file\"):\n filename = self.db_config_file_value(\"notification_template_file\").strip('\"')\n return open(filename, 'rt').read()\n\n return get_data(\"asebackupcli\", \"notification.json\")",
"def _GetTemplate(self):\n# First read default template.\n tmplt = self._LoadTemplate(c.preproc_template_default)\n tmplt['proc'] = self.topdir\n self.template_type = 'default'\n\n self.templates = []\n if self.template_file is not None:\n tmplt.update(self._LoadTemplate(self.template_file))\n self.template_type = 'command-line'\n self.templates.append(os.path.abspath(self.template_file))\n found_template = True\n else:\n# Find a study specific template file.\n study_template_file = self._FindTemplateFile('%s/..' % self.topdir)\n if study_template_file is not None:\n# Merge study template into default, study template has precedence.\n if self.verbose:\n print \"Using study template at \" + study_template_file\n tmplt.update(self._LoadTemplate(study_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(study_template_file))\n found_template = True\n else:\n found_template = False\n# Now look for a subject-specific template file.\n subject_template_file = self._FindTemplateFile('%s' % self.topdir)\n if subject_template_file is not None:\n# Merge subject template, subject template has precedence.\n if self.verbose:\n print \"Using subject-specific template at %s\" % \\\n subject_template_file\n tmplt.update(self._LoadTemplate(subject_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(subject_template_file))\n found_template = True\n\n if not found_template:\n raise RuntimeError('Could not find template file.')\n\n if tmplt.get('subject','same') == 'same':\n# Default subdirectory is same as data directory.\n tmplt['subject'] = self.topdir.split('/')[-1]\n else:\n if not isinstance(tmplt['subject'],str):\n errstr = 'preprocess: Invalid subject number. Be sure to ' + \\\n 'enclose the subject number item with double quotes.'\n raise RuntimeError(errstr)\n\n# Keys that apply to all EPIs.\n self.fsl_flip = tmplt.get('fsl_flip', False)\n if self.fsl_flip:\n self.flip_opts = '-LT'\n else:\n self.flip_opts = ''\n\n# Replace strings with python types.\n for key in tmplt.keys():\n if tmplt[key] == 'None':\n tmplt[key] = None\n elif key == 'True':\n tmplt[key] = True\n elif key == 'False':\n tmplt[key] = False\n return tmplt",
"def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path",
"def getTemplate():\n\n with open('/home/sevudan/Scripts/projects/topogen/template.cfg', 'r') as file:\n data = file.read()\n file.close()\n return Template(data)",
"def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)",
"def template(self) -> 'outputs.PipelineTemplateResponse':\n return pulumi.get(self, \"template\")",
"def get_template(self):\n endpoint = \"/isam/wga_templates/dynurl_template\"\n response = self.client.get_json(endpoint)\n response.success = response.status_code == 200\n return response",
"def get_string(self):\n self._populate_output()\n string = self._jinja_template.render(\n outp=self._outp, config=self._report_generator.config)\n return string",
"def html_template_file(self):\n pass",
"def get_template(self, format):\n for pattern, converter in self._patterns:\n if converter.format == format:\n template = pattern.generate('{name}')\n if template:\n return template\n return '{name}' f'.{format}'",
"def _get_template(self, template_name):\n if template_name not in self.chached_templates:\n self.chached_templates[template_name] = self.env.get_template(template_name)\n return self.chached_templates[template_name]",
"def get_report(self) -> str:\n return self.diagnostics.get_report()"
] | [
"0.76926655",
"0.73982304",
"0.7120592",
"0.70143956",
"0.6962399",
"0.6962399",
"0.6962399",
"0.6943154",
"0.68456966",
"0.6840729",
"0.6796202",
"0.6777228",
"0.67390454",
"0.6727346",
"0.6701621",
"0.6578594",
"0.6480725",
"0.6409205",
"0.6401734",
"0.63737816",
"0.6356212",
"0.63415956",
"0.6317046",
"0.6300544",
"0.6172457",
"0.6171188",
"0.61453533",
"0.6137075",
"0.61318797",
"0.61235565"
] | 0.74037963 | 1 |
Render exception_data as an html report | def render_exception_html(exception_data, report_template=None):
report_template = report_template or _report_template()
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
exception_data["repr"] = repr
return jinja_env.from_string(report_template).render(exception_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_exception(self, exception_trace=''):\n txt = [80 * '*', '\\n', BANNER, '\\n', 80 * '*', '\\n', '\\n', '\\n']\n txt.extend(exception_trace)\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n return as_bytes(txt)",
"def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):\n exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)\n if data_processor:\n exception_data = data_processor(exception_data)\n\n if output_format == \"html\":\n text = render_exception_html(exception_data)\n elif output_format == \"json\":\n text = render_exception_json(exception_data)\n else:\n raise TypeError(\"Exception report format not correctly specified\")\n\n filename = gen_error_filename(extension=output_format)\n\n report_location = storage_backend.write(filename, text)\n\n return report_location",
"def renderHTTP_exception(request, failure):",
"def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)",
"def get_traceback_html(self):\n\n if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):\n self.template_does_not_exist = True\n self.loader_debug_info = []\n for loader in template_source_loaders:\n try:\n module = import_module(loader.__module__)\n if hasattr(loader, '__class__'):\n source_list_func = loader.get_template_sources\n else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4\n source_list_func = module.get_template_sources\n # NOTE: This assumes exc_value is the name of the template that\n # the loader attempted to load.\n template_list = [{'name': t, 'exists': os.path.exists(t)} \\\n for t in source_list_func(str(self.exc_value))]\n except (ImportError, AttributeError):\n template_list = []\n if hasattr(loader, '__class__'):\n loader_name = loader.__module__ + '.' + loader.__class__.__name__\n else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4\n loader_name = loader.__module__ + '.' + loader.__name__\n self.loader_debug_info.append({\n 'loader': loader_name,\n 'templates': template_list,\n })\n if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and\n isinstance(self.exc_value, TemplateSyntaxError)):\n self.get_template_exception_info()\n\n frames = self.get_traceback_frames()\n for i, frame in enumerate(frames):\n if 'vars' in frame:\n frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]\n frames[i] = frame\n\n unicode_hint = ''\n if self.exc_type and issubclass(self.exc_type, UnicodeError):\n start = getattr(self.exc_value, 'start', None)\n end = getattr(self.exc_value, 'end', None)\n if start is not None and end is not None:\n unicode_str = self.exc_value.args[1]\n unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')\n t = get_template(\"500_metanas.html\")\n #t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')\n c = Context({\n 'is_email': self.is_email,\n 'unicode_hint': unicode_hint,\n 'frames': frames,\n 'request': self.request,\n 'settings': debug.get_safe_settings(),\n 'sys_executable': sys.executable,\n 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],\n 'server_time': datetime.datetime.now(),\n 'sw_version': get_sw_version(),\n 'sys_path': sys.path,\n 'template_info': self.template_info,\n 'template_does_not_exist': self.template_does_not_exist,\n 'loader_debug_info': self.loader_debug_info,\n })\n # Check whether exception info is available\n if self.exc_type:\n c['exception_type'] = self.exc_type.__name__\n if self.exc_value:\n c['exception_value'] = smart_unicode(self.exc_value, errors='replace')\n if frames:\n c['lastframe'] = frames[-1]\n return t.render(c)",
"def render_exception_json(exception_data):\n return json.dumps(exception_data, default=_json_serializer)",
"def get_processor_exception_html(exception):\r\n\r\n payment_support_email = settings.PAYMENT_SUPPORT_EMAIL\r\n if isinstance(exception, CCProcessorDataException):\r\n msg = dedent(_(\r\n \"\"\"\r\n <p class=\"error_msg\">\r\n Sorry! Our payment processor sent us back a payment confirmation that had inconsistent data!\r\n We apologize that we cannot verify whether the charge went through and take further action on your order.\r\n The specific error message is: <span class=\"exception_msg\">{msg}</span>.\r\n Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}.\r\n </p>\r\n \"\"\".format(msg=exception.message, email=payment_support_email)))\r\n return msg\r\n elif isinstance(exception, CCProcessorWrongAmountException):\r\n msg = dedent(_(\r\n \"\"\"\r\n <p class=\"error_msg\">\r\n Sorry! Due to an error your purchase was charged for a different amount than the order total!\r\n The specific error message is: <span class=\"exception_msg\">{msg}</span>.\r\n Your credit card has probably been charged. Contact us with payment-specific questions at {email}.\r\n </p>\r\n \"\"\".format(msg=exception.message, email=payment_support_email)))\r\n return msg\r\n elif isinstance(exception, CCProcessorSignatureException):\r\n msg = dedent(_(\r\n \"\"\"\r\n <p class=\"error_msg\">\r\n Sorry! Our payment processor sent us back a corrupted message regarding your charge, so we are\r\n unable to validate that the message actually came from the payment processor.\r\n The specific error message is: <span class=\"exception_msg\">{msg}</span>.\r\n We apologize that we cannot verify whether the charge went through and take further action on your order.\r\n Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}.\r\n </p>\r\n \"\"\".format(msg=exception.message, email=payment_support_email)))\r\n return msg\r\n\r\n # fallthrough case, which basically never happens\r\n return '<p class=\"error_msg\">EXCEPTION!</p>'",
"def error(self, environ, start_response):\n \n \"Generate an error report\"\n status = '200 Handle error'\n headers = [('Content-type','text/html')]\n start_response(status, headers)\n trace = traceback.extract_tb(sys.exc_traceback)\n return ['Error<br />[Exception] <i><q>%s</q></i> <br /> [File ] <i><q>%s</q></i> <br /><pre>%s</pre>'\n % (sys.exc_info()[0],trace[-1][0],self.print_file(trace[-1][0], trace[-1][1]))]",
"def renderInlineException(request, reason):",
"def render(data):\n if data is None:\n return ''\n\n if 'rendered_result' not in data:\n if 'result' not in data:\n data['rendered_result'] = ''\n else:\n make_pretty = True\n data['rendered_result'] = SEP2Renderer.export(data['result'], make_pretty)\n\n return data['rendered_result']",
"def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html",
"def process_exception(self, request, exception):\n gc = GithubCredentials(\n user=settings.EXREPORTER_GITHUB_USER,\n repo=settings.EXREPORTER_GITHUB_REPO,\n auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)\n gs = GithubStore(credentials=gc)\n reporter = ExReporter(\n store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)\n\n reporter.report()",
"def test_get_processor_exception_html(self):\r\n for type in [CCProcessorSignatureException, CCProcessorWrongAmountException, CCProcessorDataException]:\r\n error_msg = \"An exception message of with exception type {0}\".format(str(type))\r\n exception = type(error_msg)\r\n html = get_processor_exception_html(exception)\r\n self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)\r\n self.assertIn('Sorry!', html)\r\n self.assertIn(error_msg, html)\r\n\r\n # test base case\r\n self.assertIn(\"EXCEPTION!\", get_processor_exception_html(CCProcessorException()))",
"def test_report_from_json():\n\n class CustomException(Exception):\n pass\n\n def a(foo):\n bar = \"hey there\" # noqa\n # ensure it can handle weird characters\n _fuzz_tokens = [\n \"http\",\n \"https\",\n \":\",\n \"//\",\n \"?\",\n \".\",\n \"aaaaa\",\n \"союз\",\n \"-\",\n \"/\",\n \"@\",\n \"%20\",\n \"🌞\",\n \",\",\n \".com\",\n \"http://\",\n \"gov.uk\",\n \"\\udcae\",\n \"%\",\n \"#\",\n \" \",\n \"~\",\n \"\\\\\",\n \"'\",\n \" \" * 180,\n ]\n\n class HardToRender:\n def __repr__(self):\n return \"\".join(_fuzz_tokens)\n\n obj = HardToRender() # noqa\n\n b(foo)\n\n def b(foo):\n c(foo)\n\n def c(foo):\n green = 93 # noqa\n raise CustomException(\"yolo!\")\n\n try:\n a(\"hi\")\n except Exception:\n exception_data = get_exception_data(get_full_tb=False)\n\n frames = exception_data[\"frames\"]\n\n assert exception_data[\"exception_type\"] == \"CustomException\"\n assert exception_data[\"exception_value\"] == \"yolo!\"\n assert len(frames) == 4\n assert exception_data[\"frames\"][-1][\"function\"] == \"c\"\n local_vars = dict(exception_data[\"frames\"][-1][\"vars\"])\n assert local_vars[\"green\"] == \"93\"\n\n html_1 = render_exception_html(exception_data)\n text = render_exception_json(exception_data)\n\n json_based_data = json.loads(text)\n\n html_2 = render_exception_html(json_based_data)\n assert html_1 == html_2",
"def formatException(self, exc_info):\n type_, value, trcbk = exc_info\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n row = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n self.writer.writerow(row)\n\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()",
"def formatException(self, exc_info):\n keys = [\"type\", \"value\", \"frame\", \"filename\", \"lineno\", \"function\", \"text\"]\n type_, value, trcbk = exc_info\n rows = []\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n values = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n rows.append(dict(zip(keys, values)))\n\n return str(CustomEncoder().encode(rows))",
"def custom_500(request, exception=None):\n return render(request, \"500.html\", {\"exception\": exception})",
"def xml(self):\n strg = \"<Exception>\\n\"\n strg += \"<Object>\\n\"\n strg += \"%s\\n\" % self.name\n strg += \"</Object>\\n\"\n strg += \"<Message>\\n\"\n strg += self._message\n strg += \"</Message>\\n\"\n strg += \"<DataItems>\\n\"\n for key, value in viewitems(self.data):\n strg += \"<DataItem>\\n\"\n strg += \"<Key>\\n\"\n strg += str(key)\n strg += \"</Key>\\n\"\n strg += \"<Value>\\n\"\n strg += str(value)\n strg += \"</Value>\\n\"\n strg += \"</DataItem>\\n\"\n strg += \"</DataItems>\\n\"\n strg += \"</Exception>\\n\"\n logging.error(strg)\n return strg",
"def formatReport(cls, instance, trcback, context=1):\n\n\theader = []\n\theader.append(\"Exception in '{0}'.\".format(getInnerMostFrame(trcback).f_code.co_name))\n\theader.append(\"Exception class: '{0}'.\".format(cls.__name__))\n\theader.append(\"Exception description: '{0}'.\".format(instance.__doc__ and instance.__doc__.strip() or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tConstants.nullObject))\n\tfor i, line in enumerate(str(instance).split(\"\\n\")):\n\t\theader.append(\"Exception message line no. '{0}' : '{1}'.\".format(i + 1, line))\n\n\tframes = []\n\tfor frame, locals in extractLocals(trcback):\n\t\tframes.append(\"Frame '{0}' in '{1}' at line '{2}':\".format(*frame))\n\t\targuments, namelessArgs, keywordArgs, locals = locals\n\t\tany((arguments, namelessArgs, keywordArgs)) and frames.append(\"{0:>40}\".format(\"Arguments:\"))\n\t\tfor key, value in arguments.iteritems():\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tfor value in namelessArgs:\n\t\t\tframes.append(\"{0:>40}\".format(value))\n\t\tfor key, value in sorted(keywordArgs.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tlocals and frames.append(\"{0:>40}\".format(\"Locals:\"))\n\t\tfor key, value in sorted(locals.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tframes.append(str())\n\n\ttrcback = formatException(cls, instance, trcback)\n\n\treturn header, frames, trcback",
"def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')",
"def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n context = {'error': \"%d %s\" % (exception.code, exception.title), 'detail': exception.detail}\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n context = {'error': \"500 Server Error\"}\n self.response.set_status(500)\n return self.render_json(context)",
"def html(self, environ):\n body = self.make_body(environ, self.template, html_quote, no_quote)\n\n error_template = TEMPLATE\n template_file = None\n\n try:\n template_file = open(os.path.join(self.dir, str(self.code) + '.html'), 'r')\n except IOError:\n try:\n template_file = open(os.path.join(self.dir, 'error.html'), 'r')\n except IOError:\n pass\n\n if template_file:\n try:\n error_template = template_file.read()\n template_file.close()\n except IOError:\n template_file.close()\n\n return error_template % {\n 'title': self.title,\n 'code': self.code,\n 'server': 'OpenCore WSGI Server',\n 'explanation': self.explanation,\n 'detail': self.detail,\n 'comment': self.comment,\n 'body': body }",
"def key_error_page(e):\n return render_template(\"index.html\", error=e), 500",
"def error(self, request):\n if self.debug:\n import cgitb\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n cgitb.html(sys.exc_info()))\n else:\n errorpage = \"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>Unhandled Exception</title>\n</head><body>\n<h1>Unhandled Exception</h1>\n<p>An unhandled exception was thrown by the application.</p>\n</body></html>\n\"\"\"\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n errorpage)",
"def render_error(self, template, *args, **kwargs):\n self._render(template, sys.stderr, *args, **kwargs)",
"def formatException(self, exc_info):\n result = super(OneLineExceptionFormatter, self).formatException(exc_info)\n return repr(result) # or format into one line however you want to",
"def format_exception(text, status_code):\n return {\"errors\": [{\"status\": str(status_code), \"detail\": text}]}, status_code",
"def export(self) -> str:\n return self._collector.get_aggregated_exceptions().to_json() # type: ignore",
"def _generate_error_report(self, errno=None):\n # as of now we think this will be the same for every interface\n NIWORKFLOWS_LOG.warn('Report was not generated')\n\n errorstr = '<div><span class=\"error\">Failed to generate report!</span>.\\n'\n if errno:\n errorstr += (' <span class=\"error\">Interface returned exit '\n 'code %d</span>.\\n') % errno\n errorstr += '</div>\\n'\n with open(self._out_report, 'w' if PY3 else 'wb') as outfile:\n outfile.write(errorstr)",
"def get_html_report(self) -> str:\n template_contents = dict(\n vendor_bundle_js=self.vendor_bundle,\n app_bundle_js=self.app_bundle,\n # results\n results=self.results,\n # account metadata\n account_id=self.account_id,\n account_name=self.account_name,\n report_generated_time=str(self.report_generated_time),\n cloudsplaining_version=__version__,\n )\n template_path = os.path.dirname(__file__)\n env = Environment(loader=FileSystemLoader(template_path)) # nosec\n template = env.get_template(\"template.html\")\n return template.render(t=template_contents)"
] | [
"0.6812304",
"0.6662681",
"0.6421623",
"0.6406252",
"0.6321645",
"0.63113713",
"0.61840075",
"0.6142265",
"0.6085713",
"0.6048839",
"0.59619063",
"0.5947996",
"0.5916094",
"0.5889431",
"0.58686614",
"0.5866581",
"0.5849284",
"0.58273417",
"0.5797272",
"0.57954353",
"0.5734677",
"0.5728617",
"0.5714531",
"0.5712052",
"0.57115763",
"0.5706719",
"0.56628513",
"0.56088376",
"0.5578029",
"0.55709815"
] | 0.7821494 | 0 |
Render exception_data as a json object | def render_exception_json(exception_data):
return json.dumps(exception_data, default=_json_serializer) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exceptionhandler(e):\n response = e.get_response()\n response.data = json.dumps({\n \"code\" : e.code,\n \"name\": e.name,\n \"description\": e.description\n })\n response.content_type = \"application/json\"\n\n return response",
"def jsonify_http_exception(exception: HTTPException):\n return jsonify(exception.description, exception.code)",
"def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response",
"def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response",
"def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n print(response.data)\n response.content_type = \"application/json\"\n return response",
"def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n response.content_type = \"application/json\"\n return response",
"def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }",
"def exception_data(self) -> typing.Optional[dict]:\n return self._exception_data",
"def jsonify_unknown_exception(exception: Exception):\n current_app.logger.exception('Unhandled exception has been raised!')\n return jsonify(DEFAULT_MESSAGE, 500)",
"def jsonify_exception(error: HTTPException) -> Response:\n exc_resp = error.get_response()\n response: Response = jsonify(reason=error.description)\n response.status_code = exc_resp.status_code\n return response",
"def render(self, data, accepted_media_type=None, renderer_context=None):\n\n if '(e.g:bbox=xmin,ymin,xmax,ymax)' in str(data):\n rendered = {'error': str(data)}\n return json.dumps(rendered)\n if data is None:\n return ''\n\n if 'error' in data:\n rendered = data\n elif isinstance(data, dict):\n rendered = self.render_single(data)\n else:\n rendered = self.render_many(data)\n\n return json.dumps(rendered, separators=self.separators)",
"def handle_missing_objects(exc):\n return jsonify(dict(\n message=str(exc)\n )), exc.code",
"def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n context = {'error': \"%d %s\" % (exception.code, exception.title), 'detail': exception.detail}\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n context = {'error': \"500 Server Error\"}\n self.response.set_status(500)\n return self.render_json(context)",
"def format_exception(text, status_code):\n return {\"errors\": [{\"status\": str(status_code), \"detail\": text}]}, status_code",
"def format_exception(self):\n if isinstance(self.message, dict):\n return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)",
"def make_json_error(ex):\n if isinstance(ex, HTTPException):\n return ex;\n elif isinstance(ex, ResourceException):\n info = ex.to_dict()\n status_code = ex.http_status\n info[\"type\"] = \"exception\"\n else:\n message = \"There was an internal server error. Please try again later.\"\n info = {\"code\": \"internal_server_error\", \"message\": message, \"type\": \"exception\"}\n status_code = 500\n # generally we should log these 500 errors with the stacktrace somewhere -- we used splunk at Box.\n\n response = jsonify(**info)\n response.status_code = status_code\n return response",
"def get_er_exceptions():\n express_route_exceptions_lst = []\n try:\n for i in get_data():\n if i['expressRoute'] is False:\n express_route_exceptions_lst.append(i)\n express_route_exceptions_dic = {'expressRoutesExceptions': express_route_exceptions_lst}\n return get_json(express_route_exceptions_dic)\n except ValueError as e:\n print(e)",
"def AsJson(self):\n\n return json.dumps(self._errors)",
"def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')",
"def render_exception_html(exception_data, report_template=None):\n report_template = report_template or _report_template()\n jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=[\"jinja2.ext.autoescape\"])\n exception_data[\"repr\"] = repr\n return jinja_env.from_string(report_template).render(exception_data)",
"def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n exception = kwargs.get(\"exception\")\n\n context.update({\n \"exception\": exception,\n \"exception_type\": exception.__class__.__name__ if exception else None,\n \"exception_msg\": exception.message if exception and hasattr(exception, 'message') else str(exception) if exception else None,\n \"extra_message\": kwargs.get(\"extra_message\"),\n })\n return context",
"def _ExceptionResponse(args_dict=None):\n if args_dict is None:\n args_dict = {}\n args_dict[\"code\"] = \"Exception\"\n return CGateway._DumpResponse(args_dict)",
"def render(self, data):\n logging.info(\"render (start)\")\n\n seria = json.dumps(data, ensure_ascii=False, indent=4)\n logging.info(\"rendered %s characters (end)\" % len(seria))\n return seria",
"def _format_data(self, data):\n return json.dumps(data)",
"def write_error(self, status_code, exc_info, **kwargs):\n response = {\n \"data\": None,\n \"errors\": [ str(exc_info[1]) ]\n }\n\n self.set_status(status_code)\n self.write(json.dumps(response))",
"def export(self) -> str:\n return self._collector.get_aggregated_exceptions().to_json() # type: ignore",
"def test_report_from_json():\n\n class CustomException(Exception):\n pass\n\n def a(foo):\n bar = \"hey there\" # noqa\n # ensure it can handle weird characters\n _fuzz_tokens = [\n \"http\",\n \"https\",\n \":\",\n \"//\",\n \"?\",\n \".\",\n \"aaaaa\",\n \"союз\",\n \"-\",\n \"/\",\n \"@\",\n \"%20\",\n \"🌞\",\n \",\",\n \".com\",\n \"http://\",\n \"gov.uk\",\n \"\\udcae\",\n \"%\",\n \"#\",\n \" \",\n \"~\",\n \"\\\\\",\n \"'\",\n \" \" * 180,\n ]\n\n class HardToRender:\n def __repr__(self):\n return \"\".join(_fuzz_tokens)\n\n obj = HardToRender() # noqa\n\n b(foo)\n\n def b(foo):\n c(foo)\n\n def c(foo):\n green = 93 # noqa\n raise CustomException(\"yolo!\")\n\n try:\n a(\"hi\")\n except Exception:\n exception_data = get_exception_data(get_full_tb=False)\n\n frames = exception_data[\"frames\"]\n\n assert exception_data[\"exception_type\"] == \"CustomException\"\n assert exception_data[\"exception_value\"] == \"yolo!\"\n assert len(frames) == 4\n assert exception_data[\"frames\"][-1][\"function\"] == \"c\"\n local_vars = dict(exception_data[\"frames\"][-1][\"vars\"])\n assert local_vars[\"green\"] == \"93\"\n\n html_1 = render_exception_html(exception_data)\n text = render_exception_json(exception_data)\n\n json_based_data = json.loads(text)\n\n html_2 = render_exception_html(json_based_data)\n assert html_1 == html_2",
"def _render_result(self, errno, errmsg, data=None):\n self.set_header(\"Content-Type\", \"application/json; charset=utf-8\")\n if self._finished:\n return\n self.write(tornado.escape.json_encode({\n \"errno\": errno,\n \"errmsg\": errmsg,\n \"logid\": self.logid,\n \"data\": data,\n }))",
"def json(self):\n d = [err.json for err in self.errors]\n return d",
"def handle_unknown_errors(exc):\n return jsonify(dict(\n traceback=traceback.format_exc(),\n message=str(exc),\n )), 500"
] | [
"0.72541064",
"0.67585784",
"0.6713111",
"0.6690408",
"0.66427636",
"0.66312206",
"0.65327764",
"0.65234625",
"0.6517418",
"0.6510992",
"0.64126235",
"0.6402634",
"0.63840944",
"0.63827544",
"0.63217765",
"0.6246733",
"0.62432253",
"0.61892617",
"0.6140482",
"0.6116676",
"0.60697544",
"0.60659206",
"0.6048546",
"0.60113007",
"0.6007583",
"0.5992608",
"0.5970451",
"0.5962027",
"0.59452707",
"0.5929636"
] | 0.9000337 | 0 |
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). | def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
source = None
if loader is not None and hasattr(loader, "get_source"):
with suppress(ImportError):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
with suppress(OSError, IOError):
with open(filename, "rb") as fp:
source = fp.read().splitlines()
if source is None:
return None, [], None, []
try:
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match.group(1).decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
return lower_bound, pre_context, context_line, post_context
except Exception as e:
try:
context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
except Exception:
context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
return lineno, [], context_line, [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_lines_from_file(filename, lineno, context_lines):\n\n try:\n source = open(filename).readlines()\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = \\\n [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = \\\n [line.strip('\\n') for line in source[lineno + 1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context\n except (OSError, IOError):\n return None, [], None, []",
"def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):\n source = None\n if loader is not None and hasattr(loader, \"get_source\"):\n source = loader.get_source(module_name)\n if source is not None:\n source = source.splitlines()\n if source is None:\n try:\n f = open(filename)\n try:\n source = f.readlines()\n finally:\n f.close()\n except (OSError, IOError):\n pass\n if source is None:\n return None, [], None, []\n\n encoding = 'ascii'\n for line in source[:2]:\n # File coding may be specified. Match pattern from PEP-263\n # (http://www.python.org/dev/peps/pep-0263/)\n match = re.search(r'coding[:=]\\s*([-\\w.]+)', line)\n if match:\n encoding = match.group(1)\n break\n source = [unicode(sline, encoding, 'replace') for sline in source]\n\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = [line.strip('\\n') for line in source[lineno+1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context",
"def get_source_lines(self, filename, lineno, context=0):\n if not filename or not lineno:\n return ''\n\n return ''.join([' ' + linecache.getline(filename, line) for line in range(lineno - context, lineno + context + 1)])",
"def line_offsets(fname):\n line_offset = []\n offset = 0\n for _, line in enumerate( open(fname) ):\n line_offset.append(offset)\n offset += len(line)\n return line_offset",
"def _diffContext(diff, n=3):\n nlines = len(diff)\n clines = set() # set of lines to include\n for i, line in enumerate(diff):\n if line[0] != ' ':\n clines |= set(range(max(0, i-n), min(i+n+1, nlines)))\n context = []\n clines = list(clines)\n clines.sort()\n last = -1\n for i in clines:\n if i != last+1:\n context.append(\" ...\\n\")\n context.append((\"%4d: \"%i) + diff[i])\n last = i\n if clines[-1] != nlines-1:\n context.append(\" ...\\n\")\n return context",
"def line_range(self) -> Tuple[int, int]:\n if self._line_range is None:\n node_extent = self.node.extent\n comment_extent = self.node.comment_extent\n if comment_extent.start.file is None:\n comment_extent = node_extent\n\n self._line_range = (\n min(node_extent.start.line, comment_extent.start.line),\n max(node_extent.end.line, comment_extent.end.line),\n )\n\n return self._line_range",
"def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n result = []\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n result.append((addr, lineno))\n lastlineno = lineno\n addr += byte_incr\n lineno += line_incr\n if lineno != lastlineno:\n result.append((addr, lineno))\n return result",
"def outerLineno2():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_back.f_lineno",
"def getlineno(frame):\r\n # FrameType.f_lineno is now a descriptor that grovels co_lnotab\r\n return frame.f_lineno",
"def get_linepos(self, pos):\n lnum, cnum = self._get_linepos(pos)\n return lnum + self.LINE_NUM_BASE, cnum",
"def linenum(self):\n return self.source_frame_stack.linenum()",
"def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n yield (addr, lineno)\n lastlineno = lineno\n addr += byte_incr\n if line_incr >= 0x80:\n # line_increments is an array of 8-bit signed integers\n line_incr -= 0x100\n lineno += line_incr\n if lineno != lastlineno:\n yield (addr, lineno)",
"def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))",
"def lineno():\n return inspect.currentframe().f_back.f_lineno",
"def lineno():\n return inspect.currentframe().f_back.f_lineno",
"def lineno():\n return inspect.currentframe().f_back.f_lineno",
"def lineno():\n return inspect.currentframe().f_back.f_lineno",
"def lineno():\n return inspect.currentframe().f_back.f_lineno",
"def lineno():\n return inspect.currentframe().f_back.f_lineno",
"def currentLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_lineno",
"def get_lineno(self):\n return self.lexer.get_lineno()",
"def lineno():\n\treturn inspect.currentframe().f_back.f_lineno",
"def lineno():\r\n\treturn inspect.currentframe().f_back.f_lineno",
"def GetLineno():\n return inspect.currentframe().f_back.f_lineno",
"def _get_linepos(self, pos):\n t = self.input\n if pos < 0 or pos > len(t):\n raise IndexError(\"position %d not in 0..%d\" % (pos, len(t)))\n\n lpc = self.__linepos\n\n # Locate the smallest known line index whose end is at or after p.\n def locate(p):\n self._update_linetab(p)\n lo = 0\n hi = len(lpc) - 1\n if lpc[hi] < p:\n return hi\n\n # Invariant: lpc[lo] < p; lpc[hi] >= p\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if lpc[mid] > p: hi = mid\n elif lpc[mid] < p: lo = mid\n else: return mid - 1\n return hi - 1\n\n lnum = locate(pos)\n start, end = self._get_linespan(lnum)\n cnum = pos - start\n return lnum, cnum",
"def extract_lines(infile):\n with open(infile, 'r') as src:\n return read_on(get_line, src)",
"def get_frame_info(tb, context_lines=7):\n # line numbers / function / variables\n lineno = tb.tb_lineno\n function = tb.tb_frame.f_code.co_name\n variables = tb.tb_frame.f_locals\n\n # get filename\n fn = tb.tb_frame.f_globals.get('__file__')\n if not fn:\n fn = _os.path.realpath(\n _inspect.getsourcefile(tb) or _inspect.getfile(tb)\n )\n if fn[-4:] in ('.pyc', '.pyo'):\n fn = fn[:-1]\n\n # module name\n modname = tb.tb_frame.f_globals.get('__name__')\n\n # get loader\n loader = tb.tb_frame.f_globals.get('__loader__')\n\n # sourcecode\n try:\n if not loader is None:\n source = loader.get_source(modname)\n else:\n source = file(fn).read()\n except (SystemExit, KeyboardInterrupt):\n raise\n except:\n source = ''\n pre_context, post_context = [], []\n context_line, context_lineno = None, None\n else:\n parser = PythonParser(source)\n parser.parse()\n parsed_source = parser.get_html_output()\n lbound = max(0, lineno - context_lines - 1)\n ubound = lineno + context_lines\n try:\n context_line = parsed_source[lineno - 1]\n pre_context = parsed_source[lbound:lineno - 1]\n post_context = parsed_source[lineno:ubound]\n except IndexError:\n context_line = None\n pre_context = post_context = [], []\n context_lineno = lbound\n\n return {\n 'tb': tb,\n 'filename': fn,\n 'loader': loader,\n 'function': function,\n 'lineno': lineno,\n 'vars': variables,\n 'pre_context': pre_context,\n 'context_line': context_line,\n 'post_context': post_context,\n 'context_lineno': context_lineno,\n 'source': source\n }",
"def lineno():\n\n return inspect.currentframe().f_back.f_lineno",
"def get_lines_in_file(config_file):\n lines = []\n\n line = config_file.readline()\n lines.append([1, line])\n\n line_counter = 1\n while line:\n line = config_file.readline()\n if not (line.lstrip().startswith(\"#\")):\n lines.append([line_counter, line])\n\n line_counter += 1\n\n return lines",
"def outerLineno():\n cf = inspect.currentframe()\n return cf.f_back.f_back.f_lineno"
] | [
"0.7550956",
"0.66815025",
"0.635524",
"0.60288763",
"0.56448764",
"0.5592623",
"0.55494016",
"0.55332536",
"0.5494302",
"0.5492887",
"0.5485184",
"0.5472898",
"0.5435706",
"0.54235774",
"0.54235774",
"0.54235774",
"0.54235774",
"0.54235774",
"0.54235774",
"0.5400774",
"0.5390808",
"0.53847116",
"0.5373249",
"0.5370138",
"0.5334083",
"0.53263456",
"0.53189445",
"0.52991045",
"0.5291174",
"0.5286108"
] | 0.68244785 | 1 |
Create an exception report and return its location | def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
if data_processor:
exception_data = data_processor(exception_data)
if output_format == "html":
text = render_exception_html(exception_data)
elif output_format == "json":
text = render_exception_json(exception_data)
else:
raise TypeError("Exception report format not correctly specified")
filename = gen_error_filename(extension=output_format)
report_location = storage_backend.write(filename, text)
return report_location | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_exception(self, msg: str):",
"def formatReport(cls, instance, trcback, context=1):\n\n\theader = []\n\theader.append(\"Exception in '{0}'.\".format(getInnerMostFrame(trcback).f_code.co_name))\n\theader.append(\"Exception class: '{0}'.\".format(cls.__name__))\n\theader.append(\"Exception description: '{0}'.\".format(instance.__doc__ and instance.__doc__.strip() or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tConstants.nullObject))\n\tfor i, line in enumerate(str(instance).split(\"\\n\")):\n\t\theader.append(\"Exception message line no. '{0}' : '{1}'.\".format(i + 1, line))\n\n\tframes = []\n\tfor frame, locals in extractLocals(trcback):\n\t\tframes.append(\"Frame '{0}' in '{1}' at line '{2}':\".format(*frame))\n\t\targuments, namelessArgs, keywordArgs, locals = locals\n\t\tany((arguments, namelessArgs, keywordArgs)) and frames.append(\"{0:>40}\".format(\"Arguments:\"))\n\t\tfor key, value in arguments.iteritems():\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tfor value in namelessArgs:\n\t\t\tframes.append(\"{0:>40}\".format(value))\n\t\tfor key, value in sorted(keywordArgs.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tlocals and frames.append(\"{0:>40}\".format(\"Locals:\"))\n\t\tfor key, value in sorted(locals.iteritems()):\n\t\t\tframes.append(\"{0:>40} = {1}\".format(key, value))\n\t\tframes.append(str())\n\n\ttrcback = formatException(cls, instance, trcback)\n\n\treturn header, frames, trcback",
"def process_exception(self, request, exception):\n gc = GithubCredentials(\n user=settings.EXREPORTER_GITHUB_USER,\n repo=settings.EXREPORTER_GITHUB_REPO,\n auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)\n gs = GithubStore(credentials=gc)\n reporter = ExReporter(\n store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)\n\n reporter.report()",
"def report_exception(logger=None, report_details=None, cleanup_details=None):\n\n if logger is None:\n raise Exception(\"A logger must be defined!\")\n\n logger.debug(report_details)\n logger.error(report_details[\"error_message\"])\n\n if report_details[\"ujs_job_id\"] is not None:\n ujs = report_details[\"ujs_client\"]\n \n job_status = ujs.get_job_status(report_details[\"ujs_job_id\"])\n\n if job_status[-2] == 0:\n ujs.complete_job(report_details[\"ujs_job_id\"], \n report_details[\"token\"], \n report_details[\"status\"][:UJS_STATUS_MAX], \n report_details[\"error_message\"], \n None)\n else:\n raise Exception(\"No report details included!\") \n \n if cleanup_details is not None: \n if not cleanup_details[\"keep_working_directory\"]:\n try:\n cleanup(logger=logger, directory=cleanup_details[\"working_directory\"]) \n except Exception, e:\n logger.exception(e)\n else:\n raise Exception(\"Unable to cleanup working directory without cleanup info!\")",
"def create_log(self, exc):\n return self.formatter.formatException(exc)",
"def _generate_error_report(self, errno=None):\n # as of now we think this will be the same for every interface\n NIWORKFLOWS_LOG.warn('Report was not generated')\n\n errorstr = '<div><span class=\"error\">Failed to generate report!</span>.\\n'\n if errno:\n errorstr += (' <span class=\"error\">Interface returned exit '\n 'code %d</span>.\\n') % errno\n errorstr += '</div>\\n'\n with open(self._out_report, 'w' if PY3 else 'wb') as outfile:\n outfile.write(errorstr)",
"def _create_issue(*, image: str, repo: str, run: str, stacktrace: str) -> Issue:\n title = f\"Automatic error report from {repo}\"\n body = _report_body(image=image, repo=repo, run=run, stacktrace=stacktrace)\n return TAGBOT_ISSUES_REPO.create_issue(title, body)",
"def __init__(self):\r\n try:\r\n self.file = open(REPORT_FILE, 'w')\r\n except OSError:\r\n print('Problem opening log file')\r\n exit(1)",
"def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)",
"def catch(self, report_name):\n return Catch(report_name, self)",
"def write_error_report(self):\n\n with open('runReport.txt', 'a') as report:\n report.write(\"Number of Hits: \" + str(self.num_hits) + '\\n')\n report.write(\"Number of Requests: \" + str(self.num_requests) + '\\n')\n report.write(\"Hit Rate: \" + str((self.num_hits / self.num_requests)))\n report.write(\"Datafiles downloaded: \" + str(self.num_datafiles))\n now = datetime.now()\n dt_string = now.strftime(\"%H:%M %m/%d/%Y\")\n report.write(\"Run finished \" + dt_string)",
"def reportinfo(self):\n return self.fspath, 0, f\"usecase: {self.name}\"",
"def getErrorReport(self):\n return self.sError;",
"def getErrorReport(self):\n return self.sError;",
"def __create_failure_report(self, classname, failure_desc):\n match = FAILURE_LOC_RE.match(failure_desc[0])\n if not match:\n raise ValueError(\"Unexpected failure description format.\\n\"\n \"Expected the first line to contain details \"\n \"of the location of the error.\\n\"\n \"Found '%s'\" % failure_desc[0])\n name = match.group(3)\n return TestCaseReport(classname, name, \"\\n\".join(failure_desc))",
"def _get_report(self, entry):\n script = entry.get('@fields').get('script_name', '')\n message = entry.get('@message').encode('utf8')\n error = entry.get('@context').get('error', 'n/a').encode('utf8')\n\n # extract SQL from the error\n (error, sql) = self.extract_error_and_sql(error)\n\n description = self.REPORT_TEMPLATE.format(\n full_message=message,\n error=error,\n sql=sql,\n details=json.dumps(entry, indent=True)\n ).strip()\n\n report = Report(\n summary='{} - {}'.format(script, message),\n description=description,\n label=self.REPORT_LABEL\n )\n\n return report",
"def ReportError(text):\n raise IOError(text)",
"def get_report_path_hash(report) -> str:\n report_path_hash = ''\n events = [i for i in report.bug_path if i.get('kind') == 'event']\n for event in events:\n file_name = \\\n os.path.basename(report.files.get(event['location']['file']))\n line = str(event['location']['line'] if 'location' in event else 0)\n col = str(event['location']['col'] if 'location' in event else 0)\n\n report_path_hash += line + '|' + col + '|' + event['message'] + \\\n file_name\n\n report_path_hash += report.check_name\n\n if not report_path_hash:\n LOG.error('Failed to generate report path hash!')\n LOG.error(report.bug_path)\n\n LOG.debug(report_path_hash)\n return __str_to_hash(report_path_hash)",
"def get_report_path(self):\n report_path = os.path.join(logPath, \"report.html\")\n return report_path",
"def get_error_file(self):\n pass",
"def report(self) -> Any:",
"def create_from_exception(self, exc_info=None, **kwargs):\n if not exc_info:\n exc_info = sys.exc_info()\n\n exc_type, exc_value, exc_traceback = exc_info\n\n def shorten(var):\n var = transform(var)\n if isinstance(var, basestring) and len(var) > 200:\n var = var[:200] + '...'\n return var\n\n reporter = ExceptionReporter(None, exc_type, exc_value, exc_traceback)\n frames = varmap(shorten, reporter.get_traceback_frames())\n\n if not kwargs.get('view'):\n # This should be cached\n modules = get_installed_apps()\n if conf.INCLUDE_PATHS:\n modules = set(list(modules) + conf.INCLUDE_PATHS)\n\n def iter_tb_frames(tb):\n while tb:\n yield tb.tb_frame\n tb = tb.tb_next\n \n def contains(iterator, value):\n for k in iterator:\n if value.startswith(k):\n return True\n return False\n \n # We iterate through each frame looking for an app in INSTALLED_APPS\n # When one is found, we mark it as last \"best guess\" (best_guess) and then\n # check it against SENTRY_EXCLUDE_PATHS. If it isnt listed, then we\n # use this option. If nothing is found, we use the \"best guess\".\n best_guess = None\n view = None\n for frame in iter_tb_frames(exc_traceback):\n try:\n view = '.'.join([frame.f_globals['__name__'], frame.f_code.co_name])\n except:\n continue\n if contains(modules, view):\n if not (contains(conf.EXCLUDE_PATHS, view) and best_guess):\n best_guess = view\n elif best_guess:\n break\n if best_guess:\n view = best_guess\n \n if view:\n kwargs['view'] = view\n\n data = kwargs.pop('data', {}) or {}\n if hasattr(exc_type, '__class__'):\n exc_module = exc_type.__class__.__module__\n else:\n exc_module = None\n data['__sentry__'] = {\n 'exc': map(transform, [exc_module, exc_value.args, frames]),\n }\n\n if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, 'source'):\n origin, (start, end) = exc_value.source\n data['__sentry__'].update({\n 'template': (origin.reload(), start, end, origin.name),\n })\n kwargs['view'] = origin.loadname\n \n tb_message = '\\n'.join(traceback.format_exception(exc_type, exc_value, exc_traceback))\n\n kwargs.setdefault('message', transform(force_unicode(exc_value)))\n\n return self.process(\n class_name=exc_type.__name__,\n traceback=tb_message,\n data=data,\n **kwargs\n )",
"def error(self, environ, start_response):\n \n \"Generate an error report\"\n status = '200 Handle error'\n headers = [('Content-type','text/html')]\n start_response(status, headers)\n trace = traceback.extract_tb(sys.exc_traceback)\n return ['Error<br />[Exception] <i><q>%s</q></i> <br /> [File ] <i><q>%s</q></i> <br /><pre>%s</pre>'\n % (sys.exc_info()[0],trace[-1][0],self.print_file(trace[-1][0], trace[-1][1]))]",
"def report(self, output_dir):",
"def _generate_report(self):\n raise NotImplementedError",
"def create_report(self, report_job: dict):\n try:\n # Run the report and wait for it to finish\n report_job_id = self.report_downloader.WaitForReport(report_job)\n return report_job_id\n except errors.AdManagerReportError as e:\n print('[INFO]: Failed to generate report. Error: %s' % e)\n sys.exit()",
"def report():\n pass",
"def pytest_runtest_makereport(item, call):\n if \"incremental\" in item.keywords:\n if call.excinfo is not None:\n parent = item.parent\n parent._previousfailed = item",
"def get_exception():\n raise Exception(\"example\")",
"def save_exception(exc):\n LOG.error(\"Error - %s\", str(exc))\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n data = (str(exc)+traceback.format_exc())\n\n file = open(\"./logs/ERROR_\"+threading.currentThread().getName()+today+\".log\",'a+') #Replace to fix OSError\n file.write(\"\\n==\"+hour+\"==\\n\")\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()"
] | [
"0.61463684",
"0.5946372",
"0.59041274",
"0.577588",
"0.57041526",
"0.569233",
"0.5647634",
"0.56435865",
"0.5620892",
"0.5555634",
"0.54271317",
"0.5426168",
"0.5407387",
"0.5407387",
"0.538084",
"0.53600556",
"0.53557533",
"0.533768",
"0.5310631",
"0.5296912",
"0.5202553",
"0.51981044",
"0.51744926",
"0.5169028",
"0.51378834",
"0.5128839",
"0.5111966",
"0.5111103",
"0.5097656",
"0.5084358"
] | 0.6724956 | 0 |
Publish a registration to the core, listing the API commands. | def register_to_core(self):
self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api})) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cli(ctx, input, output):\n print(\"Registering...\")\n ctx.obj = dict(\n component=Registration,\n input=input,\n output=output,\n stack=ImageStack.from_path_or_url(input),\n )",
"def register_routes(self, api):\n # Device Registration\n api.add_resource(controllers.UserDeviceRegistration, '/device-registration')",
"def register_endpoints(api):\n api.add_resource(EventList, '/events')",
"def auto_discover():\n auto_registration(\"actions\")",
"def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n if 'paths' not in self.spec:\n raise RuntimeError(f'{self.spec_path} is not a valid OpenAPI spec.')\n for path_, path_info in self.spec['paths'].items():\n for verb, method_info in path_info.items():\n if verb.upper() not in HTTP_VERBS:\n continue\n ref_ext = {}\n ref_ext['tag'] = method_info['tags'][0]\n ref_ext['operation_id'] = method_info['operationId']\n key = self.REGISTRY_KEY_TEMPLATE.format(verb=verb.upper(),\n command=path_)\n self.registry[key] = ref_ext\n logger.debug(f'Generated registry:\\n{self.registry}')",
"def registered(self):\n log.info(\"Registered.\")\n pass",
"def get(self, request, format=None):\n operations = register.meta\n return Response(operations)",
"def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))",
"def publish():\n pass",
"async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction",
"def register(self):\n self._register_dockyard()\n self._register_docker()",
"def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"",
"def publish(self):\n return",
"def register_commands(self):\n\n with open(self._full_register_name, 'r') as file_to_read:\n command_register = json.loads(file_to_read.read())\n\n commands = command_register.get(\"commands\")\n if commands is None:\n logging.error(\"Command register is incorrect\")\n return []\n\n command_objects = []\n\n for command in commands:\n module_name = command.get(\"module\")\n class_name = command.get(\"class_name\")\n\n if (module_name is None) or (class_name is None):\n logging.error(\"Commands in the register are described in incorrect way.\")\n raise KeyError()\n\n try:\n command_module = importlib.import_module(module_name)\n command_class = getattr(command_module, class_name)\n command_object = command_class()\n command_objects.append(command_object)\n except ModuleNotFoundError as e:\n logging.error(\"Command modules specified in the register are not found!\")\n raise e\n\n return command_objects",
"def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))",
"def register_events():\n return [Events.Command(\"example_command\")]",
"def test_registration(self):\n models = [BlogEntry, BlogRoll]\n pubsub.register(models)\n self.assertTrue(set(models).issubset(pubsub.registry))",
"def publish(self, kpi_dict):\n pass",
"def register(blueprint):\n blueprint.add_route(post, \"/call-records\", methods=['POST'])",
"def register():\n\n print(\"Request: \", request)\n print(\"foo: \", request.app.ep_mapping)\n print(json.load(request.body))\n endpoint_details = json.load(request.body)\n print(endpoint_details)\n\n # Here we want to start an executor client.\n # Make sure to not put anything into the client, until after an interchange has\n # connected to avoid clogging up the pipe. Submits will block if the client has\n # no endpoint connected.\n endpoint_id = str(uuid.uuid4())\n fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)\n connection_info = fw.connection_info\n ret_package = {'endpoint_id': endpoint_id}\n ret_package.update(connection_info)\n print(\"Ret_package : \", ret_package)\n\n print(\"Ep_id: \", endpoint_id)\n request.app.ep_mapping[endpoint_id] = ret_package\n return ret_package",
"def run(self, registry):",
"def plugin_register(api):\n api.range_tool_register('Example/01', MyPlugin)\n return True",
"def register(self):\n raise NotImplementedError(\"Should have implemented this\")",
"def on_register(self, response):\n print('You have been registered!')\n self.on_auth(response)",
"async def register_completions(ls: RobotFrameworkLanguageServer, *args):\n params = RegistrationParams([Registration(str(uuid.uuid4()), COMPLETION, {\"triggerCharacters\": \"[':']\"})])\n response = await ls.register_capability_async(params)\n if response is None:\n ls.show_message(\"Successfully registered completions method\")\n else:\n ls.show_message(\"Error happened during completions registration.\", MessageType.Error)",
"def registration():\n registration_page = Registration()\n registration_page.registration_main_page()",
"def register(self):\n raise NotImplementedError",
"def register(self):\n raise NotImplementedError",
"def regist_list(request):\n if request.method == 'GET':\n obj = Registration.objects.all()\n serializer = RegistrationSerializer(obj, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = RegistrationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def register_resources(self):\n raise NotImplementedError"
] | [
"0.6083113",
"0.6011665",
"0.59751356",
"0.5912697",
"0.5612715",
"0.55548126",
"0.5551435",
"0.5506128",
"0.54874337",
"0.53894794",
"0.5374884",
"0.5348596",
"0.5333862",
"0.53083724",
"0.52938396",
"0.52787286",
"0.52458644",
"0.5237489",
"0.5214423",
"0.52052796",
"0.5195345",
"0.51941556",
"0.5175724",
"0.51690984",
"0.5162744",
"0.51612574",
"0.5149885",
"0.5149885",
"0.51403415",
"0.5127455"
] | 0.6798816 | 0 |
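As an illustrative aside (not part of the dataset row), here is a minimal standalone sketch of the publish in the `register_to_core` record above, assuming pika as the client library — the `basic_publish(exchange=..., routing_key=..., body=...)` call in the row matches pika's API. The peripheral name and command list are hypothetical placeholders.

```python
import json

import pika

# Hypothetical peripheral name and API command listing; the record above sends {self.name: api}.
PERIPHERAL_NAME = "thermo_sensor"
API_COMMANDS = ["status", "read", "reset"]

connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()

# Make sure the core's registration queue exists before publishing to it.
channel.queue_declare(queue="peripheral_register")

# Publish on the default exchange, routed directly to the 'peripheral_register' queue.
channel.basic_publish(
    exchange="",
    routing_key="peripheral_register",
    body=json.dumps({PERIPHERAL_NAME: API_COMMANDS}),
)
connection.close()
```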
Subscribe to the queue matching the instance's name. Pass the command to the process_command function. | def subscribe_to_commands(self):
self.basic_consume(self.process_command, queue=self.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_command(self, cmd):\n self.command_queue.put(cmd)",
"def Enqueue(self, command):\n\n self.queue.put(command)",
"def command(self, command_string):\n self.__command_queue.append(command_string)",
"def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)\n self.channel.basic_consume(queue=queue,\n on_message_callback=action,\n auto_ack=True)\n self.channel.start_consuming()",
"def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)",
"def _process_command_queue(self, command_queue):\n while True:\n if len(command_queue) > 0:\n command_tuple = command_queue.pop()\n func, kwargs = command_tuple[0], command_tuple[1]\n getattr(self, func)(**kwargs)\n time.sleep(.5)",
"def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)",
"def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)",
"def sendCommand(self, command:str=\"?\"):\n self.commandQueue.put(command)\n #self.queueLock.release()\n pass",
"def subscribe(self, queue, action=None):\n if action:\n self.broker.subscribe(queue, action)\n else:\n self.broker.subscribe(queue)",
"def _send(self, command, payload):\n self.work_queue_client.send(command, payload)",
"def enqueue(self, command):\n\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n q = []\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if command not in q:\n q.append(command)\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()",
"def on_queued_command(self, event, index=None):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n api_loop(\n event.channel.send_message,\n \"There aren't any songs queued right now.\",\n )\n elif index is None:\n api_loop(\n event.channel.send_message,\n \"There are {} songs queued ({} minutes). To get a specific song's info, just do this command + index.\".format(\n len(self.get_player(event.guild.id).queue),\n self.minutes_format(self.get_player(\n event.guild.id,\n ).queue_length),\n ),\n )\n elif (index.replace(\"-\", \"\").strip(\" \").isdigit() and\n 0 <= (int(index.replace(\"-\", \"\").strip(\" \")) - 1) <=\n len(self.get_player(event.guild.id).queue)):\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[\n int(index.replace(\"-\", \"\").strip(\" \")) - 1\n ].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"The song at index ``{}`` is ``{}`` by ``{}`` with length ``{}`` minutes and is sourced from ``{}``.\".format(\n int(index.replace(\"-\", \"\").strip(\" \")),\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n elif index.replace(\"-\", \"\").isdigit():\n api_loop(event.channel.send_message, \"Invalid index input.\")\n else:\n matched_list = dict()\n for item in self.get_player(event.guild.id).queue:\n ratio = partial_ratio(item.metadata[\"title\"], index)\n if ratio >= 70:\n matched_list[\"#{} ({}% match)\".format(\n self.get_player(event.guild.id).queue.index(item)+1,\n ratio,\n )] = item.metadata[\"title\"]\n if matched_list:\n embed = bot.generic_embed_values(\n title=\"Queue search results\",\n footer_text=\"Requested by {}\".format(event.author),\n non_inlines={\n k: matched_list[k] for k in list(matched_list)[-25:]\n },\n footer_img=event.author.get_avatar_url(size=32),\n timestamp=event.msg.timestamp.isoformat(),\n )\n api_loop(event.channel.send_message, embed=embed)\n else:\n api_loop(\n event.channel.send_message,\n \"No similar items found in queue.\",\n )",
"def start_cron(message_queue, queue_name=settings.APN_SEARCH_QUEUE, handler_class=MessageHandler):\n logging.info('Starting search update script.')\n message_handler = handler_class().process_message\n with message_queue.open(queue_name) as queue:\n for message_body, message_id in queue:\n message_handler(message_body, message_id, queue)",
"def command(self, msg):\n self.cmd_pub.publish(msg)",
"def _enqueue_server_command(self, command: ServerCommand) -> None:\n with self._subprocess_commands_lock:\n self._subprocess_commands.append(command)",
"async def _queue(self, msg):\n if msg.voice_client is not None:\n if msg.guild.id in self.player:\n if self.player[msg.guild.id]['queue']:\n emb = discord.Embed(\n colour=self.random_color, title='queue')\n emb.set_footer(\n text=f'Command used by {msg.author.name}', icon_url=msg.author.avatar_url)\n for i in self.player[msg.guild.id]['queue']:\n emb.add_field(\n name=f\"**{i['author'].author.name}**\", value=i['title'], inline=False)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(\"No songs in queue\")",
"def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()",
"def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)",
"def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()",
"def listen_commands(self):\n self._consumer_rabbit_connection = BlockingConnection(ConnectionParameters(self._rabbit_host))\n self._consumer_rabbit_channel = self._consumer_rabbit_connection.channel()\n\n # Listen buy/sell orders from external system\n self._listen_queue(QueueName.CMD_BUYSELL, self.on_cmd_buysell)\n self._listen_queue(QueueName.MSG_RAW, self.on_raw_msg)\n # self._logger.info(f\"Declaring rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.queue_declare(queue=QueueName.CMD_BUYSELL, durable=True, auto_delete=True)\n # self._logger.info(f\"Consiming to rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.basic_consume(QueueName.CMD_BUYSELL, self.on_cmd_buysell,\n # consumer_tag=\"WebQuikBroker\")\n self._consumer_rabbit_channel.start_consuming()",
"def receive_key(self, key):\n self.queue.put(key)",
"def send_msg(self, my_queue, my_msg):",
"def add_command(self, name, command=None, scheduler=None, limit_queue=None, on_full_queue=\"skip_current\", priority=0):\n if name in self._commands:\n raise ValueError(\"command {} already exists\".format(name))\n if command is None:\n command=getattr(self,name)\n if scheduler is None:\n scheduler=callsync.QQueueLengthLimitScheduler(max_len=limit_queue or 0,on_full_queue=on_full_queue)\n self._commands[name]=(command,scheduler)\n self._add_scheduler(scheduler,priority)\n self._override_command_method(name)",
"def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)",
"def add_to_queue(self, word):\n self.q.put(word)\n print(\"word \\'{}\\' added in clients queue\".format(word))",
"def runQueueEnqueue(self):\n raise NotImplementedError",
"def send(self, job_command, payload):\n self.work_queue_client.send(job_command, payload)",
"def _listen_queue(self, queue, callback):\n # Listen buy/sell orders from external system\n self._logger.info(f\"Declaring rabbit queue {queue}\")\n self._consumer_rabbit_channel.queue_declare(queue=queue, durable=True, auto_delete=True)\n self._logger.info(f\"Declaring callback to rabbit queue: {queue}, callback: {callback}\")\n self._consumer_rabbit_channel.basic_consume(queue, callback,\n consumer_tag=queue)",
"def queue_consumer(self, q):\n\n self.status = 'Running...'\n\n while True:\n try:\n msg = q.get_nowait()\n if msg is None:\n break\n self.update_plot(msg)\n except Queue.Empty:\n time.sleep(0.1)\n\n self.status = 'Done'"
] | [
"0.63098824",
"0.62418693",
"0.6216443",
"0.6078142",
"0.6037645",
"0.5994807",
"0.59508395",
"0.5937547",
"0.5917672",
"0.5872839",
"0.58456135",
"0.58215594",
"0.5785163",
"0.5764144",
"0.5762315",
"0.5658088",
"0.55472076",
"0.55459744",
"0.5537582",
"0.5512194",
"0.5503746",
"0.54825854",
"0.54805356",
"0.54773724",
"0.54315215",
"0.5409387",
"0.5345059",
"0.53336084",
"0.5333292",
"0.53247607"
] | 0.75445706 | 0 |
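The `subscribe_to_commands` record above uses the positional, pre-1.0 pika form `basic_consume(callback, queue=...)`. A self-contained sketch of the same subscription with the pika >= 1.0 keyword API follows; the queue name and handler are hypothetical.

```python
import pika


def process_command(channel, method, properties, body):
    # Hypothetical command handler standing in for self.process_command.
    print("received command:", body.decode())
    channel.basic_ack(delivery_tag=method.delivery_tag)


connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()

# The queue is named after the peripheral instance, as in queue=self.name above.
channel.queue_declare(queue="thermo_sensor")
channel.basic_consume(queue="thermo_sensor", on_message_callback=process_command)

# Blocks and dispatches each incoming message to process_command.
channel.start_consuming()
```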
Run the chat client application loop. When this function exits, the application will stop | def run_chat_client():
while must_run:
print_menu()
action = select_user_action()
perform_user_action(action)
print("Thanks for watching. Like and subscribe! 👍") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")",
"def run_message_loop(self):\n raise NotImplementedError",
"def startListening(self):\n \n self.listener_thread = threading.Thread(target=self.listening, daemon=True)\n self.listener_thread.start()\n\n # stateupdate = threading.Thread(target=self.showStatus, daemon=True)\n # stateupdate.start()\n\n # Main App Loop (Keeps the Client opened)\n while self.listener_thread.is_alive():\n time.sleep(1)\n else:\n print('Shutting Main Thread-1')\n sys.exit()",
"def run(self):\n self.logger.info(\"Starting messenger.\")\n self.recv()",
"def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False",
"def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()",
"async def loop(self):\n\t\twhile self.active:\n\t\t\ttry:\n\t\t\t\tawait self.process_data(await self.websocket.recv())\n\t\t\texcept exceptions.ClientError as e:\n\t\t\t\tawait self.send(0, str(e))\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tawait self.unregister()",
"def run(self):\n\n try:\n while True:\n self.log.info(\"Waiting for a connection...\")\n self.mc.events.post('client_disconnected')\n self.connection, client_address = self.socket.accept()\n\n self.log.info(\"Received connection from: %s:%s\",\n client_address[0], client_address[1])\n self.mc.events.post('client_connected',\n address=client_address[0],\n port=client_address[1])\n\n # Receive the data in small chunks and retransmit it\n while True:\n try:\n data = self.connection.recv(4096)\n if data:\n commands = data.split(\"\\n\")\n for cmd in commands:\n if cmd:\n self.process_received_message(cmd)\n else:\n # no more data\n break\n\n except:\n if self.mc.config['mediacontroller']['exit_on_disconnect']:\n self.mc.shutdown()\n else:\n break\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)",
"def run_loop(self):\r\n server_log.info('Server now accepting client connections.')\r\n while not self.clients_done():\r\n asyncore.loop(timeout=config[\"server_timeout\"], count=config[\"server_loop_count\"])",
"def loop_start( self ):\n self.client.loop_start()",
"def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()",
"def run(self):\n if not self.running:\n self.loop.run_forever()",
"def _receive_message_loop(self):\n\n while True:\n try:\n message = self.connection_socket.recv(4096)\n if len(message) > 0:\n self.add_message_to_chat(message.decode('utf-8'))\n sleep(0.2)\n\n except ConnectionResetError:\n # messagebox.showerror(\"Client dropped\", \"The other person has dropped from the connection.\")\n self.root.destroy()",
"def Listen(self):\n while True:\n time.sleep(1)",
"def start(self):\n self.logger.debug(\"Starting loop\")\n self.client.loop_start()",
"def run(self):\n while True:\n msg = self.recv()",
"def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()",
"def listening(app, conn_or_socket):\n time.sleep(1)\n conn_or_socket.settimeout(None)\n while app.connected:\n if app.quit:\n conn_or_socket.close()\n break\n try:\n # Receive Message from Partner\n data = conn_or_socket.recv(1024)\n except Exception as msg:\n #print(msg)\n #print(\"!!!!!!!!!!!!!!!!!\")\n break\n # Parse Message \n message = str(data, \"utf-8\")\n if not message:\n conn_or_socket.close()\n print(\"Partner disconnected.\")\n app.chat_content = app.chat_content + \"\\n\" + f\"{app.friend_name}: {message}\"\n app.gui.setMessage(\"chat_output\", app.chat_content)\n #print(f\"{app.friend_name}: {message}\")\n #print(\"Chat not listening anymore\")\n app.connected = False\n app.chat_content = \"Partner Disconnected\"\n try:\n app.gui.setMessage(\"chat_output\", app.chat_content)\n except:\n pass",
"def run(self):\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5) # Allows up to 5 waiting clients\n\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n self.myView.updateStatus('... connected from ' + str(address))\n handler = ClientHandler(client, self.bank, self.myView)\n handler.start()\n\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus(\"Server shutting down.\")",
"def useChat(self):\n # Implements a subprocess to run the Kuri robot simultaneously with the user input loop\n proc_stdin = io.TextIOWrapper(self.proc.stdin, encoding='utf-8', line_buffering=True)\n\n while True:\n txt = input(\"Talk to me! (Type 'q' to quit) \").lower()\n if txt == 'q':\n proc_stdin.write('q\\n')\n quit()\n else:\n sentiment = self.sd.getSentiment(txt)\n proc_stdin.write(sentiment + '\\n')\n print(\"Sentiment: \" + sentiment + '\\n')",
"def run(self):\n self.connect()\n self.run_forever()",
"def run(self):\n if self._main_loop:\n return\n self._main_loop = GObject.MainLoop()\n self._disconnect_all()\n self._register()\n logger.info(\"--- Mainloop started ---\")\n logger.info(\"Hub is ready for onboarding\")\n try:\n self._main_loop.run()\n except KeyboardInterrupt:\n # ignore exception as it is a valid way to exit the program\n # and skip to finally clause\n pass\n except Exception as e:\n logger.error(e)\n finally:\n logger.info(\"--- Mainloop finished ---\")\n self._unregister()\n self._main_loop.quit()\n self._main_loop = None",
"async def run(self):\n self.add_msg(\"Type your nickname\")\n # Start the new thread that will listen to responses, while the main thread is sending answers\n start_new_thread(self.listenToRespone, ())",
"def handle_chat(self):\n while True:\n if self.chat_breakout:\n return\n\n time.sleep(1)\n messages = \"\"\n for i in range(5):\n try:\n messages += f\"{self.queue.popleft()}\\n\"\n except IndexError:\n # Queue is empty but no worries\n continue\n\n if messages != \"\":\n self.loop.create_task(\n self.ingame_cog.send_chat_to_discord(\n self.bot, self.channel, messages\n )\n )",
"def run(self):\n print \"Starting HumanGreeter\"\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print \"Interrupted by user, stopping HumanGreeter\"\n self.face_detection.unsubscribe(\"HumanGreeter\")\n # stop\n sys.exit(0)",
"def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r",
"def listen(self):\n\n\t\tprint(\"Connected to the room\")\n\n\t\t#: Watch for messages coming from the server.\n\t\twhile self.joined:\n\n\t\t\t#: Wait for a message to be recieved from the server.\n\t\t\ttry:\n\t\t\t\t#: Store a most recent message for testing purposes.\n\t\t\t\tself.most_recent_message = self.client.recv(1024).decode()\n\t\t\t\tself.messages.append(self.most_recent_message)\n\t\t\texcept OSError:\n\t\t\t\tprint(\"Connection to the server has been lost.\")\n\n\t\t\t\t#: Quit from the server to do cleanup.\n\t\t\t\tself.quit(False)",
"def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)",
"def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)",
"def run():\n listen_active_email_channel()"
] | [
"0.6931434",
"0.688994",
"0.68419933",
"0.6826242",
"0.6802301",
"0.67320764",
"0.6727489",
"0.668575",
"0.6628602",
"0.6600071",
"0.656014",
"0.6525707",
"0.6512084",
"0.6486234",
"0.6479219",
"0.64297473",
"0.6423571",
"0.64088786",
"0.6408701",
"0.6398141",
"0.6387875",
"0.6324894",
"0.63012457",
"0.62996036",
"0.6294083",
"0.62831503",
"0.6280414",
"0.62797695",
"0.6252174",
"0.62505287"
] | 0.78608006 | 0 |
Print the menu showing the available options | def print_menu():
print("==============================================")
print("What do you want to do now? ")
print("==============================================")
print("Available options:")
i = 1
for a in available_actions:
if current_state in a["valid_states"]:
# Only hint about the action if the current state allows it
print(" %i) %s" % (i, a["description"]))
i += 1
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n self._print_custom()\n\n # Display the options alphabetically.\n option_names = list(self.__options.keys())\n option_names.sort()\n for option in option_names:\n desc, command = self.__options[option]\n print(\"\\t%s: %s\" % (option, desc))",
"def print_menu():\r\n \r\n print('Menu: \\n\\n[1] Load Inventory from File\\n[2] Add CD\\n[3] Display Current Inventory')\r\n print('[4] Delete CD from Inventory\\n[5] Save Inventory to file\\n[0] Exit Program\\n')",
"def print_menu():\r\n\r\n print('Menu\\n\\n[l] load Inventory from file\\n[a] Add CD\\n[i] Display Current Inventory')\r\n print('[d] Delete CD from Inventory\\n[s] Save Inventory to file\\n[x] exit\\n')",
"def print_menu():\r\n\r\n print('Menu\\n\\n[l] load Inventory from file\\n[a] Add CD\\n[i] Display Current Inventory')\r\n print('[d] delete CD from Inventory\\n[s] Save Inventory to file\\n[x] exit\\n')",
"def OutputMenuItems():\r\n print('''\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Save Data to File\r\n 4) Exit Program\r\n ''')\r\n print() # Add an extra line for looks\r",
"def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))",
"def print_menu():\n print(\"\\nMenu:\")\n print(\"\\t\" + colored('+', 'red') + \" for adding a complex number\")\n print(\"\\t\" + colored('s', 'red') + \" for showing the list of all complex numbers\")\n print(\"\\t\" + colored('f', 'red') + \" for filtering the list\")\n print(\"\\t\\t-the new list will contain only the numbers between indices `start` and `end`\")\n print(\"\\t\" + colored('u', 'red') + \" to undo the last operation\")\n print(\"\\t\" + colored('x', 'red') + \" to close the calculator\")",
"def print_menu():\n print()\n print(\"Main Menu\")\n print(\"---------\")\n print(\"1 - Process a new data file\")\n print(\"2 - Choose units\")\n print(\"3 - Edit room filter\")\n print(\"4 - Show summary statistics\")\n print(\"5 - Show temperature by date and time\")\n print(\"6 - Show histogram of temperatures\")\n print(\"7 - Quit\")\n print()",
"def print_menu_Tasks():\r\n print(\"\"\"\r\n Menu of Options\r\n 1) Add a new keyboard\r\n 2) Save Keyboards to File\r\n 3) Show current keyboard list\r\n 4) Exit Program\r\n \"\"\")",
"def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')",
"def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')",
"def printMenu():\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Cargar Catalogo de peliculas\")\n print(\"3- Buscar productoras\")\n print(\"0- Salir\")",
"def printMenu():\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Contar los elementos de la Lista\")\n print(\"3- Contar elementos filtrados por palabra clave\")\n print(\"4- Consultar elementos a partir de dos listas\")\n print(\"5- Consultar buenas peliculas\")\n print(\"0- Salir\")",
"def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display",
"def print_menu(title, list_options, exit_message):\n print((\"\\n\" + title + \":\"))\n for i in range(1, len(list_options) + 1):\n print(\"(\" + str(i) + \") \" + list_options[i - 1])\n print(\"(0) \" + exit_message)",
"def display_menu():\n print(\"\"\"\\nChoose option:\n (1) List statistics\n (2) Display 3 cities with longest names\n (3) Display county's name with the largest number of communities\n (4) Display locations, that belong to more than one category\n (5) Advanced search\n (0) Exit program\"\"\")",
"def print_menu():\r\n clear()\r\n print(\"Ratatouille Server\")\r\n print(\"---------------------------\")\r\n print(\"\")\r\n\r\n for (index, func) in MENU.items():\r\n print(\"%d - %s\" % (index, func.__name__))\r\n\r\n return raw_input(\"Choose an option: \").lstrip()",
"def menu_eng(self):\n intro = \"Here are the options available for you to choose from\"\n option1 = \"[1] UNLOCK BY CREDENTIALS\"\n option2 = \"[2] UNLOCK BY QR CODE\"\n option3 = \"[3] UNLOCK WITH BLUETOOTH\"\n option4 = \"[4] BACK\"\n print(intro, option1, option2, option3, option4, sep='\\n')",
"def display_menu_options(length):\r\n print('\\n***********************************************\\nVeuillez choisir une option entre 1 et', str(length))",
"def display_menu(self):\n print(\"\"\"\nLogistic System Menu\n1. Add Vehicles\n2. Add Item To The Cart\n3. Complete The Order\n4. Track The Order\n5. Quit \"\"\")",
"def options_menu(title, options):\n\tprint width_screen * \"-\"\n\tprint(title.center(width_screen))\n #\tprint '{:^{width_screen}}'.format(title,width_screen)\n\tprint width_screen * \"-\"\n\tfor x in range(len(options)):\n\t\tprint str(x+1) + \". {}\".format(options[x])\n\tprint width_screen * \"-\"\n\treturn(options)",
"def print_product_menu():\r\n print(\"\"\"\r\n Menu\r\n 1 - Display Product Price Inventory\r\n 2 - Add New Product\r\n 3 - Save Session\r\n 4 - Exit Session \r\n \"\"\")",
"def henhouseDisplayMenu () :\r\n print('1.Predict egg production')\r\n print('2.Display needs')\r\n print('0.Exit henhouse management')\r\n print()\r\n print('Please choose an option from the above menu')",
"def menu():\n\tprint (\"\\n\\tSeleccionar una opcion\")\n\n\tprint (\"\\t1.- Resistencia en un Alambre \")\n\n\tprint (\"\\t2.- Voltaje\")\n\n\tprint (\"\\t3.- Corriente\")\n\n print (\"\\t4.- Resistencia\")\n\n\tprint (\"\\t5.- salir\")",
"def display_menu():\n print()\n print(\"Commands:\")\n print(\" quit - Quit\")\n print(\" new - Create new account\")\n print(\" display - Display account information\")\n print(\" deposit - Desposit money\")\n print(\" check - Write a check\")",
"def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)",
"def showMenu():\n print( \"1. Create New User\" )\n print( \"2. Authorize\" )\n print( \"3. Send SMS\" )\n print( \"4. Send Email\" )\n print( \"5. Get Recently Sent Message\" )\n print( \"6. Exit\" )",
"def display_menu(self):\n print(\"\\n{}\".format(self.message))\n for i, h in enumerate(self.menu_handlers):\n # iterate through handlers and display menu text\n print(\"\\t{}. {}\".format(i+1, h.get_display_text()))\n # add option for exiting the program\n print(\"\\t{}. {}\".format(0, \"Exit\"))",
"def DisplayMenu():\n print(\"\\n\\t\\t\\t Video Store \\n\")\n print(\"\"\" 1. Add new Title\n 2. Check out a video\n 3. Return a video\n 4. Receive a rating\n 5. Delete title\n 6. List Inventory\n E. Exit\n \"\"\")",
"def main_menu():\n print('\\n', '='*50, sep='')\n print(\"Choose an option by number: \")\n print(\"\\t 1 = Create or Connect to a new file database\")\n print(\"\\t 2 = Create a new memory database\")\n print('Type exit to quit program!')\n print('='*50, '\\n', sep='')"
] | [
"0.83484006",
"0.80100524",
"0.79167485",
"0.79120266",
"0.7911782",
"0.7821441",
"0.7816",
"0.7809606",
"0.7785762",
"0.77756435",
"0.77276397",
"0.7672742",
"0.7655172",
"0.75879997",
"0.7581685",
"0.7477574",
"0.74568194",
"0.73829365",
"0.73222256",
"0.7274187",
"0.7227577",
"0.7220892",
"0.7205434",
"0.7184923",
"0.7176142",
"0.71540016",
"0.71343493",
"0.7099784",
"0.7087489",
"0.70736736"
] | 0.828377 | 1 |
Ask the user to choose an action by entering the index of the action | def select_user_action():
number_of_actions = len(available_actions)
hint = "Enter the number of your choice (1..%i):" % number_of_actions
choice = input(hint)
# Try to convert the input to an integer
try:
choice_int = int(choice)
except ValueError:
choice_int = -1
if 1 <= choice_int <= number_of_actions:
action = choice_int - 1
else:
action = None
return action | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def choose_action(self):\r\n pass",
"def get_action(player):\n print_action(player)\n chosen_action_index = int(input('Please indicate your selection from the following list by inputting the number: '))\n return player.available_actions[chosen_action_index]",
"def select_action(self):\n pass",
"def choose_action(self, board, possible_actions):\r\n self._print_board(board)\r\n while True:\r\n user_choice = int(input(\"Which Field?\"))\r\n if user_choice in possible_actions:\r\n return user_choice\r\n else:\r\n print('Action not possible!')",
"def obtain_action(self, timestep):\r\n\t\t# Loops constantly until a valid input is obtained.\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\t# Tries to obtain a valid input manually and convert it to an\r\n\t\t\t\t# integer.\r\n\t\t\t\taction = int(input('Please provide an input action index between 0 and (number of actions - 1): %i: ' % (self.num_actions-1)))\r\n\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint('Invalid input detected, try again.')\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# Checks if the input is within the acceptable range of action\r\n\t\t\t# index values.\r\n\t\t\tif 0 <= action < self.num_actions:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint('Action should be an index between 0 and (number of actions - 1): %i' % (self.num_actions-1))\r\n\r\n\t\treturn action",
"def askForAction(self, role, index, currPos, possActions):\r\n message = str(role) + \" -> \" + str(index)\r\n if role == AgentRole.COP or Settings.isDebug():\r\n message += \"\\nCurrent position : \" + str(currPos)\r\n message += \"\\nPossible actions :\\n \" + \"\\n \".join([a.__repr__() for a in possActions])\r\n self.showMessage(message, surface=self.infoPanel, bg_color=gu.INFO_PANEL_COLOR)\r\n request = raw_input(\"Destination, TicketType = \")\r\n while \",\" not in request:\r\n print \"You missed the comma!\"\r\n request = raw_input(\"Destination, TicketType = \")\r\n fields = request.split(\",\")\r\n dest, ticket = int(fields[0]), fields[1].strip().upper()\r\n return dest, ticket",
"def choose_action(self, board, possible_actions):\r\n pass",
"def askForAction(self, role, index, currPos, possActions):\r\n print str(role) + \" -> \" + str(index)\r\n print \"Current position : \" + str(currPos)\r\n print \"Possible actions : \" + str(possActions)\r\n # request = raw_input(\"Destination, TicketType = \").split(',')\r\n request = raw_input(\"Destination, TicketType = \")\r\n while \",\" not in request:\r\n print \"You missed the comma!\"\r\n request = raw_input(\"Destination, TicketType = \")\r\n fields = request.split(\",\")\r\n dest, ticket = int(fields[0]), fields[1].strip().upper()\r\n return dest, ticket",
"def get_next_action(self):\n chosen_action = None\n while chosen_action is None:\n choice = input('Action: ')\n chosen_action = KEY_MAP.get(choice)\n\n return chosen_action",
"def select_action(self, state):",
"def choose_action(self, valid_list):\n \n action_str = input(\"Choose action: \").lower()\n print()\n \n if action_str in valid_list:\n return action_str\n \n else:\n print(\"Invalid action!\")\n return False",
"def main_menu_selection():\n action = input('''\n Pleaes select one:\n\n a - Send a thank you\n b - Create a report\n c - Quit\n >''')\n\n return action.strip()",
"def choose_action(self, obs, **kwargs):\n pass",
"def action(self,input,session,context):\n #index = int(input) - 1\n #if index < 0:\n # raise IndexError('Menu option can not be less than 1')\n def make_index(elt):\n idx, item = elt\n if item.custom_index is not None: return str(item.custom_index)\n else: return str(idx)\n\n valid_inputs = map(make_index, enumerate(self.menu_items))\n index = valid_inputs.index(input)\n\n return self.menu_items[index].next_screen",
"def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]",
"def ChooseAction(self):\n self.lastAction = None\n self.lastState = None\n if(self.attention is None or self.attention == \"\"): return\n # find best action for the currently attended node\n actions = list(self.vi.Q[self.states.index(self.attention)])\n actionIndex = actions.index(max(actions))\n actionName = self.actions[actionIndex]\n # execute the best action for the currently attended node\n self.nodes[actionName].Activate()\n self.lastAction = actionName\n self.lastState = self.attention",
"def chooseAction(self):\n print \"nothing\"\n pass",
"def action(self, option):\n try:\n i = int(option) - 1\n try:\n task = self.tasks[i]\n print(\"\\n*** Steps for\", task.name, \"P\" + str(task.priority), \"***\")\n s = 0\n for step in task.steps:\n s += 1\n print(\"\\t\", s, \". \", step)\n input()\n print(\"*********************\" + len(task.name)*\"*\")\n except IndexError as e:\n print(\"\\n\\\"\" + str(i) + \"\\\" is not a valid task index.\", type(e))\n except ValueError:\n if option in (\":A\", \"A\"):\n self.assign()\n elif option in (\":D\", \"D\"):\n self.delete()\n elif option in (\":Q\", \"Q\"):\n pass\n else:\n print(\"\\n\\\"\" + option + \"\\\" is not a valid option.\")",
"def action(self,input,session,context):\n index = int(input) - 1\n if index < 0:\n raise IndexError('Menu option can not be less than 1')\n return self.menu_items[index].next_screen",
"def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action",
"def choose_action(self, game_state):\n util.raise_not_defined()",
"def select_action(self, state):\n pass",
"def choose_action(self, board):\n raise NotImplementedError",
"def choose_action(self, state, task=0):\n pass",
"def onActionChosen(self, agent, action):\n\n pass",
"def select_action(self, state):\n\t\treturn sample(range(0, self.action_space), 1)[0]",
"def _take_action(self, action_idx: ActionType) -> None:\n raise NotImplementedError(\"\")",
"def choose_action(self, *args, **kwargs):\n return NotImplementedError",
"def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")",
"def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)"
] | [
"0.77464926",
"0.7433682",
"0.72635955",
"0.7202781",
"0.71235913",
"0.70796835",
"0.70719695",
"0.70426613",
"0.69958633",
"0.69911724",
"0.69026655",
"0.69006485",
"0.68855083",
"0.6853275",
"0.68034893",
"0.67918736",
"0.67697215",
"0.6714882",
"0.6669915",
"0.66696125",
"0.66655326",
"0.6625247",
"0.6599461",
"0.65930176",
"0.65028065",
"0.64664567",
"0.64221084",
"0.64196086",
"0.6401485",
"0.6369605"
] | 0.77177477 | 1 |
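`print_menu`, `select_user_action` and the `run_chat_client` loop in the records above all read module-level state that the rows do not include. Below is one assumed shape that state could take, with a hypothetical dispatcher wiring the pieces together; the real `perform_user_action` and action list are not part of the dataset.

```python
current_state = "disconnected"
must_run = True


def quit_application():
    global must_run
    must_run = False


# Each action advertises the states in which the menu should offer it
# (print_menu reads "description" and "valid_states").
available_actions = [
    {"description": "Connect to the server", "valid_states": ["disconnected"],
     "function": lambda: print("connecting...")},
    {"description": "Send a public message", "valid_states": ["connected"],
     "function": lambda: print("sending...")},
    {"description": "Quit the application",
     "valid_states": ["disconnected", "connected"], "function": quit_application},
]


def perform_user_action(action_index):
    # Hypothetical dispatcher: indexes the full available_actions list,
    # matching the bounds check done in select_user_action above.
    if action_index is None or not 0 <= action_index < len(available_actions):
        print("Invalid choice, please try again.")
        return
    available_actions[action_index]["function"]()
```

With these in place, the `run_chat_client` loop from the earlier record keeps prompting until some action flips `must_run` to False.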
Input `text` into the text field on the page. | def enter_text(self, text):
self.q(css='#fixture input').fill(text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generic_input_text(self, element_id, text):\n self._generic_input(element_id, text)",
"def input(self, text):\n self.element.clear()\n self.element.send_keys(text)",
"def input_text(self, element: Union[WebElement, Tuple[By, str]], text: str):\n element = self.find_element(element)\n element.send_keys(text)",
"def ui_input_text() -> str:\n\ttext = input('enter your text ')\n\treturn text",
"def set_text(self, input_text):\n self.clear()\n logging.getLogger(__name__).info(\n \"This text set to input field: {}\\nby = {}\\nvalue = {}\".format(input_text, self.by, self.value))\n self.driver.find_element(self.by, self.value).send_keys(input_text)",
"def text(self, text):\n\n self._text = text",
"def text(self, text):\n\n self._text = text",
"def text(self, text):\n\n self._text = text",
"def text(self, text):\n\n self._text = text",
"def text(self, text):\n\n self._text = text",
"def text(self, text):\n\n self._text = text",
"def set_text(self, text):\n\n self.text = text",
"def text(self, txt):\n\n self.web_element.clear()\n self.web_element.send_keys(txt)\n return None",
"def type_text(self, element, text):\n try:\n if element.is_displayed():\n element.clear()\n element.send_keys(text)\n print(text + \" is added to textbox\")\n else:\n print(element + \" is not displaying\")\n except Exception as e:\n print(str(e))",
"def input_text(self, text):\n self.android_device_driver.adb.exec_adb_cmd(\"shell input text \" +\n text).wait()",
"def text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\")\n\n self._text = text",
"def text(self, text, enter=True):\n self.ime.text(text)\n\n if enter:\n self.adb.shell_command(\"input keyevent ENTER\")",
"def inp(text):\r\n input(text)",
"def set_text(self, text):\n self.set_text_f(\"%s\", text)",
"def write_text(self, text):\n Application.write_text(self, text, self.TXT_FIELD)",
"def SetText(self, text):\r\n\r\n self._text = text",
"def text_value(self, text_value):\n\n self._text_value = text_value",
"def add_text(self, text):\n self.text = self.text + text",
"def get_text(text_input):\r\n return text_input",
"def update_text(self, text):\n self.response['text'] = text",
"def _type_text(text):\n FlaUIKeyboard.Type(str(text))",
"def TextWidget(*args, **kw):\n kw['value'] = str(kw['value'])\n kw.pop('options', None)\n return TextInput(*args,**kw)",
"def text(text, enter=True, **kwargs):\n G.DEVICE.text(text, enter=enter, **kwargs)\n delay_after_operation()",
"def set_text(self, value):\n self.clear()\n self.send_keys(value)\n return self",
"def write_text(self, text):\n self.ui.plainTextEdit.appendPlainText(text)\n logging.info(text)"
] | [
"0.7562028",
"0.74759895",
"0.74647695",
"0.7401078",
"0.73765755",
"0.7364279",
"0.7364279",
"0.7364279",
"0.7364279",
"0.7364279",
"0.7364279",
"0.7331148",
"0.72840446",
"0.72499853",
"0.72281253",
"0.7209747",
"0.7139128",
"0.7137083",
"0.7108322",
"0.70740104",
"0.7031114",
"0.69858974",
"0.6975402",
"0.6947462",
"0.6932175",
"0.69173527",
"0.6913265",
"0.69105864",
"0.6909489",
"0.68472403"
] | 0.78288025 | 0 |
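The `enter_text` record above appears to follow the bok-choy page-object style, where `q(css=...).fill(text)` clears the matched field and types into it. A rough plain-Selenium equivalent is sketched below; the page URL is hypothetical.

```python
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
driver.get("https://example.com/fixture")  # hypothetical page containing the #fixture form

# Clear the field and type into it, roughly what q(css='#fixture input').fill(text) does.
field = driver.find_element(By.CSS_SELECTOR, "#fixture input")
field.clear()
field.send_keys("hello world")

driver.quit()
```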
Select the car with ``car_value`` in the dropdown list. | def select_car(self, car_value):
self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_car_selected(self, car):\n return self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car)).selected",
"def the_option_with_value(value: str) -> \"SelectByValue\":\n return SelectByValue(value)",
"def select_option(self, selector, value):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support.ui import Select\n\n select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))\n select.select_by_value(value)",
"def select_by_value(self, selector, value):\n el = self.locate_element(selector)\n Select(el).select_by_value(value)",
"def car(self, value):\n self.pair.car = value",
"def dropdown_choice(value):\r\n return 'You have selected \"{}\"'.format(value)",
"def set_select(self, val):\n self.select = val\n return self",
"def select(self, value) -> str:",
"def link_to_choice(dropdown_value):\n return dropdown_value",
"def selected_value(self, selected_value):\n for option in self._options_iter:\n if option.value == selected_value:\n self.selected_option = option\n break\n else:\n raise ValueError(\n \"no option with value '{}' found\".format(selected_value)\n )",
"def selectOptionByValue(self, element_tuple, select_value):\n self.log_info(f\"Browser.selectOptionByValue: Setting {element_tuple} to {select_value}\")\n Select(self.CORE.find_element(*self.format_element(element_tuple))).select_by_value(select_value)\n return",
"def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)",
"def test_select_box():\n with SeleniumDriver(\"firefox\", headless=True) as obj:\n obj.get(TEST_URL)\n\n select_value = \"1\"\n obj.fill({\"select_dropdown\": select_value})\n element = obj.element(\"select_dropdown\", \"name\")\n for ele in element.find_elements_by_tag_name(\"option\"):\n if ele.text == \"One\":\n assert ele.is_selected() is True",
"def perform_as(self, the_actor: Actor) -> None:\n if self.target is None:\n raise UnableToAct(\n \"Target was not provided for SelectByValue. Provide a target using the \"\n \".from_() or .from_the() methods.\"\n )\n\n element = self.target.found_by(the_actor)\n select = SeleniumSelect(element)\n try:\n select.select_by_value(self.value)\n except WebDriverException as e:\n msg = (\n \"Encountered an issue while attempting to select the option with value \"\n f\"{self.value} from {self.target}: {e.__class__.__name__}\"\n )\n raise DeliveryError(msg).with_traceback(e.__traceback__)",
"def set_value_in_resolution_grid_dropdown(self, column_name, column_value):\n self.single_selection_from_kendo_in_grid(self.resolution_grid_div_id, column_name, column_value)\n self.click_element(self.page_header_locator)",
"def prepare_value(self, value):\n if value is None and self.required:\n choices =list(self.choices)\n if len(choices) == 1:\n value = choices[0][0]\n return super(TemplateChoiceField, self).prepare_value(value)",
"def form_SelectChoice(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n return form",
"def vehicle_type(self):\n return 'car'",
"def vehicle_type(self):\n return 'car'",
"def vehicle_type(self):\n return 'car'",
"def sankey_dropdown(df=data):\n options = []\n for b in df.PUBorough.unique():\n options.append({'label': b, 'value': b})\n return dcc.Dropdown(\n id='borough',\n placeholder='Select a pick up borough',\n options=options,\n value='Manhattan',\n multi=False\n )",
"def _select_value_from_a_profile_combo_box(combo_box_element, combo_box_list_option):\n ui_lib.wait_for_element_and_click(combo_box_element)\n ui_lib.wait_for_element_visible(combo_box_list_option)\n\n ui_lib.wait_for_element_and_click(combo_box_list_option)",
"def form_SelectWithOtherChoice(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n return form",
"def set(self, value):\n\n if value is None:\n return\n\n self.combobox.set(value)",
"def set_dropdown_b_value(value):\n value_b = None\n if value=='A': value_b = 'C'\n if value == 'B': value_b = 'E'\n return value_b",
"def select_option(self, option):\n log.info(\"Selecting option '\" + option + \"' on element: \" + self.id)\n select = Select(self.driver.find_element(self.by, self.id))\n select.select_by_visible_text(option)",
"def set_transactions_grid_dropdown_value(self, column_name, column_value):\n self.single_selection_from_kendo_in_grid(self.transactions_grid_div_id, column_name, column_value, self.transactions_data_grid_name)\n self.click_element(self.page_header_locator)",
"def for_type_select_link_vlan_for_name_enter_vlan1043(driver):\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Type\"]').click()\n wait_on_element(driver, 0.5, 5, '//mat-option[@ix-auto=\"option__Type_VLAN\"]')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Type_VLAN\"]').click()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys('vlan1043')",
"def __init__(self, user, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.fields['car'].queryset = Car.objects.filter(\n manufacturer__admin=user)",
"def select_by_value(self, *items):\n if len(items) > 1 and not self.is_multiple:\n raise ValueError(f\"The Select {self!r} does not allow multiple selections\")\n\n for value in items:\n matched = False\n for opt in self.browser.elements(f\".//option[@value={quote(value)}]\", parent=self):\n if not opt.is_selected():\n opt.click()\n\n if not self.is_multiple:\n return\n matched = True\n\n if not matched:\n raise ValueError(f\"Cannot locate option with value: {value!r}\")"
] | [
"0.6586301",
"0.6283673",
"0.624815",
"0.5832125",
"0.5829928",
"0.56058097",
"0.55932057",
"0.5486654",
"0.5476527",
"0.54241836",
"0.54193985",
"0.5406573",
"0.5401394",
"0.52843726",
"0.5263898",
"0.52331984",
"0.51806915",
"0.5153212",
"0.5153212",
"0.5153212",
"0.5129445",
"0.5080715",
"0.50471395",
"0.50445884",
"0.5030479",
"0.50072855",
"0.49755263",
"0.49543473",
"0.4934902",
"0.49242532"
] | 0.8951591 | 0 |
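For the `select_car` row above (and the `is_car_selected` row that follows), here is a plain-Selenium sketch of selecting an option by value and reading the selection back; the option value "fiat" and the page URL are hypothetical.

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select

driver = webdriver.Firefox()
driver.get("https://example.com/fixture")  # hypothetical page containing select[name="cars"]

dropdown = Select(driver.find_element(By.CSS_SELECTOR, 'select[name="cars"]'))
dropdown.select_by_value("fiat")  # same effect as clicking option[value="fiat"]

# Mirror of is_car_selected: check whether the option with that value is now selected.
selected_values = [opt.get_attribute("value") for opt in dropdown.all_selected_options]
assert "fiat" in selected_values

driver.quit()
```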
Return ``True`` if the given ``car`` is selected, ``False`` otherwise. | def is_car_selected(self, car):
return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_selected(self) -> bool:\n return self.proto.is_selected",
"def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False",
"def is_selected(self) -> bool:\r\n return self.selected",
"def is_selected(self):\n return self._selected",
"def is_selected(self):\n return self._selected",
"def is_selected(self):\n return self.container['is_selected']",
"def IsSelected(self, item):\r\n\r\n return item.IsSelected()",
"def carExists(self, carmake):\n data = db.session.query(Car.id).filter_by(make = carmake).first()\n if data is None:\n return False\n else:\n return True",
"def is_selected(self, selector):\n el = self.locate_element(selector)\n return el.is_selected()",
"def requires_selection(self) -> bool:\n return True",
"def is_selected(self):\n return self._element_call(lambda: self.el.is_selected)",
"def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")",
"def select(condition: Union[Callable, int], meta: Counter) -> bool:\n if condition is None:\n return True\n elif isinstance(condition, int):\n return sum(meta.values()) == condition\n elif callable(condition):\n if not isinstance(condition(meta), bool):\n raise TypeError('selection condition expected to return a boolean')\n return condition(meta)\n return False",
"def add_car(self, car):\n car_coords = car.car_coordinates()\n for coord in car_coords:\n if coord not in self.cell_list(): # not in 7*7 board\n return False\n elif self.cell_content(coord) is not None:\n return False\n for old_car in self.__cars:\n if old_car.get_name() == car.get_name():\n return False\n self.__cars.append(car)\n return True",
"def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False",
"def is_initially_selected(self, value):\n return value in self._get_selected_values_set()",
"def isCarAvailable(self, car, start, end):\n rentals = self.filterRentals(None, car)\n for rent in rentals:\n if start > rent.end or end < rent.start:\n continue\n return False\n return True",
"def selected(self):\n\n return self.element().is_selected() if self.exists() else False",
"def is_red_car(self):\n return self.identifier == 18",
"def is_selected(self):\n return NSCSpecIO().read()[\"profile\"] == self.path.stem",
"def is_on(self):\n return self._device.car_state.get(self._key)",
"def select_car(self, car_value):\n self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car_value)).first.click()",
"def selected(self):\n\n return self.infodock.is_instruction_selected(self.addr)",
"def has_car(self, i, lane_index):\n return self._spots[i].has_car(lane_index)",
"def is_selection(cfg):\n if LIBRARIES in list(cfg.keys()):\n return True\n else:\n return False",
"def IsObjectSelected(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n return rhobj.IsSelected(False)",
"def __is_selected_frame(self, frame_index):\n return frame_index == self.selected_index",
"def joy_select(event: EventType, widget: WidgetType) -> bool:\n return event.button == JOY_BUTTON_SELECT",
"def is_selected_option(self, xpos, ypos, i):\n\n if (\n self.x < xpos < self.x + self.width\n and self.y + self.height * (i + 1)\n < ypos\n < self.y + self.height + (i + 1) * self.height\n ):\n return True\n else:\n return False",
"def selectable(cls):\n return True"
] | [
"0.6459931",
"0.64561963",
"0.64533645",
"0.5970567",
"0.5970567",
"0.5941123",
"0.59283936",
"0.5885889",
"0.58118576",
"0.5765825",
"0.57478505",
"0.5745376",
"0.55844533",
"0.5575426",
"0.5548363",
"0.5547362",
"0.5532758",
"0.5526049",
"0.55059886",
"0.5495182",
"0.5488999",
"0.54878986",
"0.5470811",
"0.542837",
"0.5425493",
"0.54199123",
"0.5416708",
"0.539024",
"0.5382875",
"0.5349741"
] | 0.86627126 | 0 |
Toggle the box for the pill with `pill_name` (red or blue). | def toggle_pill(self, pill_name):
self.q(css=u"#fixture input#{}".format(pill_name)).first.click() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_color_picker(self, wid, color_name='square_fill_ink'):\n print(\"TOGGLE COLOR PICKER\", getattr(wid, color_name), self.color_picker)\n is_open = self.color_dropdown and self.color_dropdown.attach_to\n if is_open:\n self.color_dropdown.dismiss()\n if self.color_dropdown:\n self.color_picker.unbind(color=wid.setter(color_name))\n self.color_picker = None\n self.color_dropdown = None\n if not is_open:\n self.color_dropdown = Factory.ColorPickerDD()\n self.change_flow(id_of_flow('suggest'))\n self.color_dropdown.open(wid)\n self.color_picker = self.color_dropdown.ids.col_pic\n self.color_picker.color = getattr(wid, color_name)\n self.color_picker.bind(color=wid.setter(color_name))",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")",
"def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)",
"def toggle_box(self):\n if self.overlay:\n self.overlay.show_box()",
"def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)",
"def led_toggle(self):\n if self.state == GPIO.LOW:\n self.state = GPIO.HIGH\n else:\n self.state = GPIO.LOW\n return self.update_status()",
"def pin_toggle(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n port_state = gpio.HIGH\n if gpio.input(port_num) == gpio.HIGH:\n port_state = gpio.LOW\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, port_state)",
"def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)",
"def ToggleSpinner(event, state, widget):\n if state == True:\n widget.Enable()\n else:\n widget.Disable()\n event.Skip()",
"def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))",
"def toggle(self, color='all'):\n if color in ['all', 'r']:\n self.__send('r', 'toggle')\n\n if color in ['all', 'g']:\n self.__send('g', 'toggle')\n\n if color in ['all', 'b']:\n self.__send('b', 'toggle')",
"def toggle_color(self, index):\n if self.get_state(index):\n self.canvas.itemconfigure(self.cells[index], state=HIDDEN)\n else:\n self.canvas.itemconfigure(self.cells[index], state=NORMAL)",
"def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')",
"def change_to_tasks(self):\n self.ids[\"shp_btn\"].color = 1, 1, 1, 0.5",
"def togglePulseUI():\n if isPulseUIShowing():\n hidePulseUI()\n else:\n showPulseUI()",
"def change_colour(self) -> None:\n if self.ui.radioButtonWhite.isChecked():\n self.pen_colour = QColor(Qt.white)\n elif self.ui.radioButtonColour.isChecked():\n self.pen_colour = self.chosen_colour\n else: # Impossible but better to control\n message.show_error(config.PROG_ERR3)\n return None",
"def toggle_highlighted_spikes(self, checked):\n self.show_all_spikes = checked\n self.set_interval()",
"def toggle(self) -> None:\n ...",
"def toggle(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.toggle())\n console.print(f\"[{ip}] Light {id} Toggle:\\n{json.dumps(resp, indent=2)}\")",
"def dimmer_switch(turtle, color):\n turtle.fillcolor(color + \"4\")",
"def toggle(self, layout, item, feats):\n if self.active.isChecked():\n self.fill_active(layout)\n\n self.default_button = QPushButton('set to defaults', feats)\n layout.addWidget(self.default_button)\n self.default_button.clicked.connect(self.rec_default)\n\n item.setForeground(QColor('black'));\n else:\n self.clear_params(layout, item)",
"def toggle(self) -> None:",
"def toggle(self) -> None:",
"def toggle(self, id):\n e = self.objectmanager.objects.get(id=id)\n e.enabled = not e.enabled\n e.save()\n return render({\"id\": id, \"status\": e.enabled})",
"def btnState(self, box):\n if box.text() == \"Log to File\":\n if box.isChecked():\n self.stdoutBox.setChecked(False)\n # should not edit filename\n self.logfileEdit.setReadOnly(False)\n self.debugStatements = True\n self.switchDebugOutput()\n\n if box.text() == \"Log to Stdout\":\n if box.isChecked():\n self.fileBox.setChecked(False)\n self.logfileEdit.setReadOnly(True)\n self.debugStatements = False\n self.switchDebugOutput()",
"def change_to_shopping(self):\n self.ids[\"tsk_btn\"].color = 1, 1, 1, 0.5",
"def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on",
"def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])",
"def toggle(self):\n try:\n self.pin.toggle()\n except NotImplementedError:\n if self.ison():\n self.off()\n else:\n self.on()",
"def change_stepper_status(self, status):\n\n if status:\n GPIO.output(26, GPIO.HIGH)\n else:\n GPIO.output(26, GPIO.LOW)"
] | [
"0.5491586",
"0.5272434",
"0.51687783",
"0.5140479",
"0.5102771",
"0.50949675",
"0.49874344",
"0.49179575",
"0.48726612",
"0.4838384",
"0.4801307",
"0.47595447",
"0.47443792",
"0.47273305",
"0.4704383",
"0.4703406",
"0.46984416",
"0.4670263",
"0.46450222",
"0.46340665",
"0.46057907",
"0.4601846",
"0.4601846",
"0.45938054",
"0.45886624",
"0.45734787",
"0.45720023",
"0.45546064",
"0.45523235",
"0.45389196"
] | 0.8160374 | 0 |
Click the ``Confirm`` button and confirm the dialog. | def confirm(self):
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)",
"def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()",
"def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return",
"def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False",
"def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True",
"def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed",
"def you_should_be_able_to_confirm_and_close(driver):\n wait_on_element(driver, 0.5, 30, '//h1[contains(.,\"Test Changes\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__TEST CHANGES\"]').click()\n wait_on_element_disappear(driver, 1, 30, '//h6[contains(.,\"Please wait\")]')",
"def __window_confirm(self, text):\n return True",
"def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()",
"def javaScriptConfirm(self, frame, message):\n\n if self._robot._confirm_expected is None:\n raise Exception('You must specified a value to confirm \"%s\"' %\n message)\n confirmation, callback = self._robot._confirm_expected\n logger.debug(\"confirm('%s')\" % message)\n self._robot._confirm_expected = None\n self._robot.popup_messages = message\n\n if callback is not None:\n return callback()\n return confirmation",
"def decision(question):\n return click.confirm(question, show_default=True)",
"def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES",
"def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()",
"def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")",
"def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)",
"def confirm():\n\t\traise NotImplementedError",
"def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit",
"def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)",
"def confirmDialog(*args, annotation: Union[AnyStr, List[AnyStr]]=\"\", backgroundColor:\n List[float, float, float]=None, button: Union[AnyStr, List[AnyStr]]=\"\",\n cancelButton: AnyStr=\"\", defaultButton: AnyStr=\"\", dismissString: AnyStr=\"\",\n icon: AnyStr=\"\", message: AnyStr=\"\", messageAlign: AnyStr=\"\", parent:\n AnyStr=\"\", title: AnyStr=\"\", **kwargs)->AnyStr:\n pass",
"def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )",
"def on_confirm_button(self, negotiation_outcome):\n # Send message.\n self.ros_node.send_message(UserInput.NEGOTIATION,\n negotiation_outcome)\n # Reset label and button.\n self.confirm_label.setText(\"Selection sent.\")\n self.confirm_button.setStyleSheet('QPushButton {color: gray;}')\n self.confirm_button.setEnabled(False)",
"def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)",
"def confirm(text, app, version, modules=None, default_yes=False):\n print(text)\n print(' Directory: %s' % os.path.basename(app.app_dir))\n print(' App ID: %s' % app.app_id)\n print(' Version: %s' % version)\n print(' Modules: %s' % ', '.join(modules or app.modules))\n if default_yes:\n return raw_input('Continue? [Y/n] ') not in ('n', 'N')\n else:\n return raw_input('Continue? [y/N] ') in ('y', 'Y')",
"def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)",
"def _confirm(self) -> None:\n\n self.__series.title = self._getTitleFromView()\n\n if len(self.__series.data) == 0:\n self._showMessage(\"Invalid data. No data selected.\")\n return\n\n self._result = DialogResult.Ok\n self._close()",
"def confirm_as_variable() -> None:\n\n confirmed = click.confirm(\"Are you sure you want to drop the users table?\")\n status = click.style(\"yes\", fg=\"green\") if confirmed else click.style(\"no\", fg=\"red\")\n click.echo(\"Drop table confirmed?: \" + status)",
"def confirm_lnk_click (self, **event_args):\r\n self.raise_event('x-close-alert', value='confirm_email')",
"def confirm(self, message):\n raise NotImplementedError",
"def wait_for_confirm(self, confirm=True, callback=None):\n\n self._robot._confirm_expected = (confirm, callback)\n self._robot.wait_for(lambda: self._robot._confirm_expected is None)\n return self.popup_messages",
"def confirm(self, prompt, default):\n raise NotImplementedError(NotImplementedMessage)"
] | [
"0.7253925",
"0.7246785",
"0.72352403",
"0.7205",
"0.716402",
"0.7038067",
"0.70259655",
"0.700242",
"0.6850886",
"0.6840978",
"0.682001",
"0.6809211",
"0.6784496",
"0.67724127",
"0.6758107",
"0.6704055",
"0.65794057",
"0.6546828",
"0.65444165",
"0.6512818",
"0.6483923",
"0.6467671",
"0.64611477",
"0.64287895",
"0.64189684",
"0.6396472",
"0.63624436",
"0.6360682",
"0.6341616",
"0.6277734"
] | 0.85760707 | 0 |
Click the ``Confirm`` button and cancel the dialog. | def cancel(self):
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()",
"def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return",
"def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)",
"def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")",
"def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)",
"def click_cancel(self):\n self.click_element(self.cancel_button_selector)",
"def cancelButton(self):\n \n self.answer=\"cancel\"\n self.top.destroy()",
"def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit",
"def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()",
"def __onConfirmNo(self):\n self.__confDlg.reject()",
"def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed",
"def __window_confirm(self, text):\n return True",
"def on_cancel(self, *args):\n self.response(Gtk.ResponseType.CANCEL)",
"def click_win_dispute_cancel_button(self):\n self.click_element(self.win_dispute_cancel_button_locator)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass\n self.wait_for_ajax_spinner_load()",
"def decision(question):\n return click.confirm(question, show_default=True)",
"def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()",
"def pressCancel(self):\n self.close()",
"def sgnCancel(self):\n\n self.uiCloseWindow()",
"def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES",
"def you_should_be_able_to_confirm_and_close(driver):\n wait_on_element(driver, 0.5, 30, '//h1[contains(.,\"Test Changes\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__TEST CHANGES\"]').click()\n wait_on_element_disappear(driver, 1, 30, '//h6[contains(.,\"Please wait\")]')",
"def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )",
"def click_statement_entry_cancel_button(self):\n self.click_element(self.statement_entry_cancel_button_locator, False, True)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass",
"def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False",
"def confirm():\n\t\traise NotImplementedError",
"def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True",
"def buttonCancel_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_CANCEL)",
"def javaScriptConfirm(self, frame, message):\n\n if self._robot._confirm_expected is None:\n raise Exception('You must specified a value to confirm \"%s\"' %\n message)\n confirmation, callback = self._robot._confirm_expected\n logger.debug(\"confirm('%s')\" % message)\n self._robot._confirm_expected = None\n self._robot.popup_messages = message\n\n if callback is not None:\n return callback()\n return confirmation",
"def cancelButton(self):\n \n self.answer=-1\n self.top.destroy()",
"def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)",
"def confirmDialog(*args, annotation: Union[AnyStr, List[AnyStr]]=\"\", backgroundColor:\n List[float, float, float]=None, button: Union[AnyStr, List[AnyStr]]=\"\",\n cancelButton: AnyStr=\"\", defaultButton: AnyStr=\"\", dismissString: AnyStr=\"\",\n icon: AnyStr=\"\", message: AnyStr=\"\", messageAlign: AnyStr=\"\", parent:\n AnyStr=\"\", title: AnyStr=\"\", **kwargs)->AnyStr:\n pass"
] | [
"0.80490994",
"0.7658619",
"0.75887465",
"0.718055",
"0.7045372",
"0.6991206",
"0.6899526",
"0.6881284",
"0.6846061",
"0.683484",
"0.681738",
"0.68133414",
"0.6789782",
"0.6765807",
"0.6764634",
"0.6748562",
"0.6728313",
"0.67058414",
"0.6674141",
"0.66671133",
"0.6665826",
"0.6620237",
"0.66107553",
"0.66017795",
"0.65847355",
"0.65701896",
"0.65663785",
"0.65618753",
"0.6554137",
"0.6553285"
] | 0.8330856 | 0 |
Count the number of div.test elements. | def num_divs(self):
return len(self.q(css='div.test').results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_number_of_testcase_elements(self):\n testcases = self.root.findall('testcase')\n self.assertEqual(len(testcases), 4)",
"def test_count(self):\n return len(self.tests) + sum(suite.test_count for suite in self.suites)",
"def test_element_count(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.element_count(2,\"F\"),6)",
"def b_count_test(self):\n \t \n\tsel = self.selenium\n test = \"Test B - Count Articles, Titles, Headings, Etc.\"\n print test\n \n headers = sel.get_css_count(\"css=\" + CSS[1])\n images = sel.get_css_count(\"css=\" + CSS[2])\n authors = sel.get_css_count(\"css=\" + CSS[3])\n\tdots = sel.get_css_count(\"css=\" + CSS[7]) + sel.get_css_count(\"css=\" + CSS[6])\t\n \n if ((images < 8) or (dots < 8) or (authors < 8) or (headers < 8)):\n print \"Missing articles!\"\n L.log(BROWSERS[x], test, \"FAIL, MISSING CONTENT\", \"Images: \" + str(images) + \" Dots: \" + str(dots) + \" Authors: \" + str(authors) + \" Headers: \" + str(headers)) \n \n\telse:\n\t L.log(BROWSERS[x], test, \"PASS, OK\", \"None\")\n\t \n\t######################################################################## ",
"def numberTests(self):\n for i, test in enumerate(self._tests):\n test.number = i + 1\n test.info.cs_test_num = test.number",
"def assertCountSeleniumElements(self, selector, count, root_element=None):\n from selenium.webdriver.common.by import By\n\n root_element = root_element or self.selenium\n self.assertEqual(\n len(root_element.find_elements(By.CSS_SELECTOR, selector)), count\n )",
"def get_number_of_testing(self):\n return self.n_test",
"def test_count(self):\n self._test_count_func(count)",
"def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")",
"def test_abcdee():\n assert part_01.count_for('abcdee', 2) == 1\n assert part_01.count_for('abcdee', 3) == 0",
"def test_count_elements(self):\n from pykml.util import count_elements\n\n test_datafile = path.join(\n path.dirname(__file__),\n 'testfiles',\n 'google_kml_developers_guide/complete_tour_example.kml'\n )\n with open(test_datafile) as f:\n doc = parse(f, schema=Schema('kml22gx.xsd'))\n summary = count_elements(doc)\n\n self.assertTrue('http://www.opengis.net/kml/2.2' in summary)\n self.assertEqual(4,\n summary['http://www.opengis.net/kml/2.2']['Placemark']\n )\n self.assertTrue('http://www.google.com/kml/ext/2.2' in summary)\n self.assertEqual(5,\n summary['http://www.google.com/kml/ext/2.2']['FlyTo']\n )\n self.assertEqual(2,\n summary['http://www.google.com/kml/ext/2.2']['Wait']\n )",
"def testArticleCount(self):\n\n self.articleCount(17)",
"def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1",
"def count():",
"def test_bababc():\n assert part_01.count_for('bababc', 2) == 1\n assert part_01.count_for('bababc', 3) == 1",
"def count(self):\n return len(self._elements)",
"def element_count(self):\n return self._internal.get_element_count()",
"def count_passages(self, step, count):\r\n count = int(count)\r\n assert_equals(len(world.css_find('.annotatable-span')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight-yellow')), count)",
"def count(self):\n return len(self.find())",
"def count(self):\n\t\treturn len(list(self.nodes))",
"def element_count(self):\r\n result = conf.lib.clang_getNumElements(self)\r\n if result < 0:\r\n raise Exception('Type does not have elements.')\r\n\r\n return result",
"def test_own_count(self):\n self._test_count_func(it_count)",
"def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter",
"def count(self):\n return self.__tree.node_count",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0",
"def elements_count(self):\n return self.__elements_count"
] | [
"0.72423947",
"0.6702707",
"0.66566426",
"0.6639454",
"0.6568075",
"0.6378697",
"0.6344244",
"0.6308958",
"0.6286621",
"0.62182474",
"0.61914384",
"0.61866164",
"0.617343",
"0.6128987",
"0.6103108",
"0.6089674",
"0.60860085",
"0.60737354",
"0.6061457",
"0.604047",
"0.6024079",
"0.60166556",
"0.60077524",
"0.59659874",
"0.5963736",
"0.5963736",
"0.5963736",
"0.5963736",
"0.5961491",
"0.594927"
] | 0.83748364 | 0 |
Return list of text for each div.test element. | def div_text_list(self):
return self.q(css='div.test').text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def div_html_list(self):\n return self.q(css='div.test').html",
"def texts(self):\n return [elem.text for elem in self.web_elements]",
"def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()",
"def div_value_list(self):\n return self.q(css='div.test').attrs('value')",
"def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label",
"def get_tests(self):\n return self.tests[:]",
"def get_text(self) -> List[str]:\n return self.__texts",
"def return_textview_elements(self):\n return self.driver.find_elements_by_class_name('android.widget.TextView')",
"def get_all_text(self):\n result = list()\n\n for path in ['./OrgQuestion/OrgQSubject',\n './OrgQuestion/OrgQBody',\n './OrgQuestion/Thread/RelQuestion/RelQSubject',\n './OrgQuestion/Thread/RelQuestion/RelQBody',\n './OrgQuestion/Thread/RelComment/']:\n result.extend([\n element.text if element.text is not None else '' for element in self.merged_root.findall(path)\n ]) # extract text from each element matching the path\n\n return result",
"def get_elements_text(self, elements: Union[List[WebElement], Tuple[By, str]]) -> List[str]:\n elements = self.find_elements(elements)\n return [element.get_attribute('innerText') for element in elements]",
"def process_test(self, data):\n new_utts = []\n for l in data:\n tem = []\n for sent in l:\n tem.append([\"<s>\"] + sent + [\"</s>\"])\n new_utts.append(tem)\n return new_utts # 以输入的测试标题为topic,四句空诗",
"def get_texts(self) -> List[str]:\n return self.texts",
"def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]",
"def get_all_elements_text(self, *locator):\n all_texts = []\n elements = self.__driver.find_elements(*locator)\n for element in elements:\n element_text = element.text\n all_texts.append(element_text)\n return \" \".join(all_texts).strip(\"[]\")",
"def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text",
"def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]",
"def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret",
"def find_text_content_by_class(bs, tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n item_text = strip_tags(str(item))\n result.append(\" \".join(item_text.split()))\n return result",
"def _generateDisplayedText(self, obj, **args ):\n result = self._generateSubstring(obj, **args)\n if result:\n return result\n\n displayedText = self._script.utilities.displayedText(obj)\n if not displayedText:\n return []\n\n return [displayedText]",
"def _get_texts(locator, timeout=default_timeout, type = By.XPATH):\n logger.debug(\"Entered _get_text() method\")\n elts = _find_elements(locator, type = type, timeout = timeout)\n if elts:\n return [elt.text for elt in elts]\n return None",
"def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests",
"def list_feature_tests(self):\n\t\treturn self.test_names",
"def list_texts(self, start: int = None, end: int = None) -> List:\n return [str(i.text) for i in self.data[start:end]]",
"def parse_text_into_separate_test_cases(text): \n for test_case in text.split('\\n\\n'):\n yield test_case\n #for test_case in TEST_CASE_PATTERN.finditer(text):\n #yield test_case.group(0)",
"def test_text_classifier_get_details_all(self):\n pass",
"def get_random_texts(self):\n texts=[]\n nodes=self.get_random_nodes()\n for node in nodes:\n texts+=self.get_corpus_from_node(node)\n return texts",
"def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False",
"def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt",
"def find_text_in_content(self, el):\n try:\n content_divs = [el.get_element_by_id(\"content\")]\n except KeyError:\n # try class\n content_divs = el.find_class(\"content\")\n\n if content_divs == []:\n return None\n \n # iterate over divs and extract text\n all = []\n for div in content_divs:\n r = self.find_text_in_p(div)\n all.append(r)\n return \" \".join(all)",
"def allTextGenerator(node):\n if node.nodeType == node.TEXT_NODE:\n yield node.data\n for child in node.childNodes:\n for text in allTextGenerator(child):\n yield text"
] | [
"0.7000479",
"0.6322341",
"0.61989576",
"0.6173835",
"0.6019634",
"0.59161645",
"0.5873848",
"0.58302426",
"0.57565117",
"0.5753218",
"0.57449764",
"0.57032067",
"0.56822264",
"0.5670111",
"0.5669928",
"0.56666976",
"0.5641307",
"0.5638164",
"0.5599844",
"0.55976945",
"0.55873597",
"0.55713975",
"0.5567262",
"0.55324024",
"0.55287504",
"0.55118924",
"0.5445981",
"0.5444896",
"0.5432942",
"0.54211193"
] | 0.8607301 | 0 |
Return list of values for each div.test element. | def div_value_list(self):
return self.q(css='div.test').attrs('value') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def div_text_list(self):\n return self.q(css='div.test').text",
"def div_html_list(self):\n return self.q(css='div.test').html",
"def get_individual_performance(self):\n\n divs = self.page.find_all(\"span\", {\"class\":\"value\"})\n values = [div.text for div in divs]\n return values",
"def get_tests(self):\n return self.tests[:]",
"def get_elements(self):\n\t\treturn self._testing_cache",
"def get_elements(self):\n\t\treturn self._testing_cache",
"def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test\n # set.\n return list(map(self.classify, test))",
"def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label",
"def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test set.\n return list(map(self.classify, test))",
"def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt",
"def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests",
"def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]",
"def get_test_cases(self):\n\n return self._test_cases",
"def get_results(self):\n result = [round(self.mr / self.test_size, 1), round(self.mrr / self.test_size, 3),\n round(self.hits1 / self.test_size, 3), round(self.hits3 / self.test_size, 3),\n round(self.hits5 / self.test_size, 3), round(self.hits10 / self.test_size, 3)]\n return result",
"def getTestSet(self):\r\n return self.fTestData",
"def getValues(self):\n return [ float(val.text()) for val in self.values ]",
"def Get_Test_Containers():\n\tlis = []\n\t\n\tlis.append(Container(0, 0.01, 0.01, 0.0025, 100, 293, 0))#Normal\n\tlis.append(Container(1, 0.01, 0.02, 0.0025, 75, 293*1.25, 0))#Nearly full and quite hot\n\tlis.append(Container(2, 0.03, 0.01, 0.0025, 10, 293, 0))#Nearly empty\n\tlis.append(Container(3, 0.02, 0.02, 0.0025, 1000, 293, 0))#Overfull\n\tlis.append(Container(0, 0.5*(2**0.5), 1, 0.0025, 10, 293, 3*(10**-9)))#Huge container with pump\n\t\n\treturn lis",
"def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]",
"def getTestResults():",
"def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults",
"def list_feature_tests(self):\n\t\treturn self.test_names",
"def getTestInstance(self):\n self.test_inst_condition = self.format_data.formatted_test_data[self.data_ref][0]\n self.test_inst_phenotype = self.format_data.formatted_test_data[self.data_ref][1]\n if self.data_ref < (self.format_data.numTestphenotypes - 1):\n self.data_ref += 1\n else:\n self.data_ref = 0\n return [self.test_inst_condition, self.test_inst_phenotype]",
"def test_data(self):\n return self._test_data",
"def getTestInstance(self):\r\n return [self.currentTestState, self.currentTestPhenotype]",
"def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None",
"def parse_test_context(self, test_list_output):\n # Sample command line output:\n #\n # MyHobbesTest\n # Arrays\n # Compiler\n # Definitions\n #\n #\n # Sample Result:\n #\n # [\n # ['Arrays', []],\n # ['Compiler', []]\n # ['Definitions', []]\n # ]\n result = [[line.strip(), []] for line in test_list_output.splitlines()]\n return result",
"def get_sub_values(self):\n return list()",
"def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False",
"def get_results(self) -> List[str]:\n output = []\n for row in self.row_layout.children():\n if self.possible_values is None:\n text = row.itemAt(0).widget().text()\n else:\n text = row.itemAt(0).widget().currentText()\n\n if text != \"\":\n output.append(text)\n return output",
"def test_cases(self) -> list[str]:\n cases = []\n for t in self._test_cases:\n if t not in cases:\n cases.append(t)\n return cases"
] | [
"0.74490196",
"0.68997157",
"0.64932024",
"0.61540717",
"0.6152504",
"0.6152504",
"0.5885815",
"0.58685976",
"0.5835421",
"0.58142436",
"0.57759404",
"0.5746933",
"0.56337065",
"0.5621383",
"0.5567706",
"0.5559967",
"0.55586827",
"0.55545664",
"0.5553262",
"0.5503301",
"0.54828674",
"0.5462416",
"0.5438672",
"0.5422852",
"0.5417449",
"0.537168",
"0.5332339",
"0.5319634",
"0.53085124",
"0.53067195"
] | 0.7983814 | 0 |
Return list of html for each div.test element. | def div_html_list(self):
return self.q(css='div.test').html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def div_text_list(self):\n return self.q(css='div.test').text",
"def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)",
"def div_value_list(self):\n return self.q(css='div.test').attrs('value')",
"def get_tests(self):\n return self.tests[:]",
"def test_html(self):\n \n tags = (('<form',1),\n ('<input',6),\n ('type=\"text\"',3),\n ('type=\"email\"',1),\n ('type=\"submit\"',1))\n \n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)",
"def test_gettesttools_html(self):\n pass",
"def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]",
"def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label",
"def test_html(self):\n tags = (\n ('<form', 1),\n # Csrf, first_name, last_name, email, superuser, username and password\n ('<input', 7),\n ('type=\"text\"', 3),\n ('type=\"password\"', 1),\n ('type=\"checkbox\"', 1),\n ('type=\"email\"', 1),\n ('type=\"submit\"', 1),\n )\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.response, text, count)",
"def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests",
"def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt",
"def tests():\n\n\treturn render_template(\"testing.html\")",
"def test_listfield(self):\n self.assertEqual(self.scraped.urls, ['http://google.com', 'http://apple.com'])\n self.assertEqual(self.scraped.in_divs, ['Nested'])",
"def get_collection_elements(self):\n wrapper = self.data.find('div', id='main-content')\n return wrapper.find_all('section', class_='sbkBrv_SingleResult')",
"def num_divs(self):\n return len(self.q(css='div.test').results)",
"def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret",
"def generate_tests(cls):\n cases_pat = join(dirname(__file__), cls.cases_dir, \"*.html\")\n for html_path in glob(cases_pat):\n # Load an options (`*.opts` file, if any).\n # It must be a Python dictionary. It will be passed as\n # kwargs to the markdown function.\n opts = {}\n opts_path = splitext(html_path)[0] + \".opts\"\n if exists(opts_path):\n try:\n opts = eval(open(opts_path, 'r').read())\n except Exception:\n _, ex, _ = sys.exc_info()\n print(\"WARNING: couldn't load `%s' opts file: %s\" \\\n % (opts_path, ex))\n\n test_func = lambda self, t=html_path, o=opts: \\\n self._assertSimpleHtmlPath(t, opts=o)\n\n tags_path = splitext(html_path)[0] + \".tags\"\n if exists(tags_path):\n tags = []\n for line in open(tags_path):\n if '#' in line: # allow comments in .tags files\n line = line[:line.index('#')]\n tags += line.split()\n test_func.tags = tags\n\n name = splitext(basename(html_path))[0]\n name = name.replace(' - ', '_')\n name = name.replace(' ', '_')\n name = re.sub(\"[(),]\", \"\", name)\n test_name = \"test_%s\" % name\n setattr(cls, test_name, test_func)",
"def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])",
"def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])",
"def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))",
"def get_elements(self):\n\t\treturn self._testing_cache",
"def get_elements(self):\n\t\treturn self._testing_cache",
"def tests():\n dates, times = report_date_time()\n return render_template('tests.html',\n unit_date=dates[0], unit_time=times[0],\n integ_date=dates[1], integ_time=times[1])",
"def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]",
"def get_inner_html(self):\n\n pass",
"def run_tests(tests):\n return [test(t) for t in tests]",
"def _GetAllTestRuns(self, ispy):\n template = JINJA.get_template('list_view.html')\n data = {}\n max_keys = 1000\n marker = 'failures/%s' % self.request.get('marker')\n test_runs = list([path.split('/')[1] for path in\n ispy.GetAllPaths('failures/', max_keys=max_keys,\n marker=marker, delimiter='/')])\n base_url = '/?test_run=%s'\n next_url = '/?marker=%s' % test_runs[-1]\n data['next_url'] = next_url\n data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]\n self.response.write(template.render(data))",
"def tests(self):\n return [self]",
"def List(ctx):\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListTestsResult = ctx.element.list_tests()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListTestsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)",
"def test_html_output(self):\n pass"
] | [
"0.7729957",
"0.62460953",
"0.62029403",
"0.601013",
"0.597623",
"0.5949348",
"0.58157754",
"0.5773843",
"0.56723",
"0.5651943",
"0.5582805",
"0.5582524",
"0.5579697",
"0.55494183",
"0.5474166",
"0.54684716",
"0.5461556",
"0.5445097",
"0.5445097",
"0.54369843",
"0.54336387",
"0.54336387",
"0.5420988",
"0.5380152",
"0.5323457",
"0.53081125",
"0.5306144",
"0.5303314",
"0.53003",
"0.5293838"
] | 0.8394743 | 0 |
Return a list of the ids of outer divs with the specified text in a child element. | def ids_of_outer_divs_with_inner_text(self, child_text):
return self.q(css='div.outer').filter(
lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]",
"def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)",
"def findChildren(widget=None, name=\"\", text=\"\"):\n\t\t# TODO: figure out why the native QWidget.findChildren method\n\t\t# does not seem to work from PythonQt\n\t\tif not widget:\n\t\t\twidget = mainWindow()\n\t\tchildren = []\n\t\tparents = [widget]\n\t\twhile parents != []:\n\t\t\tp = parents.pop()\n\t\t\tparents += p.children()\n\t\t\tif name and p.name.find(name) >= 0:\n\t\t\t\tchildren.append(p)\n\t\t\telif text:\n\t\t\t\ttry:\n\t\t\t\t\tp.text\n\t\t\t\t\tif p.text.find(text) >= 0:\n\t\t\t\t\t\tchildren.append(p)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\treturn children",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def subtype_ids(elements, subtype):\n return [i for (i, element) in enumerate(elements)\n if isinstance(element, subtype)]",
"def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]",
"def scan_individual_identifiers(text: str, cpf: bool = True) -> List[str]:\n if cpf:\n regex = re.compile(r\"\\w{3}\\.\\w{3}\\.\\w{3}\\-\\w{2}\")\n else:\n regex = re.compile(r\"\\w{2}\\.\\w{3}\\.\\w{3}/\\w{4}\\-\\w{2}\")\n\n identifiers = re.findall(regex, text)\n return identifiers",
"def find_by_xpath(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_xpath(element)\n return final_elements",
"def children(word, word_dict):\n res = []\n for i in range(len(word)):\n child = word[:i]+word[i+1:]\n if child in word_dict:\n res.append(child)\n return res",
"def findIds(self, query):\t\t\t\t\t\t\t## Multiple Elements\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_id(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"Could not find ID: {}\\n\\n{}\".format(query, e))\n\t\t\treturn -1",
"def _find_with_text(self, selector, text):\n stripped = text.strip()\n elements = self.selenium.find_elements_by_css_selector(selector)\n return [e for e in elements if e.text.strip() == stripped]",
"def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item",
"def get_child_ids(cur, node):\n sql = \"\"\"\n SELECT\n id\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['id'])",
"def div_text_list(self):\n return self.q(css='div.test').text",
"def activeChildWellIds(self):\n lst=[]\n if self.isReplicateGroup():\n for tc in self.activeChildWells():\n lst.extend(tc.activeChildWellIds())\n else:\n if self.wellids is not None and self.wellids[0] is not None:\n wellid=self.wellids[0]\n else:\n wellid=str(self.childWellIndices()[0])\n lst.append(wellid)\n return lst",
"def _findTextWithinElement(self, selector):\n parent_text = self._getStrippedText(selector) # everybody has got text I think. so this shouldn't raise IndexError\n if parent_text: return parent_text\n subelements = selector.css('*')\n texts_found = []\n for element in subelements:\n elem_text = self._getStrippedText(element)\n if \"CDATA\" in elem_text: continue # that's a part of the document not intended to be visible\n texts_found.append(elem_text)\n return ' '.join(texts_found)",
"def GetExpandableIds(children, length_name):\n # I could write a list comprehension here. Would it make the code clearer?\n result = []\n for child_id, child in enumerate(children):\n if child.canExpand(length_name):\n result.append(child_id)\n return result",
"def get_descendant_ids(cur, node):\n sql = \"\"\"\n SELECT\n node\n FROM\n ancestors\n WHERE\n ancestor=%s;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['node'])",
"def getChildElementsByTagName(element: org.w3c.dom.Element, string: str) -> java.util.List:\n ...",
"def find_by_css(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_css(element)\n return final_elements",
"def extract_data_listing(html):\n id_finder = re.compile(r'PR[\\d]+~')\n return html.find_all('div', id=id_finder)",
"def children(parent, data):\n\n kids = []\n for pid in data:\n if data[pid][\"parentId1\"] == parent or data[pid][\"parentId2\"] == parent:\n kids.append(pid)\n\n return kids",
"def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids",
"def get_ids(self) -> List[str]:",
"def find_elements_inside_element(self, parent_element: Union[WebElement, Tuple[By, str]],\n children_element_locator: Tuple[By, str], wait_time=10,\n skip_exception=False) -> List[WebElement]:\n parent_element = self.find_element(parent_element)\n for i in range(wait_time):\n by_type, value = children_element_locator\n if by_type == By.CSS_SELECTOR:\n children = parent_element.find_elements_by_css_selector(value)\n elif by_type == By.XPATH:\n children = parent_element.find_elements_by_xpath(value)\n else:\n children = parent_element.find_elements(children_element_locator)\n if len(children):\n return children\n time.sleep(1)\n else:\n if not skip_exception:\n raise TimeoutException(f'Elements was not found in {wait_time} seconds')\n return []",
"def get_children(search_tag, tag_list):\n list_return = []\n\n for tag in tag_list:\n if str(tag.parent) == str(search_tag):\n list_return.append(tag)\n list_return.extend(get_children(tag, tag_list))\n return list(set(list_return)) # This will return a list of unique elements",
"def getIDs():",
"def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices",
"def get_child_ids(self, job_specifier, project=None, status=None):\n if project is None:\n project = self._project\n id_master = self.get_job_id(project=project, job_specifier=job_specifier)\n if id_master is None:\n return []\n else:\n if status is not None:\n id_lst = self._job_table[\n (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values\n else:\n id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values\n return sorted(id_lst)"
] | [
"0.55548036",
"0.5479126",
"0.54318756",
"0.54250884",
"0.53954",
"0.5378344",
"0.5358235",
"0.532862",
"0.53090286",
"0.52930194",
"0.5247154",
"0.5223819",
"0.5181133",
"0.51650614",
"0.5156875",
"0.51566947",
"0.512827",
"0.5109152",
"0.5081774",
"0.5059653",
"0.5021192",
"0.5018383",
"0.49994254",
"0.49627846",
"0.49329385",
"0.49291104",
"0.49055174",
"0.48959976",
"0.48937145",
"0.48680916"
] | 0.877173 | 0 |
Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay. | def trigger_output(self):
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trigger_output(self):\n self.q(css='div#fixture button').first.click()",
"def trigger_output(self):\n self.q(css='div#fixture button').first.click()",
"def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break",
"def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)",
"def display(self):\n\t\tprint('The button in the window was clicked!')",
"def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1",
"def click_button(self):\n self.widgets.get('button').click()",
"def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()",
"def poll(self):\n\tself.met = self.button.poll()",
"def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")",
"def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()",
"def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)",
"def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))",
"def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)",
"def is_button_output_present(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_presence('div#output', 'Button Output is Available')",
"def click_button(self):\n self.q(css='div#fixture button').first.click()",
"def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)",
"def takeControl(self):\n mainloop()",
"def takeControl(self):\n mainloop()",
"def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))",
"def wait():\n time.sleep(1)",
"def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()",
"def click(self):\r\n pass",
"def wait(self):\n self.mainloop().wait()",
"def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()",
"def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))",
"def WaitForTest(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('waitForTest', payload=payload, response_object=None)",
"def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return",
"def wait_until_transfers_displayed(self):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).wait_until_displayed()",
"def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)"
] | [
"0.6465646",
"0.6465646",
"0.6274144",
"0.62248296",
"0.6096584",
"0.6029719",
"0.5998305",
"0.5964109",
"0.5912692",
"0.58695084",
"0.5805828",
"0.5800075",
"0.5798295",
"0.5771682",
"0.5740042",
"0.5727599",
"0.5700866",
"0.56949717",
"0.56949717",
"0.56891507",
"0.5687063",
"0.5686228",
"0.56642467",
"0.5661366",
"0.5657441",
"0.56561476",
"0.56530184",
"0.5621231",
"0.56172866",
"0.56149995"
] | 0.66481423 | 0 |
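
Aside: the wait-then-act pattern in the record above (poll until an element is present, click, then poll for the output) can be approximated without any particular browser framework. The sketch below is a hypothetical, standard-library-only polling helper; `wait_until` and the commented `page.*` calls are illustrative names, not APIs from the dataset's source library.

    import time

    def wait_until(condition, description, timeout=5.0, interval=0.2):
        # Poll `condition` until it returns truthy or the timeout elapses.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if condition():
                return
            time.sleep(interval)
        raise TimeoutError("Condition not met: " + description)

    # Usage mirroring the record above (element lookups are placeholders):
    # wait_until(lambda: page.has_element("div#ready"), "Click ready")
    # page.click("div#fixture button")
    # wait_until(lambda: page.has_element("div#output"), "Output available")
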
Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception. | def make_broken_promise(self):
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def promise_forced(promise):\n require_type(isa(promise,Promise),\n 'the parameter of promise_forced must be a Promise')\n return promise.exprs.env.find(Symbol('already-run?'))['already-run?']",
"async def test_task_not_awaitable(arg):\n with pytest.raises(OSError):\n async with Scope() as n:\n n.spawn(arg)",
"def _wait_for(self, check_func, desc, result=False, timeout=200):\r\n if result:\r\n return Promise(check_func, desc, timeout=timeout).fulfill()\r\n else:\r\n return EmptyPromise(check_func, desc, timeout=timeout).fulfill()",
"def prove_NO() -> Proof:\n # Optional Task 6.9c",
"async def rejected(error: Exception) -> Any:\n raise error",
"def reject_waiting_call(self) -> None:",
"def maybe_future(x):\n if is_future(x):\n return x\n else:\n fut = Future()\n fut.set_result(x)\n return fut",
"def promise(self):\n return Promise(self)",
"def is_promise_type(self):\n raise exceptions.NotImplementedError()",
"def test_deferred_failure_result(self):\n passthrough = self.make_wrapped_function()\n result = passthrough(fail(ZeroDivisionError()))\n self.assertIsInstance(result, EventualResult)\n self.assertRaises(ZeroDivisionError, result.wait, 0.1)",
"def never() -> ObservableBase:\n from ..operators.observable.never import never\n return never()",
"def _on_future_cancelled(self, promise):\n promise.setCanceled()",
"def test_dies_if_no_job(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n work = Async(\"dir\", kwargs={'something': None})\n work._options.pop('job')\n assert 'job' not in work._options\n\n with _ExecutionContext(work):\n self.assertRaises(Exception, run_job)",
"def test_reactor_thread_disallowed(self):\n self.patch(threadable, \"isInIOThread\", lambda: True)\n d = Deferred()\n dr = EventualResult(d, None)\n self.assertRaises(RuntimeError, dr.wait, 0)",
"def test_original_failure_no_result(self):\n dr = EventualResult(Deferred(), None)\n self.assertIdentical(dr.original_failure(), None)",
"def test_success_result_of_no_result(self):\n d = Deferred()\n err = self.assertRaises(FailTest, success_result_of, d)\n self.assertEqual(\n err.args[0], \"No result available for deferred: %r\" % (d,))",
"def wait_until_not_raised(condition, delay, max_attempts):\n def wrapped_condition():\n try:\n result = condition()\n except:\n return False, None\n\n return True, result\n\n attempt = 0\n while attempt < (max_attempts-1):\n attempt += 1\n success, result = wrapped_condition()\n if success:\n return result\n\n time.sleep(delay)\n\n # last attempt, let the exception raise\n return condition()",
"async def test_nursery_cant_be_reused():\n nursery = Nursery()\n async with nursery:\n pass\n\n with pytest.raises(NurseryClosed):\n async with nursery:\n pass\n\n with pytest.raises(NurseryClosed):\n nursery.start_soon(asyncio.sleep(0))",
"def as_deferred(f: Awaitable[Any]) -> Deferred:\n return Deferred.fromFuture(asyncio.ensure_future(f))",
"def make_future(result=None):\n future = Future()\n future.set_result(result)\n return future",
"def test_whenProposedFailure(self):\n cph = ConnectionPoolHelper()\n cph.setUp(self)\n cph.pauseHolders()\n firstConnection = cph.factory.willConnectTo()\n enqTxn = cph.createTransaction()\n # Execute some SQL on the connection before enqueueing the work-item so\n # that we don't get the initial-statement.\n enqTxn.execSQL(\"some sql\")\n lq = LocalQueuer(cph.createTransaction)\n cph.flushHolders()\n cph.pauseHolders()\n wp = lq.enqueueWork(enqTxn, DummyWorkItem, a=3, b=4)\n firstConnection.executeWillFail(lambda: RuntimeError(\"foo\"))\n d = wp.whenProposed()\n r = cph.resultOf(d)\n self.assertEquals(r, [])\n cph.flushHolders()\n self.assertEquals(len(r), 1)\n self.assertIsInstance(r[0], Failure)",
"async def no_sleep_coro():\n pass",
"def instantiateShootErrback():\n d = defer.Deferred()\n try:\n 1/0\n except:\n d.errback()\n d.addErrback(lambda x: None)",
"async def test_wait_for(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death)\n\n def do_nothing(_: auraxium.event.Event) -> None:\n pass\n\n trigger.action = do_nothing\n\n await self.client.wait_for(trigger, timeout=-1.0)\n\n with self.assertRaises(TimeoutError):\n await self.client.wait_for(trigger, timeout=0.00001)",
"def maybe_future(obj):\n if inspect.isawaitable(obj):\n # already awaitable, use ensure_future\n return asyncio.ensure_future(obj)\n elif isinstance(obj, concurrent.futures.Future):\n return asyncio.wrap_future(obj)\n else:\n # could also check for tornado.concurrent.Future\n # but with tornado >= 5.1 tornado.Future is asyncio.Future\n f = asyncio.Future()\n f.set_result(obj)\n return f",
"def NeverNeededExpectation(self, expectation: BaseExpectation) -> bool:\n return self.did_fully_pass",
"def test_exception_raised_no_dlq():\n\n proc: missive.Processor[missive.RawMessage] = missive.Processor()\n\n @proc.handle_for(always)\n def crash(message, ctx):\n raise RuntimeError(\"bad bytes!\")\n\n with proc.test_client() as test_client:\n blank_message = missive.RawMessage(b\"\")\n with pytest.raises(RuntimeError):\n test_client.send(blank_message)",
"def cannot_resolve ( self, *deps, **kw ):\n return self._do_resolve_weak_greedy ( deps, kw, greedy=True ) is None",
"def expect_non_trial(self, expected_result):\n return self.expect(False, expected_result)",
"def test_raises_on_missing_job(self):\n from furious.async import Async\n from furious.errors import NotInContextError\n from furious.processors import run_job\n\n work = Async(\"nothere\")\n work._options.pop('job')\n assert 'job' not in work._options\n\n self.assertRaises(NotInContextError, run_job)"
] | [
"0.5204269",
"0.5157995",
"0.5118575",
"0.50748104",
"0.5045879",
"0.50228643",
"0.48635247",
"0.48367554",
"0.48231182",
"0.48055142",
"0.48020154",
"0.4710334",
"0.4705783",
"0.47007585",
"0.4683378",
"0.4586624",
"0.45848158",
"0.45210996",
"0.45157987",
"0.449673",
"0.4495356",
"0.4489491",
"0.44793853",
"0.44359413",
"0.44212636",
"0.44171405",
"0.44124514",
"0.4410409",
"0.44030467",
"0.43910775"
] | 0.7172702 | 0 |
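
Aside: the `try_limit`/`try_interval` behaviour exercised by the record above (a promise that can never be fulfilled should raise) can be sketched in plain Python. `BrokenPromise` below is a stand-in exception defined only for this sketch, not the class from the source library.

    import time

    class BrokenPromise(Exception):
        # Raised when a check never succeeds within the allowed attempts.
        pass

    def fulfill(check, description, try_limit=3, try_interval=0.01):
        for _ in range(try_limit):
            if check():
                return True
            time.sleep(try_interval)
        raise BrokenPromise("Promise not fulfilled: " + description)

    # A check that always fails exhausts its attempts and raises:
    # fulfill(lambda: False, "Invalid div appeared")  # -> BrokenPromise
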
Load the page named `page_name` after waiting for `delay_sec`. | def load_next(self, page, delay_sec):
time.sleep(delay_sec)
page.visit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_page_content(self, url, delay):\r\n\r\n # if browser cannot connect to the server, repeat it infinitely.\r\n while True:\r\n try:\r\n # load the page\r\n self.sel_driver.get(url)\r\n\r\n # if the page is loaded, wait for delay seconds until loading would finish.\r\n # this delay is also to avoid being blocked by upwork due to so frequent access\r\n time.sleep(delay)\r\n\r\n # read and parse the page contents\r\n soup = BeautifulSoup(self.sel_driver.page_source, 'html.parser')\r\n\r\n # page loading succeeded. escape from the endless iteration\r\n break\r\n except (WebDriverException, TimeoutException):\r\n # error occurred, do it again\r\n print(\"(ERROR) Driver could't be load: \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n self.relaunch(60)\r\n\r\n # check if the page is ACCESS DENIED\r\n # get the title of the page\r\n elements = soup.find_all(\"title\")\r\n if len(elements) == 0:\r\n return soup # if it has no title, it's may be a normal page\r\n\r\n # if the title is UPWORK ACCESS DENIED, I deal with it\r\n title = elements[0].text\r\n if 'access denied' in title.lower():\r\n print(\"(ERROR) UPWORK DENIED at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n\r\n self.relaunch(200) # relaunch after about 3 minutes\r\n\r\n return self.get_page_content(url, delay)\r\n\r\n # if the title is Upwork - Maintenance, let it wait\r\n if title == 'Upwork - Maintenance':\r\n print(\"(ERROR) UPWORK is under the Maintenance - \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n time.sleep(random.randint(200, 400)) # We don't need relaunch browser.\r\n return self.get_page_content(url, delay)\r\n\r\n return soup",
"def wait(delay=2):\n time.sleep(delay)",
"def _httpGetDelay(self, url, waitSecs, mustGet=False, noFlash=False, useSelCookies=False, referer=None):\n page = None\n if self.useSelenium:\n page = httpGetSelenium(url, waitSecs, mustGet=mustGet)\n time.sleep(5)\n else:\n cookies = None\n if useSelCookies:\n logging.debug('Importing cookies from selenium')\n all_cookies = browser.get_cookies()\n cookies = {}\n for s_cookie in all_cookies:\n cookies[s_cookie['name']] = s_cookie['value']\n\n page = httpGetDelay(url, waitSecs, mustGet=mustGet, blockFlash=noFlash, cookies=cookies, referer=referer)\n return page",
"def get(self, url:str, time=1):\n page = self.driver.get(url)\n sleep(time)\n return page",
"def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)",
"def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))",
"def wait_page_loaded(self, timeout=10):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as ec\n\n old_page = self.selenium.find_element(By.TAG_NAME, \"html\")\n yield\n # Wait for the next page to be loaded\n self.wait_until(ec.staleness_of(old_page), timeout=timeout)\n self.wait_page_ready(timeout=timeout)",
"def fetch(url,delay=(1,3)):\n time.sleep(random.randint(delay[0],delay[1])) # wait random seconds\n try:\n response = requests.get(url)\n except ValueError as e:\n print(str(e))\n return '', BeautifulSoup('', \"html.parser\")\n html = response.text\n soup = BeautifulSoup(html, \"html.parser\")\n return (html,soup)",
"def delay():\r\n time.sleep(2)",
"def wait_for_page_load(self):\n pass",
"def wait_for(old_html, timeout=60):\n\tstart_time = time.time() \n\twhile time.time() < start_time + timeout: \n\t\tif check_new_page_loaded(old_html): \n\t\t\treturn time.time() - start_time \n\t\telse: \n\t\t\ttime.sleep(0.1) \n\traise Exception('WebPage Load Timeout')",
"def nav(self, url):\r\n\r\n self.driver.get(url)\r\n time.sleep(3) # wait for page load\r",
"def delay_response(delay):\n delay = min(float(delay), 10)\n\n time.sleep(delay)\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\")\n )",
"def wait_for_page_loaded(self, time_for_stop=None):\n return self.wait_for(lambda: self._loaded,\n 'Unable to load requested page', time_for_stop=time_for_stop)",
"def wait(delaySec, host='default'):\n global lastCallSec\n delaySec = float(delaySec)\n nowSec = time.time()\n sinceLastCallSec = nowSec - lastCallSec.get(host, nowSec)\n if sinceLastCallSec > 0.1 and sinceLastCallSec < delaySec:\n waitSec = max(0.0, delaySec - sinceLastCallSec)\n logging.info('Waiting for %f seconds before downloading from host %s' % (waitSec, host))\n time.sleep(waitSec)\n lastCallSec[host] = time.time()",
"def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)",
"def change_to_with_delay(_path: str):\n time.sleep(1)",
"def wait_for_page_load(self, timeout=30):\n old_page = self.driver.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.driver, timeout).until(\n staleness_of(old_page)\n )",
"def wait(self, sleep_time):\n time.sleep(sleep_time)",
"def loading(delay):\r\n\r\n for i in range(3):\r\n\r\n print \".\",\r\n sys.stdout.flush()\r\n time.sleep(delay)\r\n\r\n print(\"\")",
"def load_until(self, url, until):\n self.browser.get(url)\n WebDriverWait(self.browser, NEXT_WAIT_TIMEOUT) \\\n .until(EC.element_to_be_clickable((By.XPATH, until)))\n return scrapy.Selector(text=self.browser.page_source)",
"def wait_for_load(driver):\n html = driver.page_source\n time.sleep(0.5)\n while html != driver.page_source:\n html = driver.page_source\n time.sleep(0.5)",
"def load(self, filepath=''):\n sleep(20)\n pass",
"def sleep(sleep_time=0.250):\n time.sleep(sleep_time)",
"def loader(driver, stringId, page, directory):\n\n _tld = '.com/'\n if page == 'Bild':\n _tld = '.de/'\n if \".\" in page:\n _tld = \"\"\n page = page.title()\n #driver.maximize_window()\n try:\n driver.get('https://' + page + _tld)\n print (\"Page is ready!\")\n _cookies = pickle.load(open((directory +stringId+ page+ \"Cookies.pkl\"),\"rb\"))\n for _cookie in _cookies:\n driver.add_cookie(_cookie)\n except TimeoutException:\n print (\"Loading took too much time!\")\n\n try:\n driver.get('https://' + page + _tld)\n print (\"Page is ready!\")\n except TimeoutException:\n print (\"Loading took too much time!\")",
"def sleep_based_on_name_length(self, name):\n length = int(self.length_of_name(name))\n time.sleep(length + 0.001)\n return True",
"def wait_and_refresh_static_page_until_text(self, search_text, wait_time, loc_frame, loc_text):\n self.selenium.driver.refresh()\n self.selenium.select_frame(loc_frame)\n text_portion = self.selenium.get_text(loc_text)\n while text_portion != search_text:\n self.selenium.driver.refresh()\n self.selenium.select_frame(loc_frame)\n text_portion = self.selenium.get_text(loc_text)",
"async def sleep(cls, delay: float) -> None:",
"def download_page(name=None, url=None):\n if name and url:\n timestamp = construct_date()\n filename = name + '_' + timestamp + '.html'\n os.system('wget ' + url + ' -O ' + os.path.join('..', 'html', filename))\n with open(os.path.join('..', 'html', filename), 'rb') as f:\n page = f.read()\n print('done with page {}'.format(url))\n return page",
"def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass"
] | [
"0.6076569",
"0.60745835",
"0.5974487",
"0.5962621",
"0.589742",
"0.58021855",
"0.5790462",
"0.5778986",
"0.5760904",
"0.5742551",
"0.55892485",
"0.555522",
"0.55303234",
"0.550315",
"0.5468579",
"0.54579824",
"0.54159063",
"0.5409639",
"0.53982323",
"0.53721374",
"0.5353541",
"0.5353355",
"0.5324486",
"0.5315846",
"0.53092784",
"0.5276241",
"0.5270041",
"0.5268084",
"0.52659744",
"0.52364"
] | 0.7595584 | 0 |
Give focus to the element with the ``main-content`` ID. | def focus_on_main_content(self):
self.browser.execute_script("$('#main-content').focus()") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetFocus(self):\r\n \r\n self._main_win.SetFocus()",
"def onMnemoToMain(self):\n self.second_main_text.SetFocus()",
"def _focus(self, element):\n actions = ActionChains(self.selenium.driver)\n actions.move_to_element(element).click().perform()\n self.selenium.set_focus_to_element(element)",
"def setFocus(self):\n self._urlEdit.setFocus()",
"def onMainToMnemo(self):\n self.second_mnemo_text.SetFocus()",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def focus_window(i3, container_id):\n i3.command(f'[con_id=\"{container_id}\"] floating enable')\n i3.command(f'[con_id=\"{container_id}\"] focus')",
"def focus_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_down()",
"def setFocus(*args):",
"def setFocus(*args):",
"def setFocus(*args):",
"def setFocus(*args):",
"def focus(self):\n self.image_window.focus_set()",
"def XPSetKeyboardFocus(inWidget):\n pass",
"def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())",
"def _focus(self, event) -> None:\n self.focus = True",
"def focus(self):\n raise NotImplementedError",
"def get_focus(self):\n\n self.activateWindow()\n self.setFocus()",
"def OnSetFocus(self, event):\r\n\r\n self._owner.SetFocus()",
"def set_focus(self, locator: Locator) -> None:\n element = self.ctx.get_element(locator)\n if not hasattr(element.item, \"SetFocus\"):\n raise ActionNotPossible(\n f\"Element found with {locator!r} does not have 'SetFocus' attribute\"\n )\n element.item.SetFocus()",
"def setFocus(*args, **kwargs)->None:\n pass",
"def focus(self, focus_library=True):\n if focus_library:\n self.treeview.grab_focus()\n if not self.grid.is_visible():\n self.toggle()\n else:\n self.vimiv.image.vimiv.image.scrolled_win.grab_focus()\n # Update info for the current mode\n self.vimiv.statusbar.update_info()",
"def __switchFocus(self):\n if self.__focus == 0:\n self.__isoWindow.unfocus()\n self.__logWindow.focus()\n self.__focus = 1\n self.__focusedWindow = self.__logWindow\n else:\n self.__isoWindow.focus()\n self.__logWindow.unfocus()\n self.__focus = 0\n self.__focusedWindow = self.__isoWindow",
"def add_to(self, main_lay):\n main_lay.addWidget(self._tab)\n self.setParent(main_lay.parentWidget())",
"def focus_next(self):\n self.focus_item()",
"def defaultFrame(self):\n\t\tself.driver.switch_to.default_content()",
"def element_focused(step, id):\r\n\r\n elem = world.browser.find_element_by_xpath(str('id(\"{id}\")'.format(id=id)))\r\n focused = world.browser.switch_to_active_element()\r\n\r\n assert_true(step, elem == focused)"
] | [
"0.68469656",
"0.64330465",
"0.62243736",
"0.5957323",
"0.5938014",
"0.59248924",
"0.59248924",
"0.59248924",
"0.59248924",
"0.58128226",
"0.56373054",
"0.5598049",
"0.5598049",
"0.5598049",
"0.5598049",
"0.55663764",
"0.555354",
"0.55340254",
"0.5431309",
"0.5403516",
"0.53790075",
"0.53459644",
"0.53059095",
"0.5301749",
"0.52448714",
"0.5229822",
"0.5139656",
"0.5104946",
"0.51001406",
"0.5073151"
] | 0.8871579 | 0 |
Reload the page, wait for JS, then trigger the output. | def reload_and_trigger_output(self):
self.browser.refresh()
self.wait_for_js() # pylint: disable=no-member
self.q(css='div#fixture button').first.click() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def refresh_page(self):\n self.m_driver.refresh()\n time.sleep(30)",
"def refresh(self):\n self.log_info(f\"Browser.refresh: Refreshing the page\")\n self.CORE.refresh()\n return",
"def refresh_page(self, callback=None):\n if callback is not None:\n callback()\n return True",
"def Reload(self):\n self._inspector_backend.Navigate(self.url, None, 10)",
"def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)",
"def refresh(self):\n\n self.driver.implicitly_wait(5)\n self.driver.refresh()",
"def trigger_reloading(self) -> None:\n self.trigger_signal(\"reloading\")",
"def wait_for_page_load(self):\n pass",
"def execute_js(self, script):\n self.driver.execute_script(script)",
"def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()",
"def onReload(self, event):\n\n\t\tself.wv.Reload()",
"async def async_trigger_reloading(self) -> None:\n await self.async_trigger_signal(\"reloading\")",
"def refresh_page(self, check=True):\n url = self.app.page_base.url\n self.app.page_base.refresh()\n\n if check:\n assert_that(self.app.page_base.url, equal_to(url))",
"def refresh(self):\n\t\tself.driver.refresh()",
"def trigger_reload(server):\n log.info(\"Triggering /reload on %s\", server)\n screenCmd(server, 'reload')",
"def trigger_output(self):\n self.q(css='div#fixture button').first.click()",
"def trigger_output(self):\n self.q(css='div#fixture button').first.click()",
"def refresh(self, id):\n exports.execute_export.delay(id)\n return render({\"id\": id})",
"def step_impl(context):\r\n context.browser.get('https://opensource-demo.orangehrmlive.com/')\r\n time.sleep(10)",
"def _on_dom_ready(self):\n logger.debug('_on_dom_ready')\n self._status = self.WindowStatus.SHOWN\n\n # Subscribe current browser for javascript value returned\n RuntimeManager.get_instance().JavascriptReturned.subscribe(self.browser)\n\n # Get callback on engine ready\n RuntimeManager.get_instance().JavascriptReturned\\\n .on_value('_event__engine_ready', lambda *_: self._on_engine_ready())\n\n # Inject puithonJS the engine\n self.browser.ExecuteJavascript(open(self.JS_ENGINE_FILE, 'r').read())",
"def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))",
"def postloop(self):\n print 'Bye!'",
"def view(self):\n\t\tself.done(1)",
"def wait_for_load(driver):\n html = driver.page_source\n time.sleep(0.5)\n while html != driver.page_source:\n html = driver.page_source\n time.sleep(0.5)",
"def html_redirect(self):\n soup = BeautifulSoup(self.contents, \"lxml\")\n meta = soup.find('meta', **{'http-equiv': 'refresh'})\n assert meta is not None, 'No <meta http-equiv=\"refresh\" /> tag found.'\n url = meta.get('content').partition(';url=')[2]\n self.open(url)",
"def ReturnReload():\r\n return _hiew.ReturnReload()",
"def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()",
"def reload(self):\n puts('Reloading application...')\n local('touch ../reload.txt')",
"def refreshPageAndGoToWatchlist(self):\n try:\n self.sleep_approx(1)\n self.user_requests_made += 1\n self.driver.refresh()\n\n wait_for_shield_invisibility(self.driver)\n\n WebDriverWait(self.driver, 30).until(\n EC.visibility_of_element_located(\n (By.CLASS_NAME, 'icon-transfer'))\n )\n\n wait_for_shield_invisibility(self.driver)\n\n self.sleep_approx(3)\n\n log_event(self.queue, \"Going back to watchlist\")\n self.go_to_watchlist()\n except:\n log_event(self.queue, \"Exception retrying refreshPageGoToWatchlist\")\n # TODO could be dangerous when stuck in infinite loop\n self.refreshPageAndGoToWatchlist()",
"def test_reload_parameter_starts_populateed(self, simulate_reload_one_day_main,\n caplog, qtbot):\n QtTest.QTest.qWaitForWindowShown(simulate_reload_one_day_main.form)\n qtbot.wait(3000)"
] | [
"0.61143285",
"0.6020823",
"0.5730542",
"0.5707968",
"0.5650479",
"0.5597226",
"0.5587973",
"0.5548144",
"0.55204254",
"0.55047655",
"0.54316336",
"0.53578436",
"0.5348964",
"0.5344503",
"0.52896523",
"0.5284119",
"0.5284119",
"0.5281825",
"0.52662414",
"0.5262159",
"0.52440184",
"0.52349263",
"0.5234214",
"0.52057993",
"0.5190605",
"0.51883143",
"0.5174814",
"0.51740783",
"0.5151051",
"0.51439023"
] | 0.77481604 | 0 |
Click the button and wait until the 'playing' class has disappeared from the DOM. | def is_class_absent(self):
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def poll(self):\n\tself.met = self.button.poll()",
"def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)",
"def _check_play_button(self, mouse_pos):\n # checking if button is clicked while there's no game active\n # else the button would be clickable even after turning invisible\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # resets games speed\n self.settings.initialize_dynamic_settings()\n\n # reset stats / level / ships and changing game state\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n\n # getting rid of alien ships and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # creating new fleet and centering players ship\n self._create_fleet()\n self.ship.center_ship()\n\n # making mouse pointer invisible\n pygame.mouse.set_visible(False)",
"def _check_play_button(self, mouse_pos): \n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # Reset game settings\n self.settings.initialize_dynamic_settings()\n\n # Reset game stats\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n\n # Remove any remaining aliends and bullets\n self.aliens.empty() \n self.bullets.empty()\n\n # Create new fleet and center the ship\n self._create_fleet()\n self.ship.center_ship() \n\n # Hide the mouse cursor when inside of game window\n pygame.mouse.set_visible(False)",
"def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break",
"def _check_play_button(self, mouse_pos):\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # Reset the game settings.\n self.settings.initialize_dynamic_settings()\n # Reset the game statistics.\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n # Get rid of any remaining stars and bullets.\n self.stars.empty()\n self.bullets.empty()\n # Create a new galaxy and center the ship.\n self._create_galaxy()\n self.ship.center_ship()\n pygame.mouse.set_visible(False)",
"def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n ai_settings.initialize_dynamic_settings()\n #hiding mouse cursor\n start_game(ai_settings, screen, stats, ship, aliens, bullets)\n\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()",
"def play_video(self):\n\n self.wait.until(self.visible((By.ID, \"video-title\")))\n self.driver.find_element_by_xpath(\"//button[@class='ytp-large-play-button ytp-button']\").click()",
"def click_button(self):\n self.q(css='div#fixture button').first.click()",
"def _check_play_button(self, mouse_pos):\n\t\tbutton_clicked = self.play_button.rect.collidepoint(mouse_pos)\n\t\tif button_clicked and not self.stats.game_active:\n\t\t\t# Reset the game settings.\n\t\t\tself.settings.initialize_dynamic_settings()\n\t\t\tself.stats.reset_stats()\n\t\t\tself.stats.game_active = True\n\t\t\tself.sb.prep_score()\n\t\t\tself.sb.prep_pigeons()\n\t\t\t# Hide the cursor.\n\t\t\tpygame.mouse.set_visible(False)\n\n\t\t# Get rid of any remaining autos and droppings.\n\t\tself.autos.empty()\n\t\tself.droppings.empty()\n\n\t\t# Create a new fleet and center the pigeon\n\t\tself._create_fleet()\n\t\tself.pigeon.center_pigeon()",
"def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])",
"def wait_for_start(self):\n while True:\n ev = self.scene.waitfor('click')\n game_type = self.on_click(ev)\n if game_type:\n return game_type",
"def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")",
"def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()",
"def wait_to_play(self):\n\n\t\tself.player_model.current_player = self.player_model.rival_player\n\t\tself.player_frame.prepare_to_wait_turn(self.player_model.rival_player.name, self.player_model.available_cells)",
"def wait_for_video_class(self):\r\n self.wait_for_ajax()\r\n\r\n video_selector = '{0}'.format(CSS_CLASS_NAMES['video_container'])\r\n self._wait_for_element(video_selector, 'Video is initialized')",
"def play(self, event):\n if self.num_clicks == 1:\n self.clickable(event)\n if len(self.canvas.find_withtag(\"selected\")) == 2:\n self.num_of_tries += 1\n print(f'Number of tries {self.num_of_tries}')\n if self.num_of_tries > 13:\n self.score -= 10\n self.score_label.config(text=f'Score: {self.score}')\n self.check_match(self.click_tiles)\n self.canvas.after(self.delay, self.flip_back)\n self.click_tiles.clear()\n self.num_clicks = 0\n else:\n self.clickable(event)",
"def click_music(self, button):\n if cf.music_on is True:\n cf.music_on = False\n elif cf.music_on is False:\n cf.music_on = True\n # Remove old button.\n self.remove_button()\n # Re-add the button.\n self.add_button()",
"def pause():\n click.pause()",
"def _check_play_button(self, mouse_pos):\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n\n #Hide mouse cursor\n pygame.mouse.set_visible(False)\n\n # Get rid of any leftover aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n #Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n self.scoreboard.prep_score()\n self.scoreboard.prep_high_score()\n self.scoreboard.prep_ships()",
"def update(self):\n\n self.play_button.update()",
"def click_button(self):\n self.q(css='div#fixture input').first.click()",
"def check_play_button(\n ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y\n):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n\n # The play button needs to deactivate each time the game is active.\n if button_clicked and not stats.game_active:\n # Reset game settings.\n ai_settings.initialize_dynamic_settings()\n # Hide the cursor.\n pygame.mouse.set_visible(False)\n # Reset the game stats.\n stats.reset_stats()\n stats.game_active = True\n\n # Reset the scoreboard images.\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n\n # Empty aliens and bullets.\n aliens.empty()\n bullets.empty()\n\n # Create new fleet.\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()",
"def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')",
"def _check_play_button(self, mouse_pos):\n\n # If the player clicks the play button AND the game isn't going\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n\n # reset the game stats and dynamic settings\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n self.sb.prep_score()\n\n # get rid of any remaining aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n\n # recenter player\n self.ship.center_ship()\n\n # hide the mouse cursor\n pygame.mouse.set_visible(False)",
"def on_play_btn(self):\n if self.state == self.READY:\n self.send_rtsp_request(self.PLAY)",
"def press_button_play(self):\n \n global is_playing\n global my_thread\n is_playing = False\n if not is_playing:\n is_playing = True\n my_thread = threading.Thread(target=self.play_audio)\n my_thread.start()",
"def button_click(self, btn, mbtn):\n self.last_action_ts = pygame.time.get_ticks() # update last action timestamp (idle shutdown countdown restarts)\n self.show_time = pygame.time.get_ticks() # refresh show time timestamp, so countdown restarts\n\n status = self.player.get_status()\n\n # which button was pressed?\n if btn is self.btn_play:\n logger.debug(\"button_click: btn_play \")\n player.pause() # toggle play/pause\n elif btn is self.btn_prev:\n logger.debug(\"button_click: btn_prev \")\n try:\n if int(status['song']) > 0: # only accept 'prev' button push if this is not the first song\n player.prev()\n except Exception as e:\n logger.error(e, exc_info=True) # log any exceptions\n elif btn is self.btn_next:\n logger.debug(\"button_click: btn_next \")\n try:\n if int(status['song']) < (int(status['playlistlength']) - 1):\n player.next()\n except Exception as e:\n logger.error(e, exc_info=True) # log any exceptions\n elif btn is self.background:\n logger.debug(\"button_click: background \")\n if status['state'] == 'play' or status['state']== 'pause':\n self.show_buttons()\n else:\n logger.debug(\"button_click: <unknown>\")",
"def check_play_button(ai_settings,screen,stats,play_button,ship,aliens,bullets,\n\tmouse_x,mouse_y,sb):\n\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x,mouse_y)\n\n\tif button_clicked and not stats.game_active:\n\t\t# Reinicia as configurações no jogo\n\t\tai_settings.initialize_dynamic_settings()\n\n\n\t\t# Oculta cursor do mouse quando o mouse estiver sobre a janela\n\t\tpygame.mouse.set_visible(False)\n\t\t\n\n\t\t# Reinicia o jogo\n\t\tstats.reset_stats()\n\t\tstats.game_active = True\n\n\t\t# Reinicia as imagems do painel de pontuação\n\t\tsb.prep_score()\n\t\tsb.prep_high_score()\n\t\tsb.prep_level()\n\t\tsb.prep_ship()\n\n\t\t# Esvazia a lista de alienígenas e de projéteis\n\t\taliens.empty()\n\t\tbullets.empty()\n\n\t\t# Cria uma ova frota e centraliza a espaçonave\n\t\tcreate_fleet(ai_settings,screen,ship,aliens)\n\t\tship.center_ship()",
"def check_play_button(si_settings,screen,stats,sb,play_button,ship,aliens,bullets,mouse_x,mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x,mouse_y)\n if button_clicked and not stats.game_active:\n #Hides mouse\n pygame.mouse.set_visible(False)\n #reset stats\n si_settings.initalize_dynamic_settings()\n stats.reset_stats()\n stats.game_active = True\n #reset Scoreboard\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()\n #Empty aliens and bullets\n aliens.empty()\n bullets.empty()\n #creates new fleet and centers ship\n create_fleet(si_settings,screen,ship,aliens)\n ship.center_ship()"
] | [
"0.6262264",
"0.6247249",
"0.6172894",
"0.6123498",
"0.6084997",
"0.6075153",
"0.60310066",
"0.6026245",
"0.60012144",
"0.5995387",
"0.5967095",
"0.59577924",
"0.5948899",
"0.59429574",
"0.592757",
"0.5881039",
"0.5876616",
"0.5875502",
"0.58591527",
"0.5838041",
"0.58163005",
"0.5808849",
"0.5791393",
"0.57863444",
"0.5770776",
"0.57666916",
"0.575722",
"0.5756134",
"0.57365084",
"0.57210803"
] | 0.6424565 | 0 |
Click the button and wait until the spinner has disappeared. | def is_spinner_invisible(self):
self.q(css='#spinner').first.click()
self.wait_for_element_invisibility('#anim', 'Button Output is Visible') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_spinner_disappear(self):\n self.wait_for_element_disappear(loadings_catalog.LOADING_SPINNER)\n self.wait_for_element_disappear(loadings_catalog.LOADING)",
"def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)",
"def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()",
"def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()",
"def poll(self):\n\tself.met = self.button.poll()",
"def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()",
"def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)",
"def click_button(self):\n self.widgets.get('button').click()",
"def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return",
"def click_received_charges_cancel_changes_button(self):\n self.click_element(self.received_charges_cancel_changes_button_locator)\n self.wait_for_ajax_spinner_load()",
"def is_class_absent(self):\n self.q(css='#spinner').first.click()\n self.wait_for_element_absence('.playing', 'Animation Stopped')",
"def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()",
"def click_button(self):\n self.q(css='div#fixture button').first.click()",
"def click_win_dispute_cancel_button(self):\n self.click_element(self.win_dispute_cancel_button_locator)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass\n self.wait_for_ajax_spinner_load()",
"def handler(signum, frame, spinner):\n spinner.red.fail(\"✘\")\n spinner.stop()",
"def wait_for_load(browser):\n loader = browser.find_element_by_class_name('ui-loader')\n while loader.is_displayed():\n time.sleep(0.1)",
"def hide(self) -> None:\n self.spinner.stop()\n self.hidden = True",
"def click_button(self):\n self.q(css='div#fixture input').first.click()",
"def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()",
"def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break",
"def wait(self):\n time.sleep(0.010)",
"def wait_until_transfers_displayed(self):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).wait_until_displayed()",
"def spinner(self):\n return None",
"def wait():\n time.sleep(1)",
"def select_settings_unload_btn(self):\n select_settings_unload_btn_sitem = self.locator_finder_by_id(self.select_settings_unload_btn_id)\n select_settings_unload_btn_sitem.click()\n time.sleep(2)\n self.wait_for_ajax()",
"def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)",
"def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()",
"def wait(self):\n self.mainloop().wait()",
"def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)",
"def select_upload_btn(self):\n select_upload_btn_sitem = self.locator_finder_by_xpath(self.select_upload_btn_id)\n select_upload_btn_sitem.click()\n time.sleep(3)"
] | [
"0.7126271",
"0.6884283",
"0.66924036",
"0.65854484",
"0.654948",
"0.64384645",
"0.63156706",
"0.62573993",
"0.62523603",
"0.6224479",
"0.61823416",
"0.61524755",
"0.6134778",
"0.61076003",
"0.6089265",
"0.6077746",
"0.6075494",
"0.606039",
"0.6058926",
"0.6032544",
"0.6031637",
"0.6020459",
"0.60119134",
"0.6008281",
"0.59909135",
"0.5972173",
"0.5954425",
"0.59371537",
"0.5932139",
"0.59140754"
] | 0.69422036 | 1 |
Check if value is prime | def is_prime(value: int) -> bool:
if value == 1:
return False
if value <= 0:
raise ValueError("Value must be greater than zero")
for i in range(2, int(value**(1/2)) + 1):
if value % i == 0:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_prime(value):\n\n if value < 2: raise ValueError\n\n for i in range(2, value):\n if value % i == 0:\n return False\n\n return True",
"def is_prime(self):\n pass",
"def is_prime(value):\n if value < 4:\n return True\n \n lower_bound = 2\n upper_bound = value-1\n \n prime = True\n test_value = lower_bound\n \n while test_value < upper_bound:\n #print \"testing divisibility of %d for %d\" % (value, test_value)\n if value % test_value == 0:\n prime = False\n test_value += 1\n return prime",
"def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False",
"def is_prime(a):\n return all(a % i for i in xrange(2, a))",
"def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True",
"def isprime(n):\r\n\treturn is_prime(n)",
"def isprime(x):\n if x <= 1: return False \n if x % 2 == 0: return x == 2\n for k in range(3, int(sqrt(x))+1, 2): \n if x % k == 0: return False\n return True",
"def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True",
"def isprime(number):\n\n if number == 1:\n return False\n for i in range(2, int(number**0.5) + 1):\n if number % i == 0:\n return False\n return True",
"def isprime(number: int) -> bool:\n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n return True",
"def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True",
"def isprime(n):\n\treturn is_prime(n)",
"def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))",
"def test_prime(n):\n if SIEVE[n]:\n return True\n else:\n return False",
"def is_prime(n):\n return mr_prime(n)",
"def is_prime(x):\n if x < 2:\n return False\n for i in range(2, x // 2 + 1):\n if x % i == 0:\n return False\n return True",
"def is_prime(num):\n if num < 2:\n return False\n\n for i in range(2, num):\n if num % i == 0:\n return True",
"def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True",
"def is_prime(num):\n\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True",
"def isPrime(x):\n for i in range(2,int(x**0.5)+1):\n if (x % i == 0):\n return False\n\n return True",
"def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True",
"def is_prime(x: int) -> bool:\n if x < 2:\n return False\n if x != 2 and x % 2 == 0:\n return False\n for i in range(3, x // 2 + 1):\n if x % i == 0:\n return False\n return True",
"def is_prime(num: int) -> bool:\n return factorial(num - 1) % num != 0",
"def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True",
"def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n return True",
"def primenumber(x):\n if x >= 2:\n for y in range(2,x):\n if not (x % y):\n return False\n else:\n return False\n return True",
"def is_prime(number):\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True",
"def is_prime(number):\n\tif number < 0:\n\t\treturn False\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True",
"def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res"
] | [
"0.8251294",
"0.7981401",
"0.794535",
"0.78773165",
"0.7870162",
"0.7779645",
"0.77755505",
"0.77661145",
"0.77396506",
"0.77324396",
"0.77158386",
"0.77105474",
"0.7694114",
"0.7672864",
"0.7660135",
"0.7650814",
"0.76269066",
"0.76167756",
"0.7599302",
"0.75861603",
"0.7584424",
"0.75835097",
"0.7577305",
"0.7570747",
"0.7545516",
"0.7541722",
"0.7530172",
"0.7525161",
"0.75243926",
"0.7520725"
] | 0.81013525 | 1 |
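
Aside: a compact, behaviourally equivalent form of the `is_prime` in the record above (trial division up to the square root), plus a tiny self-check. This is an illustrative sketch, not the record's exact code.

    def is_prime(value: int) -> bool:
        if value <= 0:
            raise ValueError("Value must be greater than zero")
        if value == 1:
            return False
        # No divisor up to sqrt(value) means the value is prime.
        return all(value % i for i in range(2, int(value ** 0.5) + 1))

    assert [n for n in range(1, 20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
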
Get all prime factors of the given value | def factors(value: int) -> list:
prime_factors: list = []
for i in range(2, value + 1):
        if (i > 2 and i % 2 == 0) or not is_prime(i):
continue
while value % i == 0:
value = int(value / i)
prime_factors.append(i)
if value == 1:
break
return prime_factors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prime_factors_of(value):\n\n # Okay, so we need to \"solve\" two problems here:\n # is a given number a factor of `value`?\n # and\n # is a given number PRIME?\n\n # I think the simplest non-stupid approach is to generate all \n # FACTORS OF VALUE, and then check to see which are prime!\n # actually, a cute approach would be to start from the top down\n # and just return the first one. we'll see if i need that optimization.\n # (don't optimize prematurely!)\n\n # WELP. I tried to generate all primes up to value//2! what a mistake.\n # or was it? maybe it was just a bad implementation of prime-finding?\n\n factors = []\n\n for i in range(2, value//2):\n if value % i == 0:\n factors.append(i)\n\n prime_factors = []\n \n for i in factors:\n if is_prime(i):\n prime_factors.append(i)\n\n return prime_factors",
"def generate_prime_factors(value):\n if not isinstance(value, int):\n raise ValueError()\n\n primes = []\n factor = 2\n while factor <= value:\n while value % factor == 0:\n primes.append(factor)\n value /= factor\n\n factor += 1\n\n return primes",
"def factor_primes(x, iter):\n factors = []\n for factor in prime:\n while x % factor == 0:\n x = x / factor\n factors.append(factor)\n if x == 1:\n break\n return factors",
"def primefactors(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def prime_factors(number):\n factors = []\n\n if number == 0 : return factors\n\n # first round factors by two\n while number % 2 == 0:\n factors.append(2)\n number /= 2\n\n # other rounds goes by odd numbers only (no other even is prime)\n divisor = 3\n while divisor <= number:\n while number % divisor == 0:\n factors.append(divisor)\n number /= divisor\n divisor += 2\n\n return factors",
"def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors",
"def get_prime_factors(self, number):\n for prime in self.get_primes():\n while number % prime == 0:\n yield prime\n number /= prime\n \n if number == 1:\n break",
"def prime_factors(number):\n all_factors = factors(number)\n return list(filter(lambda x: is_prime(x), all_factors))",
"def getallprimefactors(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n print(n)\n n /= d\n d += 1\n return factors",
"def factor(number):\n\tdividing_primes = sieve(number/2 + 1)\n\tfactors = []\n\t\n\twhile number != 1:\t\n\t\tif not dividing_primes:\n\t\t\treturn [number]\n\n\t\tnext_divisor = min(dividing_primes)\n\n\t\tif not number % next_divisor:\n\t\t\tfactors.append(next_divisor)\n\t\t\tnumber /= next_divisor\n\t\telse:\n\t\t\tdividing_primes.remove(next_divisor)\n\n\treturn factors",
"def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)",
"def primeFactors(number):\n factorlist=[]\n loop=2\n while loop<=number:\n if number%loop==0:\n number/=loop\n factorlist.append(loop)\n else: \n loop+=1\n return factorlist",
"def primefactors(n):\n seq = []\n val = 2\n while val <= n:\n if VERBOSE: print \"val: %s n: %s\" % (val, n)\n if n % val == 0:\n # Found a factor, shrink n by that factor \n # ie. n = 60, val = 2\n # Next pass n = 30, val = 2\n seq.append(val)\n n /= val\n else:\n # Not (or no longer) a factor\n val += 1\n\n return seq",
"def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors",
"def prime_factors(n) -> []:\n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors",
"def get_prime_factors(number):\n if number == 1:\n return []\n\n # We have to begin with 2 instead of 1 or 0\n # to avoid the calls infinite or the division by 0\n for i in range(2, number):\n # Get remainder and quotient\n rd, qt = divmod(number, i)\n if not qt: # if equal to zero\n return [i] + get_prime_factors(rd)\n\n return [number]",
"def factors(n):\n _factors = []\n p = 1\n\n # Loop until half of n\n while p <= n // 2:\n p += 1\n if div_by(p, _factors):\n continue\n if not n % p:\n _factors.append(p)\n\n # Number given is a prime\n if not _factors:\n _factors.append(n)\n\n return _factors",
"def factor(cls, number):\n factors = []\n for prime in cls():\n if prime > number:\n break\n # print 'Checking to see if %d is a factor of %d' % (prime, number)\n # reduce the total iterations\n if prime > math.sqrt(number):\n factors.append(number)\n break\n while not number % prime:\n number /= prime\n factors.append(prime)\n return factors",
"def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1",
"def prime_factors(n):\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def get_factors(num):\n factors = []\n\n # Extend range by 1 to include num\n for i in range(1, num+1):\n if num % i == 0:\n factors.append(i)\n return factors",
"def factors(n, cache=None):\n if cache is None or max(cache) < n:\n potential_factors = primes(n + 1)\n else:\n potential_factors = cache\n prime_factors = []\n i = 0\n while n != 1:\n while n % potential_factors[i] == 0:\n n /= potential_factors[i]\n prime_factors.append(potential_factors[i])\n i += 1\n return prime_factors",
"def prime_factors(n):\r\n factors = defaultdict(int)\r\n d = 2\r\n while n > 1:\r\n while n % d == 0:\r\n factors[d]+=1\r\n n /= d\r\n d = d + 1\r\n if d*d > n:\r\n if n > 1: factors[n]+=1\r\n break\r\n return factors",
"def prime_factors(num):\n prime_factors = []\n for i in range(2, num + 1):\n if (num % i) == 0 and is_prime(i) == True:\n prime_factors.append(i)\n return prime_factors",
"def get_factors(val):\n N = np.sqrt(val)\n N = np.floor(N)\n M = val/N\n\n while (val % N != 0):\n N = N-1\n M = val/N\n\n return int(M), int(N)",
"def prime_factors(n):\n\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def factorize(num):\n factors = []\n while num not in primes_list:\n for prime in primes_list:\n if num % prime == 0:\n factors.append(prime)\n num /= prime\n break\n factors.append(num)\n factors = sorted(factors)\n return factors",
"def factors(number):\n\n if not (isinstance(number, int)):\n raise TypeError(\n \"Incorrect number type provided. Only integers are accepted.\")\n\n factors = []\n for i in range(1, number + 1):\n if number % i == 0:\n factors.append(i)\n return factors",
"def prime_factorization(x, sieve=None):\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)",
"def get_unique_factors(num):\n a = num\n m = int(num ** 0.5) if num > 100 else num\n factors = []\n primes = sieve(m)\n # Divide the number by compatible primes until it is 1\n # (or we run out of primes...)\n for p in primes:\n if a % p == 0:\n a = a / p\n factors.append(p)\n if a == 1:\n break\n return factors"
] | [
"0.8195275",
"0.8172038",
"0.78768766",
"0.78615814",
"0.78161514",
"0.7815678",
"0.78074765",
"0.7803163",
"0.7796986",
"0.7784412",
"0.77701026",
"0.7747795",
"0.77219456",
"0.7687131",
"0.766128",
"0.76611435",
"0.76522994",
"0.7594097",
"0.75423145",
"0.7516391",
"0.7508749",
"0.7505096",
"0.7502163",
"0.747645",
"0.7472635",
"0.7472505",
"0.74684477",
"0.74524325",
"0.7440694",
"0.74284333"
] | 0.85676354 | 0 |
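Several of the factorization snippets above divide with "n /= p", which keeps n integral under Python 2 but produces floats under Python 3. For comparison only, a minimal trial-division sketch (illustrative, not taken from any dataset row) that stays integer-valued:

def prime_factors(n):
    # Repeatedly strip out each divisor; // keeps n an int under Python 3.
    factors = []
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

# Example: prime_factors(360) -> [2, 2, 2, 3, 3, 5]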
Sets up the connection. Will optionally accept a size or else will use a chunked TransferEncoding. | def setup(self, size=None):
if size:
self.size = size
if not self.size:
self.size = UNKNOWN_LENGTH
self.body.length = self.size
req = self.conn.make_request('PUT', self.url,
headers=self.headers,
data=self.body)
self.req = req
print "ChunkedTwistedConnection: STARTED REQUEST" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _send_connection_init(self, request: Request) -> None:\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self._h2_state.local_settings = h2.settings.Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. Maybe when we support caching?\n h2.settings.SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self._h2_state.local_settings[\n h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL\n ]\n\n self._h2_state.initiate_connection()\n self._h2_state.increment_flow_control_window(2**24)\n await self._write_outgoing_data(request)",
"def __init__(self, host, port, compress=True, chunk_size=1300, **kwargs):\n\n DatagramHandler.__init__(self, host, port)\n BaseHandler.__init__(self, **kwargs)\n\n self.compress = compress\n self.chunk_size = chunk_size",
"def __init__(self, bitstream, chunk_size):\n self.stream_chunker = self.chunker(bitstream, chunk_size)",
"def __init__(self, resp, chunk_size, conn_to_close):\n self.resp = resp\n self.chunk_size = chunk_size\n self.conn_to_close = conn_to_close",
"def __init__(self, host='localhost', port=8125, max_buffer_size=50):\n self.max_buffer_size = max_buffer_size\n self._send = self._send_to_server\n self.connect(host, port)\n self.encoding = 'utf-8'",
"def __init__(self, port=8080, bufferSize=1024, encoding=\"utf-8\"):\n self.BUFFER_SIZE = bufferSize\n self.PORT_NUM = port\n self.ENCODING = encoding\n self.RECV_LIMIT = 5 #DO NOT CHANGE!!\n self.socket = socket(AF_INET,SOCK_STREAM)\n self.socket.settimeout(0.5)#intentionally very low. DO NOT CHANGE!!",
"def __init__(self, size, connection):\n pycastle_log.debug(str(self)+\" start\")\n try:\n assert isinstance(connection, CastleConnection), \"wtf\"\n self.buf = castle_shared_buffer_create(connection.conn, size)\n self.size = size\n self.connection = connection\n pycastle_log.info(\"Made buffer {0} of size {1} with connection {2}\".format(self.buf, self.size, self.connection.conn))\n except Exception, e:\n pycastle_log.error(str(self)+\" got exception {0}:{1}\".format(type(e), e))\n raise\n finally:\n pycastle_log.debug(str(self)+\" stop\")",
"def prepare(self):\n if self.request.method.upper() == 'POST':\n if 'expected_size' in self.request.arguments:\n self.request.connection.set_max_body_size(\n int(self.get_argument('expected_size')))\n try:\n total = int(self.request.headers.get(\"Content-Length\", \"0\"))\n except KeyError:\n total = 0\n self.multipart_streamer = MultiPartStreamer(total)",
"def open(self):\n streaming_specs = self.get_streaming_specs()\n self._stream = chunked_requests.Stream(**streaming_specs)",
"def __init__(self, transport, buff_size=16384, socket_timeout=5.0,\n progress=None, sanitize=_sh_quote):\n self.transport = transport\n self.buff_size = buff_size\n self.socket_timeout = socket_timeout\n self.channel = None\n self.preserve_times = False\n self._progress = progress\n self._recv_dir = b''\n self._rename = False\n self._utime = None\n self.sanitize = sanitize\n self._dirtimes = {}",
"def __init__(self, data_size):\n try:\n self.data_size = int(data_size)\n except ValueError as exc:\n raise ValueError(\"Exepected arg 'size' to be int: \" + str(exc))\n self.packet = bytearray()\n self.in_data = False\n self.header_pos = 0\n self.transport = None",
"def est_connection(self):\n try:\n file_size = math.ceil(self.get_file_size())\n with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\n print(f\"[+]connecting to {self.HOSTNAME}:{self.PORT}\")\n s.connect((self.HOSTNAME,self.PORT))\n print(f\"[+]Connected\")\n # prime the server with file meta data\n s.send(f\"{self.FILENAME} {file_size}\".encode())\n print(f\"[+]Sending file info from: {self.get_full_path()}\")\n self.stream_files(s)\n return \n\n except socket.error as msg:\n print(f\"Caught exception: {msg}\")",
"def __init__(self, size):\n self.handle_errors(size)\n self.__size = size",
"def init_connection(self, connection):",
"def _setup(self, addr, size):\n # No-op base implementation",
"def __init__(self, stream, progress_callback, progress_chunk_size):\n self._stream = stream\n self._progress_callback = progress_callback\n self._progress_chunk_size = progress_chunk_size\n self._bytes_transferred = 0\n self._progress_chunk = 0",
"def done (self):\r\n\r\n # ----------------------------------------\r\n # persistent connection management\r\n # ----------------------------------------\r\n\r\n # --- BUCKLE UP! ----\r\n\r\n connection = get_header(CONNECTION, self.header).lower()\r\n\r\n close_it = 0\r\n wrap_in_chunking = 0\r\n\r\n if self.version == '1.0':\r\n if connection == 'keep-alive':\r\n if 'Content-Length' not in self:\r\n close_it = 1\r\n else:\r\n self['Connection'] = 'Keep-Alive'\r\n else:\r\n close_it = 1\r\n elif self.version == '1.1':\r\n if connection == 'close':\r\n close_it = 1\r\n elif 'Content-Length' not in self:\r\n if 'Transfer-Encoding' in self:\r\n if not self['Transfer-Encoding'] == 'chunked':\r\n close_it = 1\r\n elif self.use_chunked:\r\n self['Transfer-Encoding'] = 'chunked'\r\n wrap_in_chunking = 1\r\n else:\r\n close_it = 1\r\n elif self.version is None:\r\n # Although we don't *really* support http/0.9 (because we'd have to\r\n # use \\r\\n as a terminator, and it would just yuck up a lot of stuff)\r\n # it's very common for developers to not want to type a version number\r\n # when using telnet to debug a server.\r\n close_it = 1\r\n\r\n outgoing_header = producers.simple_producer(self.get_reply_header_text())\r\n\r\n if close_it:\r\n self['Connection'] = 'close'\r\n\r\n if wrap_in_chunking:\r\n outgoing_producer = producers.chunked_producer (\r\n producers.composite_producer (self.outgoing)\r\n )\r\n # prepend the header\r\n outgoing_producer = producers.composite_producer(\r\n [outgoing_header, outgoing_producer]\r\n )\r\n else:\r\n # prepend the header\r\n self.outgoing.insert(0, outgoing_header)\r\n outgoing_producer = producers.composite_producer (self.outgoing)\r\n\r\n # apply a few final transformations to the output\r\n self.channel.push_with_producer (\r\n # globbing gives us large packets\r\n producers.globbing_producer (\r\n # hooking lets us log the number of bytes sent\r\n producers.hooked_producer (\r\n outgoing_producer,\r\n self.log\r\n )\r\n )\r\n )\r\n\r\n self.channel.current_request = None\r\n\r\n if close_it:\r\n self.channel.close_when_done()",
"def open(self, transport_config, options, protocol_class=None):",
"def connection_made(self, transport):\n self._transport = transport\n self._when_connected = datetime.datetime.now()\n self._last_received = datetime.datetime.now()\n\n reader_factory = self._reader_factory\n writer_factory = self._writer_factory\n reader_kwds = {}\n writer_kwds = {}\n\n if self.default_encoding:\n reader_kwds[\"fn_encoding\"] = self.encoding\n writer_kwds[\"fn_encoding\"] = self.encoding\n reader_kwds[\"encoding_errors\"] = self._encoding_errors\n writer_kwds[\"encoding_errors\"] = self._encoding_errors\n reader_factory = self._reader_factory_encoding\n writer_factory = self._writer_factory_encoding\n\n if self._limit:\n reader_kwds[\"limit\"] = self._limit\n\n self.reader = reader_factory(**reader_kwds)\n\n self.writer = writer_factory(\n transport=transport,\n protocol=self,\n reader=self.reader,\n server=True,\n **writer_kwds\n )\n\n logger.info(\"Connection from %s\", self)\n\n self._waiter_connected.add_done_callback(self.begin_shell)\n asyncio.get_event_loop().call_soon(self.begin_negotiation)",
"def done(self, *arg, **kw):\r\n\r\n # ----------------------------------------\r\n # persistent connection management\r\n # ----------------------------------------\r\n\r\n # --- BUCKLE UP! ----\r\n\r\n connection = http_server.get_header(http_server.CONNECTION,self.header)\r\n connection = connection.lower()\r\n\r\n close_it = 0\r\n wrap_in_chunking = 0\r\n globbing = 1\r\n\r\n if self.version == '1.0':\r\n if connection == 'keep-alive':\r\n if not 'Content-Length' in self:\r\n close_it = 1\r\n else:\r\n self['Connection'] = 'Keep-Alive'\r\n else:\r\n close_it = 1\r\n elif self.version == '1.1':\r\n if connection == 'close':\r\n close_it = 1\r\n elif not 'Content-Length' in self:\r\n if 'Transfer-Encoding' in self:\r\n if not self['Transfer-Encoding'] == 'chunked':\r\n close_it = 1\r\n elif self.use_chunked:\r\n self['Transfer-Encoding'] = 'chunked'\r\n wrap_in_chunking = 1\r\n # globbing slows down tail -f output, so only use it if\r\n # we're not in chunked mode\r\n globbing = 0\r\n else:\r\n close_it = 1\r\n elif self.version is None:\r\n # Although we don't *really* support http/0.9 (because\r\n # we'd have to use \\r\\n as a terminator, and it would just\r\n # yuck up a lot of stuff) it's very common for developers\r\n # to not want to type a version number when using telnet\r\n # to debug a server.\r\n close_it = 1\r\n\r\n outgoing_header = producers.simple_producer(self.build_reply_header())\r\n\r\n if close_it:\r\n self['Connection'] = 'close'\r\n\r\n if wrap_in_chunking:\r\n outgoing_producer = deferring_chunked_producer(\r\n deferring_composite_producer(self.outgoing)\r\n )\r\n # prepend the header\r\n outgoing_producer = deferring_composite_producer(\r\n [outgoing_header, outgoing_producer]\r\n )\r\n else:\r\n # prepend the header\r\n self.outgoing.insert(0, outgoing_header)\r\n outgoing_producer = deferring_composite_producer(self.outgoing)\r\n\r\n # hook logging into the output\r\n outgoing_producer = deferring_hooked_producer(outgoing_producer,\r\n self.log)\r\n\r\n if globbing:\r\n outgoing_producer = deferring_globbing_producer(outgoing_producer)\r\n\r\n self.channel.push_with_producer(outgoing_producer)\r\n\r\n self.channel.current_request = None\r\n\r\n if close_it:\r\n self.channel.close_when_done()",
"def Prepare(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def initialize(self):\n if not self.connection.is_closed():\n self.connection.close()\n\n self.connection.connect()",
"def __init__(self, width=None, chunk_size=None):\r\n self._width = width or 50\r\n if not isinstance(self._width, Compatibility.integer):\r\n raise ValueError('The width must be an integer, given %s' % self._width)\r\n\r\n self._chunk_size = chunk_size or Amount(10, Data.KB)\r\n if not isinstance(self._chunk_size, Amount) or not isinstance(self._chunk_size.unit(), Data):\r\n raise ValueError('The chunk_size must be a Data Amount, given %s' % self._chunk_size)\r\n\r\n self._start = time.time()",
"def makeConnection(self, transport):\n pass",
"def __init__(self, host, port, use_socket=None, server=False, handler=False, bufsize=\"auto\", compress=False, compress_level=6):\n super(BinarySocket, self).__init__(host, port, server=server, use_socket=use_socket, use_pickle=False, bufsize=bufsize, handler=handler)\n self.__header_buffer = \"\"\n self.__binary_buffer = \"\"\n self.__meta_buffer = \"\"\n self.__header_length = 2 * 4 + 1 # 2 Unsigned Ints, 1 Bool\n self.__binary_length = None\n self.__binary_compressed = False\n self.__meta_length = None\n self.__buffer_lock = threading.Lock()\n\n self.set_compression(compress, compress_level)",
"def connection_made(self, transport):\n #self._transport = transport\n\n self._server_ip, self._server_port = (\n transport.get_extra_info('peername')[:2])\n\n self.stream = self._stream_factory(\n transport=transport, client=True, log=self.log)\n\n# self.reader = self._factory_reader()\n# self.reader.set_transport(transport)\n self.shell = self._shell_factory(client=self, log=self.log)\n\n self.init_environment_values()\n self.set_stream_callbacks()\n self._last_received = datetime.datetime.now()\n self._connected = datetime.datetime.now()\n\n # begin connect-time negotiation\n self._loop.call_soon(self.begin_negotiation)\n\n # resolve server fqdn (and later, reverse-dns)\n self._server_host = self._loop.run_in_executor(\n None, socket.gethostbyaddr, self._server_ip)\n self._server_host.add_done_callback(self.after_server_lookup)\n\n self.log.info(self)",
"async def read_chunk(self, size: int = ...) -> bytes:\n ...",
"def __init__(self, ip, port, stream_id, auth_options, chunk_size=2097152):\n self.stream_id = stream_id\n # Create a rtmp url using ip, port, and stream_id\n connection_string = \"rtmp://{0}:{1}/view/{2}\".format(ip, port, stream_id)\n # Add authorization parameters to connection string\n self.connection_string = self._auth_RTMP(\n connection_string,\n auth_options[\"loginUrl\"],\n auth_options[\"rtmpRequestUrl\"],\n auth_options[\"username\"],\n auth_options[\"password\"])\n # Create a stream connection to rtmp url\n self.connection = librtmp.RTMP(self.connection_string, live=True)\n self.connection.connect()\n self.stream = self.connection.create_stream()\n self.chunk_size = chunk_size\n self.bytes_read = 0\n self.previous_read = 0\n self.data = b''\n self.is_reading = True",
"def __init__(self, size):\n self.__size = size\n self.integer_validator(\"size\", size)\n super().__init__(size, size)\n self._size = size",
"def connection_made(self, transport):\n self.transport = transport\n self.buf = bytes()\n self.msgs_recvd = 0\n print('Reader connection created')"
] | [
"0.5874525",
"0.58596337",
"0.57424444",
"0.57155627",
"0.571499",
"0.5675915",
"0.566236",
"0.5662341",
"0.56123924",
"0.5471748",
"0.5447842",
"0.5433867",
"0.5358979",
"0.53248644",
"0.53098845",
"0.52834433",
"0.5279242",
"0.5220655",
"0.5175288",
"0.51495194",
"0.5126971",
"0.5102948",
"0.5066454",
"0.5006225",
"0.49974203",
"0.49876526",
"0.49875414",
"0.49744007",
"0.49731275",
"0.49513358"
] | 0.7370442 | 0 |
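As a general illustration of what "use a chunked TransferEncoding" means in practice, a minimal sketch assuming the third-party requests library (the URL, file name, and chunk size are invented; this is not the ChunkedTwistedConnection code above):

import requests

def file_chunks(path, chunk_size=64 * 1024):
    # Yield the file piece by piece instead of loading it all at once.
    with open(path, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            yield chunk

# Passing a generator as the body means no Content-Length can be computed,
# so requests sends the data with Transfer-Encoding: chunked.
resp = requests.put("http://example.com/upload", data=file_chunks("payload.bin"))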
Sends a chunk of data. | def send_chunk(self, chunk):
print "ChunkedTwistedConnection: send chunk"
return self.body.send(chunk) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_chunk(chunk, send_socket):\n length = len(chunk)\n data = str(length).zfill(MAX_CHUNK_SIZE).encode() + chunk\n send_socket.send(data)",
"def send_chunk(chnk, sock):\n length = len(chnk)\n data = str(length).zfill(MAX_CHUNK_SIZE).encode() + chnk\n sock.send(data)",
"def inject_send(data):\n tsent = 0\n bytes = len(data)\n chunksize = filesize / 100\n if chunksize < 4096:\n chunksize = 4096\n while bytes > 0:\n sent = imap.sslobj.write(data[:chunksize])\n if sent == bytes:\n common.progress(filesize, bytes)\n break # avoid copy\n tsent += sent\n common.progress(filesize, tsent)\n data = data[sent:]\n bytes = bytes - sent",
"def send(self, bytestream):\n total_sent = 0\n length_data = len(bytestream)\n try:\n while total_sent < length_data:\n # Returns the number of bytes sent\n nr_sent = self.socket.send(bytestream[total_sent:])\n total_sent += nr_sent\n except (socket.error, socket.timeout):\n # Evt17: Transport connection closed\n self.event_queue.put('Evt17')",
"def send(self, data, is_chunked=False):\r\n if self.sock is None:\r\n if self.auto_open:\r\n self.connect()\r\n else:\r\n raise NotConnected()\r\n\r\n if self.debuglevel > 0:\r\n print \"send:\", repr(data)\r\n blocksize = 8192\r\n if hasattr(data, 'read') and not isinstance(data, array):\r\n if self.debuglevel > 0:\r\n print \"sendIng a read()able\"\r\n datablock = data.read(blocksize)\r\n while datablock:\r\n if self.debuglevel > 0:\r\n print 'chunked:', is_chunked\r\n if is_chunked:\r\n if self.debuglevel > 0:\r\n print 'send: with trunked data'\r\n lenstr = string.upper(hex(len(datablock))[2:])\r\n self.sock.sendall('%s\\r\\n%s\\r\\n' % (lenstr, datablock))\r\n else:\r\n self.sock.sendall(datablock)\r\n datablock = data.read(blocksize)\r\n if is_chunked:\r\n self.sock.sendall('0\\r\\n\\r\\n')\r\n else:\r\n self.sock.sendall(data)",
"def send(self, data: bytes):\n\n self.client.sendall(data)\n\n return len(data)",
"def send(self, data: bytes):",
"def recv_chunk(self, data):",
"def send(self, data: bytes) -> int:\n ...",
"def send_bytes(self, data: bytes) -> None:",
"async def write(self, data: bytes):\n while data:\n await self.wait_for_write()\n try:\n sent = self.socket.send(data)\n except OSError as e:\n self.logger.debug(\"Failed to write: %s\", e)\n raise asyncio.TimeoutError()\n data = data[sent:]",
"def send_data(self, data):\n self._transport.write(data)",
"def send_chunked(self, chunks, payload, trailers):\r\n\r\n chunk_list = chunks.split(',')\r\n pointer = 0\r\n for cwidth in chunk_list:\r\n cwidth = int(cwidth)\r\n # send chunk length indicator\r\n self.wfile.write(format(cwidth, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:pointer + cwidth] + \"\\r\\n\")\r\n pointer += cwidth\r\n\r\n # is there another chunk that has not been configured? Send it anyway for the sake of completeness..\r\n if len(payload) > pointer:\r\n # send chunk length indicator\r\n self.wfile.write(format(len(payload) - pointer, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:] + \"\\r\\n\")\r\n\r\n # we're done with the payload. Send a zero chunk as EOF indicator\r\n self.wfile.write('0'+\"\\r\\n\")\r\n\r\n # if there are trailing headers :-) we send them now..\r\n for trailer in trailers:\r\n self.wfile.write(\"%s: %s\\r\\n\" % (trailer[0], trailer[1]))\r\n\r\n # and finally, the closing ceremony...\r\n self.wfile.write(\"\\r\\n\")",
"def send(self, data):\n self.socket.sendall(data)",
"def sendData(self, data):\n self.tx.sendBuffer(data)",
"def save_send(socket, data):\r\n\r\n # We have no control about how much data the clients accepts,\r\n # thus we send in chunks until done\r\n while len(data) > 0:\r\n try:\r\n send_data_size = socket.send(data)\r\n # remove sent portion form data\r\n data = data[send_data_size:]\r\n except error as msg:\r\n # most likely socket busy, buffer full or not yet ready\r\n sleep(0.01)",
"def send(self, data):\n pass",
"def _send_data(self, data, time):\n pass",
"def send(self, data):",
"def _send_from_buffer(cls, buf, stream):\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n stream.write( buf[chunk_start:chunk_stop] )\n remaining_bytes -= next_chunk_bytes",
"def send(self, data):\n self.sent.put(data)",
"async def _send_stream_data(\n self, request: Request, stream_id: int, data: bytes\n ) -> None:\n while data:\n max_flow = await self._wait_for_outgoing_flow(request, stream_id)\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n self._h2_state.send_data(stream_id, chunk)\n await self._write_outgoing_data(request)",
"def send(self, buf, offset=0, size=None):\n raise NotImplementedError",
"def send(self, data):\n self._send(data)",
"def send(self, data):\n self.sock.send(data)",
"def send(self, data):\n self.sock.send(data)",
"def send_to_data_channel(self, sock, data):\n resp = sock.send(data)\n print_debug(resp)\n self.logger.log(\"Sent: %s\" % data)\n return resp",
"async def send_raw(self, data: bytes) -> None:\n await self.socket.sendall(data)",
"def send(self, data: bytes) -> int:\n return self.connection.send(data)",
"def send(self, socket, data):\n data_length = len(data)\n socket.send(self.struct.pack(data_length))\n\n total_sent = 0\n while total_sent < data_length:\n sent = socket.send(data[total_sent:])\n if not sent:\n raise RuntimeError(\"Socket connection was broken.\")\n total_sent += sent"
] | [
"0.78114295",
"0.7532161",
"0.7232472",
"0.71928585",
"0.7086782",
"0.7002679",
"0.6989813",
"0.69814444",
"0.69016576",
"0.68666893",
"0.68550235",
"0.6853498",
"0.67988867",
"0.6783611",
"0.67816645",
"0.67800605",
"0.6746048",
"0.6729257",
"0.6725598",
"0.67044735",
"0.67041624",
"0.6678867",
"0.6669147",
"0.66383004",
"0.66168433",
"0.66168433",
"0.6610788",
"0.65852153",
"0.6584088",
"0.65770674"
] | 0.7723083 | 1 |
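Several of the negatives above hand-roll the same "keep calling send() until everything is out" loop; a minimal standard-library sketch of that pattern, assuming sock is an already connected socket:

def send_all(sock, data):
    # Loop because socket.send() may transmit only part of the buffer.
    total = 0
    while total < len(data):
        sent = sock.send(data[total:])
        if sent == 0:
            raise ConnectionError("socket connection broken")
        total += sent
    return total

# In practice sock.sendall(data) already implements this loop.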
Calculate the distance and rotation to the edge of the desk | def getDistanceAndRotationToEdge(l, f, r):
if DEBUG:
print "lfr:", l,",",f,",",r
# Maths help from: http://xaktly.com/MathNonRightTrig.html
# - Specfically the law of cosines, but at least one of their
# examples is wrong, but methods are correct... sigh.
#
# For triangle with forward length, shortest of
# left and right length, and desk edge as sides...
#
# f = forward distance length
# l = left distance length
# r = right distance length
# e = length of desk edge between left and right views
# s = shortest of left and right distance length
# v = "view" angle of how much robot looks left or right
# g = angle between f and e
# d = distance between robot and edge of desk
# a = angle between the way the robot is facing and edge of desk
# (i.e. if the robot is facing directly towards edge it's 0)
# (in radians or degrees?..)
#
# e² = f² + s² - 2 * f * s * cos(v)
# g = sin⁻¹ * (s * sin(v) / e)
# d = f * sin(g)
# a = 180 - 90 - g (minus or positive depending on if s is left or right)
# Figure out if the edge of the desk is more to the right or left
# s = min(l, r) <-- Used to use this, but need additional things.
# r | l | s
# x | x | ?
# 1 | 1 | ? Logic table for _r_ight, _l_eft, and output
# 0 | 0 | ? _s_hortest distances from robot to desk edge
# x | 0 | l
# 1 | x | r x = None
# 0 | 1 | r 1 = arbitrary high-ish value
# x | 1 | l 0 = arbitrary low-ish value
# 1 | 0 | l
# 0 | x | r
# Distance to right and left are missing?
if r is None and l is None:
if DEBUG:
print "INFO: Skipping edge calcs because of missing distances."
return int(round(f)), 0
# Distance to right and left identical?
elif r == l:
if DEBUG:
print "INFO: Skipping edge calcs because of identical distances."
# This is unlikely-ish because l, f, r are floats...
#
# r < f r > f
# ◆ | or ◼
# ____➘| __🠛__
#
return int(round(min(r, f))), 0
# Figure out if _l_eft or _r_ight is the shorter distance
else:
if r is None:
s = l
direction = -1
elif l is None:
s = r
direction = 1
elif l < r:
s = l
direction = -1
elif r < l :
s = r
direction = 1
cosV = math.cos(math.radians(45))
sinV = math.sin(math.radians(45))
e = f**2 + s**2 - 2 * f * s * cosV
e = math.sqrt(e)
g = math.degrees(math.asin(s * sinV / e))
        d = f * math.sin(math.radians(g)) # Switching degrees/radians for debugging
a = (90 - g) * direction
'''
# Debug stuff
print "f =", f
print "l =", l
print "r =", r
print "e =", e
print "s =", s
print "v =", 45
print "g =", g
print "d =", d
print "a =", a
'''
distance = int(round(d))
rotation = int(round(a))
if DEBUG:
print "Distance to edge:", str(distance) + "cm"
print "Rotation to edge:", str(rotation) + "°"
return distance, rotation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getEdgeAngle():\n '''\n returns angle a\n a\n ◿\n b c\n '''\n ANGLE_OFFSET = 8 # How far off the angle measurements are in degrees.\n THRESHOLD = 220 # How much light must be reflected to 'notice' the desk.\n angle = 0\n while angle < panTilt.TLT_RANGE:\n angle += 1\n panTilt.tilt(int(angle))\n deskDetected = ir.readWithDelay()\n # print \"Angle:\", angle + ANGLE_OFFSET, \", ir reading:\", deskDetected\n if deskDetected > THRESHOLD or angle == panTilt.TLT_RANGE:\n # print \"-----------------------\"\n break # Break out of looking downwards loop\n panTilt.up() # Look up again\n return 90 - angle - ANGLE_OFFSET",
"def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu",
"def getEdgeDistance():\n '''\n a\n ◿\n b c\n\n hypotenuse\n ◿ adjacent\n opposite\n\n tan(a) = opposite/adjacent\n adjacent * tan(a) = opposite\n '''\n\n # An estimated multiplier to take into account the larger infrared dot\n # observed when further away from as surface - think torch beam onto a\n # wall getting larger as it gets further away, but only the radius\n # (center downwards) being relevant.\n # TODO: Maybe move into infrared sensor code?\n MULTI = 1.2\n\n edgeDistance = BOT_HEIGHT * math.tan(math.radians(getEdgeAngle()))\n edgeDistance *= MULTI\n\n if DEBUG:\n print \"Distance to edge: \", int(round(edgeDistance))\n\n return edgeDistance",
"def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge",
"def getDistances():\n\n # If there's a wall in the way then there's no edge that way (probably)\n\n wallL, edgeL = getDistance(-45) # Left\n wallF, edgeF = getDistance( 0) # Forward\n wallR, edgeR = getDistance( 45) # Right\n\n panTilt.pan() # Recenter\n\n return wallL, edgeL, wallF, edgeF, wallR, edgeR",
"def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi",
"def fangle_degr(self):\r\n\r\n return self._versor_1.angle_degr(self._versor_2)",
"def calculate_clockwise_angle_and_distance(self, center_node, spoke_node): # pylint: disable=R0201\n if not spoke_node['id'] in center_node['relations']:\n raise Exception('spoke_node_id must be related to center node')\n\n refvec = [0, 1]\n point = spoke_node['coords']\n origin = center_node['coords']\n\n # Vector between point and the origin: v = p - o\n vector = [point[0] - origin[0], point[1] - origin[1]]\n # Length of vector: ||v||\n lenvector = math.hypot(vector[0], vector[1])\n # If length is zero there is no angle\n if lenvector == 0:\n return -math.pi, 0\n\n # Normalize vector: v/||v||\n normalized = [vector[0]/lenvector, vector[1]/lenvector]\n dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2\n diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2\n angle = math.atan2(diffprod, dotprod)\n\n # Negative angles represent counter-clockwise angles so we need to subtract them\n # from 2*pi (360 degrees)\n if angle < 0:\n return 2 * math.pi + angle, lenvector\n\n # I return first the angle because that's the primary sorting criterium\n # but if two vectors have the same angle then the shorter distance should come first.\n # (lenvector should never really be needed, however, since that would mean edges overlap)\n return angle, lenvector",
"def distance_between_wheels():",
"def faceDiagonal(self):\n faceDiagonal = (2**(1/2)) * self.sideLength\n return faceDiagonal",
"def edge_dxy(self):\r\n loc = self.loc\r\n rect = loc.coord\r\n p1 = rect[0]\r\n p2 = rect[1]\r\n edx = p2[0] - p1[0] # Find edge direction\r\n edy = p2[1] - p1[1]\r\n return edx, edy",
"def edgeCurl(self):\n if getattr(self, '_edgeCurl', None) is None:\n assert self.dim > 1, \"Edge Curl only programed for 2 or 3D.\"\n\n n = self.vnC # The number of cell centers in each direction\n L = self.edge # Compute lengths of cell edges\n S = self.area # Compute areas of cell faces\n\n # Compute divergence operator on faces\n if self.dim == 2:\n\n D21 = sp.kron(ddx(n[1]), speye(n[0]))\n D12 = sp.kron(speye(n[1]), ddx(n[0]))\n C = sp.hstack((-D21, D12), format=\"csr\")\n self._edgeCurl = C*sdiag(1/S)\n\n elif self.dim == 3:\n\n D32 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]+1))\n D23 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]+1))\n D31 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]))\n D13 = kron3(speye(n[2]), speye(n[1]+1), ddx(n[0]))\n D21 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]))\n D12 = kron3(speye(n[2]+1), speye(n[1]), ddx(n[0]))\n\n O1 = spzeros(np.shape(D32)[0], np.shape(D31)[1])\n O2 = spzeros(np.shape(D31)[0], np.shape(D32)[1])\n O3 = spzeros(np.shape(D21)[0], np.shape(D13)[1])\n\n C = sp.vstack((sp.hstack((O1, -D32, D23)),\n sp.hstack((D31, O2, -D13)),\n sp.hstack((-D21, D12, O3))), format=\"csr\")\n\n self._edgeCurl = sdiag(1/S)*(C*sdiag(L))\n return self._edgeCurl",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def virtual_distance(self):\n conflict_zone_radio = 384.0\n path_width = 172.0\n right_turn_radio = path_width / 4.0\n left_turn_radio = 3 * path_width / 4.0\n initial_straight_section = conflict_zone_radio - path_width / 2.0\n if self.get_intention() == \"s\":\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_intention() == \"r\":\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() > -right_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (right_turn_radio + self.get_virtual_y_position())\n ) * right_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * right_turn_radio / 2.0 -\n self.get_virtual_y_position() - right_turn_radio\n )\n\n a = path_width / 2.0\n b = right_turn_radio + path_width / 4.0\n c = pi * right_turn_radio / 2.0\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n else:\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() < left_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (\n left_turn_radio -\n self.get_virtual_y_position()\n )\n ) * left_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * left_turn_radio / 2 +\n self.get_virtual_y_position() - left_turn_radio\n )\n\n a = path_width / 2\n b = right_turn_radio + path_width / 4\n c = pi * left_turn_radio / 2\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n return virtual_distance_value",
"def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]",
"def doMath(self, distance):\n\t\trotational_corr = -.1*(self.front_point-self.back_point)\n\t\tdistance_corr = -.2*(self.middle - distance)\n\t\treturn rotational_corr + distance_corr",
"def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance",
"def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))",
"def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)",
"def total_edge_angle(e1, e2):\n e1_source = section.index(e1[0])\n e2_target = section.index(e2[1])\n\n \"\"\" Given a pair of vertices, call angle_delta between them. \"\"\"\n f = lambda pair: utils.angle_delta(self.node_heading[pair[0]], self.node_heading[pair[1]])\n\n \"\"\" Map f onto each pair of adjacent vertices, and return the abs of the summed result. \"\"\"\n return abs(sum(map(f, zip(section[e1_source + 1:e2_target], section[e1_source + 2:e2_target + 1]))))",
"def _calc_side(self):\n\n # Calculation of the side of the car with respect to the trajectory\n next_index = self.index + 1\n\n if next_index == len(self.x_trajectory):\n next_index = self.index\n\n trajectory_vector = ((self.x_trajectory[next_index]\n - self.x_trajectory[self.index]),\n (self.y_trajectory[next_index]\n - self.y_trajectory[self.index]))\n\n x_diff = self.x - self.x_trajectory[self.index]\n y_diff = self.y - self.y_trajectory[self.index]\n\n ugv_vector = (x_diff, y_diff)\n\n vector_z = ugv_vector[0] * trajectory_vector[1] \\\n - ugv_vector[1] * trajectory_vector[0]\n\n if vector_z >= 0:\n\n # It is in the right side\n self.sign = 1\n\n else:\n\n # It is in the left side\n self.sign = -1\n\n return self.sign",
"def _geodesic_distance(mesh, face1, face2, edge):\n edge_center = (mesh.vertices[edge[0]] + mesh.vertices[edge[1]]) / 2\n return _list_length(_list_minus(edge_center, _face_center(mesh, face1))) + \\\n _list_length(_list_minus(edge_center, _face_center(mesh, face2)))",
"def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))",
"def angle(self) -> int:",
"def degrees(self):\n A = self.adjacency()\n A.data = np.ones(A.nnz)\n right = np.array(A.sum(1)).ravel()\n left = np.array(A.sum(0)).ravel()\n return right, left",
"def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2",
"def _get_angle_and_dist_to_avoid(self, detection, direction='left'):\n OVERSHOOT_DIST = 0.20 # meters, distance to overshoot target by\n base_link_pose = self._transform_to_base_link(detection)\n radius = math.sqrt(base_link_pose.pose.position.x ** 2\n + base_link_pose.pose.position.y ** 2)\n tag_point = Point(x=base_link_pose.pose.position.x,\n y=base_link_pose.pose.position.y)\n\n path_edge_point = Point()\n # solve for x given the radius and y-coord of a point on a circle\n # Just set x to zero if radius is too small (if tag is too close to\n # the rover. Protects math.sqrt() from evaluating a negative number.\n if radius > Planner.PATHWAY_EDGE_DIST:\n path_edge_point.x = math.sqrt(radius ** 2\n - Planner.PATHWAY_EDGE_DIST ** 2)\n else:\n path_edge_point.x = 0\n path_edge_point.y = Planner.PATHWAY_EDGE_DIST\n if direction == 'left':\n path_edge_point.y *= -1\n\n return (-self._angle_between(tag_point, path_edge_point),\n path_edge_point.x + OVERSHOOT_DIST)",
"def calcDistance(self, left, right):\n\n return math.fabs(right-left)",
"def mrr_diagonal(geom: base.BaseGeometry) -> float:\n if len(geom) <= 1:\n return 0\n if len(geom) == 2:\n return geo.distance( # type: ignore\n lat1=geom[0].y, lon1=geom[0].x, lat2=geom[1].y, lon2=geom[1].x\n )\n mrr = LineString(geom).minimum_rotated_rectangle\n if isinstance(mrr, Point):\n return 0\n try: # in most cases, mrr is a Polygon\n x, y = mrr.exterior.coords.xy\n except AttributeError: # then it should be a LineString\n p0, p1 = mrr.coords[0], mrr.coords[-1]\n return geo.distance(p0[1], p0[0], p1[1], p1[0]) # type: ignore\n return geo.distance(y[0], x[0], y[2], x[2]) # type: ignore",
"def cable_length(self):\n skel = self.physical_space(copy=False)\n\n v1 = skel.vertices[skel.edges[:,0]]\n v2 = skel.vertices[skel.edges[:,1]]\n\n delta = (v2 - v1)\n delta *= delta\n dist = np.sum(delta, axis=1)\n dist = np.sqrt(dist)\n\n return np.sum(dist)"
] | [
"0.6792521",
"0.67125314",
"0.6442544",
"0.6154413",
"0.5941092",
"0.5854384",
"0.58526313",
"0.5838844",
"0.58166885",
"0.5809942",
"0.57968384",
"0.5790198",
"0.5733752",
"0.57309914",
"0.5662172",
"0.5659722",
"0.5644403",
"0.5641197",
"0.5620599",
"0.55770284",
"0.5569615",
"0.555166",
"0.5506792",
"0.5505262",
"0.54924667",
"0.5491714",
"0.54822713",
"0.54792947",
"0.5438697",
"0.53948087"
] | 0.6808179 | 0 |
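To make the law-of-cosines bookkeeping in getDistanceAndRotationToEdge concrete, a standalone numeric check of the same formulas (the 40 cm and 25 cm distances and the 45-degree view angle are invented for illustration):

import math

f, s, v = 40.0, 25.0, 45.0          # forward dist, shorter side dist, view angle (deg)
e = math.sqrt(f**2 + s**2 - 2 * f * s * math.cos(math.radians(v)))
g = math.degrees(math.asin(s * math.sin(math.radians(v)) / e))
d = f * math.sin(math.radians(g))   # perpendicular distance to the desk edge
a = 90 - g                          # rotation needed to face the edge squarely
# With these inputs: e is about 28.5, g about 38.4 degrees,
# d about 24.8 cm and a about 51.6 degrees.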
Given a value, break it down by the ilk of node (usb or pci), the vendor, and the device or product. | def parse_value(value: str) -> Tuple[str, str, str]:
value_pattern = r'^(usb|pci)\(([^:]{4}):([^:]{4})\)$'
matches = re.match(value_pattern, value)
assert matches, value
ilk, vendor, device = matches.group(1), matches.group(2), matches.group(3)
return ilk, vendor, device | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vendor_list():\n return ['nxos', 'eos', 'cumulus']",
"def get_vendor(mac):\r\n return p.get_manuf(mac) or 'None'",
"def bios_vendor(self):\n\t\treturn self.__info_dict['info']['bios_vendor']['value']",
"def device_catalog_path_value_converter(value):\n paths = []\n for path in value:\n pt = tuple(path.split(\"/\"))\n if pt and pt[-2]==\"devices\":\n pt = pt[:-2] + pt[-1:]\n paths.append(pt)\n return paths",
"def device(self, value):\n try:\n if isinstance(value, str):\n self._device_serial = value\n self._check_requirements()\n except ValueError:\n self._device_serial = None",
"def __init__(self, device=None, value=None):\n self.device = device\n self.value = value",
"def __init__(self, device=None, value=None):\n self.device = device\n self.value = value",
"def __init__(self, device=None, value=None):\n self.device = device\n self.value = value",
"def __init__(self, device=None, value=None):\n self.device = device\n self.value = value",
"def __init__(self, device=None, value=None):\n self.device = device\n self.value = value",
"def __init__(self, device=None, value=None):\n self.device = device\n self.value = value",
"def _get_vendor_product_id(device_dict):\n return f'{_get_vendor_id(device_dict)}/{_get_product_id(device_dict)}'",
"def get_vendor(self, result, host, mac):\n if \"vendor\" in result['scan'][host] and mac in result['scan'][host]['vendor']:\n return result['scan'][host]['vendor'][mac]\n else:\n return \"\"",
"def vendor(self) -> str:\n return self.properties[DBUS_ATTR_VENDOR]",
"def get_vendor(disk):\n\n if DISKINFO[\"/dev/\"+disk][\"Type\"] == \"Partition\":\n #We need to use the info from the host disk, which will be whatever came before.\n return DISKINFO[DISKINFO[\"/dev/\"+disk][\"HostDevice\"]][\"Vendor\"]\n\n else:\n try:\n vendor = PLIST[\"MediaName\"].split()[0]\n\n except KeyError:\n vendor = \"Unknown\"\n\n return vendor",
"def set_value_to_device(self, dev_name, value):\n dev = self.devices[dev_name]\n # If it is an analog channel\n if 'model' in dev.properties:\n if dev.properties['model'] == 'ni':\n daq = self.devices[dev.properties['connection']['device']]\n conditions = {\n 'dev': dev,\n 'value': value\n }\n daq.driver.analog_output_dc(conditions)\n else:\n dev.apply_values(value)",
"def _manufacturer(self, mac_address):\n # Initialize key variables\n manufacturer = ''\n\n # Process data\n mac_oui = mac_address[0:6]\n if mac_oui in self.oui:\n manufacturer = self.oui[mac_oui]\n\n # Return\n return manufacturer",
"def _get_vendor_id(device_dict):\n return device_dict['vendor_id'].split()[0].split('x')[-1]",
"def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }",
"def device(self):\n if (self.symbol.type == self.scanner.NAME):\n device_name = self.names.get_name_string(self.symbol.id)\n device_id = self.names.query(device_name)\n self.old_symbol = self.symbol # for reporting duplicate devices\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.COLON):\n self.symbol = self.scanner.get_symbol()\n device_kind = self.logictype()\n\n if(self.symbol.type == self.scanner.COMMA):\n self.symbol = self.scanner.get_symbol()\n if(self.symbol.type == self.scanner.KEYWORD):\n if(self.symbol.id in [self.scanner.initial_ID,\n self.scanner.inputs_ID,\n self.scanner.period_ID, self.scanner.sequence_ID]):\n\n self.symbol = self.scanner.get_symbol()\n\n # initialise list to hold device property numbers\n device_property_list = []\n\n if(self.symbol.type == self.scanner.NUMBER):\n number_val = int(\n self.names.get_name_string(self.symbol.id))\n if device_kind == self.names.query(\"SIGGEN\"):\n if (number_val == 0 or number_val == 1):\n device_property_list.append(number_val)\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Siggen signal value has\n # to be '0' or '1'.\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END.\n self.error(\n self.SIGGEN_QUALIFIER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Not a SIGGEN\n device_property_list.append(number_val)\n self.symbol = self.scanner.get_symbol()\n\n # Extract sequence of numbers for SIGGEN\n while (self.symbol.type == self.scanner.COMMA):\n if device_kind == self.names.query(\n \"SIGGEN\"):\n self.symbol = self.scanner.get_symbol()\n if(self.symbol.type == (\n self.scanner.NUMBER)):\n number_val = int(\n self.names.get_name_string(\n self.symbol.id))\n if (number_val == 0 or (\n number_val == 1)):\n device_property_list.append(\n number_val)\n self.symbol = (\n self.scanner.get_symbol())\n else:\n # Error: Signal value has\n # to be '0' or '1'.\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END.\n list1 = [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY\n ]\n list2 = [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID\n ]\n self.error(\n self.SIGGEN_QUALIFIER,\n list1,\n list2)\n else:\n # Error: Needs to be an integer\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END\n list1 = [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY\n ]\n list2 = [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID\n ]\n self.error(\n self.INTEGER, list1, list2)\n else:\n # Error: Excess qualifiers\n # for non-SIGGEN\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END\n self.error(\n self.devices.EXCESS_QUALIFIER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Needs to be an integer\n # Stop symbs:';','}','CONNECT','MONITOR', END\n self.error(\n self.INTEGER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Parameter: 'initial',\n # inputs, period, sequence.\n # Stopping symbols: ';' , '}','CONNECT', 'MONITOR'\n # or 'END' KEYWORD '\n self.error(self.NEED_QUALIFIER,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n 
self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Comma has to be followed by parameter\n # speficification\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR'\n # or 'END' KEYWORD\n self.error(self.NEED_QUALIFIER,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # There is no device property\n device_property_list = None\n\n if (self.symbol.type == self.scanner.SEMICOLON):\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Device definition needs to end in ';'\n # Stopping symbols: NAME, ';' , '}', 'CONNECT', 'MONITOR'\n # or 'END' KEYWORD\n self.error(self.NO_DEVICE_SEMICOLON,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.NAME,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Device name has to be followed by ':'\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.NO_DEVICE_COLON, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Valid Device name required\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.DEVICE_NAME, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Check for device semantic errors\n if self.error_count == 0:\n # Only check for semantic errors if no errors so far\n err = self.devices.make_device(\n device_id, device_kind, device_property_list)\n if err != self.devices.NO_ERROR:\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n err, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Increment input pin counter by number of pins on new device\n if self.error_count == 0:\n device_name_string = self.names.get_name_string(device_kind)\n if device_name_string == \"DTYPE\":\n self.num_input_pin += 4\n elif device_name_string in [\"AND\", \"OR\", \"NAND\", \"NOR\"]:\n self.num_input_pin += device_property_list[0]\n elif device_name_string == \"XOR\":\n self.num_input_pin += 2",
"def getNodeUSB(self,node):\n data = self.connect('get','nodes/%s/scan/usb' % (node),None)\n return data",
"def vendor_name(self):\n return self._device.vendor",
"def parse_url(cls, urlstr: str, scheme: str,\n vdict: Dict[str, int],\n pdict: Dict[int, Dict[str, int]],\n default_vendor: int) -> Tuple[UsbDeviceDescriptor, int]:\n urlparts = urlsplit(urlstr)\n if scheme != urlparts.scheme:\n raise UsbToolsError(\"Invalid URL: %s\" % urlstr)\n try:\n if not urlparts.path:\n raise UsbToolsError('URL string is missing device port')\n path = urlparts.path.strip('/')\n if path == '?' or (not path and urlstr.endswith('?')):\n report_devices = True\n else:\n interface = to_int(path)\n report_devices = False\n except (IndexError, ValueError) as exc:\n raise UsbToolsError('Invalid device URL: %s' % urlstr) from exc\n candidates, idx = cls.enumerate_candidates(urlparts, vdict, pdict,\n default_vendor)\n if report_devices:\n UsbTools.show_devices(scheme, vdict, pdict, candidates)\n raise SystemExit(candidates and\n 'Please specify the USB device' or\n 'No USB-Serial device has been detected')\n if idx is None:\n if len(candidates) > 1:\n raise UsbToolsError(\"%d USB devices match URL '%s'\" %\n (len(candidates), urlstr))\n idx = 0\n try:\n desc, _ = candidates[idx]\n vendor, product = desc[:2]\n except IndexError:\n raise UsbToolsError('No USB device matches URL %s' %\n urlstr) from None\n if not vendor:\n cvendors = {candidate[0] for candidate in candidates}\n if len(cvendors) == 1:\n vendor = cvendors.pop()\n if vendor not in pdict:\n raise UsbToolsError('Vendor ID %s not supported' %\n (vendor and '0x%04x' % vendor))\n if not product:\n cproducts = {candidate[1] for candidate in candidates\n if candidate[0] == vendor}\n if len(cproducts) == 1:\n product = cproducts.pop()\n if product not in pdict[vendor].values():\n raise UsbToolsError('Product ID %s not supported' %\n (product and '0x%04x' % product))\n devdesc = UsbDeviceDescriptor(vendor, product, desc.bus, desc.address,\n desc.sn, idx, desc.description)\n return devdesc, interface",
"def device_info(devid: int = 0) -> str: # pragma: no cover\n numdev = jax.device_count()\n if devid >= numdev:\n raise RuntimeError(f\"Requested information for device {devid} but only {numdev} present.\")\n dev = jax.devices()[devid]\n if dev.platform == \"cpu\":\n info = \"CPU\"\n else:\n info = f\"{dev.platform.upper()} ({dev.device_kind})\"\n return info",
"def enumerate_candidates(cls, urlparts: SplitResult,\n vdict: Dict[str, int],\n pdict: Dict[int, Dict[str, int]],\n default_vendor: int) -> \\\n Tuple[List[Tuple[UsbDeviceDescriptor, int]], Optional[int]]:\n specifiers = urlparts.netloc.split(':')\n plcomps = specifiers + [''] * 2\n try:\n plcomps[0] = vdict.get(plcomps[0], plcomps[0])\n if plcomps[0]:\n vendor = to_int(plcomps[0])\n else:\n vendor = None\n product_ids = pdict.get(vendor, None)\n if not product_ids:\n product_ids = pdict[default_vendor]\n plcomps[1] = product_ids.get(plcomps[1], plcomps[1])\n if plcomps[1]:\n try:\n product = to_int(plcomps[1])\n except ValueError as exc:\n raise UsbToolsError('Product %s is not referenced' %\n plcomps[1]) from exc\n else:\n product = None\n except (IndexError, ValueError) as exc:\n raise UsbToolsError('Invalid device URL: %s' %\n urlunsplit(urlparts)) from exc\n sernum = None\n idx = None\n bus = None\n address = None\n locators = specifiers[2:]\n if len(locators) > 1:\n try:\n bus = int(locators[0], 16)\n address = int(locators[1], 16)\n except ValueError as exc:\n raise UsbToolsError('Invalid bus/address: %s' %\n ':'.join(locators)) from exc\n else:\n if locators and locators[0]:\n try:\n devidx = to_int(locators[0])\n if devidx > 255:\n raise ValueError()\n idx = devidx\n if idx:\n idx = devidx-1\n except ValueError:\n sernum = locators[0]\n candidates = []\n vendors = [vendor] if vendor else set(vdict.values())\n vps = set()\n for vid in vendors:\n products = pdict.get(vid, [])\n for pid in products:\n vps.add((vid, products[pid]))\n devices = cls.find_all(vps)\n if sernum:\n if sernum not in [dev.sn for dev, _ in devices]:\n raise UsbToolsError(\"No USB device with S/N %s\" % sernum)\n for desc, ifcount in devices:\n if vendor and vendor != desc.vid:\n continue\n if product and product != desc.pid:\n continue\n if sernum and sernum != desc.sn:\n continue\n if bus is not None:\n if bus != desc.bus or address != desc.address:\n continue\n candidates.append((desc, ifcount))\n return candidates, idx",
"def set_value(self, device_name, val):\n epics.caput(device_name, val)\n\n\t\t#mu = mu\n\t\t#sig = math.sqrt(abs(mu))\n\t\t#y = (float(x)-mu)/(sig)",
"def _vendor_request(self, direction, request, length_or_data=0, value=0, index=0, timeout=1000):\n return self.device.ctrl_transfer(\n direction | usb.TYPE_VENDOR | usb.RECIP_DEVICE,\n request, value, index, length_or_data, timeout)",
"def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu",
"def test_get_pci_device_by_moid(self):\n pass"
] | [
"0.57974625",
"0.5661134",
"0.56355417",
"0.55662423",
"0.5533631",
"0.5503334",
"0.5503334",
"0.5503334",
"0.5503334",
"0.5503334",
"0.5503334",
"0.5413959",
"0.53457385",
"0.5296801",
"0.52907497",
"0.52587706",
"0.52237445",
"0.52177995",
"0.52067405",
"0.517088",
"0.5161193",
"0.51514477",
"0.5135221",
"0.5128968",
"0.5128023",
"0.51059866",
"0.50786406",
"0.50738555",
"0.5057736",
"0.505613"
] | 0.6276907 | 0 |
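A quick usage illustration of the regex split performed by parse_value above (the vendor/device IDs are made up; assumes parse_value and its imports are in scope):

print(parse_value("usb(046d:c52b)"))   # -> ('usb', '046d', 'c52b')
print(parse_value("pci(8086:1916)"))   # -> ('pci', '8086', '1916')
# The pattern requires exactly four non-colon characters on each side of the
# colon, so a value such as "usb(1:2)" fails the assert.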
I have some docs | def docs(): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def document(self):\n ...",
"def get_docs(self):\n return self.retrieve_docstring()",
"def get_docs(self):\n return self.retrieve_docstring()",
"def get_docs(self):\n return self.retrieve_docstring()",
"def docs(self):\n self._doc_info = DocumentationURL()\n self._doc_info.show()",
"def get_documentation(self, *args, **dargs):\n pass",
"def apiDocs():\n\treturn render_template('apiDocs.html')",
"def get_docs(self):\n return get_view_description(self.callback)",
"def documento():\r\n\tpass",
"def show_documentation(self):\n self.docs = documentation.Documentation()",
"def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")",
"def __call__(self, doc):\n return doc",
"def getDocsList(self):\n return self.docs_list",
"def getDoc(self):\r\n return self.__doc__",
"def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()",
"def merge_docs(self):",
"def init_doc(self):\n raise NotImplementedError()",
"def items(self):\n return self.docs.items()",
"def documents(self):\r\n return doc.Documents(self)",
"def test_doc():\n pass",
"def build_document(self):\n pass",
"def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)",
"def test_client_document_list(self):\n pass",
"def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self",
"def test_all_documents(self):",
"def test_doc1(self):\n assert models.review.__doc__ is not None",
"def fini_doc(self):\n raise NotImplementedError()",
"def test_method_docs(self):\n for func in dir(DBStorage):\n self.assertTrue(len(func.__doc__) > 0)",
"def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p",
"def has_doc() -> None:"
] | [
"0.7454927",
"0.7446354",
"0.7446354",
"0.7446354",
"0.73413706",
"0.7109946",
"0.71089756",
"0.7055562",
"0.70452785",
"0.69460195",
"0.68773216",
"0.6874397",
"0.6845154",
"0.6833291",
"0.67916155",
"0.66953266",
"0.6682665",
"0.66622543",
"0.66396433",
"0.66306525",
"0.65615904",
"0.6557985",
"0.65171283",
"0.6508592",
"0.64965737",
"0.64489305",
"0.6444214",
"0.6439048",
"0.64372736",
"0.6412439"
] | 0.87407255 | 0 |
Writes objects in text format to the Indices file. | def gravarArquivoIndices(indices):
arq = open("arquivoIndices.txt", "w")
for i in indices.indices:
linha = i.codigo + "," + str(i.indice) + "," + str(i.excluido) + "\n"
arq.write(linha)
arq.close()
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def obj_index(self) -> str:\n return str(self._data[\"index\"])",
"def WriteIndexContent(indexList, formatindex, fpindex):#{{{\n if formatindex == FORMAT_TEXT:\n numRecord = len(indexList[0])\n idList = indexList[0]\n v1 = indexList[1]\n v2 = indexList[2]\n v3 = indexList[3]\n for i in range(numRecord):\n print(idList[i], v1[i], v2[i], v3[i], file=fpindex)\n else:\n maxOffset = max(indexList[2])\n\n numRecord = len(indexList[0])\n\n idList = indexList[0]\n v1 = indexList[1]\n v3 = indexList[3]\n if maxOffset > LargeFileThresholdSize:\n v2 = indexList[2]\n else: #'I'\n v2 = array('I', [x for x in indexList[2]])\n\n dumpedliststr = '\\n'.join(s for s in idList)\n\n vI=array('I')\n vI.append(len(dumpedliststr))\n vI.tofile(fpindex)\n fpindex.write(dumpedliststr)\n\n vI=array('I')\n vI.append(numRecord)\n vI.tofile(fpindex)\n\n v1.tofile(fpindex)\n v2.tofile(fpindex)\n v3.tofile(fpindex)",
"def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary",
"def __getitem__(self, idx):\n\n text, label = self.data[idx]\n ids = self.get_ids(text)\n\n return {\"ids\": ids, \"label\": label}",
"def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta \n #elif ( file_format.upper() == 'COMPACT' ):\n # read_func = read_compact\n #elif ( file_format.upper() == 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n self.colcount = 0\n for name, seq in read_func(file_obj):\n cseq, l = self.get_alignment_seq_object(seq)\n self[name] = cseq\n self.colcount = max(l, self.colcount)",
"def make_iob(txt, ents, etypes):\r\n index = 0\r\n for i in ents:\r\n start = txt.index(i, index) #get the start of the entity\r\n tmp1, tmp2 = txt[:start], txt[start:]\r\n tmp1 += \" eeeeeeeeeeeeeeeeeeee \"\r\n txt = ' '.join([tmp1, tmp2])\r\n index = start + len(i) + len(\" eeeeeeeeeeeeeeeeeeee \")\r\n \r\n line_tokens = word_tokenize(txt)#tokenize the text\r\n \r\n #get the starting positions of the entities\r\n starts = []\r\n try: #in order to handle the last case where list.index doesnt finds anything\r\n while line_tokens.index(\"eeeeeeeeeeeeeeeeeeee\") > -1:\r\n tmp = line_tokens.index(\"eeeeeeeeeeeeeeeeeeee\")\r\n starts.append(tmp)\r\n del line_tokens[tmp]\r\n except ValueError:\r\n pass\r\n \r\n line_iob = ['O'] * len(line_tokens)# the iob tags of the whole text\r\n \r\n for i in range(0, len(ents)):\r\n #tokenize the entities\r\n entity_tokens = word_tokenize(ents[i])\r\n tmp = 'I-'+etypes[i]\r\n entity_iob = [tmp] * len(entity_tokens)\r\n entity_iob[0] = \"B-\" + etypes[i]\r\n \r\n #make changes to the iob tags to match the entities\r\n for j in range(0, len(entity_iob)):\r\n line_iob[starts[i] + j] = entity_iob[j]\r\n \r\n #the format is: token IOB-etypes\r\n for i in range(0, len(line_tokens)):\r\n output.write(\"{}\\t{}\\n\".format(line_tokens[i], line_iob[i]))\r\n output.write('\\n')#new document\r",
"def _get_objects(self,label_fh):\n objects = []\n for line in label_fh.readlines():\n try:\n object = {}\n line = line.replace(u'\\ufeff', '')\n if line != '':\n x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n p1 = (x1, y1)\n p2 = (x2, y2)\n p3 = (x3, y3)\n p4 = (x4, y4)\n object['polygon'] = [p1,p2,p3,p4]\n objects.append(object)\n except:\n pass\n return objects",
"def loadOBJModel(file_name):\n file_text = open(file_name)\n text = file_text.readlines()\n vertex = []\n normals = []\n uv = []\n faces_vertex = []\n faces_normal = []\n faces_uv = []\n for line in text:\n info = line.split(\" \")\n if info[0] == \"v\":\n vertex.append(\n (float(info[1]), float(info[2]) - 0.1, float(info[3])))\n elif info[0] == \"vn\":\n normals.append((float(info[1]), float(info[2]), float(info[3])))\n elif info[0] == \"vt\":\n uv.append((float(info[1]), float(info[2])))\n elif info[0] == \"f\":\n p1 = info[1].split(\"/\")\n p2 = info[2].split(\"/\")\n p3 = info[3].split(\"/\")\n faces_vertex.append((int(p1[0]), int(p2[0]), int(p3[0])))\n faces_uv.append((int(p1[1]), int(p2[1]), int(p3[1])))\n faces_normal.append((int(p1[2]), int(p2[2]), int(p3[2])))\n return vertex, normals, uv, faces_vertex, faces_normal, faces_uv",
"def ReadIndex_text(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n idList = []\n v1 = array('B') # dbfile index\n v2 = array('L') # offset\n v3 = array('I') # block size\n apd1 = idList.append\n apd2 = v1.append\n apd3 = v2.append\n apd4 = v3.append\n indexFileHeaderText = []\n origdbname=\"\"\n origversion=\"\"\n origext=\"\"\n origprefix=\"\"\n try:\n\n hdl = mybase.ReadLineByBlock(indexfile)\n lines = hdl.readlines()\n while lines != None:\n for line in lines:\n if not line or line[0] == \"#\":\n continue\n strs = line.split()\n if strs[0] == \"DEF_DBNAME\":\n if len(strs)>=2:\n origdbname=strs[1]\n elif strs[0] == \"DEF_VERSION\":\n if len(strs)>=2:\n origversion=strs[1]\n elif strs[0] == \"DEF_EXTENSION\":\n if len(strs)>=2:\n origext=strs[1]\n elif strs[0] == \"DEF_PREFIX\":\n if len(strs)>=2:\n origprefix = strs[1]\n else:\n apd1(strs[0])\n apd2(int(strs[1]))\n apd3(int(strs[2]))\n apd4(int(strs[3]))\n lines = hdl.readlines()\n\n indexList.append(idList)\n indexList.append(v1)\n indexList.append(v2)\n indexList.append(v3)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n\n numRecord = len(idList)\n lastDBFileIndex = v1[numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n if isPrintWarning:\n if origversion == \"\":\n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. \"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0],indexfile,\n origversion, version), file=sys.stderr)\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)",
"def get_formatted_data(line, indices=None):\n\tfile_data = str.strip(line).split(' ')\n\tif indices is None:\n\t\tdata = list(range(len(file_data)))\n\telse:\n\t\tdata = list(indices)\n\t\t\n\tfor i, file_column in enumerate(data):\n\t\tif file_column is not None:\n\t\t\tdatum = file_data[file_column]\n\t\telse:\n\t\t\tdatum = ' '\n\t\tif '.' in datum:\n\t\t\ttry:\n\t\t\t\tdatum = float(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdatum = int(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdata[i] = datum\n\treturn data",
"def load_annotation_at_index(self, index):\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n if not self.cfg['use_diff']:\n # Exclude the samples labeled as difficult\n non_diff_objs = [\n obj for obj in objs if int(obj.find('difficult').text) == 0]\n # if len(non_diff_objs) != len(objs):\n # print 'Removed {} difficult objects'.format(\n # len(objs) - len(non_diff_objs))\n objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n #overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n #seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n\n\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n cls = self._class_to_num[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n #overlaps[ix, cls] = 1.0\n #seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n #overlaps = scipy.sparse.csr_matrix(overlaps)\n return {'boxes' : boxes,\n 'gt_classes': gt_classes}",
"def index_object(idxs=None):",
"def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return",
"def __getInterProIndex(self, filePath):\n\n interProD = {}\n encodingD = {\"encoding\": \"ascii\"} if sys.version_info[0] < 3 else {}\n rowL = self.__mU.doImport(filePath, fmt=\"tdd\", rowFormat=\"list\", **encodingD)\n for row in rowL:\n try:\n interProId = row[0].strip().upper()\n interProType = row[1].strip()\n descr = row[2].strip()\n interProD[interProId] = {\"description\": descr, \"type\": interProType}\n except Exception:\n pass\n #\n return interProD",
"def parse_rec(json_dataset, index):\n info = voc_info(json_dataset)\n data_path = info['data_path']\n image_file = os.path.join(data_path, 'images', index + '.jpg')\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\n\n height, width = cv2.imread(image_file).shape[:2]\n annopath = os.path.join(data_path, 'annotations', '{:s}.txt')\n filename = annopath.format(index)\n rotate = 0\n objects = []\n with open(filename) as f:\n line = f.readline()\n while line:\n parts = line.split()\n if parts[0] == 'rotate':\n rotate = int(parts[1])\n assert rotate == 0\n else:\n obj_struct = {'name': parts[0]}\n x1 = min(max(int(parts[1]), 0), width - 1)\n y1 = min(max(int(parts[2]), 0), height - 1)\n x2 = min(max(int(parts[3]), 0), width - 1)\n y2 = min(max(int(parts[4]), 0), height - 1)\n obj_struct['bbox'] = [x1, y1, x2, y2]\n obj_struct['truncated'] = int(parts[5])\n obj_struct['difficult'] = 0\n objects.append(obj_struct)\n line = f.readline()\n\n return objects",
"def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)",
"def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping",
"def get_index_repr(self):\r\n return \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.trf_id,\r\n self.trf_period,\r\n self.trf_array_length,\r\n self.trf_array_gc,\r\n self.trf_pvar,\r\n self.trf_gi,\r\n self.trf_l_ind,\r\n self.trf_r_ind,\r\n self.trf_chr)",
"def indirectobject(self, index, io):\n if self.indices != '':\n self.indices += ' '\n self.indices += '%d %d' % (index, len(self.ios))\n self.ios += io\n self.objects.append(index)",
"def index_to_algebraic(pos: tuple[int, int]) -> str:\r\n return INDEX_TO_FILE[pos[1]] + INDEX_TO_RANK[pos[0]]",
"def IndexFileToPrefixInfo(index_fp):\n\n IX_FH = open(index_fp, \"r\")\n\n header_line = IX_FH.readline()\n\n c_line = \"placeholder\"\n\n # prefix is an important list that holds [[nLeading i, indexseq s, name s],...]\n # nLeading is number of n's before index \n prefix = []\n line_num = 0\n\n while c_line != \"\":\n c_line = IX_FH.readline().rstrip()\n line_num += 1\n\n line_split = c_line.split('\\t')\n\n if len(line_split) > 2:\n raise Exception(\"In indexfile, found a line that has more than \"\\\n + \"2 tsvs.\\n Filename: {} Line Number: {}\".format(\n index_fp, line_num))\n #Note name & index are in form H1, ATCACGAG\n name, index = line_split \n\n # What does this account for?\n if (re.search(r'name', name ,re.IGNORECASE)):\n continue\n\n nLeading = None\n indexseq = None\n\n match = re.search(r'^([nN]*)([ACGT]+)$',index)\n if not match:\n raise Exception(\"Invalid index sequence {}\".format(index))\n else:\n nLeading = len(match[0])\n indexseq = match[1]\n\n if (nLeading == None ) or (indexseq == None) or (name == ''):\n raise Exception(line)\n prefix.append([nLeading, indexseq, name])\n\n IX_FH.close()\n\n report_str = \"Read {} indices from {}\\n\".format(len(prefix),index_fp)\n prefixNames = [x[2] for x in prefix]\n\n \n return {\n \"report_str\": report_str,\n \"prefixNames\": prefixNames,\n \"prefix\": prefix\n }",
"def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index",
"def _construct_index(self, row) -> str:\n chrom = row[\"CHROM\"]\n pos = row[\"POS\"]\n ref = row[\"REF\"]\n alt = row[\"ALT\"]\n\n return f\"{chrom}_{pos}_{ref}>{alt}\"",
"def __getitem__(self, index: int):\n path, label = self.paths[index], self.labels[index]\n data = self._read_input_file(path)\n data = self._apply_transform(data)\n\n return {\"data\": data, \"target\": label}",
"def dump(self):\n res = []\n #res.append(\"Submeshes: %d\" % len(self.submeshes))\n #res.append(\"IdxBuf: 0x%04X bytes\" % len(self.idx_buf))\n #res.append(\"PrimFmt: 0x%04X (%s)\" % (\n # self.prim_fmt_id, self.prim_fmt))\n #res.append(\"IdxType: 0x%02X (%s)\" % (\n # self.header['idx_type'], self.idx_fmt,\n #))\n #res.append(\"IdxCnt: %d\" % self.header['idx_cnt'])\n #res.append(\"VisGrp: %d\" % self.header['visibility_group'])\n #res.append(\"Unknown: 0x%08X 0x%08X 0x%08X\" % (\n # self.header['unk08'],\n # self.header['unk10'],\n # self.header['unk34'],\n #))\n #return '\\n'.join(res).replace('\\n', '\\n ')\n\n return \"%4d│%04X│%04X %-24s│%02X %s│%5d│%5d│%08X│%08X│%08X\" %(\n len(self.submeshes),\n len(self.idx_buf),\n self.prim_fmt_id, self.prim_fmt,\n self.header['idx_type'], self.idx_fmt,\n self.header['idx_cnt'],\n self.header['visibility_group'],\n self.header['unk08'], self.header['unk10'],\n self.header['unk34'],\n )",
"def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index",
"def to_iob(self) -> List[List[List[str]]]:\n iobs: List[List[List[str]]] = [\n [[\"O\"] * (len(tokens) - 1) for _ in self.attributes]\n for tokens in self.word_alignments\n ]\n for ne in self.nes:\n start_line: int = ne.token_offset.start.line_id\n start_offset: int = ne.token_offset.start.offset\n end_line: int = ne.token_offset.end.line_id\n end_offset: int = ne.token_offset.end.offset\n\n # 文を跨いだentityは除外\n if start_line != end_line:\n continue\n\n # 正解となるsubwordを含むwordまでタグ付\n attr_idx: int = self.attr2idx[ne.attribute]\n ne_start: int = self.sub2word[start_line][start_offset]\n ne_end: int = self.sub2word[end_line][end_offset - 1] + 1\n\n for idx in range(ne_start, ne_end):\n iobs[start_line][attr_idx][idx] = \"B\" if idx == ne_start else \"I\"\n\n return iobs",
"def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)",
"def WriteIndexHeader(indexFileHeaderText, formatindex, fpindex):#{{{\n if formatindex == FORMAT_TEXT:\n for s in indexFileHeaderText:\n print(s, file=fpindex)\n else:\n dumpedtext='\\n'.join(s for s in indexFileHeaderText)\n vI = array('I')\n vI.append(len(dumpedtext))\n vI.tofile(fpindex)\n fpindex.write(dumpedtext)",
"def OBJ(filename, pos=(0,0,0),\r\n rotation=(0,0,0), colorize=(1,1,1,1)):\r\n view.require_init()\r\n svertices = []\r\n snormals = []\r\n stexcoords = []\r\n sfaces = []\r\n\r\n material = None\r\n smtl = None\r\n for line in open(filename, \"r\"):\r\n if line.startswith('#'): continue\r\n values = line.split()\r\n if not values: continue\r\n if values[0] == 'v':\r\n v = list(map(float, values[1:4]))\r\n svertices.append(v)\r\n elif values[0] == 'vn':\r\n v = list(map(float, values[1:4]))\r\n snormals.append(v)\r\n elif values[0] == 'vt':\r\n stexcoords.append(list(map(float, values[1:3])))\r\n elif values[0] in ('usemtl', 'usemat'):\r\n material = values[1]\r\n elif values[0] == 'mtllib':\r\n path = os.path.split(filename)[0]\r\n smtl = {}\r\n mtl = None\r\n for line in open(os.path.join(path, values[1]), \"r\"):\r\n if line.startswith('#'): continue\r\n values = line.split()\r\n if not values: continue\r\n if values[0] == 'newmtl':\r\n smtl[values[1]] = None\r\n mtl = values[1]\r\n elif mtl is None:\r\n raise ValueError(\"mtl file doesn't start with newmtl stmt\")\r\n elif values[0] == 'map_Kd':\r\n tex = data.Texture(os.path.join(path, values[1]))\r\n smtl[mtl] = tex\r\n elif values[0]==\"Kd\":\r\n tex = data.BlankTexture(color=list(map(float, values[1:])))\r\n smtl[mtl] = tex\r\n elif values[0] == 'f':\r\n face = []\r\n texcoords = []\r\n norms = []\r\n for v in values[1:]:\r\n w = v.split('/')\r\n face.append(int(w[0]))\r\n if len(w) >= 2 and len(w[1]) > 0:\r\n texcoords.append(int(w[1]))\r\n else:\r\n texcoords.append(0)\r\n if len(w) >= 3 and len(w[2]) > 0:\r\n norms.append(int(w[2]))\r\n else:\r\n norms.append(0)\r\n sfaces.append((face, norms, texcoords, material))\r\n\r\n\r\n faces_ordered_by_material = {}\r\n for face in sfaces:\r\n v, n, t, m = face\r\n if m in faces_ordered_by_material:\r\n faces_ordered_by_material[m].append(face)\r\n else:\r\n faces_ordered_by_material[m] = [face]\r\n\r\n lists = []\r\n for i in faces_ordered_by_material:\r\n sfaces = faces_ordered_by_material[i]\r\n\r\n material = smtl[i]\r\n\r\n gl_list = data.DisplayList()\r\n gl_list.begin()\r\n current_tex = None\r\n for face in sfaces:\r\n vertices, normals, texture_coords, _m = face\r\n glBegin(GL_POLYGON)\r\n for i in range(len(vertices)):\r\n if normals[i] > 0:\r\n glNormal3fv(snormals[normals[i] - 1])\r\n if texture_coords[i] > 0:\r\n glTexCoord2fv(stexcoords[texture_coords[i] - 1])\r\n glVertex3fv(svertices[vertices[i] - 1])\r\n glEnd()\r\n gl_list.end()\r\n\r\n lists.append([gl_list, material])\r\n\r\n verts = []\r\n for i in sfaces:\r\n for x in i[0]:\r\n verts.append(svertices[x-1])\r\n\r\n return BasicMesh(lists, pos, rotation, verts, 1, colorize)"
] | [
"0.56216776",
"0.5584734",
"0.55089396",
"0.5442182",
"0.53701806",
"0.53672534",
"0.5365042",
"0.5347537",
"0.53458637",
"0.5334542",
"0.5304593",
"0.52780014",
"0.52368265",
"0.52331924",
"0.5230119",
"0.52129966",
"0.51751065",
"0.51676047",
"0.5156888",
"0.5112448",
"0.510058",
"0.5071025",
"0.50695306",
"0.5068904",
"0.50651175",
"0.5054637",
"0.505233",
"0.5043695",
"0.5034985",
"0.5022904"
] | 0.6343497 | 0 |
Return all row and column groups. | def get_regular_groups(self, grid, min=3):
row_groups = self._get_row_groups(grid.grid, models.patterns.RowPattern, min)
col_groups = self._get_row_groups(grid.grid.T, models.patterns.ColumnPattern, min)
return row_groups + col_groups | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def groups(self, *columns):\n # TODO: This really needs to just use Pandas.MultiIndex, stack(),\n # and pivot(). I just need to rework the FactorExprNode stuff\n # to produce a MultiIndex; then, this DataCube can just pass\n # in self._expr.\n raise NotImplementedError",
"def _get_groupings(dist_matrix_header, dist_matrix, groups, within=True,\r\n suppress_symmetry_and_hollowness_check=False):\r\n # Note: Much of this code is taken from Jeremy Widmann's\r\n # distances_by_groups() function, part of make_distance_histograms.py from QIIME 1.8.0.\r\n if not suppress_symmetry_and_hollowness_check:\r\n if not is_symmetric_and_hollow(dist_matrix):\r\n raise ValueError(\"The distance matrix must be symmetric and \"\r\n \"hollow.\")\r\n result = []\r\n group_items = groups.items()\r\n\r\n for i, (row_group, row_ids) in enumerate(group_items):\r\n row_indices = _get_indices(dist_matrix_header, row_ids)\r\n if within:\r\n # Handle the case where indices are the same so we need to omit\r\n # the diagonal.\r\n block = dist_matrix[row_indices][:, row_indices]\r\n\r\n size = len(row_indices)\r\n indices = []\r\n for i in range(size):\r\n for j in range(i, size):\r\n if i != j:\r\n indices.append(block[i][j])\r\n if indices:\r\n result.append((row_group, row_group, indices))\r\n else:\r\n # Handle the case where indices are separate: just return blocks.\r\n for j in range(i + 1, len(groups)):\r\n col_group, col_ids = group_items[j]\r\n col_indices = _get_indices(dist_matrix_header, col_ids)\r\n vals = dist_matrix[row_indices][:, col_indices]\r\n\r\n # Flatten the array into a single-level list.\r\n vals = map(None, vals.flat)\r\n if vals:\r\n result.append((row_group, col_group, vals))\r\n return result",
"def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()",
"def _get_row_groups(self, array, pattern, min):\n groups = []\n\n for row, col in zip(array, xrange(len(array))):\n start = 0\n\n while start + min <= len(row):\n size = 1\n orb_type = type(row[start])\n\n for cell in xrange(start + 1, len(row)):\n if orb_type != type(row[cell]):\n break\n size += 1\n start += 1\n\n if size >= min:\n groups.append(pattern(\n orb_type, size, (col, start - size + 1)\n ))\n\n start += 1\n\n return groups",
"def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)",
"def _get_md_row_groups(pieces):\n row_groups = []\n for piece in pieces:\n for rg in range(piece.get_metadata().num_row_groups):\n row_group = piece.get_metadata().row_group(rg)\n for c in range(row_group.num_columns):\n if not row_group.column(c).statistics:\n return []\n row_groups.append(row_group)\n return row_groups",
"def row_group_limits():\r\n from pymatgen import Element, periodic_table\r\n \r\n # Get all available elements in periodic table.\r\n rs = [e.row for e in periodic_table.Element]\r\n gs = [e.group for e in periodic_table.Element]\r\n \r\n return (max(rs), max(gs))",
"def _get_groups(X, y):\n if SK18:\n X, y = _indexable(X, y)\n return X, y, None",
"def get_groups_from_project_multiple(project_ID, characteristics_groups, return_all=False, return_matrix=False):\n needed_characteristic = [\n 'cell type',\n 'developmental stage',\n 'inferred cell type - ontology labels'\n ]\n \n # Read the metadata file using the API\n metadata, matrix, gene_names, cell_names = read_files(project_ID)\n\n # If there is not metadata for this project, return empty lists\n if metadata is None and return_matrix:\n return [], [], None, None\n elif metadata is None:\n return [], []\n\n metadata = process_metadata(metadata, cell_names)\n metadata_cells = len(metadata)\n number_genes = len(gene_names)\n \n rows = []\n combinations_subgroups = []\n \n for characteristics in characteristics_groups:\n # Initialitation of parameters\n subgroups = init_subgroups(metadata)\n project_characteristics = metadata.columns\n used_characteristics = []\n\n # Start the subgroup generation using the characteristics\n for characteristic in characteristics:\n # If the characteristic is not in the project, we skip it\n if characteristic not in project_characteristics:\n continue\n\n # For each subgroup created, divide it using the current characteristic\n subgroups_aux = []\n for subgroup in subgroups:\n subgroup_aux = get_subgroups(subgroup, characteristic)\n\n subgroups_aux = subgroups_aux + subgroup_aux\n\n # Check if we have lost cells\n cells_aux = sum([len(x['dataframe']) for x in subgroups_aux])\n if cells_aux < metadata_cells and characteristic not in needed_characteristic:\n continue \n \n # Update parameters\n used_characteristics = used_characteristics + [characteristic]\n subgroups = subgroups_aux\n\n # If there are no subgroups left, stop\n if not subgroups:\n break\n\n row = create_row(project_ID, subgroups, used_characteristics, metadata_cells, number_genes)\n\n # If the combination isnt repeated, save it\n if row not in rows:\n rows.append(row)\n combinations_subgroups.append(subgroups)\n\n # If all the combinations are needed\n if return_all:\n if return_matrix: # If matrix has to be returned\n return rows, combinations_subgroups, matrix, gene_names\n \n return rows, combinations_subgroups\n \n # Get best combination\n row, index = best_subgroup_combination(rows)\n subgroups = combinations_subgroups[index]\n\n if return_matrix: # If matrix has to be returned\n return row, subgroups, matrix, gene_names \n \n return row, subgroups",
"def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)",
"def getGroups(self):\n return [g[0] for g in grp.getgrall()]",
"def iter_groups(self):\n\t\treturn iter(self._groups)",
"def rows(self):\n for row in range(self.min_row, self.max_row+1):\n yield tuple('%s%d' % (get_column_letter(col), row)\n for col in range(self.min_col, self.max_col+1))",
"def grids(self):\n x = self.xvalues\n if self.ndim == 1:\n return x\n if self.ndim == 2:\n return x[None, :], x[:, None]\n if self.ndim == 3:\n return x[None, :, None], x[:, None, None], x[None, None, :]",
"def get_all_groups(self):\n return self.groups + ['all']",
"def column_groups(self) -> pulumi.Output[Optional[Sequence['outputs.DataSetColumnGroup']]]:\n return pulumi.get(self, \"column_groups\")",
"def all_groups(self):\n return self._all_groups",
"def __iter__(self):\n for g, xs in self._groups.items():\n dtype = dt.Struct(self._item_fields)\n df = ta.Column(dtype).append(\n tuple(\n tuple(\n self._parent._data.child_at(\n self._parent._data.type().get_child_idx(f.name)\n )[x]\n for f in self._item_fields\n )\n for x in xs\n )\n )\n\n yield g, df",
"def Group(self) -> _n_5_t_0:",
"def Group(self) -> _n_5_t_0:",
"def _iter_groups(self, df, y=None):\n groups = df.groupby(self.groupby).indices\n for key, sub_idx in groups.items():\n sub_df = df.iloc[sub_idx]\n if y is not None:\n # y is either a numpy array or a pd.Series so index accordingly\n sub_y = y.iloc[sub_idx] if type(y) is pd.Series else y[sub_idx]\n else:\n sub_y = None\n yield key, sub_df, sub_y",
"def get_group_names(self):\n return [self.frame.columns[i] for i in self.group_cols]",
"def calc_group(self, row, col):\n return ((row // 3) * 3 + (col // 3))",
"def groups(self):\n return []",
"def get_grid(self):\n self.fullws = []\n for row in self.word_search_grid:\n rowdata = []\n for column in row:\n rowdata += [column.entry.get()]\n self.fullws += [rowdata]\n self.logic.set_grid(self.fullws)",
"def _local_groupby(df_rows, axis=0):\n concat_df = pd.concat(df_rows, axis=axis)\n return concat_df.groupby(concat_df.index)",
"def get_variable_groups(all_inputs):\n row_length = len(all_inputs[0])\n for single_input in all_inputs[1:]:\n if len(single_input) != row_length:\n raise ValueError(\n \"Please make sure the length is the same if you want to input multiple values when the type of variables is t_array or t_mapping\")\n\n final_groups = list()\n row_length = len(all_inputs[0])\n col_length = len(all_inputs)\n for i in range(1, row_length):\n temp_list = list()\n for j in range(col_length):\n temp_list.append((all_inputs[j][0], all_inputs[j][i]))\n final_groups.append(temp_list)\n return final_groups",
"def read_row_group_arrays(file, rg, columns, categories, schema_helper, cats,\n selfmade=False, assign=None):\n out = assign\n maps = {}\n\n for column in rg.columns:\n if (_is_list_like(schema_helper, column) or\n _is_map_like(schema_helper, column)):\n name = \".\".join(column.meta_data.path_in_schema[:-2])\n else:\n name = \".\".join(column.meta_data.path_in_schema)\n if name not in columns:\n continue\n\n read_col(column, schema_helper, file, use_cat=name+'-catdef' in out,\n selfmade=selfmade, assign=out[name],\n catdef=out.get(name+'-catdef', None))\n\n if _is_map_like(schema_helper, column):\n if name not in maps:\n maps[name] = out[name].copy()\n else:\n if column.meta_data.path_in_schema[0] == 'key':\n key, value = out[name], maps[name]\n else:\n value, key = out[name], maps[name]\n out[name][:] = [dict(zip(k, v)) if k is not None else None\n for k, v in zip(key, value)]",
"def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]",
"def _possible_grids(self, num_windows):\n if num_windows < 2:\n end = 2\n else:\n end = num_windows // 2 + 1\n for rows in range(1, end):\n cols = int(math.ceil(num_windows / rows))\n yield (rows, cols, ROWCOL)\n if rows != cols:\n # also want the reverse test\n yield (cols, rows, COLROW)"
] | [
"0.61957633",
"0.6163433",
"0.6040524",
"0.5894949",
"0.58843005",
"0.58239186",
"0.58054775",
"0.5755542",
"0.56944263",
"0.56619304",
"0.56394035",
"0.56309",
"0.56161165",
"0.56100893",
"0.55997294",
"0.55897653",
"0.5586745",
"0.5579677",
"0.55577207",
"0.55577207",
"0.5531093",
"0.5509804",
"0.54825634",
"0.5482236",
"0.5477484",
"0.54531723",
"0.544685",
"0.5445219",
"0.5429757",
"0.5429742"
] | 0.68432385 | 0 |
Delete ACLs in a secret. | def delete_acls_for_secret(cls, secret, session=None):
session = cls.get_session(session=session)
for entity in secret.secret_acls:
entity.delete(session=session) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_acls_for_secret_model(cls, secret, session=None):\n cls.db_repo.delete_acls_for_secret(secret, session)",
"def delete_acl(self, sg):\n self.security_group_driver.delete_acl(sg)",
"def delete_secret_link(link_id):\n\n Secret_Link.objects.filter(link_id=link_id).delete()",
"def delete_bucket_acl(self, bucket, user):\n msg = \"delete_bucket_acl not implemented\"\n raise NotImplementedError(msg)",
"def DeleteAclSample():\n client = CreateClient()\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc)\n acl_entry = gdata.docs.data.AclEntry(\n scope=gdata.acl.data.AclScope(value='[email protected]', type='user'),\n role=gdata.acl.data.AclRole(value='reader'),\n )\n acl_entry = client.AddAclEntry(doc, acl_entry)\n client.DeleteAclEntry(acl_entry)",
"def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)",
"def delete(key, **kwargs):\n cluster_call(\n \"secret_delete\",\n key=key,\n **kwargs,\n confirm=f\"Delete secret {key}\",\n prefix=f\"Deleting secret {key}...\",\n postfix=\"deleted.\",\n )",
"def test_vault_delete_authorization_for_vault_section(self):\n pass",
"def test_delete_acl(self, env):\n # Create ACL Expressions\n self.suite_logger.debug(\"Create ACL Expressions\")\n expressions = [(1, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01'),\n (2, 'SrcMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:02:02:02')]\n env.switch[1].ui.create_acl(expressions=expressions)\n # Verify ACL Expression\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n # Verify first expression has been added\n expr_1 = {\"expressionId\": expressions[0][0],\n \"field\": expressions[0][1],\n \"mask\": expressions[0][2],\n \"data\": expressions[0][3]\n }\n assert expr_1 in expressions_table, \\\n \"Expression {0} was not added\".format(expressions[0])\n # Verify second expression has been added\n expr_2 = {\"expressionId\": expressions[1][0],\n \"field\": expressions[1][1],\n \"mask\": expressions[1][2],\n \"data\": expressions[1][3]\n }\n assert expr_2 in expressions_table,\\\n \"Expression {0} was not added\".format(expressions[1])\n # Delete Expression\n self.suite_logger.debug(\"Delete ACL Expression\")\n env.switch[1].ui.delete_acl(expression_ids=[(2, 'SrcMac'), ])\n # Verify Expression has been deleted\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n assert expr_2 not in expressions_table, \\\n \"Expression {0} was not deleted\".format(expressions[1])\n\n # Create ACL Actions\n self.suite_logger.debug(\"Create ACL Actions\")\n actions = [(1, 'Drop', ''),\n (2, 'Count', '')]\n env.switch[1].ui.create_acl(actions=actions)\n # Verify ACL Action\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n # Verify first action has been added\n act_1 = {\"actionId\": actions[0][0],\n \"action\": actions[0][1],\n \"param\": actions[0][2]\n }\n assert act_1 in actions_table, \"Action {0} was not added\".format(actions[0])\n # Verify second action has been added\n act_2 = {\"actionId\": actions[1][0],\n \"action\": actions[1][1],\n \"param\": actions[1][2]\n }\n assert act_2 in actions_table, \"Action {0} was not added\".format(actions[1])\n # Delete Action\n self.suite_logger.debug(\"Delete ACL Action\")\n env.switch[1].ui.delete_acl(action_ids=[(2, 'Count'), ])\n # Verify Action has been deleted\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n assert act_2 not in actions_table, \"Action {0} was not deleted\".format(actions[1])\n\n # Create ACL Rule\n self.suite_logger.debug(\"Create ACL Rule\")\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), ]\n env.switch[1].ui.create_acl(ports=[1, ], rules=rules)\n # Verify ACL Rule has been added\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n rule = {\"ruleId\": rules[0][0],\n \"expressionId\": rules[0][1],\n \"actionId\": rules[0][2],\n \"stage\": rules[0][3],\n \"enabled\": rules[0][4],\n \"priority\": rules[0][5]\n }\n assert rule in rules_table, \"Rule {0} was not added\".format(rules[0])\n # Delete Rule\n self.suite_logger.debug(\"Delete ACL Rule\")\n env.switch[1].ui.delete_acl(ports=[1, ], rule_ids=[1, ])\n # Verify Rule has been deleted\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n assert rule not in rules_table, \"Rule {0} was not deleted\".format(rules[0])",
"def delete_acl_rule(self, sgr):\n self.security_group_driver.delete_acl_rule(sgr)",
"def delete_access_list(self, loadbalancer):\n return loadbalancer.delete_access_list()",
"def delete_remote_access_session(arn=None):\n pass",
"def delete_vlan_acl(self, vlan, acl):\n raise NotImplementedError # pragma: no cover",
"def delete(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)",
"def delete_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover",
"def rbac_policy_delete(request, policy_id):\n neutronclient(request).delete_rbac_policy(policy_id)",
"def delete_secret_request(self, vault_name: str, secret_name: str) -> dict[str, Any]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/secrets/{secret_name}'\n response = self.http_request(\n 'DELETE', full_url=url, resource=self.get_vault_resource())\n return response",
"def delete_bucket_policy(Bucket=None):\n pass",
"def delete_bucketlist():\n pass",
"def delete_secret_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n secret_name = args['secret_name']\n\n response = client.delete_secret_request(vault_name, secret_name)\n\n outputs = copy.deepcopy(response)\n outputs['deletedDate'] = convert_timestamp_to_readable_date(\n outputs['deletedDate'])\n outputs['scheduledPurgeDate'] = convert_timestamp_to_readable_date(\n outputs['scheduledPurgeDate'])\n\n readable_response = copy.deepcopy(outputs)\n outputs['attributes'] = convert_time_attributes_to_iso(outputs['attributes'])\n outputs[VAULT_NAME_CONTEXT_FIELD] = vault_name\n\n readable_response['secretId'] = readable_response.pop('id')\n readable_output = tableToMarkdown(f'Delete {secret_name}',\n readable_response,\n ['secretId', 'recoveryId', 'deletedDate',\n 'scheduledPurgeDate'], removeNull=True,\n headerTransform=pascalToSpace)\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='recoveryId',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results",
"def delete_access_list(self):\n return self.manager.delete_access_list(self)",
"def post_access_control_list_delete(self, resource_id, resource_dict):\n pass",
"def deleteSecret(self, clientIP, not_before):\n\n return self._secret_table.delete_item(ip_address=clientIP,not_before=not_before)",
"def pre_access_control_list_delete(self, resource_id):\n pass",
"def delete_permissions(queue_url, label):\n client = boto3.client('sqs')\n try:\n client.remove_permission(QueueUrl=queue_url, Label=label)\n except ClientError as e:\n if e.response['Error']['Code'] != 'InvalidParameterValue':\n raise\n # We are failing silently since the label, which we would like to delete, does not exist",
"def delete_access_list(self, loadbalancer):\n uri = \"/loadbalancers/%s/accesslist\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_delete(uri)\n return body",
"def delete(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.delete(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )",
"def test_remove_authz_wrong(self):\n self.test_add_authz()\n self.app.delete(\"/config/authorize?operation=config\", status=400)\n self.app.delete(\"/config/authorize?dn=/DN=a.test.user\", status=204)",
"def main_role_delete(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n id_ = opts[\"id\"]\n client.delete_role(opts[\"formation\"], id_)\n logger.info(f\"Deleted role with id=\\\"{id_}\\\"\")",
"def remove_access(acl, list_to_edit):\n post_key = '%s_remove_' % list_to_edit\n removal_keys = [k for k in request.POST.keys() if k.startswith(post_key)]\n for key in removal_keys:\n model_type = models.UserGroup\n if list_to_edit.startswith('user'):\n model_type = models.UserProfile\n key_id = int(key.replace(post_key, ''))\n datastore_object = model_type.get_by_id(key_id)\n acl.__getattribute__(list_to_edit).remove(datastore_object.key())"
] | [
"0.7304661",
"0.7133021",
"0.6473207",
"0.6455759",
"0.6440277",
"0.6435884",
"0.64185977",
"0.6405398",
"0.63174623",
"0.6266833",
"0.6206975",
"0.6191693",
"0.61805665",
"0.6168148",
"0.6163188",
"0.60189515",
"0.59845203",
"0.5972625",
"0.59346336",
"0.5915101",
"0.59063804",
"0.5896104",
"0.5870325",
"0.5860186",
"0.5825764",
"0.58130634",
"0.5795946",
"0.57953167",
"0.5767018",
"0.5766786"
] | 0.72501093 | 1 |
Updates a torch model so that input minibatches are parallelized across the batch dimension to utilise multiple gpus. If model parallel is set to True and execution is in test mode, then model is partitioned to perform full volume inference. This assumes the model has been created, that the optimizer has not yet been created, and the model has not been adjusted twice. This method should not be called externally. Use instead adjust_model_for_gpus or adjust_mean_teacher_model_for_gpus | def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,
model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:
if config.use_gpu:
model = model.cuda()
logging.info("Adjusting the model to use mixed precision training.")
# If model parallel is set to True, then partition the network across all available gpus.
if config.use_model_parallel:
devices = config.get_cuda_devices()
assert devices is not None # for mypy
model.partition_model(devices=devices) # type: ignore
else:
logging.info("Making no adjustments to the model because no GPU was found.")
# Update model related config attributes (After Model Parallel Activated)
config.adjust_after_mixed_precision_and_parallel(model)
# DataParallel enables running the model with multiple gpus by splitting samples across GPUs
# If the model is used in training mode, data parallel is activated by default.
# Similarly, if model parallel is not activated, data parallel is used as a backup option
use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)
if config.use_gpu and use_data_parallel:
logging.info("Adjusting the model to use DataParallel")
# Move all layers to the default GPU before activating data parallel.
# This needs to happen even though we put the model to the GPU at the beginning of the method,
# but we may have spread it across multiple GPUs later.
model = model.cuda()
model = DataParallelModel(model, device_ids=config.get_cuda_devices())
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_model_parallel(self, global_rank: int, world_size: int) -> None:\n app_state = AppState()\n\n # we initialize megatron-lm model parallel and data parallel groups\n # after initializing DDP with PTL.\n if app_state.model_parallel_size is not None:\n # destroy groups in case they have already been created\n # this happens with multiple calls to trainer.test for example\n parallel_state.destroy_model_parallel()\n if torch.distributed.is_initialized():\n parallel_state.initialize_model_parallel(\n tensor_model_parallel_size=app_state.tensor_model_parallel_size,\n pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,\n virtual_pipeline_model_parallel_size=app_state.virtual_pipeline_model_parallel_size,\n pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,\n use_fp8=app_state.use_fp8,\n )\n\n # assert that fake tp and pp rank match after model parallel init\n assert app_state.tensor_model_parallel_rank == parallel_state.get_tensor_model_parallel_rank()\n assert app_state.pipeline_model_parallel_rank == parallel_state.get_pipeline_model_parallel_rank()\n\n app_state.tensor_model_parallel_group = parallel_state.get_tensor_model_parallel_group()\n app_state.data_parallel_group = parallel_state.get_data_parallel_group()\n app_state.data_parallel_rank = parallel_state.get_data_parallel_rank()\n app_state.data_parallel_size = parallel_state.get_data_parallel_world_size()\n app_state.pipeline_model_parallel_group = parallel_state.get_pipeline_model_parallel_group()\n\n # create MPI process group for UCX-based communication APIs\n if app_state.init_mpi_proc_group:\n torch.distributed.new_group(backend='mpi')",
"def adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n # Adjusting twice causes an error.\n if self.is_model_adjusted:\n logging.debug(\"model_and_info.is_model_adjusted is already True\")\n\n if self._optimizer:\n raise ValueError(\"Create an optimizer only after creating and adjusting the model.\")\n\n self._model = ModelAndInfo._adjust_for_gpus(model=self._model,\n config=self.config,\n model_execution_mode=self.model_execution_mode)\n\n self.is_model_adjusted = True\n logging.debug(\"model_and_info.is_model_adjusted set to True\")",
"def partition_data_parallel(\n graph: GraphModule,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer],\n params_buffers: Dict[str, torch.Tensor],\n named_states: Dict[str, Any],\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n mesh: DeviceMesh,\n parallel_style: DataParallelStyle,\n input_batch_dim: int,\n) -> GraphModule:\n num_params_buffers = len(params_buffers)\n flattened_states = pytree.tree_flatten(named_states)[0]\n num_states = len(flattened_states)\n\n changed = graph.graph.eliminate_dead_code()\n if changed:\n graph.recompile()\n\n # 1. First build up data parallel strategies for the whole graph\n strategy_map = build_data_parallel_strategies(\n graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim\n )\n\n # 2. Next we mark the data parallel strategy for each node base on\n # the parallel_style\n mark_data_parallel_shardings(\n graph,\n num_parameters=num_params_buffers,\n num_states=num_states,\n dp_strategy_map=strategy_map,\n parallel_mode=parallel_style,\n )\n\n # 3. Partition the single machine graph to the distribute graph\n partitioned_graph = partitioner(graph)\n\n # preserve node types for the expanded graph\n for node in partitioned_graph.graph.nodes:\n if node in strategy_map:\n node_strategy = strategy_map[node]\n if isinstance(node_strategy, DataParallelStrategy):\n node.meta[\"node_type\"] = node_strategy.node_type\n elif isinstance(node_strategy, TupleStrategy):\n node.meta[\"node_type\"] = NodeType.NON_TENSOR\n else:\n raise RuntimeError(f\"Unknown node strategy {node_strategy}\")\n else:\n # if the nodes are expanded nodes (collectives), we mark them\n # the same type as the input node.\n input_node = node.all_input_nodes[0]\n node.meta[\"node_type\"] = input_node.meta[\"node_type\"]\n\n # 4. Last, inplace partition the weights and optim states to\n # DTensors base on the parallel style\n accessor = NamedMemberAccessor(model)\n for param_key, param in params_buffers.items():\n placement: Placement = Replicate()\n if parallel_style == DataParallelStyle.FULLY_SHARD:\n placement = Shard(0)\n elif parallel_style != DataParallelStyle.REPLICATE:\n raise RuntimeError(f\"parallel style {parallel_style} not supported yet\")\n\n dtensor_param = distribute_tensor(param, mesh, [placement])\n # update re-parameterized module param dict and optim states dict to DTensor\n params_buffers[param_key] = dtensor_param.to_local()\n # update module parameters to DTensor\n accessor.set_tensor(param_key, dtensor_param)\n\n # update the optimizer state key and values to DTensor\n if optimizer is not None and param in optimizer.state:\n param_states = named_states[param_key]\n param_dtensor_states = {}\n for state_key, state_val in param_states.items():\n if isinstance(state_val, torch.Tensor) and state_val.ndim > 0:\n # shard/replicate non-scalar tensors, for scalar tensor, we\n # don't do anything\n dtensor_state = distribute_tensor(state_val, mesh, [placement])\n param_dtensor_states[state_key] = dtensor_state\n param_states[state_key] = dtensor_state.to_local()\n else:\n param_dtensor_states[state_key] = state_val\n\n optimizer.state.pop(param) # type: ignore[call-overload]\n optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index]\n\n return partitioned_graph",
"def optimize_model(input,\n model_type='bert',\n num_heads=0,\n hidden_size=0,\n optimization_options=None,\n opt_level=0,\n use_gpu=False,\n only_onnxruntime=False):\n (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]\n\n temp_model_path = None\n if opt_level > 1: # Optimization specified for an execution provider.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, opt_level=opt_level)\n elif run_onnxruntime:\n # Use Onnxruntime to do optimizations (like constant folding and cast elimation) that is not specified to exection provider.\n # CPU provider is used here so that there is no extra node for GPU memory copy.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)\n\n model = load_model(temp_model_path or input, format=None, load_external_data=True)\n\n if model.producer_name and producer != model.producer_name:\n logger.warning(\n f\"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. Please specify correct --model_type parameter.\"\n )\n\n if optimization_options is None:\n optimization_options = BertOptimizationOptions(model_type)\n\n optimizer = optimizer_class(model, num_heads, hidden_size)\n\n if not only_onnxruntime:\n optimizer.optimize(optimization_options)\n\n # Remove the temporary model.\n if temp_model_path:\n os.remove(temp_model_path)\n logger.debug(\"Remove tempoary model: {}\".format(temp_model_path))\n\n optimizer.model.producer_name = \"onnxruntime.transformers\"\n from onnxruntime import __version__ as onnxruntime_version\n optimizer.model.producer_version = onnxruntime_version\n\n return optimizer",
"def configure_ddp(self):\n\n if (hasattr(self.model, 'megatron_amp_o2') and self.model.megatron_amp_o2) or (\n hasattr(self.model, 'with_distributed_adam') and self.model.with_distributed_adam\n ):\n # do not use DDP if using megatron amp O2 or distributed optimizer\n self._model = _LightningModuleWrapperBase(self.model)\n else:\n app_state = AppState()\n\n if app_state.model_parallel_size is not None:\n\n logging.info(f\"Configuring DDP for model parallelism.\")\n\n # With model parallelism, multiple GPUs form a large \"logical GPU\"\n # this means that data parallel groups span multiple GPUs\n # and are non-trivial\n # TODO: for megatron-lm self.model is a list\n # Removing self.pre_configure_ddp() as DDP's 'find_unused_parameters' now defaults\n # to False in PTL 2.0 and hence pre_configure_ddp() is removed in ddp.py\n # self.pre_configure_ddp()\n # device_ids = self.determine_ddp_device_ids()\n self._model = DistributedDataParallel(\n _LightningModuleWrapperBase(self.model),\n process_group=parallel_state.get_data_parallel_group(),\n **self._ddp_kwargs,\n )\n\n if self.no_ddp_communication_hook:\n # When using custom gradient accumulation and allreduce, disable\n # DDP communication hook that works on the gradient bucket.\n # Instead, use the custom gradient function and communication hook,\n # which is defined in the master optimizer wrapper.\n self._model.require_backward_grad_sync = False\n self._model.register_comm_hook(None, noop_hook)\n\n else:\n super().configure_ddp()",
"def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)",
"def may_data_parallel(model):\n if torch.cuda.device_count() > 1:\n model = TransparentDataParallel(model)\n return model",
"def parallelize(self):\n self.parallel = True\n self.network = torch.nn.DataParallel(self.network)",
"def parallelize(self):\r\n self.parallel = True\r\n self.network = torch.nn.DataParallel(self.network)",
"def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)",
"def initialize_multitask_model(\n *,\n model_def: nn.Module,\n input_spec: Dict[Tuple[Tuple[str, str], ...],\n Sequence[Union[Tuple[Tuple[int, ...], jnp.dtype],\n Tuple[int, ...]]]],\n config: ml_collections.ConfigDict,\n rngs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]],\n) -> Tuple[PyTree, PyTree, int, Optional[Dict[str, float]]]:\n\n def init_fn(model_def):\n for kwargs, in_spec in input_spec.items():\n\n if config.get('batch_sizes') is not None:\n batch_size = config.batch_sizes.get(dict(kwargs)['dataset'])\n else:\n batch_size = config.batch_size\n\n batch_size = (batch_size // jax.device_count()) if batch_size else None\n\n input_shapetype = [\n debug_utils.input_spec_to_jax_shape_dtype_struct(\n spec, batch_size=batch_size) for spec in in_spec\n ]\n dummy_input = []\n for in_st in input_shapetype:\n dummy_input.append(jnp.zeros(in_st.shape, in_st.dtype))\n model_def(\n *dummy_input, train=False, debug=False, **dict(kwargs))\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def _initialize_model(rngs):\n \"\"\"Initialization function to be jitted.\"\"\"\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state\n\n if not isinstance(rngs, dict):\n rngs = {'params': rngs}\n init_params, init_model_state = _initialize_model(rngs)\n # Pop out params rng:\n rngs.pop('params')\n\n # Count number of trainable parameters:\n num_trainable_params = debug_utils.log_param_shapes(init_params)\n\n # Count gflops:\n count_flops = config.get('count_flops',\n ml_collections.ConfigDict({'count_flops': True}))\n if count_flops:\n variables = {'params': init_params, **init_model_state}\n gflops_dict = {}\n gflops_all = 0\n for kwargs, in_spec in input_spec.items():\n flops = debug_utils.compute_flops(\n flax_model_apply_fn=functools.partial(\n model_def.apply,\n variables,\n train=False,\n debug=False,\n rngs=rngs,\n **dict(kwargs)),\n input_spec=count_flops.get('input_spec', in_spec),\n fuse_multiply_add=count_flops.get('fuse_multiply_add', True))\n gflops = flops / (10**9)\n gflops_key = 'gflops/' + '/'.join(f'{x}={y}' for x, y in kwargs)\n gflops_dict[gflops_key] = gflops\n gflops_all += gflops\n gflops_dict['gflops'] = gflops_all\n else:\n gflops_dict = None\n\n return init_params, init_model_state, num_trainable_params, gflops_dict",
"def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer",
"def train_parallel(config):\n _setup_parallel_env()\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)",
"def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model",
"def fit(\n self,\n lr: float,\n epochs: int,\n model_dir: str = \"checkpoints\",\n model_name: str = None,\n momentum: float = 0.95,\n weight_decay: float = 0.0001,\n mixed_prec: bool = False,\n use_one_cycle_policy: bool = False,\n warmup_pct: float = 0.3,\n lr_gamma: float = 0.1,\n lr_step_size: float = None,\n grad_steps: int = 2,\n save_model: bool = False,\n ) -> None:\n # set epochs\n self.epochs = epochs\n\n # set lr_step_size based on epochs\n if lr_step_size is None:\n lr_step_size = np.ceil(2 / 3 * self.epochs)\n\n # set model name\n if model_name is None:\n model_name = self.model_name\n\n os.makedirs(model_dir, exist_ok=True)\n\n data_loaders = {}\n data_loaders[\"train\"] = self.dataset.train_dl\n data_loaders[\"valid\"] = self.dataset.test_dl\n\n # Move model to gpu before constructing optimizers and amp.initialize\n device = torch_device()\n self.model.to(device)\n count_devices = num_devices()\n torch.backends.cudnn.benchmark = True\n\n named_params_to_update = {}\n total_params = 0\n for name, param in self.model.named_parameters():\n total_params += 1\n if param.requires_grad:\n named_params_to_update[name] = param\n\n print(\"Params to learn:\")\n if len(named_params_to_update) == total_params:\n print(\"\\tfull network\")\n else:\n for name in named_params_to_update:\n print(f\"\\t{name}\")\n\n # create optimizer\n optimizer = optim.SGD(\n list(named_params_to_update.values()),\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n )\n\n # Use mixed-precision if available\n # Currently, only O1 works with DataParallel: See issues https://github.com/NVIDIA/apex/issues/227\n if mixed_prec:\n # break if not AMP_AVAILABLE\n assert AMP_AVAILABLE\n # 'O0': Full FP32, 'O1': Conservative, 'O2': Standard, 'O3': Full FP16\n self.model, optimizer = amp.initialize(\n self.model,\n optimizer,\n opt_level=\"O1\",\n loss_scale=\"dynamic\",\n # keep_batchnorm_fp32=True doesn't work on 'O1'\n )\n\n # Learning rate scheduler\n if use_one_cycle_policy:\n # Use warmup with the one-cycle policy\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=lr,\n total_steps=self.epochs,\n pct_start=warmup_pct,\n base_momentum=0.9 * momentum,\n max_momentum=momentum,\n )\n else:\n # Simple step-decay\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=lr_step_size, gamma=lr_gamma,\n )\n\n # DataParallel after amp.initialize\n model = (\n nn.DataParallel(self.model) if count_devices > 1 else self.model\n )\n\n criterion = nn.CrossEntropyLoss().to(device)\n\n # set num classes\n topk = 5\n if topk >= self.num_classes:\n topk = self.num_classes\n\n for e in range(1, self.epochs + 1):\n print(\n f\"Epoch {e} =========================================================\"\n )\n print(f\"lr={scheduler.get_lr()}\")\n\n self.results.append(\n self.train_an_epoch(\n model,\n data_loaders,\n device,\n criterion,\n optimizer,\n grad_steps=grad_steps,\n mixed_prec=mixed_prec,\n topk=topk,\n )\n )\n\n scheduler.step()\n\n if save_model:\n self.save(\n os.path.join(\n model_dir,\n \"{model_name}_{epoch}.pt\".format(\n model_name=model_name, epoch=str(e).zfill(3),\n ),\n )\n )\n self.plot_precision_loss_curves()",
"def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()",
"def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = 
output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list",
"def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n return model",
"def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model",
"def run_training(self, schema_params, export_model=False, output_model_dir=None):\n # Log distributed execution context, which includes cluster configuration\n logger.info(f\"Commencing {self.effect_name} training\")\n logger.info(f\"Execution context : {self.execution_context}\")\n\n # Create partition_index_list\n partition_index_list = self._get_partition_list()\n logger.info(f\"This worker on work on the following list of partitions : {partition_index_list}\")\n\n # Sequentially train model on partitions\n for partition_index in partition_index_list:\n logger.info(f\"Commencing {self.effect_name} training for partition index : {partition_index}\")\n\n # Resolve partitioned data directory from raw path params from user\n checkpoint_path = self._anchor_directory(\n self.model.checkpoint_path,\n partition_index)\n training_data_dir = self._anchor_directory(self.model.training_data_dir,\n partition_index)\n validation_data_dir = self._anchor_directory(self.model.validation_data_dir,\n partition_index) if self.model.validation_data_dir else None\n\n if is_empty_directory(training_data_dir):\n logger.info(f\"{training_data_dir} is empty, no dataset to train on.\")\n continue\n # Train model\n self.execution_context[constants.PARTITION_INDEX] = partition_index\n self.model.train(training_data_dir=training_data_dir,\n validation_data_dir=validation_data_dir,\n metadata_file=self.model.metadata_file,\n checkpoint_path=checkpoint_path,\n execution_context=self._prepare_training_context(partition_index),\n schema_params=schema_params)\n\n # Chief should export model\n is_chief = self.execution_context[constants.IS_CHIEF]\n if export_model and is_chief:\n logger.info(f\"Exporting model to directory : {output_model_dir}\")\n self.model.export(output_model_dir=output_model_dir)",
"def create_summary_and_adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_model_for_gpus()",
"def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))",
"def build_model(cfg, gpu_id=None):\n # Construct the model\n if MODEL_REGISTRY.get(cfg.MODEL.NAME) == None:\n # attempt to find standard models\n model = BaseVideoModel(cfg)\n else:\n # if the model is explicitly defined,\n # it is directly constructed from the model pool\n model = MODEL_REGISTRY.get(cfg.MODEL.NAME)(cfg)\n\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n model = model.cuda(device=cur_device)\n \n model_ema = None\n if cfg.MODEL.EMA.ENABLE:\n model_ema = ModelEmaV2(model, decay=cfg.MODEL.EMA.DECAY)\n\n try:\n # convert batchnorm to be synchronized across \n # different GPUs if needed\n sync_bn = cfg.BN.SYNC_BN\n if sync_bn == True and cfg.NUM_GPUS * cfg.NUM_SHARDS > 1:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n except:\n sync_bn = None\n\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS*cfg.NUM_SHARDS > 1:\n # Make model replica operate on the current device\n if cfg.PAI:\n # Support distributed training on the cluster\n model = torch.nn.parallel.DistributedDataParallel(\n module=model\n )\n else:\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n\n return model, model_ema",
"def _update_model(self, X_all, Y_all):\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.set_XY(X_all, Y_all)\n\n # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...\n if self.max_iters > 0:\n # --- update the model maximizing the marginal likelihood.\n if self.optimize_restarts==1:\n self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)\n else:\n self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)",
"def update_model(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_params = ['optimizer_params', 'optimizer']\n if any(item in kwargs.keys() for item in opt_params):\n self.get_unet_model()",
"def enable_model_cpu_offload(self, gpu_id=0):\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook",
"def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))",
"def initialize_model(self):\n args = self.args\n\n if self.args.search_space == 'nasbench':\n self.model_fn = NasBenchNetSearchDarts\n self.fixmodel_fn = NasBenchNet\n model = self.model_fn(args)\n utils = darts_nasbench_utils\n else:\n raise NotImplementedError(\"Not supported\")\n # finialize model update\n if args.gpus > 0:\n if self.args.gpus == 1:\n model = model.cuda()\n self.parallel_model = model\n else:\n self.model = model\n self.parallel_model = nn.DataParallel(self.model).cuda()\n # IPython.embed(header='checking replicas and others.')\n else:\n self.parallel_model = model\n\n darts = DartsArchitect(model, args=args)\n model = self.parallel_model\n # logging.info(\"DARTS param size = %fMB\", utils.count_parameters_in_MB(darts))\n self.train_fn = partial(darts_train_model, args=args, architect=darts, sampler=None)\n self.eval_fn = partial(darts_model_validation, args=args, verbose=True)\n self.controller = darts\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n\n # scheduler as Cosine.\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.learning_rate_min)\n return model, optimizer, scheduler, darts, None",
"def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))",
"def freeze_model(model):\n for param in model.parameters():\n param.requires_grad = False"
] | [
"0.5936191",
"0.56602675",
"0.56224686",
"0.5621566",
"0.55595684",
"0.54760945",
"0.54551095",
"0.54427326",
"0.54231954",
"0.54035556",
"0.5396976",
"0.53836715",
"0.5365184",
"0.53514045",
"0.5305904",
"0.5280321",
"0.5267586",
"0.5260385",
"0.525179",
"0.5249681",
"0.51886994",
"0.51437724",
"0.5140816",
"0.5139759",
"0.5129505",
"0.5104898",
"0.51026154",
"0.5098515",
"0.50848323",
"0.5082763"
] | 0.70964164 | 0 |
Creates a model (with temperature scaling) according to the config given. | def create_model(self) -> None:
self._model = create_model_with_temperature_scaling(self.config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:\n # wrap the model around a temperature scaling model if required\n model = config.create_model()\n if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:\n model = ModelWithTemperature(model, config.temperature_scaling_config)\n return model",
"def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector",
"def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model",
"def model_creator(config):\n return nn.Linear(1, 1)",
"def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))",
"def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model",
"def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model",
"def model(self, **config_kwargs):\n measurement = self.get_measurement(**config_kwargs)\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.get('patches', [])\n\n modelspec = {\n 'channels': self.spec['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poiname=measurement['config']['poi'], **config_kwargs)",
"def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args",
"def run_model(config_file):\n config_file = os.path.join(os.getcwd(), config_file)\n result = Tethys(config_file=config_file)\n result.run_model()\n return result",
"def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model",
"def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))",
"def do_create_model(**kwargs):\n model_params = {\n 'name': kwargs['dag_run'].conf.get('model_name'),\n 'description': 'A custom DNN regressor model',\n 'regions': [REGION]\n }\n\n ti = kwargs['ti']\n\n is_model = ti.xcom_pull(key='is_project', task_ids='check_model')\n if not is_model:\n mle = MLEngineHook()\n mle.create_model(PROJECT, model_params)",
"def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m",
"def create_model(self):\n model = solph.Model(self.es)\n return model",
"def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)",
"def config_task(self) -> None:\n if self.hparams[\"model\"] == \"resnet18\":\n self.model = models.resnet18(pretrained=True)\n in_features = self.model.fc.in_features\n self.model.fc = nn.Linear( # type: ignore[attr-defined]\n in_features, out_features=1\n )\n else:\n raise ValueError(f\"Model type '{self.hparams['model']}' is not valid.\")",
"def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)",
"def create_model(model_class, model_params=None, model_name='model'):\n\n model_params = {} if model_params is None else model_params\n\n model = model_class(**model_params)\n\n if special_parameters.load_model: # recover from checkpoint\n _load_model(model, model_name)\n\n # configure usage on GPU\n if use_gpu():\n model.to(first_device())\n model = torch.nn.DataParallel(model, device_ids=all_devices())\n\n # print info about devices\n print_info('Device(s)): ' + str(device_description()))\n\n return model",
"def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])",
"def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance",
"def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()",
"def _random_model(self, input_size, output_size, task, config: dict) -> AbstractModel:\n return create_random_model(input_size, output_size, config, task)",
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model",
"def from_config(cls,config):\n ## find labels in list\n label_list = load_label_list(config.label_list)\n use_cuda = True if torch.cuda.is_available() else False\n\n global_args = {\n \"fp16\" : False,\n \"classification_report\" : True,\n \"tensorboard_dir\" : config.tensorboard_dir,\n \"wandb_project\" : config.wandb_project,\n \"wandb_kwargs\" : {\n \"name\" : config.wandb_name,\n \"entity\" : config.wandb_entity,\n }\n }\n\n model = NERModel(\n config.model_name,\n config.model_type,\n use_cuda=use_cuda,\n labels=label_list,\n args=global_args,\n )\n return cls(model,config)",
"def template_model():\n model_type = 'continuous' # either 'discrete' or 'continuous'\n model = do_mpc.model.Model(model_type)\n\n # Model variables:\n var1 = model.set_variable(var_type='_x', var_name='var1')\n var2 = model.set_variable(var_type='_x', var_name='var2')\n\n state = vertcat(var1,var2)\n state_dot = model.set_variable(var_type='_x', var_name='state_dot', shape=(2.1))\n\n input1 = model.set_variable(var_type='_u', var_name='input1')\n\n\n # Parameters:\n # define Parameters\n\n model.set_rhs('var1',state_dot[0])\n model.set_rhs('var2',state_dot[1])\n\n state_dot_rhs = vertcat(\n # rhs1,\n # rhs2)\n model.set_rhs('state_dot',state_dot_rhs)\n\n model.setup()\n\n return model",
"def create(self, req, body):\n context = req.environ['meteos.context']\n\n if not self.is_valid_body(body, 'model'):\n raise exc.HTTPUnprocessableEntity()\n\n model = body['model']\n\n LOG.debug(\"Create model with request: %s\", model)\n\n try:\n experiment = self.engine_api.get_experiment(\n context, model['experiment_id'])\n utils.is_valid_status(experiment.__class__.__name__,\n experiment.status,\n constants.STATUS_AVAILABLE)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n except exception.InvalidStatus:\n raise\n\n display_name = model.get('display_name')\n display_description = model.get('display_description')\n experiment_id = model.get('experiment_id')\n source_dataset_url = model.get('source_dataset_url')\n dataset_format = model.get('dataset_format', 'csv')\n model_type = model.get('model_type')\n model_params = model.get('model_params')\n swift_tenant = model.get('swift_tenant')\n swift_username = model.get('swift_username')\n swift_password = model.get('swift_password')\n\n new_model = self.engine_api.create_model(context,\n display_name,\n display_description,\n source_dataset_url,\n dataset_format,\n model_type,\n model_params,\n template.id,\n template.job_template_id,\n experiment_id,\n experiment.cluster_id,\n swift_tenant,\n swift_username,\n swift_password)\n\n return self._view_builder.detail(req, new_model)",
"def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')",
"def from_config(cls, *args, **kwargs):\n _config = args\n\n if isinstance(args, tuple): # multiple non-keyword arguments were provided\n if len(args) > 0:\n _config = args[0]\n\n else:\n _config = kwargs['config_path']\n kwargs.pop('config_path')\n\n local = False\n if 'make_new_path' in kwargs:\n local = True\n elif isinstance(_config, str) and os.path.isfile(_config):\n local = True\n elif isinstance(_config, dict) and \"category\" in _config:\n local = True\n\n if local:\n config = None\n config_path = None\n\n # we need to build ai4water's Model class\n if isinstance(_config, dict):\n config = _config\n else:\n config_path = _config\n return BaseModel._get_config_and_path(\n cls,\n config=config,\n config_path=config_path,\n **kwargs\n )\n\n # tf1.15 has from_config so call it\n return super().from_config(*args, **kwargs)"
] | [
"0.8444309",
"0.65563035",
"0.637411",
"0.6288527",
"0.62518233",
"0.622743",
"0.622743",
"0.6217139",
"0.6207316",
"0.6149929",
"0.6077935",
"0.6031471",
"0.6030025",
"0.6021649",
"0.5992696",
"0.59753346",
"0.5956578",
"0.5916777",
"0.5907874",
"0.5907593",
"0.58919555",
"0.58832514",
"0.58558136",
"0.58446854",
"0.5827892",
"0.58207685",
"0.5820042",
"0.57981926",
"0.5787968",
"0.5780455"
] | 0.8335938 | 1 |
Generates the model summary, which is required for model partitioning across GPUs, and then moves the model to GPU with data parallel/model parallel by calling adjust_model_for_gpus. | def create_summary_and_adjust_model_for_gpus(self) -> None:
if self._model is None:
raise ValueError("Model must be created before it can be adjusted.")
if self.config.is_segmentation_model:
summary_for_segmentation_models(self.config, self._model)
# Prepare for mixed precision training and data parallelization (no-op if already done).
# This relies on the information generated in the model summary.
self.adjust_model_for_gpus() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n random_state = RandomStateSnapshot.snapshot_random_state()\n # There appears to be a bug in apex, where previous use (in training for example) causes problems\n # when another model is later built on the CPU (for example, before loading from a checkpoint)\n # https://github.com/NVIDIA/apex/issues/694\n # Hence, move the model to the GPU before doing model summary.\n if config.use_gpu:\n model = model.cuda()\n if isinstance(config, ScalarModelBase):\n # To generate the model summary, read the first item of the dataset. Then use the model's own\n # get_model_input function to convert the dataset item to input tensors, and feed them through the model.\n train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)\n train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))\n model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs\n # The model inputs may already be converted to float16, assuming that we would do mixed precision.\n # However, the model is not yet converted to float16 when this function is called, hence convert back to float32\n summary = ModelSummary(model)\n summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)\n elif config.is_segmentation_model:\n summary_for_segmentation_models(config, model)\n assert model.summarizer\n summary = model.summarizer # type: ignore\n else:\n raise ValueError(\"Don't know how to generate a summary for this type of model?\")\n RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)\n random_state.restore_random_state()",
"def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")",
"def _build_model(self):\n tf.set_random_seed(self.params.tf_random_seed)\n np.random.seed(4321)\n phase_train = not (self.params.eval or self.params.forward_only)\n\n log_fn('Generating model')\n losses = []\n device_grads = []\n all_logits = []\n all_top_1_ops = []\n all_top_5_ops = []\n enqueue_ops = []\n gpu_compute_stage_ops = []\n gpu_grad_stage_ops = []\n\n with tf.device(self.global_step_device):\n global_step = tf.train.get_or_create_global_step()\n \n # Build the processing and model for the worker.\n (image_producer_ops,\n image_producer_stages) = self._build_image_processing(shift_ratio=0)\n image_producer_ops = tf.group(*image_producer_ops)\n update_ops = None\n staging_delta_ops = []\n\n for device_num in range(len(self.devices)):\n with self.variable_mgr.create_outer_variable_scope(\n device_num), tf.name_scope('tower_%i' % device_num) as name_scope:\n results = self.add_forward_pass_and_gradients(\n phase_train, device_num, device_num,\n image_producer_stages[device_num], gpu_compute_stage_ops,\n gpu_grad_stage_ops)\n if phase_train:\n losses.append(results['loss'])\n device_grads.append(results['gradvars'])\n \n\n if device_num == 0:\n # Retain the Batch Normalization updates operations only from the\n # first tower. These operations update the moving mean and moving\n # variance variables, which are updated (but not used) during\n # training, and used during evaluation. The moving mean and variance\n # approximate the true mean and variance across all images in the\n # dataset. Therefore, in replicated mode, these moving averages would\n # be almost identical for each tower, and so we only update and save\n # the moving averages for one tower. In parameter server mode, all\n # towers share a copy of the variables so we also only need to update\n # and save the moving averages once.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)\n staging_delta_ops = list(self.variable_mgr.staging_delta_ops)\n \n enqueue_ops.append(tf.group(*gpu_compute_stage_ops))\n\n fetches = self._build_fetches(global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops,\n all_top_5_ops, phase_train)\n return (image_producer_ops, enqueue_ops, fetches)",
"def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model",
"def _regular_build(self):\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')",
"def _build_all_models(self):\r\n self.output_tensors = {}\r\n self.loss_terms = {}\r\n self.metrics = {}\r\n\r\n def _build_datasource_summaries(data_sources, mode):\r\n \"\"\"Register summary operations for input data from given data sources.\"\"\"\r\n with tf.variable_scope('%s_data' % mode):\r\n for data_source_name, data_source in data_sources.items():\r\n tensors = data_source.output_tensors\r\n for key, tensor in tensors.items():\r\n summary_name = '%s/%s' % (data_source_name, key)\r\n shape = tensor.shape.as_list()\r\n num_dims = len(shape)\r\n if num_dims == 4: # Image data\r\n if shape[1] == 1 or shape[1] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_first')\r\n elif shape[3] == 1 or shape[3] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_last')\r\n # TODO: fix issue with no summary otherwise\r\n elif num_dims == 2:\r\n self.summary.histogram(summary_name, tensor)\r\n else:\r\n logger.debug('I do not know how to create a summary for %s (%s)' %\r\n (summary_name, tensor.shape.as_list()))\r\n\r\n def _build_train_or_test(mode):\r\n data_sources = self._train_data if mode == 'train' else self._test_data\r\n\r\n # Build model\r\n output_tensors, loss_terms, metrics = self.build_model(data_sources, mode=mode)\r\n\r\n # Record important tensors\r\n self.output_tensors[mode] = output_tensors\r\n self.loss_terms[mode] = loss_terms\r\n self.metrics[mode] = metrics\r\n\r\n # Create summaries for scalars\r\n if mode == 'train':\r\n for name, loss_term in loss_terms.items():\r\n self.summary.scalar('loss/%s/%s' % (mode, name), loss_term)\r\n for name, metric in metrics.items():\r\n self.summary.scalar('metric/%s/%s' % (mode, name), metric)\r\n\r\n # Build the main model\r\n if len(self._train_data) > 0:\r\n _build_datasource_summaries(self._train_data, mode='train')\r\n _build_train_or_test(mode='train')\r\n logger.info('Built model.')\r\n\r\n # Print no. of parameters and lops\r\n flops = tf.profiler.profile(\r\n options=tf.profiler.ProfileOptionBuilder(\r\n tf.profiler.ProfileOptionBuilder.float_operation()\r\n ).with_empty_output().build())\r\n logger.info('------------------------------')\r\n logger.info(' Approximate Model Statistics ')\r\n logger.info('------------------------------')\r\n logger.info('FLOPS per input: {:,}'.format(flops.total_float_ops / self._batch_size))\r\n logger.info(\r\n 'Trainable Parameters: {:,}'.format(\r\n np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])\r\n )\r\n )\r\n logger.info('------------------------------')\r\n\r\n # If there are any test data streams, build same model with different scope\r\n # Trainable parameters will be copied at test time\r\n if len(self._test_data) > 0:\r\n _build_datasource_summaries(self._test_data, mode='test')\r\n with tf.variable_scope('test'):\r\n _build_train_or_test(mode='test')\r\n logger.info('Built model for live testing.')\r\n\r\n if self._enable_live_testing:\r\n self._tester._post_model_build() # Create copy ops to be run before every test run\r",
"def main(model_arch: str, images: List, batch_size: int,\n batches_per_step: int, loop: bool, num_iterations: int, num_ipus: int, mode: str, data: str,\n available_memory_proportion: float, gen_report: bool, save_graph_pb: bool, use_ipu_model: bool) -> None:\n\n if (available_memory_proportion <= 0.05) or (available_memory_proportion > 1):\n raise ValueError('Invalid \"availableMemoryProportion\" value: must be a float >=0.05'\n ' and <=1 (default value is 0.6)')\n\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --log_cycle_count=0\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--log_cycle_count=0\"\n\n if data == \"synthetic\":\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"\"\n\n if use_ipu_model:\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_ipu_model\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_ipu_model\"\n\n # Select model architecture\n model_cls = model_dict[model_arch]\n if model_arch == 'googlenet':\n model_arch = 'inceptionv1'\n config = Path(f'configs/{model_arch}.yml')\n\n # Create graph and data iterator\n loop_op, infeed_initializer, outfeed_op = construct_graph(model_cls, config,\n f\"./checkpoints/{model_arch}/\",\n batch_size, batches_per_step,\n images, loop,\n model_cls.preprocess_method(), num_ipus,\n mode, save_graph_pb)\n # Run on model or device\n if gen_report:\n get_report(loop_op, infeed_initializer, outfeed_op, f\"{config.stem}_report.txt\",\n available_memory_proportion=available_memory_proportion)\n else:\n ground_truth = tuple([Path(filename).stem for filename in images])\n run_inference(loop_op, infeed_initializer, outfeed_op, batch_size, batches_per_step, config.stem,\n model_cls.decode_method(), ground_truth, num_iterations, num_ipus, mode, data,\n available_memory_proportion=available_memory_proportion)",
"def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\n data_dir, num_targets, dataset, validate, seed, shuffled, shift,\n pad, batch_size=128):\n summary_dir += '/train/'\n with tf.Graph().as_default():\n # Build model\n features = get_features('train', batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=False,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n # Print stats\n param_stats = tf.profiler.profile(\n tf.get_default_graph(),\n options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n writer = tf.summary.FileWriter(summary_dir)\n run_experiment(load_training, summary_dir, writer, train_experiment,\n model, result, max_steps, save_step)\n writer.close()",
"def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)",
"def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n assert isinstance(model, BaseModel)\n crop_size = config.crop_size\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size, crop_size)\n try:\n model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)\n except AttributeError as e:\n logging.warning(f\"summary_for_segmentation_models failed with exception {e}\")",
"def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model",
"def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer",
"def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up 
summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()",
"def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')",
"def create_summary_and_adjust_mean_teacher_model_for_gpus(self) -> None:\n if self._mean_teacher_model is None:\n raise ValueError(\"Mean teacher model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._mean_teacher_model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_mean_teacher_model_for_gpus()",
"def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))",
"def summarize_model(\n model: keras.Model, fig_dir: Union[str, None] = None\n) -> None:\n\n submodels = []\n for layer in model.layers:\n if isinstance(layer, TimeDistributed):\n submodels.append(layer.layer)\n\n for submodel in submodels:\n submodel.summary()\n model.summary()\n\n if fig_dir is not None:\n for submodel in submodels:\n keras.utils.plot_model(\n submodel, os.path.join(fig_dir, f'model_{submodel.name}.png'),\n dpi=300\n )\n keras.utils.plot_model(\n model, os.path.join(fig_dir, 'model_full.png'), dpi=300\n )",
"def main():\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = 
model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save",
"def create_snapshot_model(model_args):\n # similar to create_separate_model but with experts pretrained\n # 1. get model directory path with models at each epoch for a global model\n # 2. choose the model at epochs that gives best validation performance for each cohort\n # as starting point\n # 3. finetune the resulting model\n tasks = model_args['tasks']\n X_val, y_val, cohorts_val = model_args['X_val'], model_args['y_val'], model_args['cohorts_val']\n val_loader = create_loader(X_val, y_val, batch_size=100, shuffle=False) \n # convert y_val and cohorts_val to numpy\n y_val, cohorts_val = dataset2numpy(y_val).astype(int), dataset2numpy(cohorts_val).astype(int)\n\n experts_auc = [(None, 0) for _ in range(len(tasks))] # init to (n model, 0 auc)\n for fn in glob.glob(model_args['global_model_dir'] + \"/epoch*.m\"):\n net = torch.load(fn)\n y_pred = get_output(net, val_loader).ravel()\n for i, task in enumerate(tasks):\n y_val_in_task = y_val[cohorts_val == task]\n y_pred_in_task = y_pred[cohorts_val == task]\n try:\n auc = roc_auc_score(y_val_in_task, y_pred_in_task)\n except:\n auc = 0.1 # slightly larger than 0 but shouldn't be selected\n if auc > experts_auc[i][1]:\n experts_auc[i] = (net, auc)\n\n experts = nn.ModuleList([expert for expert, auc in experts_auc])\n # currently is inefficient by running all models for all tasks\n # I should be able to just run the required expert\n model = Separate_MIMIC_Model(experts)\n return model",
"def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')",
"def main(batch_size, saves_dir=TENSORFLOW_SAVES_DIR):\n batches = [1, 8, 16, 32, 64]\n if batch_size:\n batches = [batch_size]\n\n for batch_size in batches:\n print(\"Batch size: {}\".format(batch_size))\n batch = np.random.random((batch_size, 224, 224, 3))\n\n # our default model\n tf.reset_default_graph()\n usual_model = Model()\n measure_model(usual_model, \"Usual model\", batch)\n usual_model.sess.close()\n\n # our binary file\n tf.reset_default_graph()\n frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='constant_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(frozen_model, \"Frozen model\", batch)\n frozen_model.sess.close()\n\n # binary file with some constant operations\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='optimized_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, \"Optimized frozen model\", batch)\n optimized_frozen_model.sess.close()\n\n # model quantized with python\n model_name = \"Quantized with python\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_python.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)\n\n # model quantized with bazel\n model_name = \"Quantized with bazel\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_bazel.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)",
"def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model",
"def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, 
\"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )",
"def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy",
"def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()",
"def _benchmark_cnn(self):\n self.single_session = False\n (image_producer_ops, enqueue_ops, fetches) = self._build_model()\n fetches_list = nest.flatten(list(fetches.values()))\n main_fetch_group = tf.group(*fetches_list)\n execution_barrier = None\n \n\n global_step = tf.train.get_global_step()\n with tf.device(self.global_step_device):\n with tf.control_dependencies([main_fetch_group]):\n fetches['inc_global_step'] = global_step.assign_add(1)\n\n\n local_var_init_op = tf.local_variables_initializer()\n variable_mgr_init_ops = [local_var_init_op]\n with tf.control_dependencies([local_var_init_op]):\n variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())\n local_var_init_op_group = tf.group(*variable_mgr_init_ops)\n\n summary_op = tf.summary.merge_all()\n is_chief = (not self.job_name or self.task_index == 0)\n summary_writer = None\n \n # We run the summaries in the same thread as the training operations by\n # passing in None for summary_op to avoid a summary_thread being started.\n # Running summaries and training operations in parallel could run out of\n # GPU memory.\n saver = tf.train.Saver(\n self.variable_mgr.savable_variables(), save_relative_paths=True)\n ready_for_local_init_op = None\n \n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=self.params.train_dir,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_var_init_op_group,\n saver=saver,\n global_step=global_step,\n summary_op=None,\n save_model_secs=self.params.save_model_secs,\n summary_writer=summary_writer)\n\n step_train_times = []\n start_standard_services = (\n self.params.summary_verbosity >= 1 or\n self.dataset.queue_runner_required())\n target = self.cluster_manager.get_target() if self.cluster_manager else ''\n with sv.managed_session(\n master=target,\n config=create_config_proto(self.params),\n start_standard_services=start_standard_services) as sess:\n image_producer = cnn_util.ImageProducer(sess, image_producer_ops,\n self.batch_group_size)\n image_producer.start()\n for i in xrange(len(enqueue_ops)):\n sess.run(enqueue_ops[:(i + 1)])\n image_producer.notify_image_consumption()\n self.init_global_step, = sess.run([global_step])\n if not self.single_session:\n global_step_watcher = GlobalStepWatcher(\n sess, global_step,\n self.num_workers * self.num_warmup_batches +\n self.init_global_step,\n self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)\n global_step_watcher.start()\n \n\n log_fn('Running warm up')\n local_step = -1 * self.num_warmup_batches\n done_fn = global_step_watcher.done\n loop_start_time = time.time()\n while not done_fn():\n if local_step == 0:\n log_fn('Done warm up')\n \n header_str = 'Step\\tImg/sec\\tloss'\n \n log_fn(header_str)\n \n # reset times to ignore warm up batch\n step_train_times = []\n loop_start_time = time.time()\n \n fetch_summary = None\n summary_str = benchmark_one_step(\n sess, fetches, local_step,\n self.batch_size * (self.num_workers if self.single_session else 1),\n step_train_times, self.trace_filename, image_producer, self.params,\n fetch_summary)\n \n local_step += 1\n loop_end_time = time.time()\n # Waits for the global step to be done, regardless of done_fn.\n \n num_steps = global_step_watcher.num_steps()\n elapsed_time = global_step_watcher.elapsed_time()\n\n average_wall_time = elapsed_time / num_steps if num_steps > 0 else 0\n images_per_sec = ((self.num_workers * self.batch_size) / average_wall_time\n if average_wall_time > 0 else 0)\n\n log_fn('-' * 64)\n log_fn('total images/sec: %.2f' % 
images_per_sec)\n log_fn('-' * 64)\n image_producer.done()\n #if is_chief:\n # store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)\n # Save the model checkpoint.\n \n sv.stop()\n return {\n 'num_workers': self.num_workers,\n 'num_steps': num_steps,\n 'average_wall_time': average_wall_time,\n 'images_per_sec': images_per_sec\n }",
"def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)",
"def main():\n args = arg_parser()\n if(args.gpu):\n gpu = args.gpu\n else:\n print(\"GPU mode not specified, will use the default value - Use GPU\")\n gpu = \"Y\"\n # Device setting:\n device = device_setting(gpu)\n \n # Prepare the datasets and dataloaders:\n print(\"\\nPreparing dataset for train/valid/test ...\")\n train_loader, valid_loader, test_loader, train_data, valid_data, test_data = load_dataset()\n \n # Model architects, criterion and optimizer:\n print(\"\\nNetwork archetecture building ...\")\n model, criterion, optimizer = network(device=device,\n architecture=args.architecture,\n learning_rate=args.learning_rate,\n hidden_size=args.hidden_size,\n dropout=args.dropout,\n output_size=args.output_size)\n \n # Train the model:\n print(\"\\n\")\n model = train(model=model,\n epochs=5,\n learning_rate=args.learning_rate,\n criterion=criterion,\n optimizer=optimizer,\n train_loader=train_loader,\n valid_loader=valid_loader,\n device=device)\n \n # Validate the model performance on the test set:\n print(\"\\nValidate model performance on test set ...\")\n test(model=model, test_loader=test_loader, device=device)\n \n # Save model checkpoint:\n print(\"\\nSave model checkpoint ...\")\n save(model=model, train_data=train_data, epochs=args.epochs, architecture=args.architecture)",
"def auto_model_profiling(model_info, server_name, device_util_thd=0.01, device_memory_thd=0.01, period=10):\n\n different_kind_devices = collections.OrderedDict()\n for gpu in GPUtil.getGPUs():\n if gpu.name not in different_kind_devices:\n different_kind_devices[gpu.name] = gpu\n\n for device in list(different_kind_devices.values()):\n profiler = Profiler(model_info=model_info, server_name=server_name)\n monitor = UtilMonitor(device, profiler, period, device_util_thd, device_memory_thd)\n monitor.start()",
"def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' + args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)"
] | [
"0.7002343",
"0.62532896",
"0.62355524",
"0.6192523",
"0.61289847",
"0.6088246",
"0.59602416",
"0.5950651",
"0.59498066",
"0.5881187",
"0.588016",
"0.5873111",
"0.58373106",
"0.5834648",
"0.58186084",
"0.58035755",
"0.57857174",
"0.57534796",
"0.5750009",
"0.5738947",
"0.57329243",
"0.57293063",
"0.5724346",
"0.572426",
"0.5712572",
"0.5712538",
"0.5695092",
"0.5662597",
"0.56513256",
"0.56502306"
] | 0.7819205 | 0 |
Creates an optimizer and loads its state from a checkpoint. | def try_create_optimizer_and_load_from_checkpoint(self) -> bool:
    self.create_optimizer()
    if self.checkpoint_path:
        return self.try_load_checkpoint_for_optimizer()
    return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self",
"def create_optimizer(self) -> None:\n # Make sure model is created before we create optimizer\n if self._model is None:\n raise ValueError(\"Model checkpoint must be created before optimizer checkpoint can be loaded.\")\n\n # Select optimizer type\n if self.config.optimizer_type in [OptimizerType.Adam, OptimizerType.AMSGrad]:\n self._optimizer = torch.optim.Adam(self._model.parameters(), self.config.l_rate,\n self.config.adam_betas, self.config.opt_eps, self.config.weight_decay,\n amsgrad=self.config.optimizer_type == OptimizerType.AMSGrad)\n elif self.config.optimizer_type == OptimizerType.SGD:\n self._optimizer = torch.optim.SGD(self._model.parameters(), self.config.l_rate, self.config.momentum,\n weight_decay=self.config.weight_decay)\n elif self.config.optimizer_type == OptimizerType.RMSprop:\n self._optimizer = RMSprop(self._model.parameters(), self.config.l_rate, self.config.rms_alpha,\n self.config.opt_eps,\n self.config.weight_decay, self.config.momentum)\n else:\n raise NotImplementedError(f\"Optimizer type {self.config.optimizer_type.value} is not implemented\")",
"def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model",
"def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state",
"def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)",
"def opt_from_checkpoint(\n checkpoint_path: str,\n config_path: Optional[str] = None,\n extra_bindings=tuple([])\n) -> Optimizer:\n\n if config_path is None:\n config_path = \"/\".join(checkpoint_path.split(\"/\")[:-1]) + \"/config.gin\"\n\n logging.info(\"Restoring configs from: %s\", config_path)\n with gin.unlock_config():\n scope = f\"opt_from_checkpoint__{str(uuid.uuid4()).replace('-', '_')}\"\n with gin.config_scope(None):\n with gin.config_scope(scope):\n if config_path:\n with file_open(config_path, \"rb\") as f:\n content = bytes(f.read()).decode(\"utf-8\")\n\n # gin writes out multi line sometimes, undo this.\n content = content.replace(\"\\\\\\n\", \"\")\n\n def maybe_add_scope(c):\n # filter out train as this overlaps with outer_training.\n if c.startswith(\"#\"):\n return None\n if \"=\" in c:\n return scope + \"/\" + c\n return c\n\n bindings = [maybe_add_scope(c) for c in content.split(\"\\n\")]\n bindings = [b for b in bindings if b]\n bindings = bindings + [maybe_add_scope(c) for c in extra_bindings]\n\n logging.info(\"Parsing bindings\")\n for b in bindings:\n logging.info(b)\n print(b)\n gin.parse_config(bindings, skip_unknown=True)\n\n configurable = gin.query_parameter(f\"{scope}/run_train.lopt\")\n if isinstance(configurable, gin.config._UnknownConfigurableReference): # pylint: disable=protected-access\n raise ValueError(\"Gin couldn't find the learned optimizer in current\"\n \" imports. Did you forget to import the module?\")\n\n # with summary.summary_scope(\"opt_from_checkpoint\"):\n lopt = configurable.configurable.wrapped()\n theta = lopt.init(jax.random.PRNGKey(0))\n logging.info(f\"Restoring checkpoint {checkpoint_path}\") # pylint: disable=logging-fstring-interpolation\n ckpt = ParameterCheckpoint(theta, \"\", 0)\n ckpt = load_state(checkpoint_path, ckpt)\n opt = lopt.opt_fn(ckpt.params)\n return opt\n # wrapped = _GinScopeClass(opt, scope)\n # For now, just add the lopt to the returned class.\n # TODO(lmetz) change this api to return a more structured class?\n # wrapped.lopt = lopt\n # return wrapped # type: ignore",
"def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state",
"def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state",
"def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint",
"def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint",
"def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state",
"def try_load_checkpoint_for_optimizer(self) -> bool:\n\n if self._optimizer is None:\n raise ValueError(\"Optimizer must be created before optimizer checkpoint can be loaded.\")\n\n if not self.checkpoint_path:\n logging.warning(\"No checkpoint path provided.\")\n return False\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n logging.info(f\"Loading checkpoint {self.checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(self.checkpoint_path, self.config.use_gpu)\n\n try:\n state_dict = checkpoint[ModelAndInfo.OPTIMIZER_STATE_DICT_KEY]\n except KeyError:\n logging.error(f\"Key {ModelAndInfo.OPTIMIZER_STATE_DICT_KEY} not found in checkpoint\")\n return False\n\n self._optimizer.load_state_dict(state_dict)\n\n logging.info(f\"Loaded optimizer from checkpoint (epoch: {checkpoint[ModelAndInfo.EPOCH_KEY]})\")\n self.checkpoint_epoch = checkpoint[ModelAndInfo.EPOCH_KEY]\n return True",
"def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])",
"def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])",
"def LoadProgramState(self, restored_checkpoint_path=None, sess=None):\n pass",
"def create_optimizer(net, optimizer_state_dict, learning_rate, device='cuda'):\n # define optimizer\n optimizer = optim.Adam([{\n 'params': net.net.parameters(),\n 'initial_lr': learning_rate\n }])\n # load optimizer checkpoint if available\n if optimizer_state_dict is not None:\n target_device = 'cpu' if device == 'cpu' else 'cuda'\n # load the optimizer weights\n optimizer.load_state_dict(optimizer_state_dict)\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = getattr(v, target_device)()\n return optimizer",
"def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)",
"def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']",
"def create_optimizer(parameters, optimizer_class, optim_params, model_name='model'):\n opt = optimizer_class(parameters, **optim_params)\n if special_parameters.load_model:\n _load_optimizer(opt, model_name)\n return opt",
"def load_state_dict(self, checkpoint):\n self.net.load_state_dict(checkpoint['Net'])\n self.optimizer.load_state_dict(checkpoint['Optimizer'])\n\n if ADVERSARIAL_FLAG:\n self.adv_net.load_state_dict(checkpoint['AdvNet'])\n self.adv_optimizer.load_state_dict(checkpoint['AdvOptimizer'])\n\n self.history = checkpoint['History']\n self.stats = checkpoint['Stats']\n\n # The following loops are used to fix a bug that was\n # discussed here: https://github.com/pytorch/pytorch/issues/2830\n # (it is supposed to be fixed in recent PyTorch version)\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.net.device)\n if ADVERSARIAL_FLAG:\n for adv_state in self.adv_optimizer.state.values():\n for k, v in adv_state.items():\n if isinstance(v, torch.Tensor):\n adv_state[k] = v.to(self.adv_net.device)",
"def load_model(agent, optimizer, model_file):\n checkpoint = torch.load(model_file)\n episode = checkpoint['epoch']\n agent.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n return episode",
"def load_checkpoint(self, filename, load_optim=True):\n extra_state, optim_history, last_optim_state = \\\n utils.load_model_state(filename, self.get_model())\n\n if last_optim_state is not None:\n # rebuild optimizer after loading model, since params may have changed\n #self.optimizer = optim.build_optimizer(self.args, self.model.parameters())\n self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)\n\n if load_optim:\n self._optim_history = optim_history\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n if last_optim['criterion_name'] == self.criterion.__class__.__name__:\n self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])\n if last_optim['optimizer_name'] == self.optimizer.__class__.__name__:\n self.optimizer.load_state_dict(last_optim_state)\n\n self._num_updates = last_optim['num_updates']\n\n return extra_state",
"def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)",
"def create_optimizer(self, context, optimizer, host):\n pass",
"def _load_state_dict(optimizer, state: dict) -> None:\n if is_scheduler(optimizer):\n optimizer.load_state_dict(state[\"scheduler\"])\n optimizer.optimizer.load_state_dict(state[\"optimizer\"])\n else:\n optimizer.load_state_dict(state)",
"def load_state(net, optimizer, scheduler, model_no=0, load_best=False):\n logger.info(\"Initializing model and optimizer states...\")\n base_path = \"./data/\"\n checkpoint_path = os.path.join(base_path,\"test_checkpoint_%d.pth.tar\" % model_no)\n best_path = os.path.join(base_path,\"test_model_best_%d.pth.tar\" % model_no)\n start_epoch, best_pred, checkpoint = 0, 0, None\n if (load_best == True) and os.path.isfile(best_path):\n checkpoint = torch.load(best_path)\n logger.info(\"Loaded best model.\")\n elif os.path.isfile(checkpoint_path):\n checkpoint = torch.load(checkpoint_path)\n logger.info(\"Loaded checkpoint model.\")\n if checkpoint != None:\n start_epoch = checkpoint['epoch']\n best_pred = checkpoint['best_acc']\n net.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n logger.info(\"Loaded model and optimizer.\") \n return start_epoch, best_pred",
"def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)",
"def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc",
"def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers",
"def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager"
] | [
"0.7107619",
"0.70649505",
"0.7064646",
"0.69969",
"0.6977983",
"0.6892563",
"0.6793151",
"0.67687976",
"0.673699",
"0.6716268",
"0.65814966",
"0.650589",
"0.64456534",
"0.64456534",
"0.6368876",
"0.632732",
"0.6312547",
"0.6301847",
"0.62795025",
"0.6267788",
"0.623797",
"0.61970675",
"0.6188866",
"0.6160548",
"0.61562544",
"0.61470646",
"0.61243194",
"0.6072098",
"0.6072091",
"0.6048436"
] | 0.71238905 | 0 |
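The checkpoint-loading snippets in the row above all follow the same PyTorch pattern: store the model's state_dict and the optimizer's state_dict in one file, then restore both before resuming training. A minimal, self-contained sketch of that shared pattern follows; the toy network, file name, and dictionary keys are illustrative assumptions, not taken from any particular snippet above.

import os
import torch
import torch.nn as nn
import torch.optim as optim

def save_checkpoint(path, model, optimizer, epoch):
    # Persist model and optimizer state together so training can resume later.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, path)

def load_checkpoint(path, model, optimizer=None):
    # Restore model (and optionally optimizer) state from disk.
    if not os.path.exists(path):
        raise FileNotFoundError(f"Checkpoint '{path}' does not exist")
    state = torch.load(path, map_location='cpu')
    model.load_state_dict(state['model_state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(state['optimizer_state_dict'])
    return state['epoch']

if __name__ == '__main__':
    net = nn.Linear(4, 2)
    opt = optim.Adam(net.parameters(), lr=1e-3)
    save_checkpoint('checkpoint.pth.tar', net, opt, epoch=3)
    print(load_checkpoint('checkpoint.pth.tar', net, opt))  # prints 3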
Generates a human readable summary of the present segmentation model, writes it to logging.info, and stores the ModelSummary object inside the argument `model`. | def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:
assert isinstance(model, BaseModel)
crop_size = config.crop_size
if isinstance(crop_size, int):
crop_size = (crop_size, crop_size, crop_size)
try:
model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)
except AttributeError as e:
logging.warning(f"summary_for_segmentation_models failed with exception {e}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n random_state = RandomStateSnapshot.snapshot_random_state()\n # There appears to be a bug in apex, where previous use (in training for example) causes problems\n # when another model is later built on the CPU (for example, before loading from a checkpoint)\n # https://github.com/NVIDIA/apex/issues/694\n # Hence, move the model to the GPU before doing model summary.\n if config.use_gpu:\n model = model.cuda()\n if isinstance(config, ScalarModelBase):\n # To generate the model summary, read the first item of the dataset. Then use the model's own\n # get_model_input function to convert the dataset item to input tensors, and feed them through the model.\n train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)\n train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))\n model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs\n # The model inputs may already be converted to float16, assuming that we would do mixed precision.\n # However, the model is not yet converted to float16 when this function is called, hence convert back to float32\n summary = ModelSummary(model)\n summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)\n elif config.is_segmentation_model:\n summary_for_segmentation_models(config, model)\n assert model.summarizer\n summary = model.summarizer # type: ignore\n else:\n raise ValueError(\"Don't know how to generate a summary for this type of model?\")\n RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)\n random_state.restore_random_state()",
"def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")",
"def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))",
"def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))",
"def save_summary(model, model_name, stage_no):\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x))\n short_model_summary = \"\\n\".join(stringlist)\n \n with open(eval_path+\"{}_model_summary_stage_{}.txt\".format(model_name, stage_no), \"w\") as text_file:\n print(short_model_summary, file=text_file)",
"def print_summary(self):\n self.model.summary()",
"def show_model_summary(self):\n\t\treturn self.model.summary()",
"def summarize_model(\n model: keras.Model, fig_dir: Union[str, None] = None\n) -> None:\n\n submodels = []\n for layer in model.layers:\n if isinstance(layer, TimeDistributed):\n submodels.append(layer.layer)\n\n for submodel in submodels:\n submodel.summary()\n model.summary()\n\n if fig_dir is not None:\n for submodel in submodels:\n keras.utils.plot_model(\n submodel, os.path.join(fig_dir, f'model_{submodel.name}.png'),\n dpi=300\n )\n keras.utils.plot_model(\n model, os.path.join(fig_dir, 'model_full.png'), dpi=300\n )",
"def model_stats(opt, epoch, model):\n log = rlog.getLogger(opt.experiment + \".model\")\n if hasattr(opt, \"log\") and opt.log.detailed:\n # log histogram also\n assert isinstance(\n model, SVIModel\n ), \"This stat only makes sense for SVI models.\"\n for mu, std in zip(model.mu(), model.std()):\n log.put(mu=mu, std=std)\n log.trace(step=epoch, **model.summarize())\n log.reset()",
"def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)",
"def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"",
"def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)",
"def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()",
"def test_get_summary_with_model(self):\n\t\t\n\t\tdescription = self.watcher.describe(model=self.model)\n\t\tself.assertEqual(11, len(description))\n\t\t\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.second_layer])\n\t\treturned_summary = self.watcher.get_summary(details)\n\t\t\n\t\tprint(returned_summary)\n\t\t\n\t\tsaved_summary = self.watcher.get_summary()\n\t\tself.assertEqual(returned_summary, saved_summary)",
"def summary(self):\r\n print(self.model.summary())",
"def summary(self):\n print(self.model.summary())",
"def output_summary(self, v, vhat, sk, logged_matrics, train_dataset_label, val_dataset_label, summary_folder_path):\n\n mse = np.sum((v-vhat)**2)/len(v)\n train_loss = logged_matrics[\"train_loss\"]\n\n k = np.sum(p.numel() for p in self.parameters())\n\n numOfSamples = len(sk)\n aic = 2*k + numOfSamples*np.log(mse) + numOfSamples*(1+np.log(2*np.pi))\n\n summary_file = os.path.join(summary_folder_path, \"model_summary.txt\")\n if not os.path.isfile(summary_file):\n print(\"Created file \"+summary_file)\n with open(summary_file, \"w\") as output:\n output.write(\n \"Model Train_dataset_label Val_dataset_label Train_loss Test_loss AIC\\n\")\n else:\n print(summary_file +\n \" exists, model summary will be attached to the end of this file.\")\n\n with open(summary_file, \"a\") as output:\n model_name = self.version\n output.write(model_name + \" \" + train_dataset_label + \" \" +\n val_dataset_label + \" %f %f %f\\n\" % (train_loss, mse, aic))\n\n\n plt.scatter(sk, v, c=\"blue\", s=2, label=\"true\")\n plt.scatter(sk, vhat, c=\"red\", s=2, label=\"predict\")\n plt.legend()\n plt.xlabel(\"sk\")\n plt.ylabel(\"v\")\n\n plt.savefig(os.path.join(summary_folder_path, \"plots\", model_name+\".png\"))\n plt.show()\n\n np.savetxt(os.path.join(summary_folder_path, \"model_prediction\", model_name+\"_prediction.txt\"), np.column_stack((sk, v, vhat)), header=\"sk v vhat\", fmt='%.8f')\n\n\n print(\"Plot saved as\", os.path.join(summary_folder_path, \"plots\", model_name+\".png\"))\n print(\"Model prediction saved as\", os.path.join(summary_folder_path, \"model_prediction\", model_name+\"_prediction.txt\"))",
"def log_model_analysis(\n logger, image, segmentation_image, model, indices_to_colors_map, void_color, colors_to_ignore):\n\n ground_truth_overlay_image = net.utilities.get_segmentation_overlaid_image(\n image, segmentation_image, colors_to_ignore)\n\n predicted_segmentation_cube = model.predict(image)\n\n predicted_segmentation_image = net.data.get_segmentation_image(\n predicted_segmentation_cube, indices_to_colors_map, void_color)\n\n predicted_overlay_image = net.utilities.get_segmentation_overlaid_image(\n image, predicted_segmentation_image, colors_to_ignore)\n\n logger.info(vlogging.VisualRecord(\n \"Data\", [image, segmentation_image, predicted_segmentation_image,\n ground_truth_overlay_image, predicted_overlay_image]))",
"def summary(self):\n self.model.summary()",
"def review_model(model): \n \n diagnose_model(model)\n \n plot_param_coef(model)\n \n plot_p_values(model)\n \n return",
"def lme_summary(output_dir: str, model: LMEModel, tree: TreeNode) -> None:\n warnings.warn(\"This visualization are deprecated.\", DeprecationWarning)\n # log likelihood\n loglike = pd.Series({r.model.endog_names: r.model.loglike(r.params)\n for r in model.results})\n w, h = 500, 300 # plot width and height\n # Summary object\n smry = model.summary()\n\n t = _decorate_tree(tree, -loglike)\n\n p1 = radialplot(t, figsize=(800, 800))\n p1.title.text = 'Loglikelihood of submodels'\n p1.title_location = 'above'\n p1.title.align = 'center'\n p1.title.text_font_size = '18pt'\n\n # 2D scatter plot for prediction on PB\n p2 = _projected_prediction(model, plot_width=w, plot_height=h)\n p3 = _projected_residuals(model, plot_width=w, plot_height=h)\n hm_p = _heatmap_summary(model.pvalues.T, model.coefficients().T,\n plot_width=900, plot_height=400)\n\n # combine the cross validation, explained sum of squares tree and\n # residual plots into a single plot\n p = row(column(p2, p3), p1)\n p = column(hm_p, p)\n\n # Deposit all regression results\n _deposit_results(model, output_dir)\n\n index_fp = os.path.join(output_dir, 'index.html')\n with open(index_fp, 'w') as index_f:\n index_f.write('<html><body>\\n')\n index_f.write('<h1>Simplicial Linear Mixed Effects Summary</h1>\\n')\n index_f.write(smry.as_html())\n index_f.write(\n ('<th>Coefficients</th>\\n'\n '<a href=\"coefficients.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Coefficient pvalues</th>\\n'\n '<a href=\"pvalues.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>FDR corrected coefficient pvalues</th>\\n'\n '<a href=\"fdr-corrected-pvalues.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Predicted Balances</th>\\n'\n '<a href=\"predicted.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Residuals</th>\\n'\n '<a href=\"residuals.csv\">'\n 'Download as CSV</a><br>\\n')\n )\n\n diag_html = file_html(p, CDN, 'Diagnostic plots')\n index_f.write(diag_html)\n index_f.write('</body></html>\\n')",
"def print_summary(self, print_level = 0):\n\n print(\"==========================\")\n print(\"= FUNtoFEM model summary =\")\n print(\"==========================\")\n print(\"Model name:\", self.name)\n print(\"Number of bodies:\", len(self.bodies))\n print(\"Number of scenarios:\", len(self.scenarios))\n print(\" \")\n print(\"------------------\")\n print(\"| Bodies summary |\")\n print(\"------------------\")\n for body in self.bodies:\n print(\"Body:\", body.id, body.name)\n print(\" coupling group:\", body.group)\n print(\" transfer scheme:\", type(body.transfer))\n print(\" shape parameteration:\", type(body.shape))\n for vartype in body.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(body.variables[vartype]))\n if print_level >= 0:\n for var in body.variables[vartype]:\n print(' variable:', var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)\n\n print(\" \")\n print(\"--------------------\")\n print(\"| Scenario summary |\")\n print(\"--------------------\")\n for scenario in self.scenarios:\n print(\"scenario:\", scenario.id, scenario.name)\n print(\" coupling group:\", scenario.group)\n print(\" steps:\", scenario.steps)\n print(\" steady?:\", scenario.steady)\n for func in scenario.functions:\n print(' function:', func.name, ', analysis_type:', func.analysis_type)\n print(' adjoint?', func.adjoint)\n if not scenario.steady:\n print(' time range', func.start, ',', func.stop)\n print(' averaging', func.averaging)\n\n\n for vartype in scenario.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(scenario.variables[vartype]))\n if print_level >= 0:\n for var in scenario.variables[vartype]:\n print(' variable:', var.id, var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)",
"def model_architecture(self, filename=None):\n list_summary = []\n self.model.summary(print_fn=lambda x: list_summary.append(x))\n summary = \"\\n\".join(list_summary)\n\n if filename:\n with open(filename + '.txt', 'w') as f:\n f.write(summary)\n\n from keras.utils import plot_model\n plot_model(self.model, filename + '.jpg')\n\n return summary",
"def summary(self):\n from statsmodels.iolib.summary import Summary\n from statsmodels.iolib.table import SimpleTable\n model = self.model\n title = model.__class__.__name__ + ' Model Results'\n\n dep_variable = 'endog'\n if isinstance(self.model.endog, pd.DataFrame):\n dep_variable = self.model.endog.columns[0]\n elif isinstance(self.model.endog, pd.Series):\n dep_variable = self.model.endog.name\n seasonal_periods = None if self.model.seasonal is None else self.model.seasonal_periods\n lookup = {'add': 'Additive', 'additive': 'Additive',\n 'mul': 'Multiplicative', 'multiplicative': 'Multiplicative', None: 'None'}\n transform = self.params['use_boxcox']\n box_cox_transform = True if transform else False\n box_cox_coeff = transform if isinstance(transform, str) else self.params['lamda']\n if isinstance(box_cox_coeff, float):\n box_cox_coeff = '{:>10.5f}'.format(box_cox_coeff)\n top_left = [('Dep. Variable:', [dep_variable]),\n ('Model:', [model.__class__.__name__]),\n ('Optimized:', [str(np.any(self.optimized))]),\n ('Trend:', [lookup[self.model.trend]]),\n ('Seasonal:', [lookup[self.model.seasonal]]),\n ('Seasonal Periods:', [str(seasonal_periods)]),\n ('Box-Cox:', [str(box_cox_transform)]),\n ('Box-Cox Coeff.:', [str(box_cox_coeff)])]\n\n top_right = [\n ('No. Observations:', [str(len(self.model.endog))]),\n ('SSE', ['{:5.3f}'.format(self.sse)]),\n ('AIC', ['{:5.3f}'.format(self.aic)]),\n ('BIC', ['{:5.3f}'.format(self.bic)]),\n ('AICC', ['{:5.3f}'.format(self.aicc)]),\n ('Date:', None),\n ('Time:', None)]\n\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n formatted = self.params_formatted # type: pd.DataFrame\n\n def _fmt(x):\n abs_x = np.abs(x)\n scale = 1\n if abs_x != 0:\n scale = int(np.log10(abs_x))\n if scale > 4 or scale < -3:\n return '{:>20.5g}'.format(x)\n dec = min(7 - scale, 7)\n fmt = '{{:>20.{0}f}}'.format(dec)\n return fmt.format(x)\n\n tab = []\n for _, vals in formatted.iterrows():\n tab.append([_fmt(vals.iloc[1]),\n '{0:>20}'.format(vals.iloc[0]),\n '{0:>20}'.format(str(bool(vals.iloc[2])))])\n params_table = SimpleTable(tab, headers=['coeff', 'code', 'optimized'],\n title=\"\",\n stubs=list(formatted.index))\n\n smry.tables.append(params_table)\n\n return smry",
"def create_summary_and_adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_model_for_gpus()",
"def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))",
"def set_up_summary_writer(model_config,\n sess):\n\n paths_config = model_config.paths_config\n\n logdir = paths_config.logdir\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n logdir = logdir + '/eval'\n\n datetime_str = str(datetime.datetime.now())\n summary_writer = tf.summary.FileWriter(logdir + '/' + datetime_str,\n sess.graph)\n\n global_summaries = set([])\n summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))\n summary_merged = summary_utils.summaries_to_keep(summaries,\n global_summaries,\n histograms=False,\n input_imgs=False,\n input_bevs=False)\n\n return summary_writer, summary_merged",
"def create_summary_writer(model, data_loader, log_dir):\n writer = SummaryWriter(log_dir=log_dir)\n data_loader_iter = iter(data_loader)\n x = next(data_loader_iter)\n try:\n writer.add_graph(model, x)\n except Exception as e:\n warnings.warn(\"Failed to save model graph: {}\".format(e))\n return writer",
"def get_model_summary(self):\n\n summary = self._model[0].get_model_summary()\n lower_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[0])\n upper_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[1])\n summary_title = 'Segment model range: ' \\\n + lower_bound \\\n + ' <= ' + self._explanatory_variables[0] \\\n + ' < ' + upper_bound\n summary.tables[0].title = summary_title\n\n number_of_segments = self.get_number_of_segments()\n\n spacer_table = SimpleTable(data=['=' * 50])\n\n for i in range(1, number_of_segments):\n segment_model_summary = self._model[i].get_model_summary()\n lower_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[i])\n upper_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[i + 1])\n summary_title = 'Segment model range: ' \\\n + lower_bound \\\n + ' <= ' + self._explanatory_variables[0] \\\n + ' < ' + upper_bound\n segment_model_summary.tables[0].title = summary_title\n summary.tables.extend([spacer_table] + segment_model_summary.tables)\n\n return summary",
"def get_model_summary(self):\n\n summary = Summary()\n\n # add the model equation with estimated parameters\n model_equation = self._get_model_equation()\n summary.tables.append(model_equation)\n\n # add the parameter summary\n params_summary = self._get_params_summary()\n summary.tables.append(params_summary)\n\n res = self._model.fit()\n\n # add more summary statistics\n gleft = self._get_left_summary_table(res)\n gright = self._get_right_summary_table(res)\n summary.add_table_2cols(res, gleft=gleft, gright=gright)\n\n # add extreme influence and outlier table\n high_leverage = ('High leverage:', self._FLOAT_STRING_FORMAT.format(3 * res.params.shape[0] / res.nobs))\n extreme_outlier = ('Extreme outlier (Standardized residual):', self._FLOAT_STRING_FORMAT.format(3))\n dfn = res.params.shape[0] + 1\n dfd = res.nobs + res.params.shape[0]\n high_influence_cooksd = (\"High influence (Cook's D)\",\n self._FLOAT_STRING_FORMAT.format(stats.f.ppf(0.9, dfn=dfn, dfd=dfd)))\n high_influence_dffits = (\"High influence (DFFITS)\",\n self._FLOAT_STRING_FORMAT.format(2 * np.sqrt(res.params.shape[0] / res.nobs)))\n influence_and_outlier_table_data = [high_leverage,\n extreme_outlier,\n high_influence_cooksd,\n high_influence_dffits]\n influence_and_outlier_table = SimpleTable(data=influence_and_outlier_table_data)\n summary.tables.append(influence_and_outlier_table)\n\n return summary"
] | [
"0.73806274",
"0.7147296",
"0.6999165",
"0.6930897",
"0.68741435",
"0.6732777",
"0.67200786",
"0.6702313",
"0.6616318",
"0.65155643",
"0.6488972",
"0.6480176",
"0.6442557",
"0.64185977",
"0.6416413",
"0.6397906",
"0.6342395",
"0.63167113",
"0.63136315",
"0.6211133",
"0.620451",
"0.6184558",
"0.6125917",
"0.60407835",
"0.5997129",
"0.5974154",
"0.5956726",
"0.59416246",
"0.589099",
"0.5879752"
] | 0.7225569 | 1 |
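The positive document in the row above defers the real work to model.generate_model_summary(crop_size, ...), which belongs to the surrounding codebase's BaseModel and is not shown here. As a rough illustration of what such a summary typically reports, a trainable-parameter count and the output shape for a given crop size, here is a hedged sketch; the helper name and the toy 3D network are assumptions made for the example only.

import logging
import torch
import torch.nn as nn

def log_model_summary(model: nn.Module, crop_size) -> None:
    # Expand a scalar crop size into a 3D crop, mirroring the document above.
    if isinstance(crop_size, int):
        crop_size = (crop_size, crop_size, crop_size)
    n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # Dummy forward pass with a single-channel volume of the given crop size.
    dummy = torch.zeros(1, 1, *crop_size)
    with torch.no_grad():
        output = model(dummy)
    logging.info("Trainable parameters: %d", n_trainable)
    logging.info("Output shape for crop size %s: %s", crop_size, tuple(output.shape))

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    toy_segmenter = nn.Conv3d(in_channels=1, out_channels=2, kernel_size=3, padding=1)
    log_model_summary(toy_segmenter, crop_size=32)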
Create a model with temperature scaling by wrapping the result of config.create_model with ModelWithTemperature, if temperature scaling config has been provided, otherwise return the result of config.create_model | def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:
# wrap the model around a temperature scaling model if required
model = config.create_model()
if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:
model = ModelWithTemperature(model, config.temperature_scaling_config)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)",
"def do_create_model(**kwargs):\n model_params = {\n 'name': kwargs['dag_run'].conf.get('model_name'),\n 'description': 'A custom DNN regressor model',\n 'regions': [REGION]\n }\n\n ti = kwargs['ti']\n\n is_model = ti.xcom_pull(key='is_project', task_ids='check_model')\n if not is_model:\n mle = MLEngineHook()\n mle.create_model(PROJECT, model_params)",
"def model(self, **config_kwargs):\n measurement = self.get_measurement(**config_kwargs)\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.get('patches', [])\n\n modelspec = {\n 'channels': self.spec['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poiname=measurement['config']['poi'], **config_kwargs)",
"def create(model: TModel) -> ModelTransformer:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.graph.model_transformer import ONNXModelTransformer\n\n return ONNXModelTransformer(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.graph.model_transformer import OVModelTransformer\n\n return OVModelTransformer(model)\n if model_backend == BackendType.TORCH:\n from nncf.torch.model_transformer import PTModelTransformer\n\n return PTModelTransformer(model)\n raise RuntimeError(\n \"Cannot create backend-specific model transformer because {} is not supported!\".format(model_backend)\n )",
"def create_scaling_model(params, experiments, reflections):\n autos = [None, Auto, \"auto\", \"Auto\"]\n use_auto_model = params.model in autos\n # Determine non-auto model to use outside the loop over datasets.\n if not use_auto_model:\n model_class = None\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == params.model:\n model_class = entry_point.load()\n break\n if not model_class:\n raise ValueError(f\"Unable to create scaling model of type {params.model}\")\n\n for expt, refl in zip(experiments, reflections):\n if not expt.scaling_model or params.overwrite_existing_models:\n # need to make a new model\n if use_auto_model:\n if not expt.scan:\n model = KBScalingModel\n else: # set model as physical unless scan < 1.0 degree\n osc_range = expt.scan.get_oscillation_range()\n abs_osc_range = abs(osc_range[1] - osc_range[0])\n if abs_osc_range < 1.0:\n model = KBScalingModel\n else:\n model = PhysicalScalingModel\n else:\n model = model_class\n expt.scaling_model = model.from_data(params, expt, refl)\n else:\n # allow for updating of an existing model.\n expt.scaling_model.update(params)\n return experiments",
"def calibrate_temperature(task_id, data, mnet, hnet, hhnet, device, config,\n shared, logger, writer, cal_per_model=False,\n only_correctly_classified=False,\n cal_target_entropy=-1):\n logger.info('Temperature calibration for task %d ...' % (task_id+1))\n\n # FIXME We could also follow the code from\n # https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py\n # but they don't consider BNNs. Note, there code is much more efficient\n # since they compute the logits before entering the training loop (which\n # is possible when only having one model). Though, in general, we have\n # multiple models.\n\n set_train_mode(True, mnet, hnet, hhnet, None)\n\n gauss_main = False\n if isinstance(mnet, GaussianBNNWrapper):\n gauss_main = True\n\n # Whether the hypernet represents an implicit distribution (i.e., it's\n # input is a random variable), or whether it has task embeddings as input.\n det_hnet = False\n if hnet is not None:\n if hnet.num_known_conds > 0:\n assert hhnet is None\n\n det_hnet = True\n # Can currently only be the case if we train a BbB setup with option\n # `mean_only` enabled.\n if not gauss_main:\n assert hasattr(config, 'mean_only') and config.mean_only\n\n # The single parameter to be tuned by this method.\n temp_param = torch.nn.Parameter(shared.softmax_temp[task_id],\n requires_grad=True)\n assert temp_param == 1.\n\n # Which temperature transfer function to use during training. Note, this\n # can ensure that temperatures don't become negative.\n # ttf = temperature transfer function\n ttf_choice = 'softplus'\n if ttf_choice == 'linear':\n ttf = lambda x : x\n #torch.nn.init.ones_(temp_param.data)\n elif ttf_choice == 'exp':\n ttf = torch.exp\n torch.nn.init.zeros_(temp_param.data)\n else:\n ttf = F.softplus\n temp_param.data = torch.log(torch.exp(torch.ones(1)) - \\\n torch.ones(1)).to(device)\n\n allowed_outputs = pmutils.out_units_of_task(config, data, task_id,\n config.num_tasks)\n\n optimizer = tutils.get_optimizer([temp_param], config.lr,\n momentum=config.momentum, weight_decay=config.weight_decay,\n use_adam=config.use_adam, adam_beta1=config.adam_beta1,\n use_rmsprop=config.use_rmsprop, use_adadelta=config.use_adadelta,\n use_adagrad=config.use_adagrad)\n\n mnet_kwargs = pmutils.mnet_kwargs(config, task_id, mnet)\n\n num_w_samples = config.train_sample_size if config.cal_sample_size == -1 \\\n else config.cal_sample_size\n\n with torch.no_grad():\n # We don't change any network parameters, so these calls produce\n # constant outputs.\n theta_current = None\n if hhnet is not None:\n theta_current = hhnet.forward(cond_id=task_id)\n theta_current = [p.detach() for p in theta_current]\n\n if gauss_main:\n assert hhnet is None\n\n if hnet is not None:\n hnet_out = hnet.forward(cond_id=task_id)\n else:\n hnet_out = None\n w_mean, w_rho = mnet.extract_mean_and_rho(weights=hnet_out)\n w_std = putils.decode_diag_gauss(w_rho,\n logvar_enc=mnet.logvar_encoding)\n\n elif det_hnet:\n w_mean = hnet.forward(cond_id=task_id)\n\n ### We first compute the logit outputs over all samples for all models,\n ### since they don't change anymore.\n # FIXME Could lead to memory issues for large datasets and might not be\n # inefficient if ``config.cal_temp_iter`` is small, since we won't\n # iterate over the whole dataset.\n inputs = data.get_train_inputs()\n targets = data.get_train_outputs()\n\n T = data.output_to_torch_tensor(targets, device, mode='train')\n # Modify 1-hot encodings according to CL scenario.\n assert T.shape[1] == data.num_classes\n # In CL1, 
CL2 and CL3 (with seperate heads) we do not have to modify the\n # targets.\n if config.cl_scenario == 3 and not config.split_head_cl3:\n raise NotImplementedError('Temperature calibration not ' +\n 'implemented for CL3 without split-head.')\n\n _, labels = torch.max(T, 1) # Integer labels.\n #labels = labels.detach()\n\n num_samples = inputs.shape[0]\n\n logit_outputs = torch.empty((num_w_samples, num_samples, T.shape[1])). \\\n to(device)\n\n for j in range(num_w_samples):\n if gauss_main: # Gaussian weight posterior.\n # In case of the local-reparam trick, we anyway have a different\n # weight per sample. So, the demand of having the same model for\n # all samples in the dataset drops.\n if config.local_reparam_trick:\n # Note, the sampling will happen inside the forward method.\n weights = None\n emean = w_mean\n erho = w_rho\n else:\n weights = putils.sample_diag_gauss(w_mean, w_std,\n is_radial=config.radial_bnn)\n emean = None\n erho = None\n\n elif det_hnet:\n weights = w_mean\n\n else:\n if hnet is not None: # Implicit hypernetwork.\n z = torch.normal(torch.zeros(1, shared.noise_dim),\n config.latent_std).to(device)\n weights = hnet.forward(uncond_input=z,\n weights=theta_current)\n else: # Main network only training.\n weights = None\n\n # I use the validation batch size on purpose, since it is usually\n # bigger and we just want to quickly compute the logits.\n curr_bs = config.val_batch_size\n n_processed = 0\n\n while n_processed < num_samples:\n if n_processed + curr_bs > num_samples:\n curr_bs = num_samples - n_processed\n n_processed += curr_bs\n\n sind = n_processed - curr_bs\n eind = n_processed\n\n ### Compute negative log-likelihood (NLL).\n X = data.input_to_torch_tensor(inputs[sind:eind, :], device,\n mode='train')\n\n if gauss_main:\n Y = mnet.forward(X, weights=None, mean_only=False,\n extracted_mean=emean, extracted_rho=erho,\n sample=weights, **mnet_kwargs)\n else:\n Y = mnet.forward(X, weights=weights, **mnet_kwargs)\n\n if allowed_outputs is not None:\n Y = Y[:, allowed_outputs]\n\n logit_outputs[j, sind:eind, :] = Y\n\n # Since we computed all training logits, we might as well compute\n # the training accuracy on the predictive distributions at temperature 1\n # (note, temperature doesn't change predicted labels).\n pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)\n assert pred_dists.ndim == 2\n _, pred_labels = torch.max(pred_dists, 1)\n train_acc = 100. * torch.sum(pred_labels == labels) / num_samples\n logger.debug('Task %d -- training accuracy: %.2f%%.' % \\\n (task_id+1, train_acc))\n\n log_pred_dists = torch.log(torch.clamp(pred_dists, min=1e-5))\n in_entropies = -torch.sum(pred_dists * log_pred_dists, dim=1)\n\n # Normalize by maximum entropy.\n max_ent = - np.log(1.0 / data.num_classes)\n in_entropies /= max_ent\n\n in_entropies_mean = in_entropies.mean()\n in_entropies_std = in_entropies.std()\n logger.debug('Task %d -- training in-dist. entropy: %f.' 
% \\\n (task_id+1, in_entropies_mean))\n\n if not hasattr(shared, 'train_in_ent_mean'):\n shared.train_in_ent_mean = []\n shared.train_in_ent_std = []\n shared.train_in_ent_mean.append( \\\n in_entropies_mean.detach().cpu().numpy())\n shared.train_in_ent_std.append(in_entropies_std.detach().cpu().numpy())\n\n if only_correctly_classified:\n num_correct = torch.sum(pred_labels == labels)\n\n logger.info('Task %d -- only using %d/%d correctly classified ' \\\n % (task_id+1, num_correct, num_samples) + \\\n 'samples for calibration.')\n\n logit_outputs = logit_outputs[:, pred_labels == labels, :]\n num_samples = num_correct\n assert logit_outputs.shape[1] == num_correct\n\n labels = labels[pred_labels == labels]\n assert labels.shape[0] == num_correct\n\n # Sanity check!\n pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)\n _, pred_labels = torch.max(pred_dists, 1)\n assert torch.sum(pred_labels == labels) == num_correct\n\n logit_outputs = logit_outputs.detach()\n\n ### Calibrate temperature.\n for i in range(config.cal_temp_iter):\n optimizer.zero_grad()\n\n batch_inds = np.random.randint(0, num_samples, config.batch_size)\n\n batch_logits = logit_outputs[:, batch_inds, :]\n batch_labels = labels[batch_inds]\n assert batch_logits.ndim == 3\n\n # Note, this first option is more numerically stable when calibrating NLL.\n if cal_per_model or num_w_samples == 1:\n loss = 0\n for j in range(num_w_samples):\n if cal_target_entropy != -1:\n batch_sm = F.softmax(batch_logits[j, :, :] / \\\n ttf(temp_param), dim=1)\n # For numerical stability.\n batch_log_sm = torch.log(torch.clamp(batch_sm, min=1e-5))\n\n # Mean entropy within the batch.\n batch_entropy = -torch.sum(batch_sm * batch_log_sm,\n dim=1).mean()\n\n loss += (batch_entropy - cal_target_entropy)**2\n else: # Compute NLL loss\n # Note, softmax will be computed inside the `cross_entropy`.\n loss += F.cross_entropy( \\\n batch_logits[j, :, :] / ttf(temp_param), batch_labels,\n reduction='mean')\n loss /= num_w_samples\n\n else:\n batch_pred_dist = F.softmax(batch_logits / ttf(temp_param),\n dim=2).mean(dim=0)\n # FIXME nll_loss expects log_softmax as input. 
To compute the\n # predictive distribution, we have to first average softmax outputs\n # before we can apply the log, which might lead to numerical\n # instabilities.\n #batch_log_pd = batch_pred_dist\n #batch_log_pd[batch_pred_dist < 1e-5] = 1e-5\n batch_log_pd = torch.clamp(batch_pred_dist, min=1e-5)\n batch_log_pd = torch.log(batch_log_pd)\n if cal_target_entropy != -1:\n # Mean entropy within the batch.\n batch_entropy = -torch.sum(batch_pred_dist * batch_log_pd,\n dim=1).mean()\n\n loss += (batch_entropy - cal_target_entropy)**2\n else: # Compute NLL loss\n loss = F.nll_loss(batch_log_pd, batch_labels, reduction='mean')\n\n loss.backward()\n if config.clip_grad_value != -1:\n torch.nn.utils.clip_grad_value_(optimizer.param_groups[0]['params'],\n config.clip_grad_value)\n elif config.clip_grad_norm != -1:\n torch.nn.utils.clip_grad_norm_(optimizer.param_groups[0]['params'],\n config.clip_grad_norm)\n optimizer.step()\n\n if ttf_choice == 'linear':\n # NOTE In this case, nothing prevents the temperature from going\n # negative (e.g., when starting with a large learning rate).\n # Therefore, we have to actively capture this case.\n temp_param.data = torch.clamp(temp_param, min=1e-5)\n\n if i % 50 == 0:\n writer.add_scalar('cal/task_%d/loss' % task_id, loss, i)\n writer.add_scalar('cal/task_%d/temp' % task_id,\n ttf(temp_param), i)\n\n final_temp = ttf(temp_param).data\n shared.softmax_temp[task_id] = final_temp.data\n\n logger.info('Calibrated softmax temperature of task %d is: %f.' % \\\n (task_id+1, final_temp))\n\n logger.info('Temperature calibration for task %d ... Done' % (task_id+1))",
"def create(self, req, body):\n context = req.environ['meteos.context']\n\n if not self.is_valid_body(body, 'model'):\n raise exc.HTTPUnprocessableEntity()\n\n model = body['model']\n\n LOG.debug(\"Create model with request: %s\", model)\n\n try:\n experiment = self.engine_api.get_experiment(\n context, model['experiment_id'])\n utils.is_valid_status(experiment.__class__.__name__,\n experiment.status,\n constants.STATUS_AVAILABLE)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n except exception.InvalidStatus:\n raise\n\n display_name = model.get('display_name')\n display_description = model.get('display_description')\n experiment_id = model.get('experiment_id')\n source_dataset_url = model.get('source_dataset_url')\n dataset_format = model.get('dataset_format', 'csv')\n model_type = model.get('model_type')\n model_params = model.get('model_params')\n swift_tenant = model.get('swift_tenant')\n swift_username = model.get('swift_username')\n swift_password = model.get('swift_password')\n\n new_model = self.engine_api.create_model(context,\n display_name,\n display_description,\n source_dataset_url,\n dataset_format,\n model_type,\n model_params,\n template.id,\n template.job_template_id,\n experiment_id,\n experiment.cluster_id,\n swift_tenant,\n swift_username,\n swift_password)\n\n return self._view_builder.detail(req, new_model)",
"def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))",
"def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)",
"def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector",
"def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model",
"def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model",
"def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()",
"def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model",
"def get_model(model_name, model_config, to_cuda,\n uniform_initialize_bn_weight=False, forward_is_infer=False):\n model = None\n if model_name == 'Tacotron2':\n if forward_is_infer:\n class Tacotron2__forward_is_infer(Tacotron2):\n def forward(self, inputs, input_lengths):\n return self.infer(inputs, input_lengths)\n model = Tacotron2__forward_is_infer(**model_config)\n else:\n model = Tacotron2(**model_config)\n elif model_name == 'WaveGlow':\n if forward_is_infer:\n class WaveGlow__forward_is_infer(WaveGlow):\n def forward(self, spect, sigma=1.0):\n return self.infer(spect, sigma)\n model = WaveGlow__forward_is_infer(**model_config)\n else:\n model = WaveGlow(**model_config)\n else:\n raise NotImplementedError(model_name)\n\n if uniform_initialize_bn_weight:\n init_bn(model)\n\n if to_cuda:\n model = model.cuda()\n return model",
"def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)",
"def create_mean_teacher_model(self) -> None:\n self._mean_teacher_model = create_model_with_temperature_scaling(self.config)",
"def config_task(self) -> None:\n weights = self.hyperparams[\"weights\"]\n\n if self.hyperparams[\"model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['model']}' is not valid. \"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n\n class_weights = None\n if isinstance(self.class_weights, torch.Tensor):\n class_weights = self.class_weights.to(dtype=torch.float32)\n elif hasattr(self.class_weights, \"__array__\") or self.class_weights:\n class_weights = torch.tensor(self.class_weights, dtype=torch.float32)\n\n self.loss = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=class_weights\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. \"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n if self.hyperparams[\"model\"] != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hyperparams.get(\"freeze_backbone\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hyperparams.get(\"freeze_decoder\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False",
"def set_up_and_parameterise_model_for_experiment(self):\n self.experiment_unique_steps_to_model = {}\n for op_number, op in enumerate(self.experiment.unique_steps):\n new_model = self.model.new_copy()\n new_parameter_values = self.parameter_values.copy()\n\n if op.type != \"current\":\n # Voltage or power control\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # check which kind of external circuit model we need (differential\n # or algebraic)\n if op.type == \"voltage\":\n submodel_class = pybamm.external_circuit.VoltageFunctionControl\n elif op.type == \"power\":\n submodel_class = pybamm.external_circuit.PowerFunctionControl\n\n # Build the new submodel and update the model with it\n submodel = submodel_class(new_model.param, new_model.options)\n variables = new_model.variables\n submodel.variables = submodel.get_fundamental_variables()\n variables.update(submodel.variables)\n submodel.variables.update(submodel.get_coupled_variables(variables))\n variables.update(submodel.variables)\n submodel.set_rhs(variables)\n submodel.set_algebraic(variables)\n submodel.set_initial_conditions(variables)\n new_model.rhs.update(submodel.rhs)\n new_model.algebraic.update(submodel.algebraic)\n new_model.initial_conditions.update(submodel.initial_conditions)\n\n # Set the \"current function\" to be the variable defined in the submodel\n new_parameter_values[\"Current function [A]\"] = submodel.variables[\n \"Current [A]\"\n ]\n self.update_new_model_events(new_model, op)\n # Update parameter values\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n experiment_parameter_values = self.get_experiment_parameter_values(\n op, op_number\n )\n new_parameter_values.update(\n experiment_parameter_values, check_already_exists=False\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[repr(op)] = parameterised_model\n\n # Set up rest model if experiment has start times\n if self.experiment.initial_start_time:\n new_model = self.model.new_copy()\n # Update parameter values\n new_parameter_values = self.parameter_values.copy()\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n new_parameter_values.update(\n {\"Current function [A]\": 0, \"Ambient temperature [K]\": \"[input]\"},\n check_already_exists=False,\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[\n \"Rest for padding\"\n ] = parameterised_model",
"def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError",
"def prepare_model_(model, *data, device='cpu'):\n _auto_name('', model)\n set_default_parent(model)\n def _prep_data(d):\n if isinstance(d, (np.ndarray, torch.Tensor)):\n return torch.as_tensor(d).to(device)\n elif isinstance(d, (list, tuple)):\n if all(isinstance(x, int) for x in d):\n return torch.randn(*d, device=device)\n return [_prep_data(x) for x in d]\n elif isinstance(d, dict):\n return {k:_prep_data(v) for k, v in d.items()}\n with torch.no_grad():\n is_training = model.training\n data = [_prep_data(d) for d in data]\n model.eval()\n model.to(device)\n model(*data)\n model.train(is_training)\n return model",
"def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model",
"def _random_model(self, input_size, output_size, task, config: dict) -> AbstractModel:\n return create_random_model(input_size, output_size, config, task)",
"def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model",
"def create_model(model_class, model_params=None, model_name='model'):\n\n model_params = {} if model_params is None else model_params\n\n model = model_class(**model_params)\n\n if special_parameters.load_model: # recover from checkpoint\n _load_model(model, model_name)\n\n # configure usage on GPU\n if use_gpu():\n model.to(first_device())\n model = torch.nn.DataParallel(model, device_ids=all_devices())\n\n # print info about devices\n print_info('Device(s)): ' + str(device_description()))\n\n return model",
"def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model",
"def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args",
"def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model",
"def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model",
"def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance"
] | [
"0.73296446",
"0.57077694",
"0.570612",
"0.57056636",
"0.56610817",
"0.55574673",
"0.54785675",
"0.5419638",
"0.53965676",
"0.53905576",
"0.538604",
"0.538604",
"0.5313637",
"0.52842414",
"0.52818054",
"0.5228253",
"0.5218954",
"0.5209238",
"0.52031755",
"0.5201383",
"0.51910466",
"0.51866466",
"0.5161635",
"0.515062",
"0.5128692",
"0.5108899",
"0.5107936",
"0.5107017",
"0.5105261",
"0.5101205"
] | 0.8471851 | 0 |
Load a twine from a .json filename, file-like object, or JSON string, and validate the twine contents. | def _load_twine(self, source=None):
if source is None:
            # If loading an unspecified twine, return an empty one rather than raising an error (like in _load_data())
raw_twine = {}
logger.warning("No twine source specified. Loading empty twine.")
else:
raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object"))
self._validate_against_schema("twine", raw_twine)
self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None))
return raw_twine | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_tweets(filename):\n\n try:\n with open(filename, 'r') as f:\n data = json.loads(f.read())\n except:\n print('ERROR in load_tweets.')\n\n return data",
"def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test",
"def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)",
"def load(filename):\n\n try:\n with open(filename) as data:\n return json.load(data)\n except:\n return None",
"def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))",
"def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False",
"def load_json(path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'r') as json_file:\n return json.load(json_file)",
"def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')",
"def load_tweets(file):\n with open(file) as f:\n data = json.load(f)\n return data",
"def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data",
"def test_loader_loads_from_str():\n base_json = '{\"foo\": \"bar\"}'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json, from_file=False) == json_test",
"def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()",
"def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data",
"def load_from_json_file(filename):\n if type(filename) is not str:\n return\n\n with open(filename, mode=\"r\") as file:\n return json.loads(file.read())",
"def load_json(filepath: str):\n with open(filepath, \"r\", encoding=\"utf8\") as f:\n return json.loads(f.read())",
"def from_file(filename):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n f = open(filename, 'r')\n string = f.read()\n return json.loads(string)",
"def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)",
"def read(self,filename):\n with open(str(filename),\"r\") as f:\n data = f.read()\n #check if the loaded file is json\n try:\n datajson = json.loads(data)\n except Exception as e:\n if mer == True:\n merrors.error('could not load '+str(filename)+', add a basic entry to the config like {\"name\":\"Example\"}. Python error: '+str(e))\n quit()\n else:\n print(\"could not load \"+str(filename)+\". Python error: \"+str(e))\n quit()\n self.datajson = datajson\n self.filename = filename\n f.close()",
"def load_json(filepath: str):\n with open(filepath, encoding=\"utf-8\") as f:\n return json.load(f)",
"def load_from_json_file(filename):\n with open(filename, encoding=\"utf-8\") as round:\n return json.load(round)",
"def load_life(name):\n\tif not '.json' in name:\n\t\tname += '.json'\n\t\n\twith open(os.path.join(LIFE_DIR, name), 'r') as e:\n\t\treturn json.loads(''.join(e.readlines()))",
"def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))",
"def __load_json(self, path):\n try:\n with Path(path).open('r') as f:\n return json.load(f)\n except ValueError as ve:\n six.raise_from(ValueError(\"error while loading the fixture %s\" % path), ve)",
"def load(self):\n filename = self._filename\n if not os.path.exists(filename):\n self.service.log.store('Cannot load %s, does not exist' % filename)\n return False\n \n # Read from file\n self.service.log.store('Loading %s' % filename)\n f = open(filename, 'r')\n raw = f.read()\n f.close()\n \n self.from_json(raw)\n return True",
"def load_tweets(fname):\n tweets = []\n for line in open(fname):\n tweets.append(json.loads(line))\n return tweets",
"def _loadf(fname):\n with open(fname, encoding=\"ISO-8859-1\") as f:\n return json.load(f)",
"def load_from_file(self, filepath, validate=True, results=None):\n with open(filepath) as fd:\n try:\n data = json.load(fd)\n except ValueError as ex:\n ex = JSONEncodingError(ex)\n if not results:\n raise ex\n results.add(filepath, ex)\n return results\n\n return self.load(data, validate=validate, results=results, id=filepath)",
"def test_load_unsupported_type(self):\n expected = {\n \"name\": \"Kevin\",\n \"age\": 21,\n \"pet\": {\n \"name\": \"Trippy Jack\",\n \"age\": 20762,\n \"__type__\": \"hyperdimensional.hamster\"\n }\n }\n with open('tests/unsupported_type.json', 'r') as json_file:\n self.assertEqual(expected, morejson.load(json_file))",
"def loadJSON(jsonData):\n\n if hasattr(jsonData, 'read'):\n loadedjson = json.load(jsonData)\n elif isinstance(jsonData, str):\n if os.path.exists(jsonData):\n with open(jsonData) as jsonFile:\n loadedjson = json.load(jsonFile)\n else:\n try:\n loadedjson = json.loads(jsonData)\n except JSONDecodeError as e:\n raise ValueError(f\" {str(e)}: Got {jsonData}, either bad format of file does not exist\")\n\n elif isinstance(jsonData, dict):\n loadedjson = jsonData\n else:\n err = f\"workflow type: {type(jsonData)} is unknonw. Must be str, file-like or dict. \"\n raise ValueError(err)\n\n\n return loadedjson",
"def read_object_from_file(file_name):\n if os.path.exists(file_name) is False:\n print (\"Error read path: [%s]\" % file_name)\n return None\n with open(file_name, 'r') as f:\n try:\n obj = json.load(f)\n except Exception:\n print (\"Error json: [%s]\" % f.read()[0:10])\n return None\n return obj"
] | [
"0.64222986",
"0.61474323",
"0.6144654",
"0.61129427",
"0.6093864",
"0.6074711",
"0.6030403",
"0.59984696",
"0.59946185",
"0.59418505",
"0.59320986",
"0.58631575",
"0.5842694",
"0.5824927",
"0.57905143",
"0.5786703",
"0.5777964",
"0.57755256",
"0.5758631",
"0.57461494",
"0.57401806",
"0.57319224",
"0.5727668",
"0.57230777",
"0.5722163",
"0.5718931",
"0.5711493",
"0.5708973",
"0.5705908",
"0.56931615"
] | 0.7133785 | 0 |
Get the schema for the given strand. | def _get_schema(self, strand):
if strand == "twine":
# The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain
            # schema itself. The twine schema is distributed with this package to ensure version consistency...
schema_path = "schema/twine_schema.json"
elif strand in CHILDREN_STRANDS:
# The data is a list of children. The "children" strand of the twine describes matching criteria for
# the children, not the schema of the "children" data, which is distributed with this package to ensure
# version consistency...
schema_path = "schema/children_schema.json"
elif strand in MANIFEST_STRANDS:
# The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
# filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
# is distributed with this package to ensure version consistency...
schema_path = "schema/manifest_schema.json"
else:
if strand not in SCHEMA_STRANDS:
raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")
# Get schema from twine.json file.
schema_key = strand + "_schema"
try:
return getattr(self, schema_key)
except AttributeError:
raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")
return jsonlib.loads(pkg_resources.resource_string("twined", schema_path)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_schema(self):\n self._pick()\n return Schema()",
"def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))",
"def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))",
"def get_schema(cls):\n return cls.schema()",
"def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})",
"def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA",
"def get_schema(self) -> ArchiveSchema:\n return self.schema",
"def get_schema(self):\r\n return self.__schema",
"def sample_schema(self):\n if 'sample' not in self._schemas:\n logging.debug(f\"{self.id} - no schema? {self._schemas}\")\n return None\n return self._schemas['sample']",
"def getSchema(cls):\n pass",
"def schema(cls):\n return Schema.get_instance(cls)",
"def get_schema(schema): # noqa: E501\n return 'do some magic!'",
"def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()",
"def getSchema( sourceDirectory ):\r\n if( sourceDirectory == settings.LEXISNEXIS_FILETAG ): return LexisNexisSchema()\r\n raise Exception( \"Filer for source <%s> is not registered in getSchema( source ).\" % ( sourceDirectory ) )",
"def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out",
"def get_schema(self) -> dict:",
"async def get_schema(request: Request, namespace: str, project: str):\n # endpoint to schema.databio.org/...\n # like pipelines/ProseqPEP.yaml\n\n try:\n schema = eido.read_schema(\n f\"https://schema.databio.org/{namespace}/{project}.yaml\"\n )[0]\n except IndexError:\n raise HTTPException(status_code=404, detail=\"Schema not found\")\n\n return schema",
"def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)",
"def get_schema(self, name):\n return Schema(self, name)",
"def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)",
"def schema(self, name):\n return model.Schema(self, name)",
"def _get_schema(name):\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)",
"def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")",
"def schema(self):\n return self._schema",
"def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])",
"def getDBSchema(self, desired=None):\n role = self.getRole(desired)\n schema = role[\"roleName\"]\n return schema",
"def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10",
"def schema(self):\n # type: () -> object\n return self._schema",
"def get_default_schema(self):\n schema = self._connection.settings.get(\"schema\")\n if schema:\n res = (\n self.sql(_SELECT_SCHEMA_NAME_QUERY.format(escape(schema)))\n .execute()\n .fetch_all()\n )\n try:\n if res[0][0] == schema:\n return Schema(self, schema)\n except IndexError:\n raise ProgrammingError(\n f\"Default schema '{schema}' does not exists\"\n ) from None\n return None",
"def get_schema(sid, did, scid):\n\n driver = get_driver(PG_DEFAULT_DRIVER)\n manager = driver.connection_manager(sid)\n conn = manager.connection(did=did)\n\n ver = manager.version\n server_type = manager.server_type\n\n # Fetch schema name\n status, schema_name = conn.execute_scalar(\n render_template(\n \"/\".join(['schemas',\n '{0}/#{1}#'.format(server_type, ver),\n 'sql/get_name.sql']),\n conn=conn, scid=scid\n )\n )\n\n return status, schema_name"
] | [
"0.66698045",
"0.6518725",
"0.6390867",
"0.6313399",
"0.6210594",
"0.62096435",
"0.61262435",
"0.61096203",
"0.6025287",
"0.5981262",
"0.5939648",
"0.5933518",
"0.5902661",
"0.58778864",
"0.5872866",
"0.58726424",
"0.58520615",
"0.58372164",
"0.58244634",
"0.5824207",
"0.581969",
"0.5783459",
"0.5776963",
"0.57689315",
"0.57574755",
"0.5726965",
"0.57250506",
"0.5714461",
"0.5697706",
"0.56923294"
] | 0.8233626 | 0 |
Validate that the installed version is consistent with an optional version specification in the twine file. | def _validate_twine_version(self, twine_file_twined_version):
installed_twined_version = pkg_resources.get_distribution("twined").version
logger.debug(
"Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
)
if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
raise exceptions.TwineVersionConflict(
f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid_version(self):\n pass",
"def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))",
"def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()",
"def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. \"\n \"Validation cannot be performed.\"\n ],\n )",
"def validate_project_version(config: Dict[str, Any]) -> None:\n spacy_version = config.get(\"spacy_version\", None)\n if spacy_version and not is_compatible_version(about.__version__, spacy_version):\n err = (\n f\"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) \"\n f\"that's not compatible with the version of spaCy you're running \"\n f\"({about.__version__}). You can edit version requirement in the \"\n f\"{PROJECT_FILE} to load it, but the project may not run as expected.\"\n )\n msg.fail(err, exits=1)",
"def test_valid_hh_version():\n # TODO: Basically only enforcing correct main segment, since not using `re.fullmatch`\n # TODO: Probably want `re.fullmatch` here - Currently ignoring any potentially invalid suffix\n version_pattern = r\"^[0-9]+\\.[0-9]+\\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])\"\n res = re.match(version_pattern, hh.__version__)\n assert res is not None",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def validate_required_python_version_running(minimal_required_version: str) -> None:\n try:\n parts = minimal_required_version.split(\".\")\n min_py_version = 1000000*int(parts[0]) + 1000*(int(parts[1]) if len(parts) > 1 else 0) + (int(parts[2]) if len(parts) > 2 else 0)\n running_py_version = 1000000*sys.version_info.major + 1000*sys.version_info.minor + sys.version_info.micro\n if running_py_version < min_py_version:\n raise RuntimeError(\"\")\n except:\n raise RuntimeError(f\"Kqlmagic requires python >= {Constants.MINIMAL_PYTHON_VERSION_REQUIRED}, you use python {sys.version}\")",
"def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks",
"def validate_backend_version(self):\n pass",
"def validate_version(version):\n matched = VERSION_REGEX.match(version)\n if matched is None:\n print_stderr(\"Version '{0}' does not match version regex\".format(version))\n return\n\n # Format is syntactically valid\n version_dict = matched.groupdict()\n\n # Check dependents\n dependencies = [(\"special_sep\", \"special\"), (\"special\", \"special_sep\"), (\"index_sep\", \"index\"), (\"index\", \"index_sep\"), (\"index\", \"special\"), (\"special\", \"index\")]\n for dependent, dependency in dependencies:\n if version_dict[dependent] and not version_dict[dependency]:\n print_stderr(\"Version '{0}' is invalid: '{1}' is defined but not '{2}'\".format(version, dependent, dependency))\n return None\n\n # Remove noise\n for noise in [\"special_sep\", \"index_sep\"]:\n del version_dict[noise]\n\n return version_dict",
"def test_pynast_suported_version(self):\r\n min_acceptable_version = (1, 2)\r\n max_acceptable_version = (1, 2, 2)\r\n try:\r\n from pynast import __version__ as pynast_lib_version\r\n version = pynast_lib_version.split('.')\r\n if version[-1][-4:] == '-dev':\r\n version[-1] = version[-1][:-4]\r\n version = tuple(map(int, version))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(pynast_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n\r\n min_version_str = '.'.join(map(str, min_acceptable_version))\r\n max_version_str = '.'.join(map(str, max_acceptable_version))\r\n error_msg = (\"Unsupported pynast version. Must be >= %s and <= %s, \"\r\n \"but running %s.\" % (min_version_str, max_version_str,\r\n version_string))\r\n self.assertTrue(pass_test, error_msg)",
"def package_version_check(args, parser):\n if (args.build or args.check) and args.package_version:\n parser.error('--package-version works only with --create')",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))",
"def check_version(ctx, builder, version_function, *,\n requires_version=None,\n requires_at_least_version=None,\n requires_at_most_version=None):\n if any(v is not None for v in (\n requires_version,\n requires_at_least_version,\n requires_at_most_version)):\n ctx.logger.check('checking %s version' % builder)\n\n version_str = version_function()\n\n # Convert the version into a tuple\n version = []\n for i in version_str.split('.'):\n try:\n version.append(int(i))\n except ValueError:\n # The subversion isn't a number, so just convert it to a\n # string.\n version.append(i)\n version = tuple(version)\n\n if requires_version is not None and requires_version != version:\n msg = 'version %s required; found %s' % (\n '.'.join(str(i) for i in requires_version), version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n if requires_at_least_version is not None and \\\n requires_at_least_version > version:\n msg = 'at least version %s required; found %s' % (\n '.'.join(str(i) for i in requires_at_least_version),\n version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n if requires_at_most_version is not None and \\\n requires_at_most_version < version:\n msg = 'at most version %s required; found %s' % (\n '.'.join(str(i) for i in requires_at_most_version),\n version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n ctx.logger.passed(version_str)",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions",
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def check_from_version(version: str) -> str:\n version_int = [int(v) for v in version.split(\".\")]\n if version_int[0] not in PipetteModelMajorVersion:\n raise ValueError(f\"Major version {version_int[0]} is not supported.\")\n if version_int[1] not in PipetteModelMinorVersion:\n raise ValueError(f\"Minor version {version_int[1]} is not supported.\")\n return version",
"def is_version_valid(version):\n return _compiled_version_regex.match(version) is not None",
"def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True",
"def check_version_is_supported(name, version, min_version, help=''):\n if (pkg_resources.parse_version(version) <\n pkg_resources.parse_version(min_version)):\n # Version is too old.\n print('ERROR: Unsupported %s version: %s (minimum %s).%s' %\n (name, version, min_version, (' %s' % help) if help else ''),\n file=sys.stderr)\n exit(1)",
"def check_version_2(dataset):\n\n if float(dataset.get('version')) >= 2.0 \\\n if dataset.get('version') else False:\n return True\n else:\n return False",
"def check_version():\n reset_flag = False\n try:\n data = du.read_yml(du.DEFAULT)\n if (\n data[\"version\"].split(\".\")[0] != __version__.split(\".\")[0]\n ): # If Version if different from \"1.x.y\" remove data:\n reset_flag = True\n except (KeyError, FileNotFoundError, TypeError):\n reset_flag = True\n\n if reset_flag:\n print(\"Your configuration file version is older than 1.0.0\")\n print(\n \"Your .Experiment file will be removed, please run daf.init to generate an up-to-date file\"\n )\n if os.path.isfile(du.DEFAULT):\n os.remove(du.DEFAULT)\n sys.exit(0)",
"def is_valid_version(self) -> bool:\n return self._is_valid_version()",
"def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))",
"def check_version_part(self, node, ecosystem, package, version):\n version_node = node[\"version\"]\n # check the ecosystem, version, and name attributes that are required for a version\n self.check_pecosystem(version_node)\n self.check_pname(version_node)\n self.check_version(version_node)\n\n # compare with expected values\n e = version_node[\"pecosystem\"][0]\n p = version_node[\"pname\"][0]\n v = version_node[\"version\"][0]\n self.compare_ecosystems(e, ecosystem)\n self.compare_packages(p, package)\n self.compare_versions(v, version)",
"def check_py_version(self, cur_version):\n\n # convert cur_version to string, in case of erroneous type being passed\n cur_version = str(cur_version)\n\n acceptable_python_versions_regex = r\"(^(2\\.[6-9])(\\.?\\d{1,2})?$)|(^(3\\.[3-9])(\\.?\\d{1,2})?$)\"\n pyversions_regex_compiled = re.compile(acceptable_python_versions_regex)\n pyversions_match = pyversions_regex_compiled.match(cur_version)\n\n # If match is found, return True. If no match, return False\n if pyversions_match:\n return True\n else:\n return False",
"def test_versioning_unknown_version(workflow_runner):\n with pytest.raises(WDL.Error.SyntaxError):\n workflow_runner(\"test_versioning_unknown_version.wdl\")"
] | [
"0.6902856",
"0.6780541",
"0.67345285",
"0.6424998",
"0.6407822",
"0.64057225",
"0.6383413",
"0.6376578",
"0.6337082",
"0.6273863",
"0.6264896",
"0.62600714",
"0.6233149",
"0.62270665",
"0.6219592",
"0.61668104",
"0.61236084",
"0.6105264",
"0.60952014",
"0.60824794",
"0.6016871",
"0.6010673",
"0.60073346",
"0.59973854",
"0.59782064",
"0.593414",
"0.59320503",
"0.5928235",
"0.59116226",
"0.59097207"
] | 0.7964266 | 0 |
Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest. | def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
# This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
manifest_schema = getattr(self, manifest_kind)
for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
if expected_dataset_name in manifest["datasets"]:
continue
if expected_dataset_schema.get("optional", False):
continue
raise exceptions.invalid_contents_map[manifest_kind](
f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))",
"def _IsApplicable(self, manifest):\n check_list = [(self.tool, manifest.tool), (self.board, manifest.board)]\n\n return all(fnmatch(text, pattern) for text, pattern in check_list)",
"def _warn_for_missing_datasets(self, datasets: set[str]):\n any_missing = False\n for ds in datasets:\n if not self.frames.has_dataset(ds):\n any_missing = True\n logger.warn(f'dataset \"{ds}\" is not in the database')\n if any_missing:\n logger.warn(f\"datasets in the databse: {self.all_datasets()}\")",
"def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))",
"def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }",
"def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)",
"def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True",
"def supports_manifest(manifest):\n pass",
"def test_theme_manifest(err, xpi_package=None):\n\n # Don't even both with the test(s) if there's no chrome.manifest.\n chrome = err.get_resource('chrome.manifest')\n if not chrome:\n return\n\n for triple in chrome.triples:\n subject = triple['subject']\n # Test to make sure that the triple's subject is valid\n if subject not in ('skin', 'style'):\n err.warning(\n err_id=('themes', 'test_theme_manifest',\n 'invalid_chrome_manifest_subject'),\n warning='Invalid chrome.manifest subject',\n description=('chrome.manifest files for full themes are only '\n \"allowed to have 'skin' and 'style' items. \"\n 'Other types of items are disallowed for '\n 'security reasons.',\n 'Invalid subject: %s' % subject),\n filename=triple['filename'],\n line=triple['line'],\n context=triple['context'])",
"def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False",
"def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False",
"def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"",
"def test_is_valid_manifest_with_missing_url_column(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == True",
"def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False",
"def test_required_attributes(self):\n\n required_attributes = ('ID', )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Jakob2019))",
"def sanity_check(hdf):\n required_paths = ['Analyses', 'UniqueGlobalKey', 'Analyses/EventDetection_000']\n try:\n for p in required_paths:\n if p not in hdf:\n return False\n return True\n except:\n return False",
"def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df",
"def count_missing_stats(manifest):\n num_missing = 0\n for element in manifest:\n if element.missing_stats():\n num_missing += 1\n return num_missing",
"def is_manifest_list(self):\n return False",
"def split_manifest(root_path, manifest_file_path):\n\n train_manifest = open(os.path.join(root_path,\"dataset\", \"train_manifest.txt\"), \"w+\")\n test_manifest = open(os.path.join(root_path, \"dataset\",\"test_manifest.txt\"), \"w+\")\n val_manifest = open(os.path.join(root_path,\"dataset\" ,\"valid_manifest.txt\"), \"w+\")\n with open(os.path.join(root_path, manifest_file_path), 'r') as f:\n data_manifest = f.read().strip().split('\\n')\n data_len = len(data_manifest)\n k = 0\n for i in data_manifest:\n if k == 0:\n k = k+1\n continue\n elif k == 1:\n train_manifest.write(i+'\\n')\n test_manifest.write(i+'\\n')\n val_manifest.write(i+'\\n')\n elif k <= data_len*0.6: # 60% on train set\n train_manifest.write(i+'\\n')\n elif k > data_len*0.6 and k <= data_len*0.8: # 20 % on test\n test_manifest.write(i+'\\n')\n else: #20 % on test\n val_manifest.write(i+'\\n')\n k = k+1\n print(\"Spliting attritutes Done!\")",
"def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False",
"def verify_wilds(self):\n self.check_dataset_duplicate_ids(self.wilds)",
"def test_manifest(self):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.iteritems():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n print \"Tested %s, %s names\" % (self.nuke_node.name(), len(manifest))\n print \" \", len(errors), \"non-matching IDs between python and c++.\"\n print \" \", len(collisions), \"hash collisions in manifest.\"\n\n return errors, collisions",
"def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)",
"def check_dataset(number_episode_dics):\n\n for env_name in number_episode_dics.keys():\n\n check_folder(env_name, number_episode_dics[env_name])",
"def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True",
"def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True",
"def check_unstructured(extractions):\n if not extractions:\n return True\n for ext in extractions:\n if not hasattr(ext, 'args'):\n return False\n return True",
"def cross_validate(self, contents, required=None, forbidden=None):\n if required:\n for item in required:\n self.assertTrue(\n item in contents,\n \"Required entry [{item}] not found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )\n if forbidden:\n for item in forbidden:\n self.assertTrue(\n item not in contents,\n \"Forbidden entry [{item}] found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )",
"def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")"
] | [
"0.68781096",
"0.5914848",
"0.5894921",
"0.58664596",
"0.5797446",
"0.5790559",
"0.575301",
"0.57257426",
"0.56155634",
"0.5589998",
"0.55779296",
"0.5571627",
"0.55710405",
"0.5568376",
"0.55295604",
"0.5499033",
"0.5491544",
"0.5466877",
"0.54596764",
"0.5446938",
"0.54257554",
"0.5356052",
"0.533595",
"0.5322388",
"0.5315691",
"0.5311172",
"0.5294386",
"0.5277329",
"0.52738976",
"0.52700686"
] | 0.7935964 | 0 |
Get the names of strands that are found in this twine. | def available_strands(self):
return self._available_strands | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iter_strands(self):\n return iter(self.strand_list)",
"def getStationsName(self) :\n names = []\n for sts in self._stations :\n names.append(sts.getName())\n\n return names",
"def get_well_aliases(self):\n return self.info_wells['well'].unique()",
"def list_available_strains(self):\n return [strain for strain in self.sample_dict]",
"def find_hypernyms(self, syns):\n names = set()\n # Find hypernyms of each syn\n for syn in syns:\n hypernyms = syn.hypernyms()\n # find hypernyms one more level up\n for hypernym in hypernyms:\n names.add(hypernym.name())\n hypernyms_second = hypernym.hypernyms()\n for h in hypernyms_second:\n names.add(h.name())\n return names",
"def wells_list(self, wtype='all'):\n list_names = []\n for well_data in self.wells:\n if wtype == 'all':\n list_names.append(well_data.drawdown.name)\n elif wtype == well_data._type - 2:\n list_names.append(well_data.drawdown.name)\n return(list_names)",
"def names(self) -> list[str]:",
"def stl_names(self):\n return [stl.member.get_full_name() for stl in self.stls.all()]",
"def _getAllWorklistNames(self):\n log.debug(\"Finding all worklists mentioned in this statemachine.\")\n worklists = {}\n names = [s.getTaggedValue('worklist')\n for s in self.sm.getStates(no_duplicates = 1)\n if s.getTaggedValue('worklist')]\n for name in names:\n worklists[name] = 'just filtering out doubles'\n result = worklists.keys()\n log.debug(\"Found the following worklists: %r.\", result)\n return result",
"def synonyms(self):\n\n return [synonym[\"name\"] for synonym in self._get_synonym_json()]",
"def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)",
"def getNames(self) -> List[unicode]:\n ...",
"def speciesNames(self):\n nsp = self.nSpecies()\n return map(self.speciesName,range(nsp))",
"def get_station_names(self):\n station_names = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n station_name = ' '.join(wrapper.find(\"h3\").text.split(' ')[:-1])\n station_names.append(station_name)\n return np.array(station_names).T",
"def names(self) -> PlaceNames | None:\n pass",
"def get_names(self):\n return self.names",
"def get_words_from_sysets(synset):\n synlist = []\n for s in synset:\n syns = s.lemmas()[0].name()\n synlist.append(syns)\n return synlist",
"def get_holonyms(synset):\n return set(\n synset.member_holonyms() + synset.substance_holonyms() + synset.part_holonyms()\n )",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def tank_name_list(self):\n return list(self._node_reg.tank_names)",
"def names(cls) -> List[str]:",
"def names(self) -> List:\n ...",
"def synonyms(self) -> List[str]:\n return self._synonyms",
"def synonyms(self) -> List[str]:\n return pulumi.get(self, \"synonyms\")",
"def return_names(self):\n return self.__name_list",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def available_manifest_strands(self):\n return self._available_manifest_strands",
"def namelist(self):\n return set(self.names())",
"def wt_strains(df):\n \n ts_plates = []\n dma_plates = []\n for plate in df.Plate.unique():\n if ('_26C_' in plate) or ('_37C_' in plate):\n ts_plates.append(plate)\n else:\n dma_plates.append(plate)\n\n wt_strain_ids_dma = df[(df['ORF'].isin(['YOR202W'])) &\n (df['Plate'].isin(dma_plates))]['Strain ID'].unique()\n wt_strain_ids_ts = df[(df['ORF'].isin(['YOR202W', 'YMR271C'])) &\n (df['Plate'].isin(ts_plates))]['Strain ID'].unique()\n wt_strain_ids = np.append(wt_strain_ids_ts, wt_strain_ids_dma)\n \n return wt_strain_ids"
] | [
"0.6584263",
"0.65621185",
"0.63851863",
"0.62019277",
"0.61951375",
"0.60891676",
"0.60425985",
"0.6007916",
"0.59496164",
"0.59036535",
"0.5884974",
"0.58607626",
"0.57966846",
"0.57783294",
"0.5773814",
"0.5770074",
"0.57655287",
"0.5721144",
"0.5684944",
"0.5684944",
"0.5680266",
"0.56731397",
"0.565833",
"0.5656931",
"0.5639807",
"0.5628358",
"0.5610615",
"0.5599485",
"0.5598051",
"0.5592574"
] | 0.71010107 | 0 |
Get the names of the manifest strands that are found in this twine. | def available_manifest_strands(self):
return self._available_manifest_strands | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def available_strands(self):\n return self._available_strands",
"def list_manifests():\n import enaml\n with enaml.imports():\n from .pulses.manifest import PulsesManagerManifest\n from .tasks.manifest import PulsesTasksManifest\n from .measure.manifest import PulsesMeasureManifest\n return [PulsesManagerManifest, PulsesTasksManifest, PulsesMeasureManifest]",
"def getAtomNames(self):\n return self._raw_data['ATOM_NAME']",
"def names(self) -> List[str]:\n names = set()\n for summary_dir in self._summary_dirs:\n for subdir in summary_dir.glob(\"*\"):\n if subdir == _METADATA:\n continue\n if subdir.is_dir():\n names.add(subdir.name)\n return sorted(names)",
"def names(self) -> list[str]:",
"def get_well_aliases(self):\n return self.info_wells['well'].unique()",
"def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames",
"def fqns(self):\n return [fqn for fqn in self.runinfos]",
"def GetResourceNames(self):\r\n return [x.name for x in self.resources]",
"def synonyms(self):\n\n return [synonym[\"name\"] for synonym in self._get_synonym_json()]",
"def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())",
"def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files",
"def onboot_names(self):\n ext_names = []\n for ext in self.extensions.values():\n if not ext.onboot:\n continue\n ext_names.append(ext.name)\n return ', '.join(sorted(ext_names))",
"def app_names(self):\n return self.get_app_names()",
"def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, self.get_app_ids()))",
"def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests",
"def RAppNames(self):\n\t\tnames=[]\n\t\tfor item in range(self.rApps.Count):\n\t\t\tnames.append(self.rApps.Item(item).Name)\n\t\treturn names",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def names() -> Tuple[str, ...]:\n return plugins.list_all(package_name=__name__)",
"def find_hypernyms(self, syns):\n names = set()\n # Find hypernyms of each syn\n for syn in syns:\n hypernyms = syn.hypernyms()\n # find hypernyms one more level up\n for hypernym in hypernyms:\n names.add(hypernym.name())\n hypernyms_second = hypernym.hypernyms()\n for h in hypernyms_second:\n names.add(h.name())\n return names",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def mapped_names(self):\n return [x.distro for x in DistroMapping.distros_mapped_to(self.name, self.version)]",
"def get_afferents_names(self):\n\t\treturn self._afferentsNames",
"def names(self):\r\n return resource.Name(self)",
"def names(self):\n return list(item.name for item in self.mechanisms)",
"def getStationsName(self) :\n names = []\n for sts in self._stations :\n names.append(sts.getName())\n\n return names",
"def get_short_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result"
] | [
"0.6490598",
"0.61462444",
"0.6061251",
"0.6059069",
"0.59545153",
"0.59168273",
"0.5911148",
"0.5903504",
"0.5897712",
"0.5851716",
"0.58406675",
"0.5829558",
"0.58154243",
"0.5781835",
"0.57704556",
"0.57661283",
"0.57341903",
"0.56977165",
"0.56977165",
"0.56939626",
"0.5673895",
"0.56689405",
"0.5650809",
"0.5650809",
"0.5626021",
"0.5612284",
"0.55900735",
"0.5579343",
"0.5575883",
"0.55578464"
] | 0.7861807 | 0 |
Validate that the children values, passed as either a file or a json string, are correct. | def validate_children(self, source, **kwargs):
# TODO cache this loaded data keyed on a hashed version of kwargs
children = self._load_json("children", source, **kwargs)
self._validate_against_schema("children", children)
strand = getattr(self, "children", [])
# Loop the children and accumulate values so we have an O(1) check
children_keys = {}
for child in children:
children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1
# Check there is at least one child for each item described in the strand
# TODO add max, min num specs to the strand schema and check here
for item in strand:
strand_key = item["key"]
if children_keys.get(strand_key, 0) <= 0:
raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")
# Loop the strand and add unique keys to dict so we have an O(1) check
strand_keys = {}
for item in strand:
strand_keys[item["key"]] = True
# Check that each child has a key which is described in the strand
for child in children:
child_key = child["key"]
if not strand_keys.get(child_key, False):
raise exceptions.InvalidValuesContents(
f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
)
# TODO Additional validation that the children match what is set as required in the Twine
return children | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_json(self):\n pass",
"def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data",
"def _validate(self, path, obj):\r\n if isinstance(obj, str):\r\n if path[-1] != \"pattern\":\r\n self._validate_string(path, obj)\r\n elif isinstance(obj, dict):\r\n for key, value in obj.items():\r\n new_path = path.copy()\r\n new_path.append('%s' % key)\r\n self._validate_string(new_path, key, True)\r\n self._validate(new_path, value)\r\n elif isinstance(obj, list):\r\n for index, value in enumerate(obj):\r\n new_path = path.copy()\r\n new_path.append('%d' % index)\r\n self._validate(new_path, value)\r\n elif isinstance(obj, bool):\r\n pass\r\n elif isinstance(obj, int):\r\n pass\r\n elif isinstance(obj, float):\r\n pass\r\n elif isinstance(obj, type(None)):\r\n pass\r\n else:\r\n print(type(obj))\r\n pass\r\n # raise Exception()\r",
"def check_children_attributes(self, branch):\n attributes = branch.get_attributes()\n for attr in attributes:\n if not isinstance(attributes[attr], str) and not isinstance(attributes[attr], list) :\n print('Attribute '+str(attr)+' of '+ branch.__class__.__name__ + ' should be str or list')\n self.assertTrue(False)\n children = branch.get_children()\n for child in children:\n self.check_children_attributes(child)",
"def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))",
"def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True",
"def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))",
"def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json",
"def assert_correct_json_response(self, json_response):\r\n self.assertIsNotNone(json_response['display_name'])\r\n self.assertIsNotNone(json_response['id'])\r\n self.assertIsNotNone(json_response['category'])\r\n self.assertIsNotNone(json_response['is_draft'])\r\n self.assertIsNotNone(json_response['is_container'])\r\n if json_response['is_container']:\r\n for child_response in json_response['children']:\r\n self.assert_correct_json_response(child_response)\r\n else:\r\n self.assertFalse('children' in json_response)",
"def validate(self, config_json):\n pass",
"def validate(self, root):\n if not isinstance(root, list):\n parser.error('Root object is not a list')\n if root:\n expected_type = type(root[0])\n if expected_type not in [list, dict]:\n parser.error('First row is a {}, not a list or dictionary'.format(expected_type))\n for item in root:\n if type(item) != expected_type:\n parser.error('Row type does not match first row')\n for col in item:\n if type(col) not in [str, unicode, int, float, bool]:\n parser.error('Row column is unexpected type')",
"def _assert_valid_deep(value):\n if isinstance(value, dict):\n for v in value.itervalues():\n _assert_valid_deep(v)\n elif isinstance(value, list):\n for v in value:\n _assert_valid_deep(v)\n else:\n if hasattr(value, \"assert_valid\"):\n value.assert_valid()",
"def valid_is_json(self):\n return self.file_name.endswith('.json')",
"def _check_allowed_values(self, key: str, value: Any):\n allowedValues = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedValues\", None)\n if allowedValues is not None and value not in allowedValues:\n raise Exception(\n f\"Value '{value}' is not an allowed value for '{key}'. Allowed values are: {', '.join(allowedValues)}\"\n )",
"def _check_children(self):\n def froze_list(l):\n return frozenset(frozenset(child) for child in l)\n children, values = self._get_children()\n if froze_list(children) != froze_list(self.children) or frozenset(values) != frozenset(self.values):\n self._children_watcher()",
"def test_load_json_value_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('\"two\"')\n\n assert load_json(fname) == \"two\"\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a dict\"\n ):\n load_json_object(fname)\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)",
"def _check_data_type(self, key: str, value: Any):\n allowedDataType = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedDataType\", None)\n if allowedDataType is not None and not isinstance(value, allowedDataType):\n raise Exception(\n f\"Value '{value}' is not of the correct type. The allowed data type is: {allowedDataType.__name__}\"\n )",
"def is_valid_child(self, child):\n return isinstance(child, baseobject.PBXBaseObject) \\\n and child.isa in self.allow_children_types()",
"def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path",
"def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)",
"def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]\n return []",
"def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)",
"def validate_fields(self, tree):\n # Check fields\n fields = list(tree.keys())\n for k in self.fields:\n assert (k in fields)",
"def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False",
"def test_json_reader_data_contents(process_data):\n json_data = process_data(file_name_or_type='scooter_data.json')\n for val in json_data:\n assert(isinstance(val['id'], int))\n assert(isinstance(val['name'], str))\n assert(isinstance(val['vin_number'], str))\n assert(isinstance(val['electric_scooter'], bool))\n assert(isinstance(val['city'], str))\n assert(isinstance(val['usage'], str))\n assert(isinstance(val['cost_usd'], float))\n assert(isinstance(val['total_years_of_use'], int))",
"def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()",
"def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" \"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)",
"def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False",
"def assert_sanity(self):\n # Maybe in the future: Check whether commands can be found in path\n # For now, let the OS handle this\n\n # Check whether command dictionary has a correct structure. Namely,\n # that:\n #\n # 1. Toplevel children may only be called \"commands\" or \"paths\".\n if len(self.command_dict) > 2:\n raise CommandDictSanityError(\"Only two toplevel children allowed.\")\n for key in self.command_dict.keys():\n if key not in (\"commands\",\"paths\"):\n raise CommandDictSanityError(\n f\"Invalid toplevel child found: {key}.\")\n # 2. \"paths\" node must be a list, and must only contain string\n # children.\n if \"paths\" in self.command_dict:\n if type(self.command_dict[\"paths\"]) != list:\n raise CommandDictSanityError(\n \"The \\\"paths\\\" node must be a list.\")\n for path in self.command_dict[\"paths\"]:\n if type(path) != str:\n raise CommandDictSanityError(\"Defined paths must be strings.\")\n # 3. \"commands\" node chilren (henceforth command nodes) must be\n # dictionaries, \n # 4. and may contain only the following keys:\n # \"regex\", \"cmd\", \"help\", \"markdown_convert\", \"formatted\",\n # \"code\" and \"split\".\n # 5. The command node children may only be strings.\n # 6. Command node children with keys \"markdown_convert\",\n # \"formatted\" or \"code\" may only be defined as \"true\" or as\n # \"false\".\n if \"commands\" in self.command_dict.keys():\n for com in self.command_dict[\"commands\"]:\n # Implement rule 3\n if type(self.command_dict[\"commands\"][com]) != dict:\n raise CommandDictSanityError(\n \"Defined commands must be dictionaries.\")\n for opt in self.command_dict[\"commands\"][com].keys():\n # Implement rule 4\n if opt not in (\"regex\",\n \"cmd\",\n \"help\",\n \"markdown_convert\",\n \"formatted\",\n \"code\",\n \"split\"):\n raise CommandDictSanityError(\n f\"In command \\\"{com}\\\", invalid option found: \" \\\n f\"\\\"{opt}\\\".\")\n # Implement rule 6\n elif opt in (\"markdown_convert\", \"formatted\", \"code\"):\n if type(self.command_dict[\"commands\"][com][opt]) != bool:\n raise CommandDictSanityError(\n f\"In command \\\"{com}\\\", invalid value for option \"\n f\"\\\"{opt}\\\" found: \" \\\n f\"\\\"{self.command_dict['commands'][com][opt]}\\\"\")\n # Implement rule 5\n else:\n if type(self.command_dict[\"commands\"][com][opt]) != str:\n raise CommandDictSanityError(\n f\"In command \\\"{com}\\\", command option \" \\\n f\"\\\"{opt}\\\" must be a string.\")\n\n return",
"def testCheck(self):\r\n from pydsl.Grammar.Definition import JsonSchema\r\n from pydsl.Check import JsonSchemaChecker\r\n schema = {\r\n \"type\" : \"string\",\r\n \"items\" : {\r\n \"type\" : [\"string\", \"object\"],\r\n \"properties\" : {\r\n \"foo\" : {\"enum\" : [1, 3]},\r\n #\"bar\" : { #See https://github.com/Julian/jsonschema/issues/89\r\n # \"type\" : \"array\",\r\n # \"properties\" : {\r\n # \"bar\" : {\"required\" : True},\r\n # \"baz\" : {\"minItems\" : 2},\r\n # }\r\n #}\r\n }\r\n }\r\n }\r\n grammardef = JsonSchema(schema)\r\n checker = JsonSchemaChecker(grammardef)\r\n self.assertTrue(checker.check(\"a\"))\r\n self.assertFalse(checker.check([1, {\"foo\" : 2, \"bar\" : {\"baz\" : [1]}}, \"quux\"]))"
] | [
"0.6158009",
"0.6144193",
"0.5932691",
"0.5760641",
"0.56944185",
"0.56927955",
"0.5685668",
"0.56744194",
"0.56555223",
"0.5653026",
"0.5635682",
"0.559688",
"0.55957067",
"0.5592181",
"0.5564809",
"0.5561077",
"0.55481446",
"0.55238324",
"0.551551",
"0.5502962",
"0.5483416",
"0.54796106",
"0.5474002",
"0.5427207",
"0.5421077",
"0.541044",
"0.539442",
"0.5382479",
"0.5380251",
"0.53705496"
] | 0.6725077 | 0 |
Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory) although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system. | def validate_credentials(self, *args, dotenv_path=None, **kwargs):
if not hasattr(self, "credentials"):
return set()
# Load any variables from the .env file into the environment.
dotenv_path = dotenv_path or os.path.join(".", ".env")
load_dotenv(dotenv_path)
for credential in self.credentials:
if credential["name"] not in os.environ:
raise exceptions.CredentialNotFound(
f"Credential {credential['name']!r} missing from environment or .env file."
)
return self.credentials | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]",
"def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok",
"def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")",
"def check_for_credential_file(self):\r\n if 'AWS_CREDENTIAL_FILE' in os.environ:\r\n path = os.environ['AWS_CREDENTIAL_FILE']\r\n path = os.path.expanduser(path)\r\n path = os.path.expandvars(path)\r\n if os.path.isfile(path):\r\n fp = open(path)\r\n lines = fp.readlines()\r\n fp.close()\r\n for line in lines:\r\n if line[0] != '#':\r\n if '=' in line:\r\n name, value = line.split('=', 1)\r\n if name.strip() == 'AWSAccessKeyId':\r\n if 'aws_access_key_id' not in self.args:\r\n value = value.strip()\r\n self.args['aws_access_key_id'] = value\r\n elif name.strip() == 'AWSSecretKey':\r\n if 'aws_secret_access_key' not in self.args:\r\n value = value.strip()\r\n self.args['aws_secret_access_key'] = value\r\n else:\r\n print 'Warning: unable to read AWS_CREDENTIAL_FILE'",
"def test_missing_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with self.assertRaises(exceptions.CredentialNotFound):\n twine.validate_credentials()",
"def test_validate_credentials(self):\n pass",
"def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"",
"def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD",
"def get_credentials():\n credential_dir = os.getcwd()\n credential_path = os.path.join(credential_dir,\n 'smarking_error_check.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def test_credentials(self):\r\n data = self._deep_clean('[email protected]')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True",
"def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or\n credentials.access_token_expired):\n return None\n return credentials",
"def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or\n credentials.access_token_expired):\n return None\n return credentials",
"def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or credentials.access_token_expired):\n return None\n return credentials",
"def cfg_credentials(context):\n arguments = {\n '--config': context.config_file,\n 'authorize': False,\n 'account_summary': False\n }\n pychex_cli = PychexCli(arguments)\n pychex_cli.read_config()\n # Check that the values pulled from the read_config method match what we\n # know\n print(pychex_cli.username)\n assert pychex_cli.username == context.username\n assert pychex_cli.security_image_path == context.security_image_path\n assert pychex_cli.password == context.password\n # Check that the unencrypted values are not present\n with open(arguments['--config']) as cfg:\n cfg_txt = cfg.read()\n assert cfg_txt.find(context.username) == -1\n assert cfg_txt.find(context.security_image_path) == -1\n assert cfg_txt.find(context.password) == -1",
"def __validate_google_credentials(self):\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n self.service = build('sheets', 'v4', credentials=creds)",
"def check_credentials(self) -> None:\n # Checks the GitHub token is defined\n configuration.get_value(ConfigurationVariable.GIT_TOKEN)",
"def check_credentials_validation(credentials):\n spec = {'_id': credentials['username'], 'password': credentials['password']}\n if not current_app.mongo.observer.users.find_one(spec):\n raise Unauthorized('invalid credentials')",
"def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def verify_credentials(self):\n try:\n self.api.VerifyCredentials()\n logging.info('Successfully verified')\n return True\n except TwitterError as e:\n logging.error('Error verifying credentials: %s', e.message[0]['message'])\n return False",
"def has_credentials(credentials_file=CREDENTIALS_FILE):\n return os.path.exists(credentials_file)",
"def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials",
"def validate_env(self) -> None:\n errors = []\n\n self.user_name = env.str('USER_NAME')\n if not self.user_name:\n errors.append('USER_NAME environment variable needs to be set to your MyQ user name')\n\n self.password = env.str('PASSWORD')\n if not self.password:\n errors.append('PASSWORD environment variable needs to be set to your MyQ password')\n\n self.left_door = env.int('EDGEWOOD', 0)\n self.right_door = 1 - self.left_door\n\n self.only_close = env.bool('ONLY_CLOSE', True)\n\n if errors:\n raise Exception(','.join(errors))",
"def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None",
"def _make_sure_credentials_are_set(self):\n if self.backend_options:\n if not os.environ.get('APCA_API_KEY_ID') and \\\n self.backend_options['key_id']:\n os.environ['APCA_API_KEY_ID'] = self.backend_options['key_id']\n if not os.environ.get('APCA_API_SECRET_KEY') and \\\n self.backend_options['secret']:\n os.environ['APCA_API_SECRET_KEY'] = self.backend_options[\n 'secret']\n if not os.environ.get('APCA_API_BASE_URL') and \\\n self.backend_options['base_url']:\n os.environ['APCA_API_BASE_URL'] = self.backend_options[\n 'base_url']",
"def resolve_credentials():\n path = os.getenv('GOOGLE_APPLICATION_CREDENTIALS', '')\n\n if os.path.exists(path):\n return True\n\n credentials = os.getenv('GOOGLE_SERVICE_KEY', None)\n if credentials:\n with open(path, 'w') as credentials_file:\n credentials_file.write(credentials)",
"def test_no_credentials(self):\n twine = Twine(source=VALID_SCHEMA_TWINE)\n twine.validate_credentials()",
"def authenticate(self):\r\n\r\n config_data = {}\r\n\r\n # Step 1: try getting username/password from environment\r\n config_data = self.read_config_environment(config_data)\r\n\r\n # Step 2: if credentials were not in env read in configuration file\r\n if self.CONFIG_NAME_USER not in config_data \\\r\n or self.CONFIG_NAME_KEY not in config_data:\r\n if os.path.exists(self.config):\r\n config_data = self.read_config_file(config_data)\r\n else:\r\n raise IOError('Could not find {}. Make sure it\\'s located in'\r\n ' {}. Or use the environment method.'.format(\r\n self.config_file, self.config_dir))\r\n\r\n # Step 3: load into configuration!\r\n self._load_config(config_data)",
"def authenticate(self, credentials=None):\n if credentials is None: # pragma: no cover\n credentials['AWS_ACCESS_KEY_ID'] = getpass.getpass(prompt=\"Enter AWS_ACCESS_KEY_ID: \")\n credentials['AWS_SECRET_KEY'] = getpass.getpass(prompt=\"Enter AWS_SECRET_KEY: \")\n\n self._credentials = credentials",
"def command_check_credentials():\n \n # now calling STS service with the credentials retrieved for verification\n if not aws.check_credentials():\n print(\"credential check failed. exiting program with exit code 1\")\n sys.exit(1)",
"def check_credentials(self, cli_credentials, default_prompt, enable_prompt, logger):\n raise NotImplementedError(\"Class {} must implement method 'check_credentials'\".format(type(self)))"
] | [
"0.661287",
"0.64161855",
"0.6251266",
"0.61858857",
"0.61782354",
"0.6148377",
"0.61406416",
"0.6052758",
"0.6034112",
"0.6033556",
"0.60107875",
"0.60107875",
"0.60083914",
"0.6002498",
"0.5968834",
"0.59106755",
"0.5909878",
"0.5885369",
"0.585459",
"0.5849183",
"0.58435297",
"0.57935625",
"0.57888234",
"0.5787467",
"0.5766613",
"0.575164",
"0.57392514",
"0.571363",
"0.56959903",
"0.5687062"
] | 0.7446492 | 0 |
Validate monitor message against the monitor message schema strand. | def validate_monitor_message(self, source, **kwargs):
    return self._validate_values(kind="monitor_message", source=source, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self):\n\n # Check if motherboard record exists\n motherboard_record_exists = False\n board_info_records = self.groups[constants.RecordType.BASEBOARD_RECORD]\n for handle_id in board_info_records:\n record = self.records[handle_id]\n if 'Type' in record.props and record.props['Type'].val == 'Motherboard':\n motherboard_record_exists = True\n break\n if not motherboard_record_exists:\n self.err_msgs['Motherboard SMBIOS record is missing.'] = (\n 'There should be at least one structure defining the motherboard '\n '(Board Type: 0xA).')\n\n return self.err_msgs",
"def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))",
"def validate_message(self, state_id, msg):\n pass",
"def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise",
"def validation_event(self, message):",
"def message_error_validator():\n\n return validator.MessageErrorSchema()",
"def check_message(self, msg):\n pass",
"def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)",
"def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()",
"def validate():",
"def validateMessage(self):\n assert self.validation_class is not None, (f'{self.__class__.__name__}'\n ' must include a validation'\n '_attribute or override '\n 'validateMessage method.')\n\n validation_class = self.validation_class\n registry = validation_class(data=self.message, context={'request': None})\n\n if registry.is_valid():\n self.is_valid = True\n self.registry = registry\n else:\n self.result = registry.errors\n super().finishTask(failed=True)\n\n return self.is_valid",
"def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")",
"def handle_message(self, validated_message: dict):\n pass",
"def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")",
"def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass",
"def validate(self, soapmsg):\n return self.xsd_validator.validate(soapmsg.body)",
"def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))",
"def determineMessageValidity(message):\n return Sentience._determineMessagePositivityWrapper(message, overall=False)",
"def _check_monitorline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.monitor_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if ';' is used\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id, None)\n self._display_semantic_error(monitor_error)\n elif self._is_period(self.symbol):\n # DType output\n self.symbol = self.scanner.get_symbol()\n if self._check_validdtypeoutput(self.symbol):\n self.monitor_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id,\n self.monitor_port.id)\n self._display_semantic_error(monitor_error)\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n self._display_syntax_error(\"doutput\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None",
"def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))",
"def validate_against_schema(request, schema, data):\n try:\n data_pure = schema.deserialize(data)\n data_clean = post_serialize(data_pure)\n # Attach data_clean to request: see usage in views.\n request.data_clean = data_clean\n except Invalid as e:\n # here we transform the errors we got from colander into cornice\n # errors\n for field, error in e.asdict().items():\n request.errors.add('body', field, error)",
"def _is_message_valid(message):\n return isinstance(message, ev_envelope.Envelope)",
"def _validate_input(self):\n\n try:\n expected_type(str, self.venue_id, \"venue_id\")\n expected_type(datetime.datetime, self.timestamp_utc, \"timestamp_utc\")\n\n expected_type(VenueStreamType, self.measurement_type, \"measurement_type\")\n\n expected_type(int, self.number_of_people, \"number_of_people\")\n\n if self.measurement_type is VenueStreamType.ABSOLUTE:\n if self.operator:\n raise ValueError(\"The stream type for the venue doesn't allow passing an Event operator\")\n\n elif self.measurement_type is VenueStreamType.EVENT:\n expected_type(EventStreamOperator, self.operator, \"operator\")\n else:\n raise ValueError(\"Unsupported member of the VenueStreamType enum\")\n\n if self.metadata:\n expected_type(dict, self.metadata, \"metadata\")\n\n except Exception as ex:\n raise ValueError(\"Validation of input failed. Reason: %s\" % str(ex))",
"def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)",
"def __validate():\n # TODO: implement",
"def check_msg(self, msg, log, topic):\n topics = self.cfg_logs[log]['topics']\n\n if topic in topics.keys():\n for attribute in topics[topic]:\n if not attribute.startswith('_'):\n val = getattr(msg, attribute)\n\n if 'min' in topics[topic][attribute].keys():\n if val < topics[topic][attribute]['min']:\n self.errors.append(\n '{} {} {} {}: violating minimium value {}: {}'.format(log, msg.header.stamp.to_sec(),\n topic, attribute,\n topics[topic][attribute]['min'],\n val))\n if 'max' in topics[topic][attribute].keys():\n if val > topics[topic][attribute]['max']:\n self.errors.append(\n '{} {} {} {}: violating maximum value {}: {}'.format(log, msg.header.stamp.to_sec(),\n topic, attribute,\n topics[topic][attribute]['max'],\n val))\n if 'val' in topics[topic][attribute].keys():\n if val != topics[topic][attribute]['val']:\n self.errors.append(\n '{} {} {} {}: violating value {}: {}'.format(log, msg.header.stamp.to_sec(), topic,\n attribute, topics[topic][attribute]['val'],\n val))",
"def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True",
"def _validate(self):\n pass",
"def _check_message(self, _message_contents):\r\n if not type(_message_contents) is dict:\r\n self.logger.error(\"Message should be a dict.\")\r\n return False\r\n if not \"event\" in _message_contents:\r\n self.logger.error(\"Message dict has no event key.\")\r\n return False\r\n if not \"data\" in _message_contents:\r\n self.logger.error(\"Message dict has no data key.\")\r\n return False\r\n if not type(_message_contents[\"event\"]) == str:\r\n self.logger.error(\"Message event is not a string.\")\r\n return False\r\n if len(_message_contents[\"event\"]) == 0:\r\n self.logger.error(\"Message event cannot be empty.\")\r\n return False\r\n if not type(_message_contents[\"data\"]) == list:\r\n self.logger.error(\"Message data is not a list.\")\r\n return False\r\n if len(_message_contents[\"data\"]) == 0:\r\n self.logger.error(\"Message data cannot be empty list.\")\r\n return False\r\n return True",
"def test_validate_connector(self):\n connector = {'wwpns': [\"not empty\"],\n 'wwnns': [\"not empty\"]}\n self.volume.driver.validate_connector(connector)"
] | [
"0.6137711",
"0.60649383",
"0.60399437",
"0.60151976",
"0.5827821",
"0.5748296",
"0.5649671",
"0.56458515",
"0.550159",
"0.54282165",
"0.5421623",
"0.54128164",
"0.54105055",
"0.5404401",
"0.5385656",
"0.5354246",
"0.53523284",
"0.5333825",
"0.5331712",
"0.53300637",
"0.5320604",
"0.53031343",
"0.5286672",
"0.52821654",
"0.5263968",
"0.5259492",
"0.523754",
"0.5236003",
"0.5214703",
"0.5212126"
] | 0.73682123 | 0 |
Validate the output manifest, passed as either a file or a json string. | def validate_output_manifest(self, source, **kwargs):
    return self._validate_manifest("output_manifest", source, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)",
"def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))",
"def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response",
"def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))",
"def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False",
"def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False",
"def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)",
"def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. Exiting\"\n sys.exit(1)",
"def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False",
"def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data",
"def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"",
"def check_output(out: Union[str, bytes], fmt: str) -> None:\n if fmt in [\"png\", \"pdf\"]:\n assert isinstance(out, bytes)\n elif fmt in [\"vega\", \"vega-lite\"]:\n assert isinstance(out, str)\n dct = json.loads(out)\n assert len(dct) > 0\n else:\n assert isinstance(out, str)\n assert len(out) > 0",
"def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))",
"def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))",
"def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False",
"def valid_and_export(template, dashname):\n\n if not json_validation(template):\n print('Bad json format for ' + dashname + ' grafana dashboard')\n else:\n if export_file(template, dashname + '.json'):\n print('Successfully generated dashboard: ' + dashname)\n else:\n print('Error during export dashboard: ' + dashname)",
"def schema_validate_kubernetes_output(validate_data, cache_dir):\n (kind, version), validate_files = validate_data\n KubernetesManifestValidator(cache_dir).validate(validate_files, kind=kind, version=version)",
"def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)",
"def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)",
"def validate(self, config_json):\n pass",
"def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})",
"def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False",
"def valid_is_json(self):\n return self.file_name.endswith('.json')",
"def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")",
"def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}",
"def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False",
"def validate_package_metadata(filename, meta, expected_name, expected_version):\n if meta.get('name') != expected_name:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package name in %s: expected %s; got %s\"\n % (filename, expected_name, meta.get('name')))\n if meta.get('version') != expected_version:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package version in %s: expected %s; got %s\"\n % (filename, expected_version, meta.get('version')))\n if meta.get('dependencies') and not isinstance(meta['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"dependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('peerDependencies') and not isinstance(meta['peerDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"peerDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('devDependencies') and not isinstance(meta['devDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"devDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('rex'):\n if not isinstance(meta['rex'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex\\\" key should be a JSON object in %s\"\n % filename)\n if meta['rex'].get('dependencies') and not isinstance(meta['rex']['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex.dependencies\\\" key should be a JSON object in %s\"\n % filename)",
"def supports_manifest(manifest):\n pass",
"def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False",
"def validate_yaml(\n data_json: str,\n root_dir: str,\n output_file: str = None\n) -> None:\n grep_tags, source_tags, ignored_tags, source_methods = (\n analyze.analyze_json(data_json, root_dir))\n\n (is_valid, output) = cli_yaml.validate_yaml_syntax(\n root_dir, grep_tags, source_tags)\n\n if is_valid:\n output.append('All files are valid.')\n else:\n output.append('Invalid file(s) found!')\n\n _write_output(output, output_file)"
] | [
"0.689081",
"0.67470354",
"0.6491389",
"0.6478711",
"0.64743024",
"0.6428948",
"0.642016",
"0.6391841",
"0.63199776",
"0.6262872",
"0.5994585",
"0.59788805",
"0.5946266",
"0.5936269",
"0.5893858",
"0.58807135",
"0.58602494",
"0.5850808",
"0.5838025",
"0.5832804",
"0.5797433",
"0.5791419",
"0.57425106",
"0.5738217",
"0.57367176",
"0.5734439",
"0.57109505",
"0.5704293",
"0.56795967",
"0.566765"
] | 0.7151388 | 0 |
Getter that will return cls[name] if cls is a dict or cls otherwise | def _get_cls(name, cls):
    return cls.get(name, None) if isinstance(cls, dict) else cls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(cls, name):\n cls.initialize()\n if isinstance(name, cls):\n return name\n else:\n return cls.mapping[name]",
"def getInstacefromcls(cls, clsname, valuedict=None):\n for i in range(len(clslist)):\n if clsname == clslist[i]:\n return clslist[i](valuedict)\n return None",
"def __get__(self, instance, cls=None):\n\n if cls is None:\n cls = type(instance)\n\n try:\n return vars(cls)[self.__cache_name__]\n except KeyError:\n result = super().__get__(instance, cls)\n setattr(cls, self.__cache_name__, result)\n return result",
"def __getitem__(cls, name):\n return cls.__getattr__(name)",
"def lookup_by_name(cls, name):\n return cls.__by_name[name]",
"def __getattr__(self, name: str) -> any:\n return self._dict[name]",
"def lookup_by_class(dict_,class_):\n v = None\n for c in classlist(class_)[::-1]:\n if c in dict_:\n v = dict_[c]\n break\n return v",
"def __getitem__(cls, key):\n return cls(cls._nameToValue[key])",
"def withdraw(\n self, \n name: Union[str, Sequence[str]], \n kwargs: Optional[denovo.base.Dictionary] = None) -> (\n Union[Type[Any], object]):\n names = denovo.convert.listify(name)\n item = None\n for key in names:\n for catalog in ['instances', 'classes']:\n try:\n item = getattr(self, catalog)[key]\n break\n except KeyError:\n pass\n if item is not None:\n break\n if item is None:\n raise KeyError(f'No matching item for {name} was found')\n if kwargs is not None:\n if 'name' in item.__annotations__.keys() and 'name' not in kwargs:\n kwargs[name] = names[0]\n if inspect.isclass(item):\n item = item(**kwargs)\n else:\n for key, value in kwargs.items():\n setattr(item, key, value) \n return item # type: ignore",
"def lookup(self, cls, name, mode):\n mro = [el.__name__ for el in cls.mro()]\n registry = self.method_registry if mode=='method' else self.type_registry\n\n for class_name in mro:\n entries = registry[class_name]\n if name in entries:\n return entries[name]\n raise KeyError(\"Could not find method named %r.\"\n \" Please ensure classes using component decorators\"\n \" are decorated with the Model.definition\"\n \" class decorator.\" % name)",
"def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]",
"def __getitem__(self, name):\n if name in self:\n try:\n return getattr(self, name)\n except AttributeError:\n pass\n\n raise KeyError(name)",
"def _get(self, name):\n return object.__getattribute__(self, name)",
"def _get(self, name):\n return object.__getattribute__(self, name)",
"def class_hook(dct):\n if len(dct) == 1:\n class_name, value = next(iter(dct.items()))\n class_name = class_name.strip('_')\n if class_name == 'Dictionary':\n return Dictionary(*value)\n return dct",
"def get_class(self, name):\n raise NotImplementedError",
"def get_class(self, name):\n if name in self._objects_mapping:\n classname = self._objects_mapping[name]\n\n klass = None\n try:\n klass = getattr(self._sdk, classname)\n except:\n Printer.raise_error('Unknown class %s' % classname)\n\n return klass\n\n Printer.raise_error('Unknown object named %s' % name)",
"def __getitem__(self, name):\n return getattr(self, name)",
"def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)",
"def from_string(cls, name):\n if hasattr(cls,name):\n return cls.__getattribute__(name)\n else:\n return None",
"def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]",
"def by_name(name, cls=None):\n\n if cls is None:\n cls = base.Point\n\n if cls.__name__ == name:\n return cls\n\n for c in cls.__subclasses__():\n cc = by_name(name, c)\n if cc is not None:\n return cc\n\n return None",
"def __getattr__ (self, name):\n\t\ttry:\n\t\t\treturn self.__dict__[name]\n\t\texcept KeyError:\n\t\t\treturn self.__dict__[\"value\"][name]",
"def __getitem__(self, name):\r\n return self.get(name)",
"def __getitem__(self, name):\n return self.get(name)",
"def __getattr__(self, type_or_name):\n return self._retrieve_registered_value(type_or_name)",
"def get_by_name(self, class_name, object_name, session):",
"def __getitem__(self, name):\n return self.__getattr__(name)",
"def __call__(self, cls_or_name: Union[str, Type]) -> Type[DTSchema]:\n if isinstance(cls_or_name, type):\n n = cls_or_name.__name__\n else:\n n = cls_or_name\n if hasattr(self, n):\n return getattr(self, n)\n raise ValueError(f\"Could not find type {cls_or_name}\")",
"def __getattr__(self, name):\n return self.lookup(name)"
] | [
"0.7089814",
"0.68150103",
"0.6775328",
"0.6689568",
"0.65000015",
"0.6219528",
"0.6086408",
"0.60403633",
"0.6031711",
"0.6026237",
"0.5955669",
"0.5954231",
"0.5952677",
"0.5952677",
"0.59358865",
"0.5935747",
"0.59133816",
"0.5905671",
"0.58998924",
"0.5898058",
"0.5878466",
"0.5868883",
"0.5852091",
"0.58493954",
"0.584092",
"0.584062",
"0.5837674",
"0.58309555",
"0.58293474",
"0.5822067"
] | 0.8500315 | 0 |
Validate a single strand by name. | def validate_strand(self, name, source, **kwargs):
return self.validate({name: source}, **kwargs)[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid(name):\n return bool(name)",
"def validated_name(cls, name):\n if (name[:5] == 'hive-'\n and name[5] in ['1', '2', '3']\n and re.match(r'^hive-[123]\\d{4,6}$', name)):\n return name\n return None",
"def check_schema_name(name: str):\n if not is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")",
"def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)",
"def validate_name(name: str) -> None:\n\n # Disallow empty.\n if not name:\n raise CleanError('Feature set name cannot be empty.')\n\n # Require starting with a letter.\n if not name[0].isalpha():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - names must start with a letter.'\n )\n\n # Require only letters, numbers, and underscores.\n if not name.replace('_', '').isalnum():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only letters, numbers, and underscores are allowed.'\n )\n\n # Require all lowercase.\n if not name.islower():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only lowercase letters are allowed.'\n )\n\n # Disallow leading, trailing, or consecutive underscores.\n # (these will result in a '' in the split results which evals to False)\n if not all(name.split('_')):\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - leading, trailing, and consecutive underscores are'\n ' not allowed.'\n )",
"def is_valid_compound_name(name: str) -> bool:\n return n2s.has_smiles(name)",
"def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')",
"def verify_name(name):\n try:\n if name.index(' '):\n return False\n except ValueError:\n return True",
"def validateName(name):\r\n if not name:\r\n raise IllegalName('Name can not be an empty string.')\r\n\r\n m = _NAME_RE.match(name)\r\n\r\n if m is None or m.group(0) != name:\r\n raise IllegalName('Name has to start with a letter followed by an '\r\n 'arbitrary number of alphanumeric characters or '\r\n 'underscores.')",
"def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2",
"def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str[0-9]+$', name): return False\n return True if VALID_NAME_RE.match(name) else False",
"def is_valid(self, qstr):\r\n pass",
"def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str([0-9]+|L)$', name): return False\n return True if VALID_NAME_RE.match(name) else False",
"def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))",
"def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))",
"def validate_names(name):\n return isinstance(name, str) and not re.search(r'[\\s]', name)",
"def validate_species(self, name):\n accepted_species = Species.objects.values_list('name', flat=True)\n if name not in accepted_species:\n raise serializers.ValidationError(\n 'Species {0} is not allowed.'.format(name))\n else:\n return name",
"def isValidDataTypeName(name: unicode) -> bool:\n ...",
"def verify_spec_name(spec_name):\n if not isinstance(spec_name, text_type):\n raise ValueError(\n \"expected spec name of string type, but got '{0}' of type '{1}'\".\n format(spec_name, to_str(type(spec_name))))",
"def check_dog_name(dog):\n if not isinstance(dog.name, str):\n raise NotStringError(\"Dog name entered is not a string\")",
"def name_valid(name):\n return name.isalpha()",
"def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)",
"def _assert_valid_name(name, container):\n container.file.name_validation(container.directory, name)",
"def valid_street_name(cls, new_street):\n if type(new_street) is str:\n return True\n # else\n return False",
"def verify_name(name):\n if name and not name.isspace(): # if it's not empty/NULL and it's not whitespace\n return True\n else:\n return False",
"def test_reserved_name(self):\n with self.assertRaises(ValidationError):\n field_name_validator('_id')",
"def validate_species_name(self, species_name, require_full=True, require_prefix=True):\n\n if species_name == 's__':\n return True, None\n\n # test for prefix\n if require_prefix:\n if not species_name.startswith('s__'):\n return False, 'name is missing the species prefix'\n\n # remove prefix before testing other properties\n test_name = species_name\n if test_name.startswith('s__'):\n test_name = test_name[3:]\n\n # test for full name\n if require_full:\n if 'candidatus' in test_name.lower():\n if len(test_name.split(' ')) <= 2:\n return False, 'name appears to be missing the generic name'\n else:\n if len(test_name.split(' ')) <= 1:\n return False, 'name appears to be missing the generic name'\n\n # check for tell-tale signs on invalid species names\n if \" bacterium\" in test_name.lower():\n return False, \"name contains the word 'bacterium'\"\n if \" archaeon\" in test_name.lower():\n return False, \"name contains the word 'archaeon'\"\n if \" archeaon\" in test_name.lower():\n return False, \"name contains the word 'archeaon'\"\n if \"-like\" in test_name.lower():\n return False, \"name contains '-like'\"\n if \" group \" in test_name.lower():\n return False, \"name contains 'group'\"\n if \" symbiont\" in test_name.lower():\n return False, \"name contains 'symbiont'\"\n if \" endosymbiont\" in test_name.lower():\n return False, \"name contains 'endosymbiont'\"\n if \" taxon\" in test_name.lower():\n return False, \"name contains 'taxon'\"\n if \" cluster\" in test_name.lower():\n return False, \"name contains 'cluster'\"\n if \" of \" in test_name.lower():\n return False, \"name contains 'of'\"\n if test_name[0].islower():\n return False, 'first letter of name is lowercase'\n if 'sp.' in test_name.lower():\n return False, \"name contains 'sp.'\"\n\n return True, None",
"def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))",
"def check_name(name, allow_services=False):",
"def check_name(self, name):\n status, msg = utils.validate_name(name, \"36\", \"storageview name\")\n if not status:\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n else:\n LOG.info(msg)"
] | [
"0.62414205",
"0.6123533",
"0.60466254",
"0.59945595",
"0.5968563",
"0.5950491",
"0.5935591",
"0.5917729",
"0.5904584",
"0.58871233",
"0.5855124",
"0.5852563",
"0.58318865",
"0.5813858",
"0.5813858",
"0.58076316",
"0.57668716",
"0.576247",
"0.5755813",
"0.57409114",
"0.57358676",
"0.57354623",
"0.5715307",
"0.5704926",
"0.5694756",
"0.5690485",
"0.56901884",
"0.56880516",
"0.5669288",
"0.5626796"
] | 0.67900914 | 0 |
Ensure that the nonce is correct, less than one hour old, and not more than two minutes in the future. Callers should also store used nonces and reject messages with previously-used ones. | def verify_and_burn_nonce(nonce):
ret = re.match(r'^001[2-9][0-9]{3}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])'
r'T([01][0-9]|2[0-3])(:[0-5][0-9]){2}Z[A-Za-z0-9]{6}$', nonce)
if ret:
date = parser.parse(nonce[3:-6])
now = datetime.utcnow().replace(tzinfo=tz.tzutc())
ret = date < (now + timedelta(minutes=2)) and date > (now + timedelta(hours=-1))
return ret # TODO: keep a record (at least for the last hour) of burned nonces | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validateNonce(lastNonce, lastHash, nonce):\n sha = hashlib.sha256(f'{lastNonce}{lastHash}{nonce}'.encode())\n return sha.hexdigest()[:4] == '0000'",
"def nonce():\n return random.randint(0, 4294967295)",
"def nonce():\n return random.randint(0, 4294967295)",
"def _nonce():\n return str(round(100000 * time.time()) * 2)",
"def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()",
"def get_nonce() -> int:\n return int(time.time() * FACTOR)",
"def useNonce(self, nonce):\r\n query = datastore.Query('Nonce')\r\n query['nonce ='] = nonce\r\n query['created >='] = (datetime.datetime.now() -\r\n datetime.timedelta(hours=6))\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())\r\n return True\r\n else:\r\n return False",
"def nonceVerification(nonce, decryptedNonce):\r\n if (nonce == decryptedNonce):\r\n status = \"150 OK\"\r\n else:\r\n status = \"400 Error\"\r\n return status",
"def _nonce(self):\n return str(int(round(time.time() * 10000)))",
"def make_nonce (self, request):\r\n ip = request.channel.server.ip\r\n now = str(long(time.time()))\r\n if now[-1:] == 'L':\r\n now = now[:-1]\r\n private_key = str (id (self))\r\n nonce = ':'.join([ip, now, private_key])\r\n return self.apply_hash (nonce)",
"def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False",
"def useNonce(self, server_url, timestamp, salt):\n\n if is_nonce_old(timestamp):\n return False\n\n try:\n mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,\n timestamp=timestamp)\n except me.DoesNotExist:\n mist_nonces = []\n\n if len(mist_nonces) == 0:\n print(\"Timestamp = %s\" % timestamp)\n MistNonce(\n server_url=server_url, salt=salt, timestamp=timestamp\n ).save()\n return True\n\n return False",
"def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time",
"def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True",
"def default_nonce_duration():\n return now() + timedelta(hours=4)",
"def test_blind_sig_expiration(self):\n signer_obj = ECCBlind(year=2020, month=1)\n point_r = signer_obj.signer_init()\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n msg = os.urandom(64)\n msg_blinded = requester_obj.create_signing_request(point_r, msg)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n verifier_obj = ECCBlind(pubkey=signer_obj.pubkey())\n self.assertFalse(verifier_obj.verify(msg, signature))",
"def _nonce(self):\n # Note: if we use multithreading for a single exchange, this may\n # cause an issue.\n delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n return int(delta.total_seconds() * 1000)",
"def gen_nonce(self, length=32):\n if(length < 32):\n res = {\"message\": 'Invalid nonce length'}, 400\n else:\n nonce = secrets.token_hex(floor(length))\n nonces_file = \"client-generate-nonces.txt\"\n res = self.check_nonce(nonce, nonces_file, length)\n return res",
"def _get_nonce():\n return uuid.uuid4().get_hex()",
"def test_nonce(mocker):\n transaction = Transaction(\n chain=0,\n nonce=14_294_967_296,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.nonce = 1_260_300\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.nonce = -1\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)",
"def nonceVerification(nonce, decryptedNonce):\n #Enter code to compare the nonce and the decryptedNonce. This method\n # should return a string of \"200 OK\" if the parameters match otherwise\n # it should return \"400 Error Detected\"\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"",
"def check_one_time_nonce(self, user_supplied_nonce):\n\n if self.nonce_action_auth_valid_uses > 0:\n self.nonce_action_auth_valid_uses -= 1\n ret = util.safe_string_compare(user_supplied_nonce, self.nonce_action_auth)\n if ret is True: # explicitly checking for boolean True\n return True\n return False\n return False",
"def make_nonce():\n time_format = '%Y-%m-%dT%H:%M:%SZ'\n time_component = time.strftime(time_format, time.gmtime())\n valid_chars = ''\n\n # iterate over all the aschii characters for a list of all alpha-numeric characters\n for char_index in range(0, 128):\n if chr(char_index).isalpha() or chr(char_index).isalnum():\n valid_chars += chr(char_index)\n\n random_str = ''\n random_chr = random.SystemRandom()\n for i in range(0, 6):\n random_str += random_chr.choice(valid_chars)\n\n return '001{time_str}{random_str}'.format(time_str=time_component,\n random_str=random_str)",
"def generate_nonce():\n return str(int(round(time.time() * 1000)))",
"def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True",
"def nonceVerification(nonce, decryptedNonce):\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"",
"def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False",
"def _generate_nonce(self):\n return str(random.randrange(100000, 999999))",
"async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)",
"def get_initial_nonce(self):\n\n #First we will initiate the nonce with the prng.\n bit_nonce = int_to_bitstr(self.prng, 16)\n\n \"\"\" Then we generate the second part by taking only \n the last 16 bits until we have 32 bits in total. \"\"\"\n for i in range(16):\n bit_nonce += self.prng_feedback(bit_nonce[i:i+16])\n\n \"\"\" The new state of the prng will be the last 16 bits\n of the nonce, because we discarded 16 bits during the\n feedback loop. The initial nonce has 32 bits now. \"\"\"\n bit_prng = bit_nonce[16:]\n\n self.prng = bitstr_to_int(bit_prng)\n self.nonce = bitstr_to_int(bit_nonce)\n\n return self.nonce"
] | [
"0.65487677",
"0.6318106",
"0.6318106",
"0.63045347",
"0.623894",
"0.618787",
"0.61794573",
"0.6055988",
"0.59781826",
"0.59658813",
"0.59374857",
"0.59365934",
"0.59196234",
"0.5910003",
"0.5893382",
"0.5891976",
"0.58326024",
"0.5790357",
"0.5772316",
"0.5751489",
"0.57323647",
"0.5717647",
"0.57078433",
"0.56723195",
"0.56435466",
"0.5622025",
"0.55843925",
"0.55049086",
"0.5487869",
"0.5466227"
] | 0.694894 | 0 |
Map Juniper SRX Policy Object into xml config element | def to_xml(self):
policy_element = create_element('policy')
create_element('name', text=self.name, parent=policy_element)
match_element = create_element('match', parent=policy_element)
for s in self.src_addresses:
create_element('source-address', text=s.name, parent=match_element)
for d in self.dst_addresses:
create_element('destination-address', text=d.name, parent=match_element)
then_element = create_element('then', parent=policy_element)
create_element(JuniperSRXPolicy.ActionMap[self.action], parent=then_element)
log_element = create_element('log', parent=then_element)
for log_type in self.logging:
create_element(JuniperSRXPolicy.LoggingMap[log_type], parent=log_element)
return policy_element | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _wrap_policy(policy_doc):\n return {\"IAMPolicy\": policy_doc}",
"def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config",
"def translate_policy(policy: dict):\n if 'PolicyName' in policy:\n # This is a normal policy that should not be expanded\n return policy\n template_name = next(iter(policy))\n template_parameters = policy[template_name]\n try:\n # 'convert' will return a list of policy statements\n policy_document = processor.convert(template_name, template_parameters)\n except InsufficientParameterValues as e:\n # Exception's message will give lot of specific details\n raise ValueError(str(e))\n except InvalidParameterValues:\n raise ValueError(\"Must specify valid parameter values for policy template '{}'\".format(template_name))\n return {\n \"PolicyName\": template_name + '-' + str(uuid.uuid4()),\n \"PolicyDocument\": policy_document\n }",
"def policy_str(self): # -> str:\n ...",
"def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config",
"def create_export_policy():\n config = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return config",
"def module_config_template():\n\n d = {\"AWSPricePerformancePublisher\": {\n \"module\": \"modules.AWS.publishers.AWS_price_performance\",\n \"name\": \"AWSPricePerformancePublisher\",\n }, }\n print(\"Entry in channel cofiguration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")\n print(\"\\t publish_to_graphite - publish to graphite if True\")\n print(\"\\t graphite_host - graphite host name\")",
"def test_create_hyperflex_ucsm_config_policy(self):\n pass",
"def test_create_hyperflex_sys_config_policy(self):\n pass",
"def _config(self):\n tmpl = self._template_interface\n for p in tmpl._params:\n setattr(self, p._name, p.get_value())",
"def __repr__(self) -> str:\n view = {\n \"server\": self.server,\n \"access-token\": 'yes' if self.token is not None else 'no',\n \"insecure\": self.insecure,\n \"output\": self.output,\n \"verbose\": self.verbose,\n }\n\n return \"<Configuration({})\".format(view)",
"def generate_puppet_resource(self):\n\t\tself.puppet_resource = Template(\"\"\"\n# -- BEGIN \"$domain\" --\napache::vhost::enable { \"$user\": }\n# -- END \"$domain\" --\n\"\"\").safe_substitute({\n\t\t\t\t\"user\": self.argv.get('user'),\n\t\t})",
"def policy_repr(self, policy):\n return policy.__repr__()",
"def test_patch_hyperflex_ucsm_config_policy(self):\n pass",
"def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})",
"def update_Policy(self,inputpolicy):\n \n policyob = self.SD_Map.retrieve_ob(inputpolicy)\n policyob.values[-1] = self.PolicyDicts[inputpolicy][self.translate(self.policy_option_vars[inputpolicy].get(),\n input_language = self.language,\n output_language = 'english')]",
"def test_create_hyperflex_node_config_policy(self):\n pass",
"def test_update_hyperflex_ucsm_config_policy(self):\n pass",
"def policy_alias(self):",
"def get_policy_info(self):\n policy_info = []\n for pol in self:\n # delete from /info if deprecated\n if pol.is_deprecated:\n continue\n policy_entry = {}\n policy_entry['name'] = pol.name\n if pol.is_default:\n policy_entry['default'] = pol.is_default\n policy_info.append(policy_entry)\n return policy_info",
"def from_xml_node(cls, xml_node):\n policies = []\n for policy_node in xml_node.iter(tag=xml_tags.Elements.POLICY):\n policies.append(Policy.from_xml_node(policy_node))\n return cls(policies)",
"def test_patch_hyperflex_node_config_policy(self):\n pass",
"def test_get_hyperflex_sys_config_policy_by_moid(self):\n pass",
"def add_to_pr_export(self, exp_template):",
"def apply_policy(self, policy):\n tenant_name = policy['tenant_name']\n fw_id = policy['fw_id']\n fw_name = policy['fw_name']\n LOG.debug(\"asa_apply_policy: tenant=%s fw_id=%s fw_name=%s\",\n tenant_name, fw_id, fw_name)\n cmds = [\"conf t\", \"changeto context \" + tenant_name]\n\n rule_dict = policy['rules']\n for rule_id in rule_dict:\n rule = rule_dict[rule_id]\n protocol = rule['protocol']\n name = rule['name']\n enabled = rule['enabled']\n dst_port = rule['destination_port']\n src_port = rule['source_port']\n\n if (rule['source_ip_address'] is not None):\n src_ip = IPNetwork(rule['source_ip_address'])\n else:\n src_ip = IPNetwork('0.0.0.0/0')\n\n if (rule['destination_ip_address'] is not None):\n dst_ip = IPNetwork(rule['destination_ip_address'])\n else:\n dst_ip = IPNetwork('0.0.0.0/0')\n\n if rule['action'] == 'allow':\n action = 'permit'\n else:\n action = 'deny'\n\n LOG.debug(\"rule[%s]: name=%s enabled=%s prot=%s dport=%s sport=%s \\\n dip=%s %s sip=%s %s action=%s\",\n rule_id, name, enabled, protocol, dst_port, src_port,\n dst_ip.network, dst_ip.netmask,\n src_ip.network, src_ip.netmask, action)\n\n acl = \"access-list \"\n acl = (acl + tenant_name + \" extended \" + action + \" \" +\n protocol + \" \")\n if (rule['source_ip_address'] is None):\n acl = acl + \"any \"\n else:\n acl = acl + str(src_ip.network) + \" \" + (\n str(src_ip.netmask) + \" \")\n if (src_port is not None):\n if (':' in src_port):\n range = src_port.replace(':', ' ')\n acl = acl + \"range \" + range + \" \"\n else:\n acl = acl + \"eq \" + src_port + \" \"\n if (rule['destination_ip_address'] is None):\n acl = acl + \"any \"\n else:\n acl = acl + str(dst_ip.network) + \" \" + \\\n str(dst_ip.netmask) + \" \"\n if (dst_port is not None):\n if (':' in dst_port):\n range = dst_port.replace(':', ' ')\n acl = acl + \"range \" + range + \" \"\n else:\n acl = acl + \"eq \" + dst_port + \" \"\n if (enabled is False):\n acl = acl + 'inactive'\n\n # remove the old ace for this rule\n if (rule_id in self.rule_tbl):\n cmds.append('no ' + self.rule_tbl[rule_id])\n\n self.rule_tbl[rule_id] = acl\n if tenant_name in self.tenant_rule:\n if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:\n self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)\n cmds.append(acl)\n cmds.append(\"access-group \" + tenant_name + \" global\")\n cmds.append(\"write memory\")\n\n LOG.debug(cmds)\n data = {\"commands\": cmds}\n return self.rest_send_cli(data)",
"def apply_policy(cls, metadata, policy):\r\n for attr, value in policy.iteritems():\r\n attr = cls._translate(attr)\r\n if attr not in cls.fields:\r\n # Store unknown attributes coming from policy.json\r\n # in such a way that they will export to xml unchanged\r\n metadata['xml_attributes'][attr] = value\r\n else:\r\n metadata[attr] = value",
"def getConfigPage():\r\n\r\n\r\n #print os.getcwd()\r\n #os.chdir('..')\r\n #os.chdir('D:\\Stone\\Python\\Python_Insurance')\r\n #print os.getcwd()\r\n\r\n #configPage = ConfigObj('pagesConf.conf')\r\n configPage = ConfigObj('thirdevalConf.conf')\r\n\r\n #configPagePolicyHolder_Usr = configPage['PolicyHolder_Usr']\r\n #dictFirst = configPagePolicyHolder_Usr.dict()\r\n #print dictFirst\r\n #j = 2\r\n #for j in range(j):\r\n # for member in dictFirst:\r\n # if 'titleText' in dictFirst[member]:\r\n # print member\r\n # dictSecond = dictFirst[member]\r\n # print dictSecond\r\n # break\r\n # dictFirst.pop(member)\r\n #i = i+1\r\n #print i\r\n #for item in configPagePolicyHolder_Usr[member]:\r\n # print configPagePolicyHolder_Usr[member][item]\r\n #for item in member:\r\n # print member[item]\r\n #print configPage\r\n #print configPagePolicyHolder_Usr\r\n #configPagePolicyHolder_Usr = configPage['PolicyHolder_Usr']\r\n #print configPagePolicyHolder_Usr\r\n\r\n #print configPagePolicyHolder_Usr.as_int( 'rowNumber')\r\n #print configPage.as_bool(\"hasPolicyHolder_Usr\")\r\n return configPage",
"def _serialize(self, policy):\n return {\n \"rid\": policy.rid,\n \"actions\": list(policy.actions),\n \"consumer\": url_for(\n \"rest_endpoints.consumers\",\n consumer_key=policy.consumer_key)\n }",
"def test_update_hyperflex_node_config_policy(self):\n pass",
"def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }"
] | [
"0.58459353",
"0.57357043",
"0.52592963",
"0.52169687",
"0.5177298",
"0.5132841",
"0.5117141",
"0.5053394",
"0.50252014",
"0.50223446",
"0.5019244",
"0.50055027",
"0.49536827",
"0.4945237",
"0.49375263",
"0.49065456",
"0.49007678",
"0.48749763",
"0.48602158",
"0.48564184",
"0.4848674",
"0.48378274",
"0.48179054",
"0.48055226",
"0.48010263",
"0.47990635",
"0.4791481",
"0.47764415",
"0.4771747",
"0.4757154"
] | 0.6687791 | 0 |
Creates a new ColumnInfo and updates the size | def update(self, size) -> 'ColumnInfo':
return ColumnInfo(
size,
self.directive,
self.period
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AddColumnInfo(self, colInfo):\r\n \r\n self._columns.append(colInfo)\r\n self._total_col_width += colInfo.GetWidth()\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True",
"def AddColumnInfo(self, colInfo):\r\n\r\n self._header_win.AddColumnInfo(colInfo)\r\n self.DoHeaderLayout()",
"def InsertColumnInfo(self, before, colInfo):\r\n\r\n if before < 0 or before >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n self._columns.insert(before, colInfo)\r\n self._total_col_width += colInfo.GetWidth()\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True",
"def InsertColumnInfo(self, before, colInfo):\r\n\r\n self._header_win.InsertColumnInfo(before, colInfo)\r\n self._header_win.Refresh()",
"def SetColumn(self, column, info):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n w = self._columns[column].GetWidth()\r\n self._columns[column] = info\r\n \r\n if w != info.GetWidth():\r\n self._total_col_width += info.GetWidth() - w\r\n self._owner.AdjustMyScrollbars()\r\n \r\n self._owner._dirty = True",
"def _update_width(self, is_commit_in_existing_columns):\n max_cols = self.num_columns + self.num_parents\n\n # Even if the current commit has no parents to be printed, it still\n # takes up a column for itself.\n if self.num_parents < 1:\n max_cols += 1\n\n # We added a column for the current commit as part of self.num_parents.\n # If the current commit was already in self.columns, then we have double\n # counted it.\n if is_commit_in_existing_columns:\n max_cols -= 1\n\n # Each column takes up 2 spaces\n self.width = max_cols * 2",
"def UpdateColumns(self):\r\n data = self.data\r\n columns = data.getParam('columns',data.tankColumns[:])\r\n col_name = data.getParam('colNames',{})\r\n col_width = data.getParam('colWidths',{})\r\n col_align = data.getParam('colAligns',{})\r\n for index,column in enumerate(columns):\r\n name = col_name.get(column,_(column))\r\n width = col_width.get(column,30)\r\n align = wxListAligns[col_align.get(column,'LEFT')]\r\n self.gList.InsertColumn(index,name,align)\r\n self.gList.SetColumnWidth(index, width)",
"def addcolumn(self, colname, coldata):\n if len(coldata) != len(self):\n raise ValueError,\"Column length must match catalog length\"\n\n #Most of the bookkeeping is the same as for an empty column\n self.addemptycolumn(colname,coldata.dtype)\n\n #and then we reset the column to contain the actual data\n setattr(self,colname,coldata)",
"def AddColumns(sqlite_file, table_name):\r\n columns = ['cf_direct_parent','cf_kingdom','cf_superclass',\\\r\n 'cf_class','cf_subclass','cf_intermediate_0','cf_intermediate_1',\\\r\n 'cf_intermediate_2','cf_intermediate_3','cf_intermediate_4',\\\r\n 'cf_intermediate_5','cf_molecular_framework','cf_alternative_parents',\\\r\n 'cf_substituents', 'cf_description']\r\n column_type = 'TEXT'\r\n # Connecting to the database file\r\n conn = sqlite3.connect(sqlite_file) # Connecting to the database\r\n c = conn.cursor() # Adding a cursor to interact with the database\r\n # Adding new column, if it does not exist yet, without a row value\r\n for new_column_name in columns:\r\n try:\r\n c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\r\n .format(tn=table_name, cn=new_column_name, ct=column_type))\r\n print(\"Column created: {cn}\".format(cn=new_column_name))\r\n except sqlite3.OperationalError:\r\n print(\"Column already exists: {cn}\".format(cn=new_column_name))\r\n conn.commit()\r\n conn.close()\r\n return None",
"def _addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata",
"def __store_column_width(self):\n self.header_width = []\n for i in range(0, self.view.header().count()):\n self.header_width.append(self.view.columnWidth(i))",
"def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" + getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")",
"def SetColumn(self, column, colInfo):\r\n\r\n self._header_win.SetColumn(column, colInfo)\r\n self._header_win.Refresh()",
"def _write_new_column_data(self, column: str, new_kind: str, data: ndarray,\n srm: list, order: int) -> None:\n if new_kind not in self._data:\n loc = 0\n else:\n loc = self._data[new_kind].shape[1]\n if new_kind == 'S':\n self._str_reverse_map[loc] = srm\n self._column_info[column] = utils.Column(new_kind, loc, order)\n if new_kind in self._data:\n self._data[new_kind] = np.asfortranarray(np.column_stack((self._data[new_kind], data)))\n else:\n if data.ndim == 1:\n data = data[:, np.newaxis]\n\n self._data[new_kind] = np.asfortranarray(data)",
"def newColumn (layer,FieldName,DataType):\n # Check if field already exists\n if layer.fields().indexFromName(FieldName)==-1:\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes([QgsField(FieldName,DataType)])\n print(\"New field \\\"{}\\\" added\".format(FieldName))\n # Update to propagate the changes\n layer.updateFields()\n else:\n print(\"Field \\\"{}\\\" already exists.\".format(FieldName))",
"def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta",
"def add_col(self, colname, n_batch=5000, debug=False):\n\n if debug: print(\"Create new column {col}\".format(col=colname))\n # Alter table add column\n #\n alter_query = '''\n ALTER TABLE \"{tablename}\"\n ADD COLUMN \"{colname}\" {datatype};\n '''.format(tablename=self.get_carto_tablename(),\n colname=colname,\n datatype=datatype_map(str(self.dtypes[colname])))\n if debug: print(alter_query)\n\n # add column\n resp = self.carto_sql_client.send(alter_query)\n if debug: print(resp)\n\n # update all the values in that column\n #\n # NOTE: fails if colval is 'inf' or some other exceptional Python\n # or NumPy type\n n_items = len(self[colname])\n update_query = '''\n UPDATE \"{tablename}\"\n SET \"{colname}\" = {colval}\n WHERE \"cartodb_id\" = {cartodb_id};\n '''\n queries = []\n\n for row_num, item in enumerate(self[colname].iteritems()):\n # if debug: print(item)\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n temp_query = update_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(item[1], pgtype),\n cartodb_id=item[0]).strip()\n queries.append(temp_query)\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n output_query = '\\n'.join(queries)\n if debug: print(output_query)\n if debug: print(\"Num chars in query: {}\".format(len(output_query)))\n resp = self.carto_sql_client.send(output_query)\n queries = []\n\n return None",
"def _update_desc(self):\n if not self.connection:\n self.close()\n cname = CREATE_BUFFER_U(1024)\n ctype_code = C_SHORT()\n csize = ctypes.c_size_t()\n cdisp_size = C_SSIZE_T(0)\n c_decimal_digits = C_SHORT()\n cnull_ok = C_SHORT()\n col_descr = []\n self._col_type_code_list = []\n for col in range(1, self._num_of_cols() + 1):\n ret = ODBC_API.SQLColAttribute(self.stmt_h, col, 6, ADDR(CREATE_BUFFER(10)), 10, ADDR(C_SHORT()), ADDR(cdisp_size))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n ret = ODBC_API.SQLDescribeColW(self.stmt_h, col, cname, len(cname), ADDR(C_SHORT()), ADDR(ctype_code), ADDR(csize), ADDR(c_decimal_digits), ADDR(cnull_ok))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n # (name, type_code, display_size,\n col_descr.append((from_buffer_u(cname), SQL_DATA_TYPE_DICT.get(ctype_code.value, (ctype_code.value,))[0], cdisp_size.value, csize.value, csize.value, c_decimal_digits.value, cnull_ok.value == 1 and True or False))\n self._col_type_code_list.append(ctype_code.value)\n if len(col_descr) > 0:\n self.description = col_descr\n # Create the row type before fetching.\n self._row_type = self.row_type_callable(self)\n else:\n self.description = None\n self._create_col_buf()",
"def create_column(self, new_column, dtype):\n self.logger.debug(\"[%u] Ready to add column %s\" %\n (os.getpid(), new_column))\n ddl = \"\"\"\n ALTER TABLE {schema}.{table}\n ADD COLUMN IF NOT EXISTS {col} {type}\n \"\"\"\n # TODO Replace by execute_ddl func and test it\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl.format(schema=self.config['schema'],\n table=self.config['table'],\n col=new_column,\n type=dtype))\n self.logger.debug(\"[%u] Column %s has been added\" %\n (os.getpid(), new_column))",
"def OnColumnResize(self,event):\r\n iColumn = event.GetColumn()\r\n column = self.data.getParam('columns')[iColumn]\r\n self.data.updateParam('colWidths')[column] = self.gList.GetColumnWidth(iColumn)",
"def columns(self, new_columns: ColumnT) -> None:\n new_columns2: ndarray = init.check_column_validity(new_columns)\n len_new: int = len(new_columns2)\n len_old: int = len(self._columns)\n if len_new != len_old:\n raise ValueError(f'There are {len_old} columns in the DataFrame. '\n f'You provided {len_new}.')\n\n new_column_info: ColInfoT = {}\n for old_col, new_col in zip(self._columns, new_columns2):\n new_column_info[new_col] = utils.Column(*self._column_info[old_col].values)\n\n self._column_info = new_column_info\n self._columns = new_columns2",
"def update_column(self, xmldata):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n headers = managers.request_manager.get_request().session().value(\"headers\")\n if not columns:\n return False\n if xmldata:\n # Parsing of column declaration\n dom = parseString(xmldata.encode(\"UTF-8\"))\n column = dom.getElementsByTagName(\"column\")[0]\n name = un_quote(column.getAttribute(\"name\"))\n if not name:\n return False\n declaration = name\n constraints = {}\n cid = column.getAttribute(\"id\")\n type = column.getAttribute(\"type\")\n if not type or type == \"INTEGER\" or type == \"REAL\" or type == \"TEXT\" or type == \"BLOB\":\n constraints[\"type\"] = type\n if column.getAttribute(\"notnull\") == \"true\":\n constraints[\"not null\"] = True\n if column.getAttribute(\"primary\") == \"true\":\n if column.getAttribute(\"autoincrement\") == \"true\":\n constraints[\"primary key\"] = \"autoincrement\"\n else:\n constraints[\"primary key\"] = True\n if column.getAttribute(\"unique\") == \"true\":\n constraints[\"unique\"] = True\n\n if column.getAttribute(\"default\") and column.getAttribute(\"default\") != \"\" and column.getAttribute(\"default\") != \"NULL\":\n constraints[\"default\"] = column.getAttribute(\"default\")\n\n column_obj = VDOM_db_column(name, constraints)\n column_obj.id = cid\n\n # praparing SQL code\n old_column = None\n for col in columns:\n if columns[col].id == cid:\n old_column = columns[col]\n break\n if not old_column:\n return False\n\n newtable = \"%s_new(\" % self.name\n oldtable = \"%s(\" % self.name\n for col in headers:\n if oldtable[-1] != \"(\":\n oldtable += \", \"\n oldtable += columns[col].to_declaration()\n\n if columns[col].id == cid:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += column_obj.to_declaration()\n\n else:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += columns[col].to_declaration()\n newtable += \")\"\n if newtable[-2] == \"(\":\n return False\n newcols = []\n newcols.extend(headers)\n newcols.remove(old_column.name)\n newcols_decl = \"\"\n for ctr in newcols:\n newcols_decl += \", `%s`\" % ctr\n\n sql = \"\"\"BEGIN TRANSACTION;\nCREATE TABLE %(newtable)s;\nINSERT INTO `%(newtablename)s` (%(newcols)s) SELECT %(newcols)s FROM `%(oldtablename)s`;\nDROP TABLE `%(oldtablename)s`;\nALTER TABLE `%(newtablename)s` RENAME TO `%(oldtablename)s`;\nEND TRANSACTION;\"\"\" % {\"newtable\": newtable, \"newtablename\": self.name + \"_new\", \"oldtablename\": self.name, \"newcols\": newcols_decl[2:]}\n query = VDOM_sql_query(self.owner_id, self.database_id, sql, None, True)\n query.commit()\n columns.pop(old_column.name)\n columns[column_obj.name] = column_obj\n managers.request_manager.get_request().session().value(\"columns\", columns)\n self.restore_structure()\n return True",
"def add_columns(self, table, col_data, col_type):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n for data, typ in zip(col_data, col_type):\n c.execute(\"ALTER TABLE {tn} ADD COLUMN {cn} {ct}\".\n format(tn=table, cn=data, ct=typ))\n conn.commit() \n conn.close()",
"def addemptycolumn(self, colname, coltype):\n setattr(self,colname,N.zeros((len(self),),coltype))\n self._modflag=True\n self._type[colname]=coltype\n\n #Looks strange here because we count columns from 1 but\n #Python counts them from 0\n self._ncolumns+=1\n self._d[colname]=self._ncolumns\n self._colnames.append(colname)\n self._header+='# %d %s\\n'%(self._ncolumns,colname)",
"def add_column(self, fieldname, column, align=..., valign=...):\n ...",
"def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)",
"def append_columns(classdict, shape=()):\n heavy = common.heavy\n for (itype, type_) in enumerate(sorted(type_info)):\n if not heavy and type_ in heavy_types:\n continue # skip heavy type in non-heavy mode\n colpos = itype + 1\n colname = 'c_%s' % type_\n if type_ == 'enum':\n base = tb.Atom.from_sctype(sctype_from_type[type_])\n col = tb.EnumCol(enum, enum(0), base, shape=shape, pos=colpos)\n else:\n sctype = sctype_from_type[type_]\n dtype = np.dtype((sctype, shape))\n col = tb.Col.from_dtype(dtype, pos=colpos)\n classdict[colname] = col\n ncols = colpos\n return ncols",
"def initalise_column_lengths(coldata):\n for key, _ in coldata.items():\n coldata[key]['collen'] = len(coldata[key]['head'])\n return coldata",
"def setoutputsize(self, size, column=None):\n pass",
"def charcolumns(con,table):\n dbcolumns(con,table,\n # basic classification\n image=\"blob\",\n cls=\"text\",\n cost=\"real\",\n # separate prediction\n # pocost is the cost for the transcribed cls\n pred=\"text\",\n pcost=\"real\",\n pocost=\"real\",\n # cluster information\n cluster=\"integer\",\n count=\"integer\",\n classes=\"text\",\n # geometry\n rel=\"text\",\n lgeo=\"text\",\n # line source\n file=\"text\",\n segid=\"integer\",\n bbox=\"text\",\n )\n con.execute(\"create index if not exists cls_index on %s (cls)\"%table)\n con.execute(\"create index if not exists cluster_index on %s (cluster)\"%table)\n con.execute(\"create index if not exists cost_index on %s (cost)\"%table)\n con.execute(\"create index if not exists countcost_index on %s (count,cost)\"%table)\n con.commit()"
] | [
"0.6953581",
"0.6563744",
"0.63662314",
"0.62849265",
"0.6020931",
"0.5901503",
"0.5833289",
"0.58037275",
"0.5675224",
"0.5647959",
"0.56247205",
"0.5617577",
"0.55949396",
"0.5576177",
"0.55671567",
"0.5556555",
"0.5535544",
"0.55261594",
"0.55159014",
"0.5498711",
"0.54230773",
"0.54193276",
"0.5403627",
"0.5396774",
"0.53628767",
"0.53617436",
"0.5341596",
"0.53378534",
"0.53260034",
"0.5313883"
] | 0.76685566 | 0 |
Simply copy metadata from source to target | def copy_stock_metas(
meta_source,
target,
copy_columns_info=True,
) -> None:
set_attr(
target,
KEY_ALIAS_MAP,
copy(getattr(meta_source, KEY_ALIAS_MAP))
)
if copy_columns_info:
set_attr(
target,
KEY_COLUMNS_INFO_MAP,
deepcopy(getattr(meta_source, KEY_COLUMNS_INFO_MAP))
)
else:
set_attr(target, KEY_COLUMNS_INFO_MAP, {}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _copy_metadata(from_dir, to_dir):\n if not FLAGS.dry_run:\n tf.io.gfile.makedirs(to_dir)\n for fname in tfds.core.utils.list_info_files(from_dir):\n from_path = os.path.join(from_dir, fname)\n to_path = os.path.join(to_dir, fname)\n logging.info('cp %s %s', from_path, to_path)\n if not FLAGS.dry_run:\n tf.io.gfile.copy(from_path, to_path, overwrite=True)",
"def cp_metadata(src_client, src_index, target_client, target_index):\n print \"Copy settings, aliases & mappings from source index %s to target index %s...\" % (src_index, target_index)\n try:\n res = src_client.indices.get_settings(src_index)\n settings = res[src_index]['settings'] if res and 'settings' in res[src_index] else {}\n res = src_client.indices.get_mapping(src_index)\n mappings = res[src_index]['mappings'] if res and 'mappings' in res[src_index] else {}\n res = src_client.indices.get_aliases(src_index)\n aliases = res[src_index]['aliases'] if res and 'aliases' in res[src_index] else {}\n res = target_client.indices.create(index=target_index, body={\"settings\": settings, \"mappings\": mappings, \"aliases\": aliases})\n print 'Metadata copied'\n return res['acknowledged']\n except Exception, e:\n print e\n return False",
"def copy_metadata():\n\n common_root = os.path.join(os.path.dirname(__file__), \"wsdotroute\", \"esri\")\n src = os.path.join(common_root, \"toolboxes\")\n dest = os.path.join(common_root, \"help\", \"gp\", \"toolboxes\")\n\n if os.path.exists(dest):\n shutil.rmtree(dest)\n\n shutil.copytree(src, dest, ignore=shutil.ignore_patterns(\"*.pyt\"))\n\n print(\"Completed copying metadata XML files\")",
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):\n source_structure = self._lookup_course(source_course_key).structure\n with self.bulk_operations(dest_course_key):\n original_structure = self._lookup_course(dest_course_key).structure\n index_entry = self._get_index_if_valid(dest_course_key)\n new_structure = self.version_structure(dest_course_key, original_structure, user_id)\n\n new_structure['assets'] = source_structure.get('assets', {})\n new_structure['thumbnails'] = source_structure.get('thumbnails', [])\n\n # update index if appropriate and structures\n self.update_structure(dest_course_key, new_structure)\n\n if index_entry is not None:\n # update the index entry if appropriate\n self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])",
"def copy(self, source_host, dest_host, filename):",
"def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })",
"def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }",
"def _copy_output(src: Graph, dst: Graph):\n for n_src, n_dst in zip(src.nodes, dst.nodes):\n if n_src.op == 'output':\n n_dst.meta = n_src.meta",
"def copy(source, target):\n\tshutil.copy(source, target)",
"def copy_object_metadata(self, bucket_name, src_object_name, dst_object_name):\n\n return h3lib.copy_object_metadata(self._handle, bucket_name, src_object_name, dst_object_name, self._user_id)",
"def append_ipma_metadata(orig: dict, dest: dict):\n for key in [key for key in orig.keys() if key != 'data']:\n dest[key] = orig[key]",
"def _copy_file ( self, source, dest ):\n return",
"def copyMedia(source, target):\n if not os.path.exists(target):\n print(\"copying source,target:\", source, target)\n shutil.copy2(source, target)",
"def copy(from_dir: tfds.typing.PathLike, to_dir: tfds.typing.PathLike) -> None:\n for full_name in tfds.core.load.list_full_names():\n from_full_name_dir = os.path.join(from_dir, full_name)\n to_full_name_dir = os.path.join(to_dir, full_name)\n\n # Skip if the dataset isn't generated or that metadata are already copied\n if not tf.io.gfile.exists(from_full_name_dir):\n logging.info('Skipping %s (not found)', from_full_name_dir)\n continue\n if tf.io.gfile.exists(to_full_name_dir) and not FLAGS.overwrite:\n logging.info('Skipping %s (already exists)', to_full_name_dir)\n continue\n\n _copy_metadata(from_dir=from_full_name_dir, to_dir=to_full_name_dir)",
"def _copyFile(\n syn,\n entity,\n destinationId,\n version=None,\n updateExisting=False,\n setProvenance=\"traceback\",\n skipCopyAnnotations=False,\n):\n ent = syn.get(entity, downloadFile=False, version=version, followLink=False)\n # CHECK: If File is in the same parent directory (throw an error) (Can choose to update files)\n if not updateExisting:\n existingEntity = syn.findEntityId(ent.name, parent=destinationId)\n if existingEntity is not None:\n raise ValueError(\n 'An entity named \"%s\" already exists in this location. File could not be copied'\n % ent.name\n )\n profile = syn.getUserProfile()\n # get provenance earlier to prevent errors from being called in the end\n # If traceback, set activity to old entity\n if setProvenance == \"traceback\":\n act = Activity(\"Copied file\", used=ent)\n # if existing, check if provenance exists\n elif setProvenance == \"existing\":\n try:\n act = syn.getProvenance(ent.id)\n except SynapseHTTPError as e:\n if e.response.status_code == 404:\n act = None\n else:\n raise e\n elif setProvenance is None or setProvenance.lower() == \"none\":\n act = None\n else:\n raise ValueError(\"setProvenance must be one of None, existing, or traceback\")\n # Grab entity bundle\n bundle = syn._getEntityBundle(\n ent.id,\n version=ent.versionNumber,\n requestedObjects={\"includeEntity\": True, \"includeFileHandles\": True},\n )\n fileHandle = synapseclient.core.utils.find_data_file_handle(bundle)\n createdBy = fileHandle[\"createdBy\"]\n # CHECK: If the user created the file, copy the file by using fileHandleId else copy the fileHandle\n if profile.ownerId == createdBy:\n newdataFileHandleId = ent.dataFileHandleId\n else:\n copiedFileHandle = copyFileHandles(\n syn,\n [fileHandle],\n [\"FileEntity\"],\n [bundle[\"entity\"][\"id\"]],\n [fileHandle[\"contentType\"]],\n [fileHandle[\"fileName\"]],\n )\n # Check if failurecodes exist\n copyResult = copiedFileHandle[0]\n if copyResult.get(\"failureCode\") is not None:\n raise ValueError(\n \"%s dataFileHandleId: %s\"\n % (copyResult[\"failureCode\"], copyResult[\"originalFileHandleId\"])\n )\n newdataFileHandleId = copyResult[\"newFileHandle\"][\"id\"]\n\n new_ent = File(\n dataFileHandleId=newdataFileHandleId, name=ent.name, parentId=destinationId\n )\n # Set annotations here\n if not skipCopyAnnotations:\n new_ent.annotations = ent.annotations\n # Store provenance if act is not None\n if act is not None:\n new_ent = syn.store(new_ent, activity=act)\n else:\n new_ent = syn.store(new_ent)\n # Leave this return statement for test\n return new_ent[\"id\"]",
"def copy_to(raw_data, obj):\n\n shutil.copyfileobj(raw_data, obj)",
"def data_copy(config, start, end, new, destination_node, source_url, source, destination):\n try:\n asyncio.run(_run(config.node, start, end, new, destination_node,\n source, destination, source_url))\n except errors.ApiError as e:\n raise click.ClickException(str(e)) from e\n finally:\n asyncio.run(\n config.close_node())",
"def _StageMetadata(json_metadata, storage_service, staged_file: str):\n # Write computed metadata to object storage.\n temp_run_dir = temp_dir.GetRunDirPath()\n local_file = os.path.join(temp_run_dir, os.path.basename(staged_file))\n with open(local_file, 'w') as f:\n json.dump(json_metadata, f)\n storage_service.Copy(local_file, staged_file)",
"def clone_data(self,req):\n # source folder\n source=req.source or \"/media/howie/archive/data/music/\"\n # destination folder\n dest=req.dest or \"/home/howie/data/music/\"\n # clone the music files\n c=0\n for i in self.list(isin={'kind':(\"track\",\"image\",\"file\")},orderby=\"uid\"):\n c+=1\n# print c,\" uid:\",i.uid,\" kind:\",i.kind,\" loc:\",i.file_folder(),\" name:\",i.name\n subfolder=i.file_folder()\n destfolder=dest+subfolder\n if not os.path.exists(destfolder):\n os.makedirs(destfolder)\n shutil.copy2(source+subfolder+\"/\"+i.code,destfolder)\n print(\"added %s\" % (dest+subfolder+\"/\"+i.code,))\n return \"clone completed: %s files added\" % c",
"def _copy_visitor(path, source, destination, labels):\n\n # Skip paths corresponding to excluded labels\n if path.split('/')[0] in labels:\n return\n\n # Copy everything else\n source_obj = source[path]\n if isinstance(source_obj, h5py.Group):\n dest_obj = destination.create_group(path)\n else:\n ds = source_obj\n dest_obj = destination.create_dataset(\n path,\n data=source_obj[()],\n chunks=ds.chunks,\n maxshape=ds.maxshape,\n compression=ds.compression,\n compression_opts=ds.compression_opts,\n scaleoffset=ds.scaleoffset,\n shuffle=ds.shuffle,\n fletcher32=ds.fletcher32,\n fillvalue=ds.fillvalue,\n )\n\n dest_obj.attrs.update(source_obj.attrs)",
"def run_copy(self, src, dst):\n pass",
"def copy(self, target):\r\n py.process.cmdexec(\"svn copy %s %s\" %(str(self), str(target)))",
"def test_copy(self):\n\n tempdir = tempfile.mkdtemp()\n include_example = os.path.join(here, 'include-example.ini')\n manifest = ManifestParser(manifests=(include_example,))\n manifest.copy(tempdir)\n self.assertEqual(sorted(os.listdir(tempdir)),\n ['fleem', 'include', 'include-example.ini'])\n self.assertEqual(sorted(os.listdir(os.path.join(tempdir, 'include'))),\n ['bar.ini', 'crash-handling', 'flowers', 'foo.ini'])\n from_manifest = ManifestParser(manifests=(include_example,))\n to_manifest = os.path.join(tempdir, 'include-example.ini')\n to_manifest = ManifestParser(manifests=(to_manifest,))\n self.assertEqual(to_manifest.get('name'), from_manifest.get('name'))\n shutil.rmtree(tempdir)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def _copyRecursive(\n syn, entity, destinationId, mapping=None, skipCopyAnnotations=False, **kwargs\n):\n\n version = kwargs.get(\"version\", None)\n setProvenance = kwargs.get(\"setProvenance\", \"traceback\")\n excludeTypes = kwargs.get(\"excludeTypes\", [])\n updateExisting = kwargs.get(\"updateExisting\", False)\n if mapping is None:\n mapping = dict()\n # Check that passed in excludeTypes is file, table, and link\n if not isinstance(excludeTypes, list):\n raise ValueError(\"Excluded types must be a list\")\n elif not all([i in [\"file\", \"link\", \"table\"] for i in excludeTypes]):\n raise ValueError(\n \"Excluded types can only be a list of these values: file, table, and link\"\n )\n\n ent = syn.get(entity, downloadFile=False)\n if ent.id == destinationId:\n raise ValueError(\"destinationId cannot be the same as entity id\")\n\n if (isinstance(ent, Project) or isinstance(ent, Folder)) and version is not None:\n raise ValueError(\"Cannot specify version when copying a project of folder\")\n\n if not isinstance(ent, (Project, Folder, File, Link, Schema, Entity)):\n raise ValueError(\"Not able to copy this type of file\")\n\n permissions = syn.restGET(\"/entity/{}/permissions\".format(ent.id))\n # Don't copy entities without DOWNLOAD permissions\n if not permissions[\"canDownload\"]:\n syn.logger.warning(\n \"%s not copied - this file lacks download permission\" % ent.id\n )\n return mapping\n\n access_requirements = syn.restGET(\"/entity/{}/accessRequirement\".format(ent.id))\n # If there are any access requirements, don't copy files\n if access_requirements[\"results\"]:\n syn.logger.warning(\n \"{} not copied - this file has access restrictions\".format(ent.id)\n )\n return mapping\n copiedId = None\n\n if isinstance(ent, Project):\n if not isinstance(syn.get(destinationId), Project):\n raise ValueError(\n \"You must give a destinationId of a new project to copy projects\"\n )\n copiedId = destinationId\n # Projects include Docker repos, and Docker repos cannot be copied\n # with the Synapse rest API. Entity views currently also aren't\n # supported\n entities = syn.getChildren(\n entity, includeTypes=[\"folder\", \"file\", \"table\", \"link\"]\n )\n for i in entities:\n mapping = _copyRecursive(\n syn,\n i[\"id\"],\n destinationId,\n mapping=mapping,\n skipCopyAnnotations=skipCopyAnnotations,\n **kwargs,\n )\n elif isinstance(ent, Folder):\n copiedId = _copyFolder(\n syn,\n ent.id,\n destinationId,\n mapping=mapping,\n skipCopyAnnotations=skipCopyAnnotations,\n **kwargs,\n )\n elif isinstance(ent, File) and \"file\" not in excludeTypes:\n copiedId = _copyFile(\n syn,\n ent.id,\n destinationId,\n version=version,\n updateExisting=updateExisting,\n setProvenance=setProvenance,\n skipCopyAnnotations=skipCopyAnnotations,\n )\n elif isinstance(ent, Link) and \"link\" not in excludeTypes:\n copiedId = _copyLink(syn, ent.id, destinationId, updateExisting=updateExisting)\n elif isinstance(ent, Schema) and \"table\" not in excludeTypes:\n copiedId = _copyTable(syn, ent.id, destinationId, updateExisting=updateExisting)\n # This is currently done because copyLink returns None sometimes\n if copiedId is not None:\n mapping[ent.id] = copiedId\n syn.logger.info(\"Copied %s to %s\" % (ent.id, copiedId))\n else:\n syn.logger.info(\"%s not copied\" % ent.id)\n return mapping",
"def copy(\n self,\n source,\n dest,\n name=None,\n shallow=False,\n expand_soft=False,\n expand_external=False,\n expand_refs=False,\n without_attrs=False,\n ):",
"def update_flow_metadata(self, update_with):\n if update_with:\n source, clone = self._fetch_flowdetail(clone=True)\n clone.meta.update(update_with)\n self._with_connection(self._save_flow_detail, source, clone)"
] | [
"0.7023326",
"0.6726561",
"0.65834624",
"0.6537077",
"0.64390403",
"0.62709254",
"0.62083226",
"0.61535376",
"0.60626066",
"0.60160655",
"0.60077727",
"0.59770507",
"0.59708804",
"0.5922094",
"0.5846591",
"0.57966024",
"0.57608265",
"0.57467353",
"0.56573725",
"0.56544816",
"0.5609208",
"0.5597532",
"0.55618346",
"0.5541921",
"0.5539623",
"0.5539623",
"0.5539623",
"0.5532673",
"0.5519487",
"0.5509952"
] | 0.7013717 | 1 |
Get list of all public modules relative to a path. | def get_public_modules(path, base_package=None):
result = []
for subdir, _, files in os.walk(path):
# Skip folders that start with _.
if any([part.startswith('_')
for part in subdir.split(os.path.sep)]):
continue
_, rel_dir = subdir.split(path)
rel_dir = rel_dir.lstrip(os.path.sep)
for filename in files:
if is_valid_module(filename):
mod_name, _ = os.path.splitext(filename)
rel_path = os.path.join(rel_dir, mod_name)
if base_package is not None:
rel_path = os.path.join(base_package, rel_path)
# Turn into a Python module rather than a file path.
result.append(rel_path.replace(os.path.sep, '.'))
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def moduleList(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [p for p in folder_list \\\n if os.path.exists(os.path.join(path, p,'__init__.py'))\\\n or p[-3:] in ('.py','.so')\\\n or p[-4:] in ('.pyc','.pyo','.pyd')]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list",
"def list_modules(path):\n modules = []\n for root, dirs, files in os.walk(path): # pylint: disable=unused-variable\n for file in files:\n if file.endswith(\".js\"):\n with open(os.path.join(path, file), 'r') as modfile:\n content = modfile.readlines()\n module_re = r\"/\\*\\* @module +([\\w.]+) +\\*/\"\n m = re.search(module_re, content[0])\n # test if its supposed to be a module\n if m and m.group(1):\n # great its a module ! lets see its content\n logger.debug(\"Module detected %s\" % m.group(1))\n modules.append((m.group(1), content))\n return modules",
"def walk_modules(path):\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods",
"def getRootModules():\n modules = []\n if ip.db.has_key('rootmodules'):\n return ip.db['rootmodules']\n t = time()\n store = False\n for path in sys.path:\n modules += moduleList(path) \n if time() - t >= TIMEOUT_STORAGE and not store:\n store = True\n print \"\\nCaching the list of root modules, please wait!\" \n print \"(This will only be done once - type '%rehashx' to \" + \\\n \"reset cache!)\"\n print\n if time() - t > TIMEOUT_GIVEUP:\n print \"This is taking too long, we give up.\"\n print\n ip.db['rootmodules'] = []\n return []\n \n modules += sys.builtin_module_names\n \n modules = list(set(modules))\n if '__init__' in modules:\n modules.remove('__init__')\n modules = list(set(modules))\n if store:\n ip.db['rootmodules'] = modules\n return modules",
"def find_modules(x):\n return Path(x).rglob('*.py')",
"def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]",
"def list_modules(lookup_paths: list = None):\n result = []\n\n if lookup_paths is None:\n lookup_paths = analyzer_paths()\n\n for path in lookup_paths:\n analyzer_module_root = resource_filename(path, \"modules\")\n # analyzer_root = os.path.join(anchore_module_root, \"modules\")\n for f in os.listdir(analyzer_module_root):\n thecmd = os.path.join(analyzer_module_root, f)\n if re.match(r\".*\\.py$\", thecmd):\n result.append(thecmd)\n\n result.sort(key=lambda x: analyzer_name_from_path(x))\n return result",
"def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name",
"def list_dir(self, path):",
"def listdir(self, path):\n return os.listdir(path)",
"def get_modules(self):\n return self._module_loader.filelist",
"def list_path(self, path):\n return LocalResources(\"\").list_path(path)",
"def lists(path):\r\n return os.listdir(path)",
"def modules_from_path(path, module_name=None, relative='.'):\n if path.is_file():\n yield import_module(relative + path.stem, module_name)\n elif path.is_dir():\n for file in path.iterdir():\n if '__pycache__' in str(file):\n continue\n if file.suffix == '.py' and file.stem != '__init__':\n yield from modules_from_path(file, module_name, relative)",
"def modules(self):\n return self._modules.keys()",
"def list(self):\n for dir in subdirs('plugins'):\n print dir.replace('plugins/', '')",
"def getAllPackages(metadataPaths):\n\n global index\n index = createModuleIndex(metadataPaths)\n allRpms = []\n for name in index.get_module_names():\n module = index.get_module(name)\n for stream in module.get_all_streams():\n allRpms.extend(stream.get_rpm_artifacts())\n return allRpms",
"def modules(self):\n return self._modules",
"def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)",
"def get_modules(site_path):\n for dirpath, dirnames, filenames in os.walk(site_path):\n module = dirpath[len(site_path) + 1:].split(os.path.sep)\n if module[0].endswith('.egg-info'):\n continue\n if module[0].endswith('.egg') or not module[0]:\n module.pop(0)\n\n for filename in filenames:\n root, ext = os.path.splitext(filename)\n if ext == '.py':\n if not re.match(VALID_PYTHON_FILENAME, root):\n continue\n if root == '__init__':\n yield '.'.join(module)\n else:\n yield '.'.join(module + [root])\n elif ext == '.egg-link':\n with open(os.path.join(dirpath, filename)) as f:\n egglink_path = f.readline().strip()\n linked_module_path = os.path.join(egglink_path, root.replace('-', '_'))\n for modname in get_modules_to_import_from_directory(linked_module_path):\n yield modname\n elif ext == '.egg':\n yield filename.split('-', 1)[0]",
"def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)",
"def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)",
"def get_modules(self):\n return self._modules.values()",
"def __dir__():\n return __all__",
"def modules_in_current_dir(path, module_name):\n yield from modules_from_path(Path(path).parent, module_name)",
"def get_leaf_modules(package_path):\n assert os.path.isfile(os.path.join(package_path, '__init__.py'))\n res = []\n root = os.path.join(package_path, '..')\n for path, _, files in os.walk(package_path):\n for f in files:\n if f.endswith(\".py\") and not f == \"__init__.py\":\n full_file = os.path.relpath(os.path.join(path, f), start=root)\n module = full_file.replace(os.sep, \".\")[:-3]\n res.append(module)\n return res",
"def plugin_list(self):\r\n return get_module_list()",
"def get_all_sources(remit):\n if remit == 'panzer' or remit == 'pandoc':\n os.chdir('source-'+remit)\n sourcelist = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n os.chdir('..')\n else:\n # get the maximal list of sources for a diff\n pandoc_list = get_all_sources('pandoc')\n panzer_list = get_all_sources('panzer')\n sourcelist = list(set(pandoc_list+panzer_list))\n sourcelist.sort()\n return sourcelist",
"def getDirContents(self, path):\r\n return sorted([int(file) for file in os.listdir(os.path.dirname(path))])",
"def mod_list(dir):\n\n modList = []\n modHash = {}\n isModule = False\n for ii in os.walk(dir):\n if ii[0] == dir:\n for f in ii[2]:\n # If there is no __init__ file, then the directory\n # upon which mod_list() is operating is not a module\n if f[0:8] == '__init__':\n isModule = True\n elif f[-3:] == '.py':\n modHash[f[:-3]] = True\n elif f[-4:] == '.pyc' or f[-4:] == '.pyo':\n modHash[f[:-4]] = True\n if isModule:\n modList = modHash.keys()\n modList.sort()\n return(modList)\n else:\n # Returning an empty list allows 'in' tests since a list is iterable,\n # and None isn't\n return([])"
] | [
"0.68155396",
"0.6617278",
"0.6521537",
"0.6507804",
"0.64241713",
"0.6303963",
"0.62988365",
"0.62711245",
"0.6268034",
"0.62417346",
"0.62394094",
"0.61733466",
"0.61073136",
"0.60500586",
"0.60428715",
"0.6029033",
"0.60251296",
"0.600245",
"0.59671587",
"0.5930028",
"0.5929299",
"0.5927241",
"0.5895827",
"0.5867134",
"0.5865177",
"0.5848004",
"0.58411664",
"0.58408874",
"0.58360225",
"0.58226395"
] | 0.81640553 | 0 |
Get losses of last computation if existing | def get_losses(self):
if self.loss is not None:
return [self.loss]
else:
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def losses(self):\n pass",
"def compute_loss(self):",
"def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)",
"def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)",
"def compute_loss(self, obs, returns):",
"def get_current_loss(self):\n return sum(self.recent_loss_array)/sum(self.recent_loss_bs_array)",
"def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss",
"def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss",
"def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss",
"def _get_loss(self):\n raise NotImplementedError",
"def loss(self):\n if not self.run:\n self._run()\n return self.model_loss",
"def losses(self):\n losses = []\n for layer in self.layers:\n losses += layer.losses\n if context.in_eager_mode():\n return losses\n\n relevant_inputs = self.inputs or []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,\n losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))",
"def get_loss(self):\n raise NotImplementedError",
"def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def get_loss(self):\n return self.loss / self.cnt",
"def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. regression loss for bbox \n\n return classification_loss, bbox_reg_loss",
"def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)",
"def compute_losses(self):\n cycle_consistency_loss_a = \\\n self._lambda_a * losses.cycle_consistency_loss(\n real_images=self.input_a, generated_images=self.cycle_images_a,\n )\n cycle_consistency_loss_b = \\\n self._lambda_b * losses.cycle_consistency_loss(\n real_images=self.input_b, generated_images=self.cycle_images_b,\n )\n\n lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)\n lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)\n\n g_loss_A = \\\n cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b\n g_loss_B = \\\n cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a\n\n d_loss_A = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_a_is_real,\n prob_fake_is_real=self.prob_fake_pool_a_is_real,\n )\n d_loss_B = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_b_is_real,\n prob_fake_is_real=self.prob_fake_pool_b_is_real,\n )\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)\n\n self.model_vars = tf.trainable_variables()\n\n d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]\n g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]\n d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]\n g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]\n\n self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)\n self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)\n self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)\n self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)\n\n for var in self.model_vars:\n print(var.name)\n\n # Summary variables for tensorboard\n self.g_A_loss_summ = tf.summary.scalar(\"g_A_loss\", g_loss_A)\n self.g_B_loss_summ = tf.summary.scalar(\"g_B_loss\", g_loss_B)\n self.d_A_loss_summ = tf.summary.scalar(\"d_A_loss\", d_loss_A)\n self.d_B_loss_summ = tf.summary.scalar(\"d_B_loss\", d_loss_B)",
"def loss_op(self):\n return self.loss",
"def loss(self):\n return self._loss",
"def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret",
"def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret",
"def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss",
"def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def compute_loss(self, batch, y_next_true):\n\n # Get the output of the gru layer for the input which serves as input to the reconstruction + forecasting model\n gru_output = self.model(batch, training=True)\n\n # Forecasting model loss calculation\n # Using mse yields the same result as RMSE and is more stable\n y_next_pred = self.model.forecasting_model(gru_output, training=True)\n y_next_pred = y_next_pred[:, -1, :] # only get the prediction for the last timestamp\n\n mse_for = tf.keras.losses.MeanSquaredError()\n loss_for = mse_for(y_next_true, y_next_pred)\n\n # Reconstruction model loss calculation\n # Like VAE based on: https://bit.ly/3oRMiQz\n mse_rec = tf.keras.losses.MeanSquaredError()\n reconstructed_output = self.model.reconstruction_model(gru_output)\n reconstruction_target = gru_output if 'reconstruct_gru' in self.hyper.variants else batch\n\n loss_rec = mse_rec(reconstruction_target, reconstructed_output)\n loss_rec += sum(self.model.reconstruction_model.losses) # Add KLD regularization loss\n\n # Overall loss\n loss = loss_for + loss_rec\n\n return loss",
"def get_current_validation_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name+'_val'] = float(getattr(self, 'loss_' + name + '_val')) # float(...) works for both scalar tensor and float number\n return errors_ret",
"def _build_loss(self, results, features, labels):\n losses, loss = getters.get_loss(\n self.loss.IDENTIFIER, results, labels, **self.loss.to_dict())\n self._loss = loss\n self._losses = losses\n\n other_losses = get_tracked(tf.GraphKeys.REGULARIZATION_LOSSES)\n if other_losses:\n loss = [loss] + other_losses\n loss = tf.add_n(loss, name=\"TotalLoss\")\n self._total_loss = loss\n return losses, loss",
"def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))",
"def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()",
"def get_loss(self, inputs, outputs, add_summary=True):\n cfg = self.cfg()\n torch.autograd.set_detect_anomaly(True)\n # g_loss = tf.zeros(dtype=tf.float32, shape=[])\n g_loss = self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n r_loss = self.regularization_loss(cfg)\n# print(g_loss, r_loss)\n g_loss += r_loss\n # if cfg.proj_weight:\n # g_loss += self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n\n # if cfg.drc_weight:\n # g_loss += add_drc_loss(cfg, inputs, outputs, cfg.drc_weight, add_summary)\n #\n # if cfg.pc_rgb:\n # g_loss += add_proj_rgb_loss(cfg, inputs, outputs, cfg.proj_rgb_weight, add_summary, self._sigma_rel)\n #\n # if cfg.proj_depth_weight:\n # g_loss += add_proj_depth_loss(cfg, inputs, outputs, cfg.proj_depth_weight, self._sigma_rel, add_summary)\n #\n # if add_summary:\n # tf.contrib.summary.scalar(\"losses/total_task_loss\", g_loss)\n\n return g_loss"
] | [
"0.7297349",
"0.69542986",
"0.6948095",
"0.6948095",
"0.68919253",
"0.6775551",
"0.665083",
"0.66488665",
"0.6633766",
"0.6528974",
"0.6520181",
"0.65189093",
"0.6494421",
"0.64789355",
"0.64417124",
"0.64047754",
"0.6394247",
"0.639117",
"0.6388315",
"0.631335",
"0.62982076",
"0.62982076",
"0.6297565",
"0.6277467",
"0.6260354",
"0.6249968",
"0.62422276",
"0.6229282",
"0.62292683",
"0.62263453"
] | 0.7003179 | 1 |
Compute any branch of the stable or unstable submanifolds of a saddle. Accepts fixed point instances of class fixedpoint_2D. | def find_saddle_manifolds(fp, xname, ds=None, ds_gamma=None, ds_perp=None, tmax=None,
max_arclen=None, ic=None, eps=None, ev_dirn=1,
ic_ds=None, max_pts=1000, directions=(1,-1),
which=('s', 'u'), other_pts=None, rel_scale=None,
ds_perp_fac=0.75, verboselevel=0, fignum=None):
if verboselevel > 1:
figure_name, layer_name = plotter.active_layer
_, layer_struct = plotter.active_layer_structs
assert layer_struct is not None
assert fp.classification == 'saddle' and not fp.degenerate
if fp.evals[0] < 0:
eval_s = fp.evals[0]
eval_u = fp.evals[1]
evec_s = fp.evecs[0]
evec_u = fp.evecs[1]
else:
eval_s = fp.evals[1]
eval_u = fp.evals[0]
evec_s = fp.evecs[1]
evec_u = fp.evecs[0]
gen = fp.gen
assert 'Gamma_out_plus' in gen.eventstruct, "Detection event surface(s) not present"
assert 'Gamma_out_minus' in gen.eventstruct, "Detection event surface(s) not present"
if eps is None:
# Dividing fixed point's inherited epsilon tolerance by 100
eps = fp.eps / 100
ds_perp_eps = 1e-12
if ds_perp_fac >= 1 or ds_perp_fac <= 0:
raise ValueError("ds_perp_fac must be between 0 and 1")
normord = fp.normord
if rel_scale is None:
rel_scale = (1,1)
dsscaled = dx_scaled_2D(ds, rel_scale)
if isinstance(ds_gamma, dict):
assert len(ds_gamma) == 2, "Invalid value for ds_gamma"
assert remain(list(ds_gamma.keys()), [1,-1]) == [], \
"Invalid value for ds_gamma"
else:
try:
ds_gamma = {1: ds_gamma, -1: ds_gamma}
except:
raise TypeError("Invalid type for ds_gamma")
try:
xcoord_ix = fp.point.coordnames.index(xname)
except ValueError:
raise ValueError("Invalid x coordinate name '%s'"%xname)
else:
# x coordinate index is either 0 or 1 for this 2D system
# y coordinate index is therefore 1-xcoord_ix
ycoord_ix = 1-xcoord_ix
yname = fp.point.coordnames[ycoord_ix]
if verboselevel>1:
# validate coord names
xn, yn = layer_struct.axes_vars
if xname != xn and yname != yn:
raise ValueError("x and y name mismatch with Plotter")
def test_fn(x, dircode):
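        # Integrate from x in direction dircode and return +1 or -1 according to
        # which Gamma_out event surface (plus or minus) the trajectory exits through.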
if verboselevel>1:
dm.log.msg("Integrate from test point", x=x[xname], y=x[yname], direction=dircode)
gen.set(ics=x)
try:
test = gen.compute('test', dirn=dircode)
except KeyboardInterrupt:
raise
except:
raise RuntimeError("Integration failed")
events = gen.getEvents()
if verboselevel>1:
pts=test.sample(coords=x.coordnames)
# only show first 25 points unless Gamma bd not met
plotter.add_data((pts[xname][:25],pts[yname][:25]), style='b-',
layer=layer_name,
name=dm.get_unique_name('test_traj_first25_'))
if events['Gamma_out_plus'] is None:
if events['Gamma_out_minus'] is None:
if verboselevel>1:
pts = test.sample(coords=x.coordnames)
dm.log.msg("Error", err_msg="Did not reach Gamma surfaces",
status="fail", last_computed_point=pts[-1],
last_computed_time=pts['t'][-1])
plotter.add_data((pts[xname],pts[yname]), style='b-',
layer=layer_name,
name=dm.get_unique_name('test_traj_full'),
log=dm.log)
raise RuntimeError("Did not reach Gamma surfaces")
else:
# hit Gamma_out_minus
if verboselevel>1:
dm.log.msg("Reached Gamma minus", t=events['Gamma_out_minus']['t'][0],
last_computed_point=pts[-1],
last_computed_time=pts['t'][-1])
sgn = -1
else:
if events['Gamma_out_minus'] is None:
# hit Gamma_out_plus
if verboselevel>1:
dm.log.msg("Reached Gamma plus", t=events['Gamma_out_plus']['t'][0],
last_computed_point=pts[-1],
last_computed_time=pts['t'][-1])
sgn = 1
else:
# both were non-None, i.e. both events happened: impossibru!
if verboselevel>1:
pts = test.sample(coords=x.coordnames)
dm.log.msg("Error", err_msg="Both Gamma surfaces reached",
status="fail", last_computed_point=pts[-1],
last_computed_time=pts['t'][-1])
plotter.add_data((pts[xname],pts[yname]), style='b-',
layer=layer_name,
name=dm.get_unique_name('universe_fail'),
log=dm.log)
raise RuntimeError("Both Gamma surfaces reached, impossibly")
return sgn
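    # onto_manifold: bisect test_fn between x_ic + dn*normal_dir and x_ic - dn*normal_dir
    # to locate a point on the manifold to within tolerance eps.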
def onto_manifold(x_ic, dn, normal_dir, dircode='f'):
try:
return bisection(test_fn, x_ic+dn*normal_dir, x_ic-dn*normal_dir,
args=(dircode,), xtol=eps, maxiter=100,
normord=normord)
except AssertionError:
if verboselevel>1:
xp = x_ic+dn*normal_dir
xm = x_ic-dn*normal_dir
dm.log.msg("Error", err_msg="onto_manifold bisection fail",
status="fail", point_p=xp, point_m=xm)
plotter.add_data([xp[xname],xp[yname]], style='gx',
layer=layer_name,
name=dm.get_unique_name('xp'), log=dm.log)
plotter.add_data([xm[xname],xm[yname]], style='gx',
layer=layer_name,
name=dm.get_unique_name('xm'), log=dm.log)
plotter.show()
raise RuntimeError("ds_perp too small? +/- initial displacement did not straddle manifold")
except RuntimeError:
if verboselevel>1:
xp = x_ic+dn*normal_dir
xm = x_ic-dn*normal_dir
dm.log.msg("Error", err_msg="onto_manifold bisection fail",
status="fail", point_p=xp, point_m=xm)
plotter.add_data([xp[xname],xp[yname]], style='gx',
layer=layer_struct.name,
name=dm.get_unique_name('xp'), log=dm.log)
plotter.add_data([xm[xname],xm[yname]], style='gx',
layer=layer_struct.name,
name=dm.get_unique_name('xm'), log=dm.log)
plotter.show()
raise
gen.eventstruct['Gamma_out_plus'].activeFlag=True # terminal
gen.eventstruct['Gamma_out_minus'].activeFlag=True # terminal
assert tmax > 0
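    # manifold[w][sgn] will hold a Pointset (parametrized by arc length) for branch
    # w ('s' or 'u') grown in direction sgn (+1 or -1).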
manifold = {'s': {1: None, -1: None}, 'u': {1: None, -1: None}}
man_names = {'s': 'stable', 'u': 'unstable'}
for w in which:
# w = 's' => stable branch
# w = 'u' => unstable branch
if verboselevel>0:
print("Starting %s branch" % man_names[w])
if w == 's':
col = 'g'
w_sgn = -1
integ_dircode = 'f'
evec = evec_u
evec_other = evec_s
elif w == 'u':
col = 'r'
w_sgn = 1
integ_dircode = 'b'
evec = evec_s
evec_other = evec_u
# set Gamma_out surfaces on "outgoing" branch
# (polarity is arbitrary)
p0_plus = fp.point + ds_gamma[1]*evec
p0_minus = fp.point - ds_gamma[-1]*evec
evec_perp = get_perp(evec)
gen.eventstruct.setEventDir('Gamma_out_plus', ev_dirn)
gen.eventstruct.setEventDir('Gamma_out_minus', -ev_dirn)
gen.set(pars={'Gamma_out_plus_p_'+xname: p0_plus[xname],
'Gamma_out_plus_p_'+yname: p0_plus[yname],
'Gamma_out_plus_dp_'+xname: evec_perp[xname],
'Gamma_out_plus_dp_'+yname: evec_perp[yname],
'Gamma_out_minus_p_'+xname: p0_minus[xname],
'Gamma_out_minus_p_'+yname: p0_minus[yname],
'Gamma_out_minus_dp_'+xname: evec_perp[xname],
'Gamma_out_minus_dp_'+yname: evec_perp[yname],
## 'fp_'+xname: fp.point[xname], 'fp_'+yname: fp.point[yname]
},
tdata = [0,tmax])
if verboselevel>1:
if fignum is None:
fignum=figure()
else:
figure(fignum)
# plot event surfaces for gamma plus and minus exit events
# ISSUE: Convert to plotter.add_data
plot([p0_plus[xname]-dsscaled*evec_perp[xname],p0_plus[xname]+dsscaled*evec_perp[xname]],
[p0_plus[yname]-dsscaled*evec_perp[yname],p0_plus[yname]+dsscaled*evec_perp[yname]], 'k-', linewidth=2)
plot([p0_minus[xname]-dsscaled*evec_perp[xname],p0_minus[xname]+dsscaled*evec_perp[xname]],
[p0_minus[yname]-dsscaled*evec_perp[yname],p0_minus[yname]+dsscaled*evec_perp[yname]], 'k-', linewidth=2)
draw()
check_other_pts = other_pts is not None
if ic_ds is None:
ic_ds = dsscaled
else:
ic_ds = dx_scaled_2D(ic_ds, rel_scale)
if ic is None:
ic = fp.point
f_ic = -w_sgn * evec_other
dirn_fix = 1 # not used for this case
if verboselevel>0:
# ISSUE: Convert to log entry
print("f_ic from evec_other")
print("evec_other " + str(evec_other))
print("f_ic = " + str(f_ic))
curve_len = 0
# initial estimate x0 = a point close to f.p. along manifold with
# opposite stability
else:
# initial curve length from previous independent variable, if present
# otherwise, assume zero
if isinstance(ic, Pointset):
assert len(ic) == 1, "Only pass a length-1 pointset"
# (guarantee curve_len > 0)
# BUG: for direction=-1 case, arc_len will be negative
# and index 0 will have the smallest arc_len, not the
# largest. Better not to use ic as Pointset option and
# fix arc_len outside of call
curve_len = abs(ic['arc_len'][0])
ic = ic[0]
else:
curve_len = 0
# ensure correct sign relative to starting point (if ic is None)
sgns_orig = sign(-w_sgn * evec_other)
f_ic_alpha = gen.Rhs(0, ic, gen.pars) # array in alpha order
# f_ic here isn't normalized to length 1 like the case above that uses
# evec_other (which is already normalized)
f_ic = Point({xname: f_ic_alpha[xcoord_ix], yname: f_ic_alpha[ycoord_ix]})
sgns_f_ic = sign(f_ic)
if any(sgns_orig != sgns_f_ic):
dirn_fix = -1
f_ic = -f_ic
else:
dirn_fix = 1
if verboselevel>0:
# ISSUE: Convert to log entry
print("f_ic = " + str(f_ic))
for sgn in directions:
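            # piece maps signed arc length along the branch to the corresponding manifold point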
piece = {}
if verboselevel>0:
# ISSUE: Convert to log entry
print("Starting direction", sgn)
# PREDICTION
x0_ic = ic+w_sgn*sgn*ic_ds*f_ic/norm(f_ic, normord)
if verboselevel>1:
figure(fignum)
# show starting point (initial estimate) as green circle
# ISSUE: Convert to plotter.add_data
plot(x0_ic[xname], x0_ic[yname], 'go', linewidth=1)
# put x0 initial estimate onto stable manifold
f_alpha = dirn_fix * gen.Rhs(0, x0_ic, gen.pars) # array in alpha order
f = Point({xname: f_alpha[xcoord_ix], yname: f_alpha[ycoord_ix]})
normf = norm(f, normord)
norm_to_flow = get_perp(f/normf)
if verboselevel>1:
# show flow direction from IC as solid red line
plotter.add_data(([x0_ic[xname], x0_ic[xname]+dsscaled*f[xname]/normf],
[x0_ic[yname], x0_ic[yname]+dsscaled*f[yname]/normf]),
style='r-', name=dm.get_unique_name('flow_fwd'), log=dm.log)
# show normal to flow direction from IC as dotted red line
plotter.add_data(([x0_ic[xname], x0_ic[xname]+dsscaled*norm_to_flow[xname]],
[x0_ic[yname], x0_ic[yname]+dsscaled*norm_to_flow[yname]]),
style='r:', name=dm.get_unique_name('flow_perp'), log=dm.log)
ds_perp_default = ds_perp
# CORRECTION
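            # shrink the transverse search interval ds_perp until bisection onto the manifold succeeds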
while ds_perp > ds_perp_eps:
try:
x = onto_manifold(x0_ic, ds_perp, norm_to_flow,
dircode=integ_dircode)
                except RuntimeError as e:
                    err = e  # keep a reference: 'e' is unbound once this except clause exits (Python 3)
                    ds_perp *= ds_perp_fac
else:
break
if ds_perp <= ds_perp_eps:
# RuntimeError was raised and could not continue reducing ds_perp
print("ds_perp reached lower tolerance =", ds_perp_eps)
                print(err)
raise RuntimeError("Initial point did not converge")
else:
curve_len += norm(x-ic, normord)
piece[sgn*curve_len] = x
num_pts = 1
last_x = x
if verboselevel>0:
print("Initial point converged to (%.6f, %.6f)\n" % \
(x[xname], x[yname]))
ds_perp = ds_perp_default
last_f = f_ic
# step backwards along local linear flow to predict next starting
# position on manifold
while curve_len < max_arclen and num_pts < max_pts:
if verboselevel>0:
# ISSUE: Convert to plotter.add_data
figure(fignum)
plot(last_x[xname], last_x[yname], col+'.', linewidth=1)
if check_other_pts and sometrue([norm(last_x - pt, normord) < ds \
for pt in other_pts]):
# we've hit a different fixed point (or other feature), so stop
break
f_alpha = dirn_fix * gen.Rhs(0, last_x, gen.pars) # array
f = Point({xname: f_alpha[xcoord_ix], yname: f_alpha[ycoord_ix]})
if all(sign(f) != sign(last_f)):
f = -f
# on other side of manifold so must keep stepping in the
# same direction, therefore switch signs!
# PREDICTION
x_ic = last_x + w_sgn*sgn*dsscaled*f/norm(f,normord)
last_f = f
if verboselevel>1:
print("\nStarting from point ", last_x)
delta = w_sgn*sgn*dsscaled*f/norm(f,normord)
print("Trying point ", x_ic, "in direction (%.6f, %.6f)\n" % (delta[xname], delta[yname]))
ds_perp = ds_perp_default
# CORRECTION
while ds_perp > ds_perp_eps:
try:
x = onto_manifold(x_ic, ds_perp, get_perp(f/norm(f,normord)),
dircode=integ_dircode)
                except RuntimeError as e:
                    err = e  # keep a reference: 'e' is unbound once this except clause exits (Python 3)
                    ds_perp *= ds_perp_fac
else:
break
if ds_perp <= ds_perp_eps:
# RuntimeError was raised and could not continue reducing ds_perp
print("ds_perp reached lower tolerance =", ds_perp_eps)
                print(err)
break # end while search
else:
curve_len += norm(x-last_x, normord)
piece[sgn*curve_len] = x
last_x = x
num_pts += 1
if verboselevel>1:
print("\nManifold has %i points" % num_pts)
elif verboselevel>0:
print(".", end=' ')
sys.stdout.flush()
indepvar, piece_sorted = sortedDictLists(piece, byvalue=False)
manifold[w][sgn] = pointsToPointset(piece_sorted, indepvarname='arc_len',
indepvararray=indepvar, norm=normord)
if verboselevel>0:
# finish the line on stdout
print(" ")
gen.eventstruct['Gamma_out_plus'].activeFlag=False
gen.eventstruct['Gamma_out_minus'].activeFlag=False
## gen.eventstruct['fp_closest'].activeFlag=False
return manifold | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exact_saddle(V,X,Y,Z,dim,Z0=None):\n #from all_functions import find_saddle,sum_of_e_field\n if dim==3:\n print \"here\"\n print find_saddle(V,X,Y,Z,3)\n [I,J,K]=find_saddle(V,X,Y,Z,3) # guess saddle point; Z0 not needed\n print I,J,K\n r0=[X[I],Y[J],Z[K]]\n if I<2 or I>V.shape[0]-2: \n print('exact_saddle.py: Saddle point out of bounds in radial direction.')\n return r0\n if J<2 or J>V.shape[1]-2:\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.')\n return r0\n if K<2 or K>V.shape[2]-2:\n print('exact_saddle.py: Saddle point out of bounds in axial direction.')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # change grid vectors as well\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n #################################### Minimize\n r=spo.minimize(sum_of_e_field,r0,args=(Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],r[2] \n ################################################################################################# \n if dim==2: \n if len(V.shape)==3:\n K=0 # in case there is no saddle\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n K=i-1\n Vs = V.shape\n if K>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,K-1] # potential to left\n v2=V[:,:,K] # potential to right (actually right at estimate; K+1 to be actually to right)\n V2=v1+(v2-v1)*(Z0-Z[K-1])/(Z[K]-Z[K-1]) # averaged potential around given coordinate\n [I,J,K0]=find_saddle(V,X,Y,Z,2,Z0) \n r0=X[I],Y[J]\n print 1\n if (I<2 or I>V.shape[0]-2): \n print('exact_saddle.py: Saddle point out of bounds in radial direction.\\n')\n return r0\n if (J<2 or J>V.shape[1]-1):\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.\\n')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # Matlab 4, not 2\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n ################################## Minimize\n r=spo.minimize(sum_of_e_field_2d,r0,args=(Z0,Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],Z0\n print Xs\n print Ys\n print Zs\n return [Xs,Ys,Zs]",
"def layer_sweep(self):\n for fixed_id, fixed_layer in enumerate(self.layers):\n if fixed_id + 1 == len(self.layers):\n break\n moving_layer = self.layers[fixed_id + 1]\n for node in moving_layer.nodes:\n self.find_neighbors(node)\n if len(node.neighbors) > 0:\n self.calculate_barycenter(node)\n else:\n node.barycenter = 0 #1000\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.barycenter, reverse=False)\n for slot, node in enumerate(sorted_nodes):\n node.slot = slot + 1\n barys = set([n.barycenter for n in sorted_nodes])\n bary_nodes = [list(filter(lambda x: x.barycenter == b, sorted_nodes)) for b in barys]\n for b in bary_nodes:\n if len(b) > 1:\n for node in b:\n if len(node.sl_neighbors) == 1:\n n_slot = node.sl_neighbors[0].slot\n if n_slot > node.slot:\n other_node = max(b, key=lambda s: s.slot)\n elif n_slot < node.slot:\n other_node = min(b, key=lambda s: s.slot)\n temp = node.slot\n node.slot = other_node.slot\n other_node.slot = temp\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.slot, reverse=False)\n moving_layer.nodes = sorted_nodes",
"def cleanOpenBranches(skeleton, skelton_copy, points, radii, length, clean = True, verbose = False):\n \n assert np.isfortran(skeleton);\n assert np.isfortran(skelton_copy);\n \n timer = tmr.Timer();\n timer_all = tmr.Timer();\n \n # find branch and end points\n deg = cpl.convolve_3d_indices(skeleton, t3d.n26, points, out_dtype = 'uint8');\n branchpoints = points[deg >= 3];\n e_pts = points[deg == 1];\n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n #prepare temps\n #skel = skeleton.copy();\n skel_flat = np.reshape(skelton_copy, -1, order = 'A');\n strides = np.array(skelton_copy.strides);\n \n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n label = np.arange(27);\n label = label.reshape([3,3,3]);\n label[1,1,1] = 0;\n \n critical_points = [e_pts];\n delete_points = [];\n \n for l in range(1, length + 1):\n #neighbours of end points\n e_pts_label = cpl.convolve_3d_indices(skelton_copy, label, e_pts);\n \n if verbose:\n timer.printElapsedTime('Done labeling %d / %d' % (l, length));\n timer.reset();\n \n #label zero points are non-critical short isolated branches\n e_pts_zero = e_pts_label == 0;\n #print 'zero length:', np.unravel_index(e_pts[e_pts_zero], skel.shape)\n if e_pts_zero.sum() > 0:\n keep = np.logical_not(e_pts_zero);\n for m in range(l):\n critical_points[m] = critical_points[m][keep];\n e_pts_label = e_pts_label[keep];\n e_pts = e_pts[keep];\n \n if verbose:\n timer.printElapsedTime('Ignored %d small branches' % (keep.sum()));\n timer.reset();\n \n e_pts_new = e_pts + np.sum((np.vstack(np.unravel_index(e_pts_label, label.shape)) - 1).T * strides, axis = 1)\n \n # did we hit a branch point\n delete = np.in1d(e_pts_new, branchpoints); #, assume_unique = True);\n keep = np.logical_not(delete);\n #print delete.shape, keep.shape, e_pts_new.shape\n \n #delete all path that hit a branch point\n if delete.sum() > 0:\n for m in range(l):\n delete_points.append(critical_points[m][delete]);\n #print 'deleting:', np.unravel_index(critical_points[m][delete], skel.shape)\n critical_points[m] = critical_points[m][keep];\n e_pts_new = e_pts_new[keep];\n \n if verbose:\n timer.printElapsedTime('Deleted %d points' % (delete.sum()));\n timer.reset();\n \n if l < length:\n skel_flat[e_pts] = False; # remove endpoints for new neighbour detection\n critical_points.append(e_pts_new);\n e_pts = e_pts_new;\n \n if verbose:\n timer.printElapsedTime('Cleanup iteration %d / %d done.' % (l, length));\n \n #gather all points\n if len(delete_points) > 0:\n delete_points = np.hstack(delete_points);\n delete_points = np.unique(delete_points);\n else:\n delete_points = np.zeros(0);\n \n if verbose:\n timer_all.printElapsedTime('Cleanup');\n \n if clean:\n skel_flat = np.reshape(skeleton, -1, order = 'F');\n skel_flat[delete_points] = False;\n keep_ids = np.logical_not(np.in1d(points, delete_points, assume_unique = True))\n points = points[keep_ids];\n radii = radii[keep_ids];\n return skeleton, points, radii\n \n return delete_points;",
"def solve_step(ball_list, step,borders,obstacle=None):\n ball_list = step1(ball_list, step,borders,obstacle)\n ball_list = step2(ball_list, step)",
"def branch_competetive(state, time, d):\n\n th0 = state[0] \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"])]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]):]\n \n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n ### calculate cytokine effect on rate\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2)\n \n ### differentiate effectors th1 \n alpha = d[\"alpha1\"]\n p = 1.\n dt_th1 = diff_effector2(th1, th0, alpha, beta1, d[\"beta1_p\"], p, d)\n ### differentiate effectors th2\n alpha = d[\"alpha2\"]\n p = 1.\n dt_th2 = diff_effector2(th2, th0, alpha, beta2, d[\"beta2_p\"], p, d)\n \n ### combine all cells\n dt_th0 = -(beta1+beta2)*th0\n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state",
"def __detect_branching_haghverdi16(\n self, Dseg: np.ndarray, tips: np.ndarray\n ) -> np.ndarray:\n # sort distance from first tip point\n # then the sequence of distances Dseg[tips[0]][idcs] increases\n idcs = np.argsort(Dseg[tips[0]])\n # consider now the sequence of distances from the other\n # two tip points, which only increase when being close to `tips[0]`\n # where they become correlated\n # at the point where this happens, we define a branching point\n if True:\n imax = self.kendall_tau_split(\n Dseg[tips[1]][idcs],\n Dseg[tips[2]][idcs],\n )\n if False:\n # if we were in euclidian space, the following should work\n # as well, but here, it doesn't because the scales in Dseg are\n # highly different, one would need to write the following equation\n # in terms of an ordering, such as exploited by the kendall\n # correlation method above\n imax = np.argmin(\n Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]\n )\n # init list to store new segments\n ssegs = [] # noqa: F841 # TODO Look into this\n # first new segment: all points until, but excluding the branching point\n # increasing the following slightly from imax is a more conservative choice\n # as the criterion based on normalized distances, which follows below,\n # is less stable\n if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift:\n # if \"everything\" is correlated (very large value of imax), a more\n # conservative choice amounts to reducing this\n logg.warning(\n 'shifting branching point away from maximal kendall-tau '\n 'correlation (suppress this with `allow_kendall_tau_shift=False`)'\n )\n ibranch = int(0.95 * imax)\n else:\n # otherwise, a more conservative choice is the following\n ibranch = imax + 1\n return idcs[:ibranch]",
"def classify_fixedpoints(fps, scale):\n\n x_directions = []\n scale = scale\n for fp in fps:\n\n trace = np.matrix.trace(fp['jac'])\n det = np.linalg.det(fp['jac'])\n if det > 0 and trace == 0:\n print('center has been found. Watch out for limit cycles')\n elif trace**2 - 4 * det == 0:\n print(\"star nodes has been found.\")\n elif trace**2 - 4 * det < 0:\n print(\"spiral has been found\")\n e_val, e_vecs = np.linalg.eig(fp['jac'])\n ids = np.argwhere(np.real(e_val) > 0)\n countgreaterzero = np.sum(e_val > 0)\n if countgreaterzero == 0:\n print('stable fixed point was found.')\n fp['fp_stability'] = 'stable fixed point'\n elif countgreaterzero > 0:\n print('saddle point was found.')\n fp['fp_stability'] = 'saddle point'\n for id in ids:\n x_plus = fp['x'] + scale * e_val[id] * np.real(e_vecs[:, id].transpose())\n x_minus = fp['x'] - scale * e_val[id] * np.real(e_vecs[:, id].transpose())\n x_direction = np.vstack((x_plus, fp['x'], x_minus))\n x_directions.append(np.real(x_direction))\n\n return fps, x_directions",
"def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = (p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike",
"def solve_step(particle_list, step, size):\r\n \r\n # Detect edge-hitting and collision of every particle\r\n for i in range(len(particle_list)):\r\n particle_list[i].compute_refl(step,size)\r\n for j in range(i+1,len(particle_list)):\r\n particle_list[i].compute_coll(particle_list[j],step) \r\n\r\n \r\n # Compute position of every particle \r\n for particle in particle_list:\r\n particle.compute_step(step)",
"def optimize_cobra_model(sbml, bound=INF):\n\n cobra = convert_sbml_to_cobra(sbml, bound)\n\n N, L, U = cobra['S'], list(cobra['lb']), list(cobra['ub'])\n f, b = list(cobra['c']), list(cobra['b'])\n v_sol, f_opt, conv = easy_lp(f, N, b, L, U, one=True)\n\n return v_sol, f_opt",
"def eval_top_down(\n root: Node,\n x: np.ndarray,\n lls: np.ndarray,\n leaf_func: Callable[[Leaf, np.ndarray, Any], np.ndarray],\n sum_func: Callable[[Sum, np.ndarray, Any], np.ndarray],\n leaf_func_kwargs: Optional[dict] = None,\n sum_func_kwargs: Optional[dict] = None,\n inplace: bool = False,\n n_jobs: int = 0\n) -> np.ndarray:\n if leaf_func_kwargs is None:\n leaf_func_kwargs = dict()\n if sum_func_kwargs is None:\n sum_func_kwargs = dict()\n\n # Check the SPN\n check_spn(root, labeled=True, smooth=True, decomposable=True)\n\n # Copy the input array, if not inplace mode\n if not inplace:\n x = np.copy(x)\n\n def eval_backward(n):\n if isinstance(n, Leaf):\n mask = np.ix_(masks[n.id], n.scope)\n x[mask] = leaf_func(n, x[mask], **leaf_func_kwargs)\n elif isinstance(n, Product):\n for c in n.children:\n masks[c.id] |= masks[n.id]\n elif isinstance(n, Sum):\n children_lls = np.stack([lls[c.id] for c in n.children], axis=1)\n branch = sum_func(n, children_lls, **sum_func_kwargs)\n for i, c in enumerate(n.children):\n masks[c.id] |= masks[n.id] & (branch == i)\n else:\n raise NotImplementedError(f\"Top down evaluation not implemented for node of type {n.__class__.__name__}\")\n\n if n_jobs == 0:\n # Compute the topological ordering\n ordering = topological_order(root)\n if ordering is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n n_nodes, n_samples = len(ordering), len(x)\n\n # Build the array consisting of top-down path masks\n masks = np.zeros(shape=(n_nodes, n_samples), dtype=np.bool_)\n masks[root.id] = True\n for node in ordering:\n eval_backward(node)\n else:\n # Compute the layered topological ordering\n layers = topological_order_layered(root)\n if layers is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n n_nodes, n_samples = sum(map(len, layers)), len(x)\n\n # Build the array consisting of top-down path masks\n masks = np.zeros(shape=(n_nodes, n_samples), dtype=np.bool_)\n masks[root.id] = True\n parallel_layerwise_eval(layers, eval_backward, reverse=False, n_jobs=n_jobs)\n\n return x",
"def identify_leaflets_cluster(self,pts,vec,topologize_time_limit=30,max_count_asymmetry=0.05):\n\t\timport scipy\n\t\timport sklearn\n\t\timport sklearn.neighbors\n\t\timport sklearn.cluster\n\t\tnlipids = len(pts)\n\t\t#---time limit on the topologize function which joins broken bilayers e.g. a saddle that crosses PBCs\n\t\ttry:\n\t\t\twith time_limit(topologize_time_limit): \n\t\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t\t**({'tol':self.topologize_tolerance} if self.topologize_tolerance else {}))\n\t\texcept TimeoutException: \n\t\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\t\twrapper = np.zeros((len(pts),3))\n\t\tfindframe = pts + wrapper*np.array(vec)\n\t\t#---ensure that all points are in the box\n\t\tfindframe += vec*(findframe<0) - vec*(findframe>vec)\n\t\t#---previous calculation of connectivity was done manually\n\t\tif False:\n\t\t\t#---conservative cutoff gets lots of nearby points\n\t\t\tcutoff = 10.0\n\t\t\tcutoff_short = 2.0\n\t\t\t#---make a K-D tree from the points\n\t\t\ttree = scipy.spatial.ckdtree.cKDTree(findframe,boxsize=np.concatenate((vec,vec))+0.*eps)\n\t\t\t#---find the nearest reference points for each instantaneous point\n\t\t\tclose,nns = tree.query(findframe,distance_upper_bound=cutoff,k=20)\n\t\t\t#---construct the neighbor list\n\t\t\tsubjects = np.where(np.all((close<cutoff,close>0),axis=0))\n\t\t\t#---get the pairs of neighbors\n\t\t\tsubjects,neighbors = subjects[0],nns[subjects]\n\t\t\tpds = np.ones((nlipids,nlipids))*0.0\n\t\t\tpds[tuple((np.arange(nlipids),np.arange(nlipids)))] = 0.0\n\t\t\tnears = np.where(np.all((close>0,close<=cutoff_short),axis=0))\n\t\t\tpds[tuple((nears[0],nns[nears]))] = 1.0#close[nears]\n\t\t\tpds[tuple((nns[nears],nears[0]))] = 1.0#close[nears]\n\t\tconnectivity = sklearn.neighbors.kneighbors_graph(findframe,\n\t\t\tn_neighbors=self.cluster_neighbors,include_self=False)\n\t\tward = sklearn.cluster.AgglomerativeClustering(n_clusters=2,\n\t\t\tconnectivity=connectivity,linkage='complete').fit(findframe)\n\t\timono = ward.labels_\n\t\tif np.mean(imono)==0.5: \n\t\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\telif (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry):\n\t\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\t\traise Exception('[ERROR] failed to identify leaflets. '\n\t\t\t\t'DEVELOPMENT NOTE!? use legacy or a different cutoff?')\n\t\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\t\treturn np.array(imono)",
"def findStableState(L, boundaryConditions, Minv = None):\n\tn = L.shape[0]\n\tm = len(boundaryConditions)\n\tVb = np.zeros(m)\n\tpositions = {}\n\tfor i in range(m):\n\t\tcondition = boundaryConditions[i]\n\t\tVb[i] = condition[0]\n\t\tpositions[condition[0]] = condition[1]\n\tVb = np.sort(Vb)\n\tBPrime = np.zeros((m, n))\n\tYPrime = np.zeros((m, 3))\n\tfor i in range(m):\n\t\tBPrime[i][int(Vb[i])] = 1\n\t\tYPrime[i] = positions[Vb[i]]\n\n\tif Minv is None:\n\t\tzeroCorner = np.zeros((m, m))\n\t\tM = np.array(np.bmat([[L, -BPrime.T], [BPrime, zeroCorner]]))\n\t\tMinv = np.linalg.inv(M)\n\n\tXT = np.zeros((3, n))\n\t# find x coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[0]\n\tx = np.dot(Minv, y)\n\tXT[0] = x[:n]\n\t# find y coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[1]\n\tx = np.dot(Minv, y)\n\tXT[1] = x[:n]\n\t# find z coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[2]\n\tx = np.dot(Minv, y)\n\tXT[2] = x[:n]\n\n\treturn XT.T",
"def extract_1d_boundaries(xy, NL, KL, BL, PVx, PVy, check=False):\n if PVx is None and PVy is None:\n raise RuntimeError('Not designed to allow openBC networks.')\n # PVx = np.zeros_like(KL, dtype=float)\n # PVy = np.zeros_like(KL, dtype=float)\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n # If no dangling bonds, no need to translate indices at the end\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n boundaries = []\n for boundaryloc in ['top', 'bottom']:\n # Initialize the boundary list to be as long as possible (will truncate later)\n bb = np.zeros(2 * len(xy), dtype=int)\n if boundaryloc == 'top':\n # Start with the topmost point, which is guaranteed to be\n # at the convex hull and thus also at the top outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.max(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n else:\n # Start with the bottom most point, which is guaranteed to be\n # at the convex hull and thus also at the bottom outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.min(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_1d_boundaries(): Found extremal pt: ', rightIND\n print 'le.extract_1d_boundaries(): with neighbors: ', NL[rightIND]\n print 'le.extract_1d_boundaries(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n # print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n connect = np.argwhere(np.abs(KL[rightIND]).ravel()).ravel()\n neighbors = NL[rightIND, connect]\n if check:\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1] + PVy[rightIND, connect],\n xy[neighbors, 0] - xy[rightIND, 0] + PVx[rightIND, connect]).ravel(),\n 2 * np.pi)\n if check:\n print 'le.extract_1d_boundaries(): KL[rightIND] = ', KL[rightIND]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] = ', KL[rightIND, 0]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles = ', angles\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[rightIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[rightIND, connect[angles == min(angles)][0]]\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't 
completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n connect = np.argwhere(np.abs(KL[nextIND]).ravel())\n n_tmp = NL[nextIND, connect]\n\n # Get position in row of NL where NL == bb[dmyi - 1] (the previous boundary particle/site)\n # and where the PVx and PVy are opposite of the last used PVx and PVy values (to make sure we\n # are looking backwards along the boundary). We will use this to get the 'backward angle' -- the\n # angle of the previous bond in the boundary\n # Note that bb[dmyi - 1] may have been index 0, so there could be multiple matches\n nlpos = np.where(np.logical_and(NL[nextIND] == bb[dmyi - 1],\n np.abs(KL[nextIND]).ravel().astype(bool)))[0]\n if len(nlpos) > 1:\n # There is more than one connection to the previous particle. Check for where PVx and PVy\n # values are opposite the previously used values.\n ind_nlpos = np.where(np.logical_and(PVx[nextIND, nlpos] == -pvx_prev,\n PVy[nextIND, nlpos] == -pvy_prev))[0]\n print 'ind_nlpos = ', ind_nlpos\n nlpos = nlpos[ind_nlpos]\n\n # Exclude previous boundary particle (the copy of that particle in the nlpos position)\n # from the neighbors array, UNLESS IT IS THE ONLY ONE,\n # since its angle with itself is zero!\n\n # Used to remove previous particle, but this assumes that boundary is more than 2\n # particles long, which might not be true for periodic_strip bcs\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle.'\n neighbors = n_tmp\n else:\n print 'n_tmp = ', n_tmp\n neighbors = np.delete(n_tmp, nlpos)\n connect = np.delete(connect, nlpos)\n print 'n_tmp = ', n_tmp\n print 'neighbors = ', neighbors\n\n # print 'le: nlpos = ', nlpos\n forward_angles = np.arctan2(xy[neighbors, 1] - xy[nextIND, 1] + PVy[nextIND, connect],\n xy[neighbors, 0] - xy[nextIND, 0] + PVx[nextIND, connect]).ravel()\n backward_angle = np.arctan2(xy[bb[dmyi - 1], 1] - xy[nextIND, 1] + PVy[nextIND, nlpos],\n xy[bb[dmyi - 1], 0] - xy[nextIND, 0] + PVx[nextIND, nlpos]).ravel()\n if check:\n print 'le: connect = ', connect\n print 'le: forward_angles = ', forward_angles\n print 'le: backward_angle = ', backward_angle\n\n angles = np.mod(forward_angles - backward_angle, 2 * np.pi)\n if check:\n print 'le: angles = ', angles\n print 'le: angles==min--> ', angles == min(angles)\n print 'le: neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles==min--> ', angles == min(angles)\n print 'le.extract_1d_boundaries(): neighbors[angles == min(angles)] --> ', neighbors[angles == min(angles)]\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[nextIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[nextIND, connect[angles == min(angles)][0]]\n # Redefine nextIND to be the new boundary index\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 
'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n boundaries.append(boundary)\n\n return tuple(boundaries)",
"def see_what_its_doing_2d_comparison(length_scale, true_values=False):\n\n a = create_points_with_spatially_correlated_pollution_2d(10, 100, 10, length_scale, 1)\n b = pick_uniform_random_points_on_map_of_maps(a, 20)\n c1 = interpolate_unknown_points_of_a_map_of_maps_of_points(b, a, RBF(length_scale), fixed=True)\n c2 = interpolate_unknown_points_of_a_map_of_maps_of_points(b, a, RBF(np.random.randint(1, 10000)), fixed=False)\n\n x1 = []\n y1 = []\n z1 = []\n for point in b[0].values():\n x1.append(point.get_x_cord())\n y1.append(point.get_y_cord())\n z1.append(point.get_pollution_value())\n\n x2_fixed = []\n y2_fixed = []\n z2_fixed = []\n\n for label, point in c1[0][0].items():\n if not label in b[0].keys():\n x2_fixed.append(point.get_x_cord())\n y2_fixed.append(point.get_y_cord())\n z2_fixed.append(point.get_pollution_value())\n\n x2_not_fixed = []\n y2_not_fixed = []\n z2_not_fixed = []\n\n for label, point in c2[0][0].items():\n if not label in b[0].keys():\n x2_not_fixed.append(point.get_x_cord())\n y2_not_fixed.append(point.get_y_cord())\n z2_not_fixed.append(point.get_pollution_value())\n\n if true_values:\n x3_true_values = []\n y3_true_values = []\n z3_true_values = []\n\n for label, point in a[0].items():\n if not label in b[0].keys():\n x3_true_values.append(point.get_x_cord())\n y3_true_values.append(point.get_y_cord())\n z3_true_values.append(point.get_actual_pollution_value())\n\n plot_numbers_3d_and_save(x3_true_values, y3_true_values, z3_true_values, x2_fixed, y2_fixed, z2_fixed,\n \"True Value Comparison Fixed Graph.gif\")\n plot_numbers_3d_and_save(x3_true_values, y3_true_values, z3_true_values, x2_not_fixed, y2_not_fixed,\n z2_not_fixed, \"True value Not Fixed Graph.gif\")\n\n plot_numbers_3d_and_save(x1, y1, z1, x2_fixed, y2_fixed, z2_fixed, \"Fixed Rotating Graph.gif\")\n plot_numbers_3d_and_save(x1, y1, z1, x2_not_fixed, y2_not_fixed, z2_not_fixed, \"Not Fixed Rotating Graph.gif\")",
"def _detect_branching(\n self,\n Dseg: np.ndarray,\n tips: np.ndarray,\n seg_reference=None,\n ) -> Tuple[\n List[np.ndarray],\n List[np.ndarray],\n List[List[int]],\n List[List[int]],\n int,\n ]:\n if self.flavor == 'haghverdi16':\n ssegs = self._detect_branching_single_haghverdi16(Dseg, tips)\n elif self.flavor == 'wolf17_tri':\n ssegs = self._detect_branching_single_wolf17_tri(Dseg, tips)\n elif self.flavor == 'wolf17_bi' or self.flavor == 'wolf17_bi_un':\n ssegs = self._detect_branching_single_wolf17_bi(Dseg, tips)\n else:\n raise ValueError(\n '`flavor` needs to be in {\"haghverdi16\", \"wolf17_tri\", \"wolf17_bi\"}.'\n )\n # make sure that each data point has a unique association with a segment\n masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)\n for iseg, seg in enumerate(ssegs):\n masks[iseg][seg] = True\n nonunique = np.sum(masks, axis=0) > 1\n ssegs = []\n for iseg, mask in enumerate(masks):\n mask[nonunique] = False\n ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])\n # compute new tips within new segments\n ssegs_tips = []\n for inewseg, newseg in enumerate(ssegs):\n if len(np.flatnonzero(newseg)) <= 1:\n logg.warning(f'detected group with only {np.flatnonzero(newseg)} cells')\n secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]\n ssegs_tips.append([tips[inewseg], secondtip])\n undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]\n if len(undecided_cells) > 0:\n ssegs.append(undecided_cells)\n # establish the connecting points with the other segments\n ssegs_connects = [[], [], [], []]\n for inewseg, newseg_tips in enumerate(ssegs_tips):\n reference_point = newseg_tips[0]\n # closest cell to the new segment within undecided cells\n closest_cell = undecided_cells[\n np.argmin(Dseg[reference_point][undecided_cells])\n ]\n ssegs_connects[inewseg].append(closest_cell)\n # closest cell to the undecided cells within new segment\n closest_cell = ssegs[inewseg][\n np.argmin(Dseg[closest_cell][ssegs[inewseg]])\n ]\n ssegs_connects[-1].append(closest_cell)\n # also compute tips for the undecided cells\n tip_0 = undecided_cells[\n np.argmax(Dseg[undecided_cells[0]][undecided_cells])\n ]\n tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]\n ssegs_tips.append([tip_0, tip_1])\n ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]\n trunk = 3\n elif len(ssegs) == 3:\n reference_point = np.zeros(3, dtype=int)\n reference_point[0] = ssegs_tips[0][0]\n reference_point[1] = ssegs_tips[1][0]\n reference_point[2] = ssegs_tips[2][0]\n closest_points = np.zeros((3, 3), dtype=int)\n # this is another strategy than for the undecided_cells\n # here it's possible to use the more symmetric procedure\n # shouldn't make much of a difference\n closest_points[0, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[0]][ssegs[1]])\n ]\n closest_points[1, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[1]][ssegs[0]])\n ]\n closest_points[0, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[0]][ssegs[2]])\n ]\n closest_points[2, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[2]][ssegs[0]])\n ]\n closest_points[1, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[1]][ssegs[2]])\n ]\n closest_points[2, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[2]][ssegs[1]])\n ]\n added_dist = np.zeros(3)\n added_dist[0] = (\n Dseg[closest_points[1, 0], closest_points[0, 1]]\n + Dseg[closest_points[2, 0], closest_points[0, 2]]\n )\n added_dist[1] = (\n Dseg[closest_points[0, 1], closest_points[1, 0]]\n + Dseg[closest_points[2, 1], closest_points[1, 2]]\n )\n added_dist[2] = (\n 
Dseg[closest_points[1, 2], closest_points[2, 1]]\n + Dseg[closest_points[0, 2], closest_points[2, 0]]\n )\n trunk = np.argmin(added_dist)\n ssegs_adjacency = [\n [trunk] if i != trunk else [j for j in range(3) if j != trunk]\n for i in range(3)\n ]\n ssegs_connects = [\n [closest_points[i, trunk]]\n if i != trunk\n else [closest_points[trunk, j] for j in range(3) if j != trunk]\n for i in range(3)\n ]\n else:\n trunk = 0\n ssegs_adjacency = [[1], [0]]\n reference_point_in_0 = ssegs_tips[0][0]\n closest_point_in_1 = ssegs[1][\n np.argmin(Dseg[reference_point_in_0][ssegs[1]])\n ]\n reference_point_in_1 = closest_point_in_1 # ssegs_tips[1][0]\n closest_point_in_0 = ssegs[0][\n np.argmin(Dseg[reference_point_in_1][ssegs[0]])\n ]\n ssegs_connects = [[closest_point_in_1], [closest_point_in_0]]\n return ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk",
"def computeB(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz = True\n \n def computeCMBY(d0):\n \"\"\"\n For CMB, y = S^1/2 A N^-1 d, where S is CMB signal covariance matrix (Cl's)\n \"\"\"\n # N.B. Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2\n \n def computeClusterY(d0):\n \"\"\"\n For cluster, y = F^T A^T N^-1 d, where F is TSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[0][ic][freq] * g_nu[freq])\n return d2\n \n def computeClusterKSZY(d0):\n \"\"\"\n For cluster, y = K^T A^T N^-1 d, where K is KSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2\n \n def computeMonopoleY(d0):\n \"\"\"\n Overall monopole amplitude.\n \"\"\"\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2 += numpy.sum(d1 * ninvs[freq])\n return(d2)\n \n \n # CMB realisation; convolve white noise map with beam and multiply by \n # signal covmat S^1/2 in harmonic space\n b0 = numpy.random.randn(ny,nx)\n a_l = numpy.fft.fft2(b0, b0.shape)\n a_l *= precond_2d * power_2d**(-0.5)\n b0 = numpy.fft.irfft2(a_l, b0.shape)\n \n # Calculate per-band noise realisation.\n # Multiply by pixel-space N^1/2, convolve with beam, and sum over \n # cluster pixels to get RHS\n b1 = 0; b4 = 0\n b2 = numpy.zeros(nCluster)\n if ksz: b3 = numpy.zeros(nCluster)\n \n for freq in range(nFreq):\n _b = numpy.random.randn(ny,nx) * ninvs[freq]**0.5\n a_l = numpy.fft.fft2(_b) * beams[freq] * precond_2d\n b1 += numpy.fft.irfft2(a_l, _b.shape)\n b4 += numpy.sum(_b)\n for ic in range(nCluster):\n b2[ic] += numpy.sum( _b * g_nu[freq] * clumaps[0][ic][freq] )\n if ksz: b3[ic] += numpy.sum( _b * clumaps[1][ic][freq] )\n\n b0 = numpy.reshape(b0,(nx*ny))\n b1 = numpy.reshape(b1,(nx*ny))\n \n\n # Compute CMB and cluster data parts of b\n b_CMB = computeCMBY(datamaps) + b0 + b1\n b_mono = computeMonopoleY(datamaps) + b4\n b_tsz = computeClusterY(datamaps) + b2\n if ksz: b_ksz = computeClusterKSZY(datamaps) + b3\n \n # Return total b vector (Ncmbpix + 1 + (1|2)*Ncluster elements in vector)\n b = numpy.append(b_CMB, b_mono)\n b = numpy.append(b, b_tsz)\n if ksz: b = numpy.append(b, b_ksz)\n return b",
"def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg",
"def identify_lipid_leaflets_legacy(pts,vec,monolayer_cutoff,\n\tmonolayer_cutoff_retry=True,max_count_asymmetry=0.05,pbc_rewrap=True,\n\ttopologize_tolerance=None,topologize_time_limit=30):\n\t#---previous default was somewhat high, but typically came in from specs, and we reduced it incrementally\n\tif monolayer_cutoff==None: monolayer_cutoff = 2.0\n\t#---time limit on the tolerance checker\n\ttry:\n\t\twith time_limit(topologize_time_limit): \n\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t**({'tol':topologize_tolerance} if topologize_tolerance else {}))\n\texcept TimeoutException: \n\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\twrapper = np.zeros((len(pts),3))\n\tfindframe = pts + wrapper*np.array(vec)\n\tstatus('this step is somewhat slow. it uses scipy.spatial.pdist.',tag='warning')\n\tpd = [scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(findframe[:,d:d+1])) \n\t\tfor d in range(3)]\n\tif pbc_rewrap:\n\t\tpd3pbc = np.sqrt(np.sum(np.array([pd[d]-(pd[d]>vec[d]/2.)*vec[d]+(pd[d]<-1*vec[d]/2.)*vec[d] \n\t\t\tfor d in range(3)])**2,axis=0))\n\telse: pd3pbc = pd\n\tnbors = np.transpose(np.where(pd3pbc<monolayer_cutoff))\n\tnlipids = len(pts)\n\timono = np.zeros(nlipids)\n\tnlist = []\n\tfor i in range(nlipids):\n\t\tstatus('cataloging lipids',i=i,looplen=nlipids,tag='compute')\n\t\tnlist.append(nbors[np.where(nbors[:,0]==i)[0],1])\n\tiref = 0\n\tmono = np.zeros(nlipids)\n\tsearched = np.zeros(nlipids)\n\timono[iref],searched[iref] = 1,1\n\timono[nlist[iref]] = 1\n\twhile np.any(np.all((imono==1,searched==0),axis=0)):\n\t\tfor iref in np.where(np.all((imono==1,searched==0),axis=0))[0]: \n\t\t\timono[nlist[iref]] = 1\n\t\t\tsearched[iref] = 1\n\t#---check that the leaflets were properly distinguished by looking at the number in each monolayer\n\tif np.mean(imono)==0.5: \n\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\treturn imono\n\telif (monolayer_cutoff_retry and (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry)):\n\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\tstatus('[COMPUTE] leaflets = '+str(np.sum(imono))+'/'+str(len(imono)))\n\t\tstatus('[WARNING] previous monolayer_cutoff = '+str(monolayer_cutoff))\n\t\traise Exception(\n\t\t\t'[ERROR] failed to identify leaflets so we are returning an exception to the LeafletFinder')\n\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\treturn imono",
"def find_saddle(V,X,Y,Z,dim,Z0=None):\n debug=False # internal code only; typically False\n from project_parameters import scale\n if (dim==2 and Z0==None):\n return 'z0 needed for evaluation'\n if dim==3:\n if len(V.shape)!=3:\n return('Problem with find_saddle.m dimensionalities.')\n f=V/float(np.amax(V)) # Normalize field\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale) # grid spacing is automatically consistent thanks to BEM-solver\n E=np.sqrt(Ex**2+Ey**2+Ez**2) # magnitude of gradient (E field)\n m=E[1,1,1]\n origin=[1,1,1]\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n for k in range(E.shape[2]):\n if E[i,j,k]<m:\n m=E[i,j,k]\n origin=[i,j,k] \n if debug:\n print('DEBUGGING...')\n fig=plt.figure()\n e=np.reshape(E,(1,E.shape[0]*E.shape[1]*E.shape[2]))\n ind,e=np.argsort(e),np.sort(e)\n e=e[0]\n ind=ind[0] #Sort V by the same indexing.\n v=np.reshape(V,(1,V.shape[0]*V.shape[1]*V.shape[2]))\n v=v[0]\n plt.plot(e/float(np.amax(e)))\n def index_sort(v,e):\n \"\"\"Takes in two lists of the same length and returns the first sorted by the indexing of i sorted.\"\"\"\n es=np.sort(e)\n ix=np.argsort(e)\n vs=np.ones(len(v)) #Sorted by the sorting defined by f being sorted. \n # If v==e, this returns es.\n for i in range(len(v)):\n j=ix[i]\n vs[i]=v[j]\n return vs\n v=index_sort(v,e) # Is it supposed to look like this?\n plt.plot(v/float(np.amax(v)))\n plt.title('Debugging: blue is sorted gradient, green is potential sorted by gradient')\n plt.show() #f is blue and smooth, v is green and fuzzy.\n if origin[0]==(1 or V.shape[0]):\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[1]):\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[2]): \n print('find_saddle: Saddle out of bounds in z (k) direction.\\n')\n return origin\n #################################################################################################\n if dim==2: # Extrapolate to the values of A at z0.\n V2=V\n if len(V.shape)==3:\n Ks=0 # in case there is no saddle point\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n Ks=i-1\n if Z0<1:\n Ks+=1\n Vs=V.shape\n if Ks>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,Ks] \n v2=V[:,:,Ks+1]\n V2=v1+(v2-v1)*(Z0-Z[Ks])/(Z[Ks+1]-Z[Ks])\n V2s=V2.shape\n if len(V2s)!=2: # Old: What is this supposed to check? Matlab code: (size(size(A2),2) ~= 2)\n return('Problem with find_saddle.py dimensionalities. It is {}.'.format(V2s))\n f=V2/float(np.max(abs(V2)))\n [Ex,Ey]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]))\n E=np.sqrt(Ex**2+Ey**2)\n m=float(np.min(E))\n if m>1e-4: # This requires a grid with step size 0.01, not just 0.1.\n if debug:\n Is,Js=np.NaN,np.NaN\n print('Warning, there seems to be no saddle point.')\n mr=E[0,0]\n Is,Js=1,1 # in case there is no saddle\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n if E[i,j]<mr:\n mr=E[i,j]\n Is,Js=i,j\n origin=[Is,Js,Ks]\n if Is==1 or Is==V.shape[0]:\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if Js==1 or Js==V.shape[1]:\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n return origin",
"def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin",
"def test_solve_nestedcs_bce():\n import numpy as np\n from crpm.setup_nestedcs import setup_nestedcs\n from crpm.fwdprop import fwdprop\n from crpm.lossfunctions import loss\n from crpm.gradientdecent import gradientdecent\n\n #init numpy seed\n np.random.seed(40017)\n\n #setup model\n model, data = setup_nestedcs()\n\n #calculate initial binary cross entropy error\n pred, _ = fwdprop(data[0:2,], model)\n icost, _ = loss(\"bce\", pred, data[-1,])\n\n #train model\n pred, cost, _ = gradientdecent(model, data[0:2,], data[-1,], \"bce\")\n\n #print(model)\n #print(icost)\n #print(cost)\n assert icost > cost\n assert cost < .29",
"def saddle_point(I):\n #--- FILL ME IN ---\n\n m, n = I.shape\n\n #compute the inputs to the function lstsq\n\n #get sci\n sci = I.reshape(m*n, 1)\n #get A\n A = []\n for y in range(n):\n for x in range(m):\n #print((x,y))\n #print([x*x, x*y, y*y, x, y, 1])\n A.append([x*x, x*y, y*y, x, y, 1])\n\n A = np.array(A)\n \n parms = np.linalg.lstsq(A,sci)[0]\n #print(parms)\n r1 = np.array([[2*parms[0][0], parms[1][0]], \n [parms[1][0], 2*parms[2][0]]])\n r1 = np.linalg.inv(r1)\n r2 = np.array([[parms[3][0]], \n [parms[4][0]]])\n\n pt = np.negative(np.matmul(r1, r2))\n\n #------------------\n\n return pt",
"def between_vec(df, switch): \n gauss1_idx, gauss2_idx = gauss_idx_func(CC_scaled)\n nga_dict = {key:list() for key in NGAs}\n \n slopes = [] \n \n def slope(a, b):\n \"\"\" find slope given two points \"\"\"\n a1, a2 = PC_matrix[:, 0][a], PC_matrix[:, 1][a]\n b1, b2 = PC_matrix[:, 0][b], PC_matrix[:, 1][b]\n \n return b1-a1, b2-a2\n \n # compute flow vector for each nga \n for nga in NGAs:\n nga_idx = df.index[df['NGA'] == nga].tolist()\n\n gauss1 = [i for i in nga_idx if i in gauss1_idx]\n gauss2 = [j for j in nga_idx if j in gauss2_idx]\n\n # use the last point in the first cluster and the first point in the second cluster\n if switch == 1: \n \n try:\n a, b = gauss1[-1], gauss2[0]\n x, y = slope(a, b)\n slopes.append((x, y))\n\n except: # lies only in one of the two clusters \n pass \n \n # use the very first time points make a transition from the first to the second\n elif switch == 2:\n \n for idx in range(len(nga_idx)-1):\n \n if nga_idx[idx] in gauss1 and nga_idx[idx+1] in gauss2:\n \n a, b = nga_idx[idx], nga_idx[idx+1]\n x, y = slope(a, b)\n slopes.append((x, y))\n \n break \n \n # take all transitions\n elif switch == 3:\n \n for idx in range(len(nga_idx)-1):\n \n if nga_idx[idx] in gauss1 and nga_idx[idx+1] in gauss2:\n \n a, b = nga_idx[idx], nga_idx[idx+1]\n x, y = slope(a, b)\n slopes.append((x, y))\n \n return slopes",
"def our_own_bvp_solve(f, a, b, n, y0, dim, bc, tol=1e-2):\n\n # interpolate the initial guess function y0 on Chebyshev points of the first kind\n cf0 = []\n for y0_i in y0:\n for thing in np.polynomial.chebyshev.Chebyshev(np.zeros(n), (a, b)).interpolate(y0_i, n, (a, b)):\n cf0.append(thing)\n\n solution = root(lambda u: fun(u, a, b, dim, n, f, bc), cf0, method='lm', tol=tol)\n if not solution.success:\n print('root finding failed')\n\n cf = solution.x\n cf = cf.reshape((dim, cf.size // dim))\n\n return [np.polynomial.chebyshev.Chebyshev(cf[i], (a, b)) for i in range(dim)]",
"def g_solving_subproblem_of_LR(self,vehicle_id):\r\n global_LB=-10000\r\n global_UB=10000\r\n iteration_for_RSP=20\r\n optimal_solution_for_RSP=None\r\n optimal_value_y=0\r\n self.multiplier_v=0.5\r\n\r\n #solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 4)\r\n #obtain the variance\r\n y_=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB=0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 2)\r\n LB+=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian_mean\r\n UB=Label_cost_for_lagrangian_mean+self.reliability*(variance)**0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector\r\n optimal_value_y = y\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n\r\n\r\n # step 3: update multipliers\r\n if variance-y!= 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB-global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, optimal_value_y,global_LB,global_UB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB",
"def group_centers_phase1_and_2(self) -> None:\n self.rotate_U_to_U()\n self.rotate_F_to_F()\n\n if self.centers_staged():\n return\n\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # find multiple phase1 solutions\n phase1_solutions = self.lt_LR_centers_stage.solutions_via_c(solution_count=100)\n pt_state_indexes = []\n pt_state_indexes_LR_centers_special = []\n phase2_pt_state_indexes_to_phase1_solution = {}\n logger.info(f\"found {len(phase1_solutions)} phase1 solutions\")\n\n # find the phase2 solution for each phase1 solution\n for phase1_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) in phase1_solutions:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in phase1_solution:\n self.rotate(step)\n\n # stage the LR centers\n phase2_pt_state_indexes = tuple([pt.state_index() for pt in self.lt_FB_centers_stage.prune_tables])\n pt_state_indexes.append(phase2_pt_state_indexes)\n phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution\n\n # stage the LR centers and put them into one of 495 states solveable with L L' R R'\n phase2_pt_state_indexes = tuple(\n [pt.state_index() for pt in self.lt_FB_centers_stage_LR_centers_special.prune_tables]\n )\n pt_state_indexes_LR_centers_special.append(phase2_pt_state_indexes)\n phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # stage the FB centers\n phase2_solutions = self.lt_FB_centers_stage.solutions_via_c(pt_states=pt_state_indexes, solution_count=1)\n phase2_solution = phase2_solutions[0][0]\n\n # stage the FB centers and put LR centers into one of 495 states solveable with L L' R R'\n phase2_solutions_lr_centers_special = self.lt_FB_centers_stage_LR_centers_special.solutions_via_c(\n pt_states=pt_state_indexes_LR_centers_special, solution_count=1\n )\n phase2_solution_lr_centers_special = phase2_solutions_lr_centers_special[0][0]\n\n # if we can put the LR centers into one of 495 states without adding to the move count, make it so\n if len(phase2_solution_lr_centers_special) <= len(phase2_solution):\n min_phase2_solution, (\n pt0_state,\n pt1_state,\n pt2_state,\n pt3_state,\n pt4_state,\n ) = phase2_solutions_lr_centers_special[0]\n min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state, pt2_state]\n else:\n min_phase2_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) = phase2_solutions[0]\n min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state]\n\n logger.info(\n f\"phase2 solution length {len(phase2_solution)}, phase2_lr_centers_special solution length {len(phase2_solution_lr_centers_special)}\"\n )\n\n for step in min_phase1_solution:\n self.rotate(step)\n\n self.print_cube_add_comment(\"LR centers staged\", tmp_solution_len)\n\n tmp_solution_len = len(self.solution)\n for step in min_phase2_solution:\n self.rotate(step)\n\n self.print_cube_add_comment(\"UD FB centers staged\", tmp_solution_len)",
"def create_bcs(dim, H, Hmin, HZ, HminZ, XYZ, inlet_velocity,\n V_0, solutes, subdomains_file,\n enable_NS, enable_PF, enable_EC, \n mesh, boundaries_Facet, **namespace):\n mvc = df.MeshValueCollection(\"size_t\", mesh, dim-1) \n with df.XDMFFile(subdomains_file) as infile:\n infile.read(mvc, \"name_to_read\")\n facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)\n\n # Re-create boundaries with facet_domain for mesh relevance\n\n boundaries = dict(\n inlet = [facet_domains, boundaries_Facet[\"inlet\"]],\n outletL = [facet_domains, boundaries_Facet[\"outletL\"]],\n outletR = [facet_domains, boundaries_Facet[\"outletR\"]],\n wall = [facet_domains, boundaries_Facet[\"wall\"]],\n )\n\n # Alocating the boundary dicts\n bcs = dict()\n bcs_pointwise = dict()\n for boundary in boundaries:\n bcs[boundary] = dict()\n\n ## Velocity Phase Flow In (Retrieve expression)\n #\n #length inlet, water inflow, X/Y/Z, Positive/neg flow along axis\n velocity_expr = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ) \n velocity_in = Fixed(velocity_expr)\n\n # Pressure set to 0 at outlet\n pressure_out = Pressure(0.0)\n # Create NoSlip function for walls\n noslip = Fixed((0., 0., 0.)) # Unlike 2D \"NoSlip()\", need 3 dimensions\n\n ## Define boundaries\n # Note we have two outlets\n if enable_NS:\n bcs[\"inlet\"][\"u\"] = velocity_in\n bcs[\"outletL\"][\"p\"] = pressure_out\n bcs[\"outletR\"][\"p\"] = pressure_out\n bcs[\"wall\"][\"u\"] = noslip\n\n # Ensure all processes have completed (Might be redundant) \n mpi_barrier()\n return boundaries, bcs, bcs_pointwise",
"def branches_library(point, dict_v, dict_e, list_v, kind_v, tuple_e):\n branches=[[point]] #unfinished branches\n lista=[] # finished branches, possible branches started in endpoint and ended in trunkpoint\n next_points = []\n while branches != []:\n for branch in branches:\n next_points = detect_next_points(branch[-1], branch[:-1], tuple_e)\n temp_list=list()\n \n if len(next_points)==0:\n branches.remove(branch)\n continue\n for pointn in next_points:\n temp_list.append(branch+[pointn])\n \n branches.remove(branch)\n\n for br in temp_list:\n if control_endpoint(br[-1],list_v, kind_v)==1:\n lista.append(br)\n else:\n branches.append(br)\n if len(lista)>10:\n return lista\n return lista",
"def detect_branchings(self):\n logg.debug(\n f' detect {self.n_branchings} '\n f'branching{\"\" if self.n_branchings == 1 else \"s\"}',\n )\n # a segment is a subset of points of the data set (defined by the\n # indices of the points in the segment)\n # initialize the search for branchings with a single segment,\n # that is, get the indices of the whole data set\n indices_all = np.arange(self._adata.shape[0], dtype=int)\n # let's keep a list of segments, the first segment to add is the\n # whole data set\n segs = [indices_all]\n # a segment can as well be defined by the two points that have maximal\n # distance in the segment, the \"tips\" of the segment\n #\n # the rest of the points in the segment is then defined by demanding\n # them to \"be close to the line segment that connects the tips\", that\n # is, for such a point, the normalized added distance to both tips is\n # smaller than one:\n # (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1\n # of course, this condition is fulfilled by the full cylindrical\n # subspace surrounding that line segment, where the radius of the\n # cylinder can be infinite\n #\n # if D denotes a euclidian distance matrix, a line segment is a linear\n # object, and the name \"line\" is justified. if we take the\n # diffusion-based distance matrix Dchosen, which approximates geodesic\n # distance, with \"line\", we mean the shortest path between two points,\n # which can be highly non-linear in the original space\n #\n # let us define the tips of the whole data set\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(\n np.unravel_index(\n np.argmax(self.distances_dpt), self.distances_dpt.shape\n )\n )\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.distances_dpt[self.iroot])\n else:\n tip_0 = np.argmax(self.distances_dpt[0])\n tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n segs_connects = [[]]\n segs_undecided = [True]\n segs_adjacency = [[]]\n logg.debug(\n ' do not consider groups with less than '\n f'{self.min_group_size} points for splitting'\n )\n for ibranch in range(self.n_branchings):\n iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.debug(' partitioning converged')\n break\n logg.debug(\n f' branching {ibranch + 1}: split group {iseg}',\n ) # [third start end]\n # detect branching and update segs and segs_tips\n self.detect_branching(\n segs,\n segs_tips,\n segs_connects,\n segs_undecided,\n segs_adjacency,\n iseg,\n tips3,\n )\n # store as class members\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_undecided = segs_undecided\n # the following is a bit too much, but this allows easy storage\n self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float)\n self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int)\n for i, seg_adjacency in enumerate(segs_adjacency):\n self.segs_connects[i, seg_adjacency] = segs_connects[i]\n for i in range(len(segs)):\n for j in range(len(segs)):\n self.segs_adjacency[i, j] = self.distances_dpt[\n self.segs_connects[i, j], self.segs_connects[j, i]\n ]\n self.segs_adjacency = self.segs_adjacency.tocsr()\n self.segs_connects = self.segs_connects.tocsr()"
] | [
"0.58253855",
"0.56931674",
"0.54437244",
"0.5257577",
"0.52070004",
"0.51921296",
"0.5183743",
"0.5179858",
"0.51775",
"0.51654345",
"0.5148147",
"0.51285076",
"0.51230913",
"0.5090722",
"0.5089123",
"0.50647557",
"0.5053416",
"0.5052067",
"0.50175494",
"0.50117624",
"0.4977015",
"0.4969425",
"0.49470815",
"0.49426764",
"0.49317843",
"0.49298218",
"0.4927969",
"0.49272484",
"0.49262154",
"0.4905312"
] | 0.57671916 | 1 |
Returned data frame should have trading_pair as index and include USD volume, baseAsset and quoteAsset | async def get_active_exchange_markets(cls) -> pd.DataFrame:
async with aiohttp.ClientSession() as client:
trading_pairs_response = await client.get(ASSET_PAIRS_URL)
trading_pairs_response: aiohttp.ClientResponse = trading_pairs_response
if trading_pairs_response.status != 200:
raise IOError(f"Error fetching Kraken trading pairs. "
f"HTTP status is {trading_pairs_response.status}.")
trading_pairs_data: Dict[str, Any] = await trading_pairs_response.json()
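            # Drop pairs whose name contains "." (on Kraken these are dark-pool pairs, e.g. the ".d" suffix),
            # keeping only regular spot markets.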
trading_pairs_data["result"] = {
pair: details for pair, details in trading_pairs_data["result"].items() if "." not in pair}
wsname_dict: Dict[str, str] = {pair: details["wsname"]
for pair, details in trading_pairs_data["result"].items()}
trading_pairs: Dict[str, Any] = {pair: {"baseAsset": wsname_dict[pair].split("/")[0],
"quoteAsset": wsname_dict[pair].split("/")[1],
"wsname": wsname_dict[pair]}
for pair in trading_pairs_data["result"]}
trading_pairs_str: str = ','.join(trading_pairs.keys())
market_response = await client.get(f"{TICKER_URL}?pair={trading_pairs_str}")
market_response: aiohttp.ClientResponse = market_response
if market_response.status != 200:
raise IOError(f"Error fetching Kraken markets information. "
f"HTTP status is {market_response.status}.")
market_data = await market_response.json()
market_data: List[Dict[str, Any]] = [{"pair": pair, **market_data["result"][pair], **trading_pairs[pair]}
for pair in market_data["result"]
if pair in trading_pairs]
# Build the data frame.
all_markets: pd.DataFrame = pd.DataFrame.from_records(data=market_data, index="pair")
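            # In Kraken's Ticker payload, "c" is the last-trade-closed array [price, lot volume] and "v" is
            # the volume array [today, last 24 hours], so c[0] below is the last price and v[1] the 24h volume.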
all_markets["lastPrice"] = all_markets.c.map(lambda x: x[0]).astype("float")
all_markets.loc[:, "volume"] = all_markets.v.map(lambda x: x[1]).astype("float")
price_dict: Dict[str, float] = await cls.get_prices_from_df(all_markets)
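            # price_dict is assumed to map each base asset symbol to its USD price; base assets without a
            # USD conversion fall back to a volume of -1 so they sort to the bottom of the returned frame.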
usd_volume: List[float] = [
(
baseVolume * price_dict[baseAsset] if baseAsset in price_dict else -1
)
for baseAsset, baseVolume in zip(all_markets.baseAsset,
all_markets.volume)]
all_markets.loc[:, "USDVolume"] = usd_volume
return all_markets.sort_values("USDVolume", ascending=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_df(self, typ='trades', improve=False):\n cols = ['date_open', 'date_close', 'symbol', 'style', 'volume', 'price_open', 'price_stop', 'price_limit', 'price_close', 'comment', 'magic', 'order_id_master', 'order_id_stop', 'order_id_limit', 'direction', 'price_diff', 'price_diff', 'price_diff_d', 'price_diff_rel', 'price_diff_rel_d', 'MAE', 'MFE', 'MAE_rel', 'MFE_rel', 'price_trailing_diff', 'profit']\n d = self._d_orders[typ]\n if len(d)>0:\n df = pd.DataFrame(d.values(), index=d.keys())\n df = df.rename(columns={0: 'bo'})\n df['date_created'] = df['bo'].map(lambda o: o.date_created)\n df['date_open'] = df['bo'].map(lambda o: o.date_open)\n df['date_close'] = df['bo'].map(lambda o: o.date_close)\n df['date_closed'] = df['bo'].map(lambda o: o.date_closed)\n df['symbol'] = df['bo'].map(lambda o: o.symbol)\n #df['style'] = df['bo'].map(lambda o: o.style)\n df['volume'] = df['bo'].map(lambda o: o.volume)\n df['price_open'] = df['bo'].map(lambda o: o.price_open)\n df['price_stop'] = df['bo'].map(lambda o: o.price_stop)\n df['price_limit'] = df['bo'].map(lambda o: o.price_limit)\n df['price_close'] = df['bo'].map(lambda o: o.price_close)\n df['comment'] = df['bo'].map(lambda o: o.comment)\n df['magic'] = df['bo'].map(lambda o: o.magic)\n #df['order_id_master'] = df['bo'].map(lambda o: o.order_id_master)\n #df['order_id_stop'] = df['bo'].map(lambda o: o.order_id_stop)\n #df['order_id_limit'] = df['bo'].map(lambda o: o.order_id_limit)\n\n df['direction'] = df['bo'].map(lambda o: o.direction)\n\n df['price_diff'] = df['bo'].map(lambda o: o.price_diff)\n df['price_diff_d'] = df['bo'].map(lambda o: o.price_diff_d)\n df['price_diff_rel'] = df['bo'].map(lambda o: o.price_diff_rel)\n df['price_diff_rel_d'] = df['bo'].map(lambda o: o.price_diff_rel_d)\n \n df['MAE'] = df['bo'].map(lambda o: o.MAE)\n df['MFE'] = df['bo'].map(lambda o: o.MFE)\n \n #df['MAE_rel'] = df['MAE'] / df['price_open']\n #df['MFE_rel'] = df['MFE'] / df['price_open']\n df['MAE_rel'] = df['bo'].map(lambda o: o.MAE_rel)\n df['MFE_rel'] = df['bo'].map(lambda o: o.MFE_rel)\n \n\n #df['profit'] = df['volume'] * df['price_diff'].fillna(0)\n df['profit'] = df['bo'].map(lambda o: o.profit)\n #df['profit_rel'] = df['bo'].map(lambda o: o.profit_rel)\n \n if improve:\n try:\n df = improve_account_df_with_additional_data(df)\n except Exception as e:\n log.error(\"Can't improve account df with additional data\")\n log.error(\"Reason: %s\" % str(e))\n \n #del df['bo'] \n \n return(df)\n else:\n return(pd.DataFrame(columns=cols))",
"def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data",
"def trade_vol_VS_tx_vol(df):\n\n volume_cryptocompare = df['Volume (BTC)']\n volume_tx = df['Tx Volume (BTC)']\n result = volume_cryptocompare.div(volume_tx).fillna(0)\n result.name = 'Trade Vol / Tx Vol'\n return out(SETTINGS, df, result)",
"def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. volume': 'Volume'})\n\n return df_full",
"def exchanges_df(self) -> pd.DataFrame:\n mid_price = self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)\n maker_buy_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n maker_sell_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n taker_buy_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n taker_sell_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n maker_buy_spread_bps = (maker_buy_result.result_price - taker_buy_result.result_price) / mid_price * 10000\n maker_sell_spread_bps = (taker_sell_result.result_price - maker_sell_result.result_price) / mid_price * 10000\n columns = [\"Exchange\", \"Market\", \"Mid Price\", \"Buy Price\", \"Sell Price\", \"Buy Spread\", \"Sell Spread\"]\n data = []\n data.append([\n self.maker_exchange,\n self.maker_pair,\n float(self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)),\n float(maker_buy_result.result_price),\n float(maker_sell_result.result_price),\n int(maker_buy_spread_bps),\n int(maker_sell_spread_bps)\n ])\n data.append([\n self.taker_exchange,\n self.taker_pair,\n float(self.connectors[self.taker_exchange].get_mid_price(self.maker_pair)),\n float(taker_buy_result.result_price),\n float(taker_sell_result.result_price),\n int(-maker_buy_spread_bps),\n int(-maker_sell_spread_bps)\n ])\n df = pd.DataFrame(data=data, columns=columns)\n return df",
"def map_to_trade(self, raw_trade: HitbtcRawTradeModel) -> HitbtcTradeModel:\n\n id_ = int(raw_trade[\"id\"])\n price = Decimal(raw_trade[\"price\"])\n quantity = Decimal(raw_trade[\"quantity\"])\n side = raw_trade[\"side\"]\n timestamp = raw_trade[\"timestamp\"]\n\n trade = HitbtcTradeModel(\n id=id_,\n price=price,\n quantity=quantity,\n side=side,\n timestamp=timestamp)\n\n return trade",
"def _deserialize_trade(self, raw_result: list[Any]) -> Trade:\n amount = deserialize_asset_amount(raw_result[4])\n trade_type = TradeType.BUY if amount >= ZERO else TradeType.SELL\n bfx_pair = self._process_bfx_pair(raw_result[1])\n if bfx_pair in self.pair_bfx_symbols_map:\n bfx_base_asset_symbol, bfx_quote_asset_symbol = self.pair_bfx_symbols_map[bfx_pair]\n elif len(bfx_pair) == 6:\n # Could not see it in the listed pairs. Probably delisted. Gotta try and figure it out\n # TODO: The whole pair logic in bitfinex seems complicated. Simplify!\n bfx_base_asset_symbol = bfx_pair[:3]\n bfx_quote_asset_symbol = bfx_pair[3:]\n else:\n raise DeserializationError(\n f'Could not deserialize bitfinex trade pair {raw_result[1]}. '\n f'Raw trade: {raw_result}',\n )\n\n base_asset = asset_from_bitfinex(\n bitfinex_name=bfx_base_asset_symbol,\n currency_map=self.currency_map,\n )\n quote_asset = asset_from_bitfinex(\n bitfinex_name=bfx_quote_asset_symbol,\n currency_map=self.currency_map,\n )\n fee_asset = asset_from_bitfinex(\n bitfinex_name=raw_result[10],\n currency_map=self.currency_map,\n )\n\n trade = Trade(\n timestamp=Timestamp(int(raw_result[2] / 1000)),\n location=Location.BITFINEX,\n base_asset=base_asset,\n quote_asset=quote_asset,\n trade_type=trade_type,\n amount=AssetAmount(abs(amount)),\n rate=deserialize_price(raw_result[5]),\n fee=Fee(abs(deserialize_fee(raw_result[9]))),\n fee_currency=fee_asset,\n link=str(raw_result[0]),\n notes='',\n )\n return trade",
"def calculate_vol_adjusted_index_from_prices(self, prices_df, br):\n\n tsc = TimeSeriesCalcs()\n\n returns_df, leverage_df = self.calculate_vol_adjusted_returns(prices_df, br, returns = False)\n\n return tsc.create_mult_index(returns_df)",
"def create_pair_differences(self):\n\n # Create an empty dataframe of pair differences, we will append this later.\n pair_string_names = []\n pair_price_diff = []\n\n for pair in self.__pairs_data:\n # Choose both stocks from each pair\n stock_symbol_1 = pair[0]\n stock_symbol_2 = pair[1]\n\n # Create a string that symbolizes the pair and add it to a list of strings\n pair_string = str(stock_symbol_1) + '-' + str(stock_symbol_2)\n pair_string_names.append(pair_string)\n\n # Get both stock prices from the price dataset\n stock_price1 = self.__price_data[stock_symbol_1]\n stock_price2 = self.__price_data[stock_symbol_2]\n pair_diff = stock_price2 - stock_price1\n pair_price_diff.append(pair_diff)\n\n # Concat all the pairs into the pair differences attribute in class and set column names\n self.__pair_diff = pd.concat([pd.Series(pair_prices) for pair_prices in pair_price_diff], axis=1)\n self.__pair_diff.columns = pair_string_names\n\n return self.__pair_diff",
"async def get_trading_table(self):\n if self.trading_table is None:\n self.trading_table = {}\n wikitext = await Controller.get_wikitext('Trading')\n for match in re.finditer(r\"===='''([^']+)'''====\\n({\\|[^\\n]*\\n(?:[^\\n]*\\n)+?\\|})\", wikitext):\n place = match.group(1)\n trade_list = {'into':{}, 'from':{}}\n for row in match.group(2).strip().split('|-'):\n if len(row) < 5:\n continue\n trade = re.search(r'\\|([0-9,.]+)\\|\\| \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]\\|\\|→\\n\\|align\\=right\\|([0-9,.]+)\\|\\| \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]', row)\n if not trade:\n trade = re.search(r'\\| ?([0-9,.]+) \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]\\|\\| ?([0-9,.]+) \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]', row)\n if not trade:\n logging.warn(f'No trade row in `{row}`')\n continue\n from_amt = int(trade.group(1).replace(',', ''))\n from_itm = trade.group(2).lower()\n to_amt = int(trade.group(3).replace(',', ''))\n to_itm = trade.group(4).lower()\n if from_itm not in trade_list['from']:\n trade_list['from'][from_itm] = []\n if to_itm not in trade_list['into']:\n trade_list['into'][to_itm] = []\n trade_list['from'][from_itm].append((to_itm, from_amt, to_amt))\n trade_list['into'][to_itm].append((from_itm, to_amt, from_amt))\n if '(' in place:\n # Gorenichi (Kiev), Magnitogorsk (trader), Magnitogorsk (fitter)\n if place[0] == 'G':\n place = 'Kiev'\n self.trading_table[place.lower()] = trade_list\n return self.trading_table",
"def aggregate_historical_trades(self, pair: list):\n raise NotImplementedError",
"def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass",
"def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass",
"def get_data(pair, other):\n days_ago = 7\n endtime = int(time())\n starttime = endtime - 60 * 60 * 24 * days_ago\n\n geckourl = '%s/markets?vs_currency=%s&ids=%s' % (API, pair[\"currency\"],\n pair[\"coin\"])\n liveprice = requests.get(geckourl).json()[0]\n pricenow = float(liveprice['current_price'])\n alltimehigh = float(liveprice['ath'])\n other['volume'] = float(liveprice['total_volume'])\n\n url_hist = '%s/%s/market_chart/range?vs_currency=%s&from=%s&to=%s' % (\n API, pair[\"coin\"], pair[\"currency\"], str(starttime), str(endtime))\n\n try:\n timeseriesarray = requests.get(url_hist).json()['prices']\n except JSONDecodeError as err:\n print(f'Caught JSONDecodeError: {repr(err)}')\n return None\n timeseriesstack = []\n length = len(timeseriesarray)\n i = 0\n while i < length:\n timeseriesstack.append(float(timeseriesarray[i][1]))\n i += 1\n\n timeseriesstack.append(pricenow)\n if pricenow > alltimehigh:\n other['ATH'] = True\n else:\n other['ATH'] = False\n\n other[\"image\"] = pair[\"image\"]\n other[\"coin\"] = pair[\"coin\"]\n\n return timeseriesstack",
"def format_pair_result(pair_name, pair_tuple, price):\n return {\n \"name\": pair_name,\n \"base_volume\": \"%.7f\" % pair_tuple[0],\n \"counter_volume\": \"%.7f\" % pair_tuple[1],\n \"trade_count\": pair_tuple[2],\n \"price\": \"%.7f\" % price\n }",
"def get_usdt_pairs(self):\r\n usdt_pairs = {}\r\n raw_symbols = self.kc.get_symbols()\r\n '''\r\n {'symbol': 'GRIN-USDT', 'quoteMaxSize': '99999999', 'enableTrading': True, 'priceIncrement': '0.000001',\r\n 'feeCurrency': 'USDT', 'baseMaxSize': '10000000000', 'baseCurrency': 'GRIN', 'quoteCurrency': 'USDT', 'market': 'USDS', 'quoteIncrement': '0.000001',\r\n 'baseMinSize': '0.01', 'quoteMinSize': '0.01', 'name': 'GRIN-USDT', 'baseIncrement': '0.00000001', 'isMarginEnabled': False}\r\n '''\r\n\r\n for data in raw_symbols:\r\n if self.base_currency in data[\"symbol\"]:\r\n pair = data[\"symbol\"]\r\n quote, base = pair.split('-')\r\n if base == self.base_currency:\r\n self.log(pair, quote)\r\n # add/modify data here\r\n usdt_pairs[quote] = data\r\n\r\n return usdt_pairs",
"def return_trade_history(self, currency_pair):\n return self.api_query('returnTradeHistory', {\"currencyPair\": currency_pair})",
"def make_trade_params(pair):\n params = {\n \"order\": \"desc\",\n \"limit\": 1,\n }\n params.update(make_asset_param_from_pair(pair, \"base\"))\n params.update(make_asset_param_from_pair(pair, \"counter\"))\n return params",
"async def update_adjusted_tick_data(self, pair: str):\n\n base = config['trade_base']\n pair_base = pair.split('-')[0]\n\n try:\n last_time = self.last_adjusted_close_times[pair]\n start_index = self.close_times[pair].index(last_time) + 1\n\n except ValueError:\n self.log.error(\"{} has no adjusted close times.\", pair)\n last_time = 0\n start_index = 0\n\n diff = len(self.close_times[pair]) - start_index\n if diff != 1:\n self.log.debug(\"{} got diff {}, source length {}, last time {}.\",\n pair, diff, len(self.close_times[pair]), last_time)\n\n if base == pair_base:\n self.adjusted_close_values[pair] = self.close_values[pair]\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n await self._update_volume_derivatives(pair, diff, start_index)\n await self._truncate_adjusted_tick_data(pair)\n return\n\n convert_pair = '{}-{}'.format(base, pair_base)\n missing = 0\n\n for index in range(diff):\n try:\n convert_value = self.close_values[convert_pair][start_index + index]\n except IndexError:\n convert_value = self.close_values[convert_pair][-1]\n missing += 1\n\n close_value = self.close_values[pair][start_index + index]\n self.adjusted_close_values[pair].append(close_value * convert_value)\n\n if missing > 0:\n self.log.debug(\"{} padded {} values at end.\", pair, missing)\n\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n await self._update_volume_derivatives(pair, diff, start_index)\n await self._truncate_adjusted_tick_data(pair)",
"def typical_prices_from_trades_history(trade_history, read=False):\r\n if not read: # in that case you get data directly from the market\r\n start_date = _get_biggest_anterior_date(trade_history.index[-1]) + datetime.timedelta(minutes=5)\r\n trade_history = _cut_smallest_dates(trade_history, start_date)\r\n current_date = start_date + datetime.timedelta(hours=2)\r\n dates = [start_date + datetime.timedelta(hours=2)]\r\n # dates = []\r\n volumes = []\r\n while current_date < trade_history.index[0] + datetime.timedelta(hours=2):\r\n current_date += datetime.timedelta(minutes=5)\r\n dates.append(current_date)\r\n typical_prices = []\r\n k = 1\r\n date_index = 1\r\n while k < trade_history.shape[0]+1: # you have to go through all the rows of the dataframe\r\n price = 0\r\n normalizing_factor = 0\r\n volume = 0\r\n j = 0\r\n try:\r\n # you have one price per date. Each price is computed from the traded prices between a date and the following one\r\n while trade_history.index[-(k+j)] + datetime.timedelta(hours=2) <= dates[date_index]:\r\n if isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], pd.Series): # there can be several trades at the same time. In that case you get a series rather than a number\r\n for l in range(len(trade_history.loc[trade_history.index[-(k+j)], 'total'].values.tolist())):\r\n price += float(trade_history.loc[trade_history.index[-(k+j)], 'total'].values[l]) # several rows could have the same date. So take the mean. Maybe weight the prices with the volume ?\r\n normalizing_factor += float(trade_history.loc[trade_history.index[-(k+j)], 'amount'].values[l])\r\n volume += float(trade_history.loc[trade_history.index[-(k+j)], 'total'].values[l])\r\n j += trade_history.loc[trade_history.index[-(k+j)], 'rate'].shape[0]\r\n else:\r\n price += float(trade_history.loc[trade_history.index[-(k+j)], 'total'])\r\n normalizing_factor += float(trade_history.loc[trade_history.index[-(k+j)], 'amount'])\r\n volume += float(trade_history.loc[trade_history.index[-(k+j)], 'total'])\r\n j += 1\r\n if j != 0:\r\n price /= normalizing_factor\r\n typical_prices.append(price)\r\n k += j\r\n else:\r\n typical_prices.append(np.nan)\r\n k += 1\r\n volumes.append(volume)\r\n date_index += 1\r\n except IndexError:\r\n # print \"k+j: %s, \\n trade_history.shape: %s, \\n date_index: %s, \\n len(dates): %s\" % (k+j, trade_history.shape, date_index, len(dates))\r\n break\r\n return dates, typical_prices, volumes\r\n else: # otherwise you get a file where every data is unicode\r\n start_date = _get_biggest_anterior_date(parser.parse(trade_history.index[-1])) + datetime.timedelta(minutes=5)\r\n current_date = start_date + datetime.timedelta(hours=2)\r\n dates = []\r\n volumes = []\r\n while current_date < parser.parse(trade_history.index[0]) + datetime.timedelta(hours=2):\r\n current_date += datetime.timedelta(minutes=5)\r\n dates.append(current_date)\r\n typical_prices = []\r\n k = 1\r\n date_index = 1\r\n while k < trade_history.shape[0]+1:\r\n price = 0\r\n volume = 0\r\n normalizing_factor = 0\r\n j = 0\r\n try:\r\n while parser.parse(trade_history.index[-(k+j)]) + datetime.timedelta(hours=2) <= dates[date_index]:\r\n if not(isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], np.float64) or isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], np.float32) or isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], float)):\r\n price += np.sum(trade_history.loc[trade_history.index[-(k+j)], 'total'].values) # several rows could have the same date. 
So take the mean. Maybe weight the prices with the volume ?\r\n normalizing_factor += np.sum(trade_history.loc[trade_history.index[-(k+j)], 'amount'].values)\r\n volume += np.sum(trade_history.loc[trade_history.index[-(k+j)], 'total'].values)\r\n j += trade_history.loc[trade_history.index[-(k+j)], 'rate'].shape[0]\r\n else:\r\n price += trade_history.loc[trade_history.index[-(k+j)], 'total']\r\n normalizing_factor += trade_history.loc[trade_history.index[-(k+j)], 'amount']\r\n volume += trade_history.loc[trade_history.index[-(k+j)], 'total']\r\n j += 1\r\n if j != 0:\r\n price /= normalizing_factor\r\n typical_prices.append(price)\r\n k += j\r\n else:\r\n typical_prices.append(np.nan)\r\n k += 1\r\n volumes.append(volume)\r\n date_index += 1\r\n except IndexError:\r\n # print \"k+j: %s, \\n trade_history.shape: %s, \\n date_index: %s, \\n len(dates): %s\" % (k+j, trade_history.shape, date_index, len(dates))\r\n break\r\n return dates, typical_prices, volumes",
"def active_orders_df(self) -> pd.DataFrame:\n columns = [\"Exchange\", \"Market\", \"Side\", \"Price\", \"Amount\", \"Spread Mid\", \"Spread Cancel\", \"Age\"]\n data = []\n mid_price = self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)\n taker_buy_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n taker_sell_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n buy_cancel_threshold = taker_sell_result.result_price * Decimal(1 - self.min_spread_bps / 10000)\n sell_cancel_threshold = taker_buy_result.result_price * Decimal(1 + self.min_spread_bps / 10000)\n for connector_name, connector in self.connectors.items():\n for order in self.get_active_orders(connector_name):\n age_txt = \"n/a\" if order.age() <= 0. else pd.Timestamp(order.age(), unit='s').strftime('%H:%M:%S')\n spread_mid_bps = (mid_price - order.price) / mid_price * 10000 if order.is_buy else (order.price - mid_price) / mid_price * 10000\n spread_cancel_bps = (buy_cancel_threshold - order.price) / buy_cancel_threshold * 10000 if order.is_buy else (order.price - sell_cancel_threshold) / sell_cancel_threshold * 10000\n data.append([\n self.maker_exchange,\n order.trading_pair,\n \"buy\" if order.is_buy else \"sell\",\n float(order.price),\n float(order.quantity),\n int(spread_mid_bps),\n int(spread_cancel_bps),\n age_txt\n ])\n if not data:\n raise ValueError\n df = pd.DataFrame(data=data, columns=columns)\n df.sort_values(by=[\"Market\", \"Side\"], inplace=True)\n return df",
"async def refresh_adjusted_tick_data(self, pair: str):\n\n self.base_24hr_volumes[pair][1] = array('d')\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n\n trade_base = config['trade_base']\n pair_base = pair.split('-')[0]\n\n if trade_base == pair_base:\n self.adjusted_close_values[pair] = self.close_values[pair]\n await self._refresh_volume_derivatives(pair)\n return\n\n else:\n self.adjusted_close_values[pair] = array('d')\n\n convert_pair = '{}-{}'.format(trade_base, pair_base)\n\n try:\n source_index = len(self.close_times[pair]) - 1\n convert_index = self.close_times[convert_pair].index(self.close_times[pair][-1])\n\n except ValueError:\n try:\n convert_index = len(self.close_times[convert_pair]) - 1\n source_index = self.close_times[pair].index(self.close_times[convert_pair][-1])\n convert_value = self.close_values[convert_pair][-1]\n\n for index in range(len(self.close_times[pair]) - 1, source_index, -1):\n adjusted_value = self.close_values[pair][index] * convert_value\n self.adjusted_close_values[pair].insert(0, adjusted_value)\n\n self.log.debug(\"{} last {} adjusted values are approximate.\", pair,\n len(self.close_times[pair]) - source_index)\n\n except ValueError:\n self.adjusted_close_values[pair] = array('d')\n self.log.error(\"{} ends at {} before start of convert pair {} data at {}.\",\n pair, self.close_times[pair][-1], convert_pair, self.close_times[convert_pair][0])\n return\n\n for index in range(source_index, -1, -1):\n if convert_index > -1:\n convert_value = self.close_values[convert_pair][convert_index]\n else:\n convert_value = self.close_values[convert_pair][0]\n\n adjusted_value = self.close_values[pair][index] * convert_value\n self.adjusted_close_values[pair].insert(0, adjusted_value)\n convert_index -= 1\n\n if convert_index < 0:\n self.log.debug(\"{} first {} adjusted values are approximate.\", pair, convert_index * -1)\n\n await self._refresh_volume_derivatives(pair)",
"def _normalize_trade_type(t):\n t = copy(t)\n assert isclose(t[\"vol\"], t[\"cost\"] / t[\"price\"], rel_tol=1e-4)\n if t[\"type\"] == \"sell\":\n t = _flip_pair(t)\n assert isclose(t[\"vol\"], t[\"cost\"] / t[\"price\"], rel_tol=1e-4)\n return t",
"def get_matching_ticker(row: QFSeries) -> Ticker:\n ticker_str = row.loc[\"Contract symbol\"]\n name = row.loc[\"Asset Name\"]\n sec_type = SecurityType(row.loc[\"Security type\"])\n point_value = row.loc[\"Contract size\"]\n ticker = ticker_params_to_ticker.get((name, sec_type, point_value), None)\n if isinstance(ticker, FutureTicker):\n ticker_type = ticker.supported_ticker_type()\n ticker = ticker_type(ticker_str, sec_type, point_value)\n return ticker",
"async def prepare_trades(self, pair: str):\n\n if pair not in self.trades:\n self.trades[pair] = {\n 'last_open_time': 0.0,\n 'rebuy_count': 0,\n 'open': [],\n 'closed': []\n }",
"def _get_trades(self):\n\n trade_url = self.trade_url % (self.date, self.instrument, self.exchange)\n self.trades = pd.read_csv(trade_url, parse_dates=[0],\n date_parser=lambda t: pd.to_datetime(str(t), format='%Y%m%dT%H%M%S'))\n\n self.trades.fillna(np.nan)\n self.trades.index = pd.to_datetime(self.trades.time, unit='s')\n self.trades.time = pd.to_datetime(self.trades.time, unit='s')\n self.trades.columns = ['time', 'price', 'volume', 'source', 'buyer', 'seller', 'initiator']\n # del self.trades['time']\n\n if self.exclude_derivative:\n self.trades = self.trades[(self.trades.source != 'Derivatives trade') & (self.trades.source != 'Official')]",
"def get_adjusted_data(stockSymbol, df):\n\n events = ['SPLIT', 'BONUS']\n arr = ['Open Price', 'High Price', 'Low Price',\n 'Last Price', 'Close Price', 'Average Price']\n\n stockSymbol = stockSymbol.replace('&', '%26')\n\n if(df.empty):\n print(\"Please check data. Dataframe is empty\")\n return df\n\n df.index = pd.to_datetime(df.index)\n df.sort_index(inplace=True)\n\n try:\n df = df.drop(['Prev Close'], axis=1)\n except KeyError:\n pass\n\n for event in events:\n\n ratio, dates = scrape_bonus_splits(stockSymbol, event)\n for i in range(len(dates)):\n\n date = datetime.datetime.strptime(dates[i], '%d-%b-%Y')\n print(event, \" on : \", dates[i], \" and ratio is : \", ratio[i])\n\n changed_data = df.loc[df.index < date]\n same_data = df.loc[df.index >= date]\n\n for j in arr:\n\n try:\n changed_data.loc[:, j] = changed_data.loc[:, j]/ratio[i]\n except TypeError:\n pass\n\n df = pd.concat([changed_data, same_data])\n\n return df",
"def merge_new(dfc, pairs, span=None):\n global last_update\n t1 = Timer()\n columns = ['open', 'close', 'trades', 'volume', 'buy_ratio']\n exclude = ['_id','high','low','quote_vol','sell_vol', 'close_time']\n projection = dict(zip(exclude, [False]*len(exclude)))\n idx, data = [], []\n db = app.get_db()\n\n if span is None and last_update:\n # If no span, query/merge db records inserted since last update.\n oid = ObjectId.from_datetime(last_update)\n last_update = now()\n _filter = {'_id':{'$gte':oid}}\n else:\n # Else query/merge all since timespan.\n span = span if span else timedelta(days=7)\n last_update = now()\n _filter = {'pair':{'$in':pairs}, 'close_time':{'$gte':now()-span}}\n\n batches = db.candles.find_raw_batches(_filter, projection)\n\n if batches.count() < 1:\n return dfc\n\n try:\n ndarray = bsonnumpy.sequence_to_ndarray(\n batches,\n dtype,\n db.candles.count()\n )\n except Exception as e:\n log.error(str(e))\n return dfc\n #raise\n\n df = pd.DataFrame(ndarray)\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')\n df['freq'] = df['freq'].str.decode('utf-8')\n df['pair'] = df['pair'].str.decode('utf-8')\n\n df['freq'] = df['freq'].replace('1m',60)\n df['freq'] = df['freq'].replace('5m',300)\n df['freq'] = df['freq'].replace('1h',3600)\n df['freq'] = df['freq'].replace('1d',86400)\n df = df.sort_values(by=['pair','freq','open_time'])\n\n df2 = pd.DataFrame(df[columns].values,\n index = pd.MultiIndex.from_arrays(\n [df['pair'], df['freq'], df['open_time']],\n names = ['pair','freq','open_time']),\n columns = columns\n ).sort_index()\n\n df3 = pd.concat([dfc, df2]).drop_duplicates().sort_index()\n\n log.debug(\"{:,} records loaded into numpy. [{:,.1f} ms]\".format(\n len(df3), t1))\n #print(\"Done in %s ms\" % t1)\n return df3",
"def compute_derived_blockchain_data(df):\n\n original_join_state = SETTINGS.join\n\n SETTINGS.join = False\n result = pd.concat([trade_vol_VS_tx_vol(df),\n miners_revenue_VS_tx_volume(df),\n block_reward_USD(df),\n tx_fees_VS_miners_revenue(df),\n avg_tx_fees_USD(df),\n avg_tx_fees_BTC(df),\n avg_tx_value_USD(df),\n avg_tx_value_BTC(df),\n fee_VS_tx_value(df)], axis=1)\n\n velocity_df = compute_function_different_periods(df,\n periods=ta_periods,\n function=velocity)\n NVT_df = compute_function_different_periods(df,\n periods=ta_periods,\n function=NVT_ratio)\n\n result = pd.concat([result, velocity_df, NVT_df], axis=1)\n\n SETTINGS.join = original_join_state\n return out(SETTINGS, df, result)",
"def prices_pivot(self, prices_df, if_exists='append'):\n\n prices_df['date_hour'] = prices_df.index\n prices_df['date_hour'] = prices_df['date_hour'].dt.strftime(\"%Y-%m-%d %H:00:00\")\n prices_df['eurbidprice'] = prices_df['EURbidPrice']\n prices_df.drop(['EURbidPrice'], axis=1, inplace = True)\n \n with open('../creds/pg_creds.json') as json_data:\n d = json.load(json_data)\n json_data.close()\n\n user = d[0]['user']\n password = d[0]['password']\n \n engine = create_engine('postgresql://' + user + ':' + password + '@localhost:5432/cryptotracker')\n prices_df.to_sql('eur_prices', engine, schema=\"public\", if_exists=if_exists, index=False)\n\n return"
] | [
"0.5413317",
"0.5374446",
"0.5327119",
"0.5326608",
"0.52844787",
"0.5255842",
"0.52381635",
"0.52032465",
"0.51819",
"0.5160164",
"0.5140268",
"0.5139721",
"0.5139721",
"0.51213753",
"0.50929576",
"0.50690424",
"0.50472456",
"0.50343525",
"0.501496",
"0.49923664",
"0.49835643",
"0.49736023",
"0.49674344",
"0.49642214",
"0.49518022",
"0.49460435",
"0.49222705",
"0.49073943",
"0.48923177",
"0.48846138"
] | 0.56753194 | 0 |
Read a string from standard input, but prompt to standard error. The trailing newline is stripped. | def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover
if file is sys.stdout:
return input(prompt)
try:
stdin = sys.stdin
except AttributeError:
raise RuntimeError("stderr_input: lost sys.stdin")
file.write(prompt)
try:
flush = file.flush
except AttributeError:
pass
else:
flush()
try:
file.softspace = 0 # type: ignore
except (AttributeError, TypeError):
pass
line = stdin.readline()
if not line: # inputting an empty line gives line == '\n'
raise EOFError
elif line[-1] == '\n':
return line[:-1]
return line | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def safe_input(prompt=\"\"):\n\n\ttry:\n\t\tresult = input(prompt)\n\t\treturn result\n\texcept KeyboardInterrupt:\n\t\tsys.exit()\n\texcept:\n\t\treturn \"\"",
"def get_input(prompt):\n try:\n try:\n return raw_input(prompt)\n except NameError:\n return input(prompt)\n except EOFError:\n return ''",
"def _input(str=''):\n print(str, end='', flush=True)\n return stdin.readline().rstrip('\\n')",
"def input_or_error(stream=sys.stdin):\n line = readline_strip(stream)\n if not line: raise EOFError(\"End of input\")\n return line",
"def prompt_str_input(prompt_name: str, get_user_input: GetInputFunc) -> str:\n try:\n return str(get_user_input(f\"type in {prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))",
"def safe_input():\n try:\n input(\"Please enter something: \")\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None",
"def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()",
"def clean_input(prompt):\n try:\n return input(prompt)\n # There is a general handling of KeyboardInterrupt in main() but\n # here it leads to a cleaner exit as the option to quit is returned.\n except KeyboardInterrupt:\n return 'Quit'",
"def ask_user_input(prompt: str) -> str:\n return input(prompt)",
"def rawInput(string):\n if os.name == \"posix\":\n tcflush(sys.stdin, TCIFLUSH)\n return input(string)",
"def get_input(prompt):\n # type: (str) -> str\n return raw_input(prompt)",
"def rlinput(prompt, prefill=''):\n if \"readline\" not in sys.modules:\n # For example on Windows\n return input(prompt)\n else:\n readline.set_startup_hook(lambda: readline.insert_text(prefill))\n try:\n return input(prompt)\n finally:\n readline.set_startup_hook()",
"def safe_input(display_string):\n\n try:\n x = raw_input(display_string)\n except NameError:\n x = input(display_string)\n\n return x",
"def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer",
"def pseudo_raw_input(self, prompt):\n\n if self.use_rawinput:\n try:\n line = sm.input(prompt)\n except EOFError:\n line = 'EOF'\n else:\n self.stdout.write(prompt)\n self.stdout.flush()\n line = self.stdin.readline()\n if not len(line):\n line = 'EOF'\n else:\n if line[-1] == '\\n': # this was always true in Cmd\n line = line[:-1]\n return line",
"def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)",
"def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'",
"def prompt(prompt, validator=(lambda x: True), hint=None):\n user_input = input(prompt)\n while not validator(user_input):\n user_input = input(prompt)\n return user_input",
"def input_with_timeout(prompt: Optional[str] = None, timeout: float = 36000.0) -> str:\n # use of sys.stdin and sys.stdout to mimic the builtin input based on\n # https://github.com/python/cpython/blob/baf7bb30a02aabde260143136bdf5b3738a1d409/Lib/getpass.py#L129\n if prompt:\n sys.stdout.write(prompt)\n sys.stdout.flush()\n\n line = misc.readline_with_timeout(timeout, prompt)\n\n if not line:\n raise EOFError\n return line.rstrip('\\n')",
"def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)",
"def get_input(prompt):\n return input(prompt)",
"def get_input(prompt):\n return input(prompt)",
"def _get_input(question: str) -> str:\n print(question)\n sys.stdout.flush()\n user_input = sys.stdin.readline()\n user_input = user_input.strip()\n return user_input",
"def input_(self) -> str:\n\n # Try to input through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n return IO.stdin()",
"def test_prompt_setInput_stringio_valid(self):\n instr = StringIO.StringIO()\n self.prompt.setInput(instr)\n\n self.assertEquals(instr, self.prompt._instr)\n self.assertEquals(instr.getvalue(), \"\")\n\n with mock.patch('__builtin__.raw_input', return_value='mocked input') as mockinput:\n result = self.prompt._prompt({}, {\n 'say': 'test',\n 'ask': 'varname'\n })\n\n self.assertEquals(result['ansible_facts']['varname'], 'mocked input')",
"def prompt_for_input(prepend_prompt=''):\n if not prepend_prompt == '':\n prepend_prompt += ' '\n return raw_input(prepend_prompt + '> ').strip()",
"def getstring(message = \"Enter a value: \"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\treturn raw_input(message)",
"def get_string_input():\n string_input = input('Please enter string: ')\n return string_input",
"def _handle_stdin(self, line):\r\n return input(line.replace(STDIN_PROMPT, \"\"))",
"def prompt(msg, default=NO_DEFAULT, validate=None):\n while True:\n response = input(msg + \" \").strip()\n if not response:\n if default is NO_DEFAULT:\n continue\n return default\n if validate is None or validate(response):\n return response"
] | [
"0.7439654",
"0.723748",
"0.70752895",
"0.6913516",
"0.68963534",
"0.6866671",
"0.68438375",
"0.6834515",
"0.67756915",
"0.6763464",
"0.6721034",
"0.6602405",
"0.65703905",
"0.65552646",
"0.65077746",
"0.64992315",
"0.64948124",
"0.64894426",
"0.64843607",
"0.64575666",
"0.64274603",
"0.64274603",
"0.637787",
"0.6321883",
"0.63008523",
"0.62755066",
"0.62584275",
"0.6256506",
"0.6252478",
"0.6226008"
] | 0.76236886 | 0 |
Return canonical form for control state. | def canonical_ctrl_state(ctrl_state, num_qubits):
if not num_qubits:
return ''
if isinstance(ctrl_state, CtrlAll):
if ctrl_state == CtrlAll.One:
return '1' * num_qubits
return '0' * num_qubits
if isinstance(ctrl_state, int):
# If the user inputs an integer, convert it to binary bit string
converted_str = f'{ctrl_state:b}'.zfill(num_qubits)[::-1]
if len(converted_str) != num_qubits:
raise ValueError(
f'Control state specified as {ctrl_state} ({converted_str}) is higher than maximum for {num_qubits} '
f'qubits: {2 ** num_qubits - 1}'
)
return converted_str
if isinstance(ctrl_state, str):
# If the user inputs bit string, directly use it
if len(ctrl_state) != num_qubits:
raise ValueError(
f'Control state {ctrl_state} has different length than the number of control qubits {num_qubits}'
)
if not set(ctrl_state).issubset({'0', '1'}):
raise ValueError(f'Control state {ctrl_state} has string other than 1 and 0')
return ctrl_state
raise TypeError('Input must be a string, an integer or an enum value of class State') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_state(self):\n return self.cleaned_data['state'].upper()",
"def state(self) -> str:",
"def native_value(self) -> str:\n if isinstance(self._state, Enum):\n return self._state.name.lower()\n return self._state.lower()",
"def reflect_state(self, s):\n s[2:8] = reflect_control_vector(s[2:8])\n s[11:17] = reflect_control_vector(s[11:17])\n return s",
"def state(self):\n return str(self)",
"def state(self):\r\n return str(self)",
"def state_raw(self):\n return self._state_raw",
"def state(self):\n\n\t\treturn str(self)",
"def input(self):\n return ''.join([state[1] for state in self.condensed_input_states])",
"def canonical_code(self) -> Optional[pulumi.Input['StatusConditionCanonicalCode']]:\n return pulumi.get(self, \"canonical_code\")",
"def logic_program_form(self):\r\n s = ''\r\n return s",
"def normalize_state(self):\n self.state = 2 * (self.state - 0.5)",
"def get_initial_state(self) -> str:\n return ''",
"def getState( self, cCtrlName ):\n return self.getControlModelProperty( cCtrlName, \"State\" )",
"def get_state_s(self, lower = True):\r\n\r\n state_s = STATE_STRINGS[self._state - 1]\r\n state_s = state_s.lower() if lower else state_s\r\n return state_s",
"def __str__(self):\n return ''.join(str(e) + ' ' for e in self.state)",
"def get_human_state(self):\n return ReferralState(self.state).label",
"def state(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"state\")",
"def canonicalize(self):\n return _libsbml.ASTNode_canonicalize(self)",
"def state(self) -> str:\n return self._state",
"def state(self) -> str:\n return self._state",
"def state(self) -> str:\n return self._state",
"def state(self) -> str:\n return self._state",
"def state(self) -> str:\n return self._state",
"def get_state(self, state):\n return state",
"def state(self) -> str:\n try:\n state_bytes: bytes | None = self._redis.get(self._namespace(\"state\"))\n except RedisError:\n self.logger.error(\n \"RedisError: falling back to default circuit state\", exc_info=True\n )\n return self._fallback_circuit_state\n\n state = self._fallback_circuit_state\n if state_bytes is not None:\n state = state_bytes.decode(\"utf-8\")\n else:\n # state retrieved from redis was missing, so we re-initialize\n # the circuit breaker state on redis\n self._initialize_redis_state(self._fallback_circuit_state)\n\n return state",
"def __getstate__(self):\n state = composites.Composite.__getstate__(self)\n state[\"o\"] = None\n return state",
"def canonical(gra):\n can_key_dct = canonical_keys(gra, backbone_only=False)\n return relabel(gra, can_key_dct)",
"def state(self) -> str | None:\n return self._state",
"def getInitialState(self):\r\n return self.originalState"
] | [
"0.6335245",
"0.5661792",
"0.56419164",
"0.56038505",
"0.55594033",
"0.55442035",
"0.5506558",
"0.54213786",
"0.54014575",
"0.5394251",
"0.5330471",
"0.5314586",
"0.52940786",
"0.5290036",
"0.52667534",
"0.5218633",
"0.5182664",
"0.5157008",
"0.5146271",
"0.511659",
"0.511659",
"0.511659",
"0.511659",
"0.511659",
"0.5090097",
"0.5071018",
"0.50652236",
"0.50629073",
"0.5046554",
"0.5044131"
] | 0.61829215 | 1 |
Return True if command cmd has a compute/uncompute tag. | def _has_compute_uncompute_tag(cmd):
for tag in cmd.tags:
if tag in [UncomputeTag(), ComputeTag()]:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _isCmdStandalone(tgen):\n features = getattr(tgen, 'features', [])\n otherFeatures = set(features) - set(('runcmd', ))\n return not otherFeatures and getattr(tgen, 'rule', None) is None",
"def isOp(self):\n return True",
"def is_cmd(self, name):\n \n return name in self.cmds",
"def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes",
"def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False",
"def is_instruction(self):\n return False",
"def is_compute(self, nb_iterations):\n return nb_iterations % self.nb_iterations_between_compute == 0",
"def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True",
"def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False",
"def is_empty():\n try:\n command(\"T\")\n except EppException:\n return False\n else:\n return True",
"def is_in_cmd(self):\r\n return self.select_cmd is not None",
"def has_negative_control(cmd):\n return get_control_count(cmd) > 0 and '0' in cmd.control_state",
"def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)",
"def has_openstack_compute(labels):\n if not labels:\n return False\n\n for label in labels:\n if label.label_key == helm_common.LABEL_COMPUTE_LABEL and label.label_value:\n return helm_common.LABEL_VALUE_ENABLED == label.label_value.lower()\n\n # We haven't found the openstack compute node key. Return False\n return False",
"def fingertip_no_recompute(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_recompute\", False)",
"def is_valid_command(args):\n if args.command is not None:\n return True\n return False",
"def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False",
"def isComputed(self) -> bool:\n ...",
"def check_command(self):\n return self.process is not None and self.process.poll() is None",
"def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True",
"def has_commands(self) -> bool:\n return len(self.commands) > 0",
"def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False",
"def is_instruction(self):\n return True",
"def is_empty(self) -> bool:\n return self.command is None and not self.required",
"def has_flag(flag, cmd):\n return bool(next((x for x in cmd if x.startswith(flag)), False))",
"def is_command(oin, env, pred_name: YPredName, arg: Any=None):\n return (env.check_predicate(obj, pred_name, arg) for obj in oin)",
"def is_computing(self):\n raise NotImplementedError",
"def is_command(self, text):\n return text.split(' ', 1)[0].startswith(\"!\")",
"def _is_pop_command(self):\n return self._match_memory_pattern(\"pop\")",
"def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False"
] | [
"0.581931",
"0.5786073",
"0.57732224",
"0.5759559",
"0.5728099",
"0.5650557",
"0.5638925",
"0.5607169",
"0.56052583",
"0.55509794",
"0.5506383",
"0.55021065",
"0.5499619",
"0.54725796",
"0.54692024",
"0.546461",
"0.5463064",
"0.5448807",
"0.5434303",
"0.5420862",
"0.5408913",
"0.539978",
"0.5390406",
"0.53820294",
"0.53612214",
"0.5352319",
"0.5343789",
"0.53217447",
"0.5299678",
"0.52534163"
] | 0.9016012 | 0 |
Receive a list of commands. | def receive(self, command_list):
for cmd in command_list:
self._handle_command(cmd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def receive(self, command_list):\n for cmd in command_list:\n self._send_cmd_with_mapped_ids(cmd)",
"def receive(self, command_list):\n for cmd in command_list:\n if not cmd.gate == FlushGate():\n self._add_cmd(cmd)\n\n # (try to) send on\n if not self.is_last_engine:\n if self._is_rotation(cmd):\n orig_cmd = cmd\n sequence = self._rotations.pop(0)\n for elem in sequence:\n self.send([elem])\n else:\n self.send([cmd])",
"def communicate(self, commands):\n\n if not isinstance(commands, list):\n commands = [commands]\n\n self.socket.send_multipart([json.dumps(commands).encode('utf-8')])\n\n return self.socket.recv_multipart()",
"async def _list_commands(self):\n message_cmds = \"regular commands:\\n\"\n tts_cmds = \"tts commands:\\n\"\n cur = self.conn.cursor()\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is true;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n tts_cmds += invoke[0] + ', '\n tts_cmds = tts_cmds[0:-2]\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is false;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n message_cmds += invoke[0] + ', '\n message_cmds = message_cmds[0:-2]\n cur.close()\n await self.bot.say(message_cmds)\n await self.bot.say(tts_cmds)",
"def process_commands(self, commands: List[str]):",
"def receive(self, command_list):\n for cmd in command_list:\n if isinstance(cmd.gate, FlushGate):\n while self._stored_commands:\n self._run()\n self.send([cmd])\n else:\n self._stored_commands.append(cmd)\n # Storage is full: Create new map and send some gates away:\n if len(self._stored_commands) >= self.storage:\n self._run()",
"def list_commands():\n print(' ')\n print('Chat Client Commands')\n print('-----------------------')\n print(\"Whisper: Send a online user a private message: /w username (message)\")\n print('Current Users: Get a list of all current online users: /users')\n print('File Transfer (Upload): Transfer a file to the server: /file (file path)')\n print('File Transfer (Download): Prints out the contents of a file: /file_download (file name)')\n print('File List: Lists all files currently stored on a server: /file_list')\n print('Save Username: Save your current username to the server to auto login at this ip address: /save')\n print('Exit: Close the client: quit or exit')\n print('Commands: Lists all commands for the Client: /help')\n print('Feed: Redisplay all messages: /feed')\n print('-----------------------')\n print(' ')",
"async def list_command(ctx, cmd:str=False):\n if cmd == False: # for quick list of commands\n ls = await get_quick_list(ctx)\n await ctx.send(embed=ls)\n if cmd == \"all\" or cmd == \"commands\":\n ls = await get_list(ctx.message.author, 1)\n sent_list = await ctx.send(embed=ls)\n await sent_list.add_reaction(EMOJI_FAST_REVERSE)\n await sent_list.add_reaction(EMOJI_LEFT_ARROW)\n await sent_list.add_reaction(EMOJI_RIGHT_ARROW)\n await sent_list.add_reaction(EMOJI_FAST_FORWARD)\n elif cmd == \"states\":\n states_list = await get_state_list()\n list = assemble_embed(\n title=\"List of all states\",\n desc=\"\\n\".join([f\"`{state}`\" for state in states_list])\n )\n await ctx.send(embed=list)\n elif cmd == \"events\":\n events_list = [r['eventName'] for r in EVENT_INFO]\n list = assemble_embed(\n title=\"List of all events\",\n desc=\"\\n\".join([f\"`{name}`\" for name in events_list])\n )\n await ctx.send(embed=list)",
"def list_command(ctx: Any) -> None:\n pass",
"def run(self, commands: list[str]):\n ...",
"def list_commands(self, ctx):\n return self.daemon.list_actions()",
"def run_commands(self, commands, check_rc=True):\n output = None\n queue = list()\n responses = list()\n\n def run_queue(queue, output):\n try:\n response = to_list(self.send_request(queue, output=output))\n except Exception as exc:\n if check_rc:\n raise\n return to_text(exc)\n\n if output == 'json':\n response = [json.loads(item) for item in response]\n return response\n\n for item in to_list(commands):\n cmd_output = 'text'\n if isinstance(item, dict):\n command = item['command']\n if 'output' in item:\n cmd_output = item['output']\n else:\n command = item\n\n # Emulate '| json' from CLI\n if command.endswith('| json'):\n command = command.rsplit('|', 1)[0]\n cmd_output = 'json'\n\n if output and output != cmd_output:\n responses.extend(run_queue(queue, output))\n queue = list()\n\n output = cmd_output\n queue.append(command)\n\n if queue:\n responses.extend(run_queue(queue, output))\n\n return responses",
"def recv(self, *messages):\n for message in messages:\n self.input.put(message)",
"async def list(self, *args, **kwargs):\n return f\"Command list: {', '.join(self.get_commands())}\"",
"def cmd_list(self):\n rc = self.socket_command('list', False)\n return rc",
"def issue_cmds(self, cmds):\n output = []\n for cmd in cmds:\n output.append(self.send_and_parse(cmd))\n return output",
"def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []",
"def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")",
"def commands(self) -> List[Command]:\n return []",
"def run_commands(ip_address, user, password, commandList, platform, buffer=5000):\n print \"Configuring \" + ip_address\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n remote_conn_pre.connect(ip_address, username=user, password=password)\n remote_conn = remote_conn_pre.invoke_shell()\n if platform == \"cisco\":\n remote_conn.send(\"enable\\n\")\n time.sleep(1)\n remote_conn.send(password+'\\n')\n time.sleep(1)\n commands = commandList.split('\\n')\n for com in commands:\n remote_conn.send(com+'\\n')\n time.sleep(1)\n output = remote_conn.recv(buffer)\n #print output",
"async def send_commands(ans: Message):\n await ans.answer(all_commands)",
"def send_commands(self, commands=None):\n commands = commands or []\n command_list = {}\n for command in commands:\n command_list[command.id] = {\n 'speed': command.speed, 'direction': command.direction\n }\n data = {'commands': command_list}\n state = self._post(data)\n status = state['status'].lower()\n print(\"status: {}\".format(status))\n if status == 'error':\n print(\"message: {}\".format(state['message']))\n elif status == 'finished':\n print(\"finished! Score: {} Watch result at: {}\".format(state['score'], state['visualization']))\n if 'requests' not in state:\n state['requests'] = []\n for elevator_data in state.get('elevators', []):\n if 'buttons_pressed' not in elevator_data:\n elevator_data['buttons_pressed'] = []\n\n return state",
"async def listcommands(self, ctx):\n\t\twith open('custom_commands.json', 'r') as f:\n\t\t\tcommands = json.load(f)\n\t\t\toutput = \", \".join([*commands])\n\t\t\tawait ctx.send(f\"```List of custom commands:\\n{output}```\")",
"def get_all_commands():\n\n session_attributes = {}\n card_title = \"All Commands\"\n speech_output = \"You can ask for a synonym, antonym, rhyme, definition, part of speech, syllables, or frequency of a word by saying something like 'synonym for happy'. You can also ask for a random synonym, antonym, definition, or rhyme by saying something like 'random synonym for happy'. If you want all of them, say something like 'all synonyms for happy.'\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, or frequency of a word! Or say 'all commands' to get hear all commands.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))",
"def _parse_cmds(self):\n lst = self.inbuffer.split('\\n')\n # leave trailing text (not terminated by \\n) in inbuffer\n self.inbuffer = lst.pop(-1)\n if lst:\n for cmd in lst:\n self.cmds.append(cmd)",
"async def _c_list(self, ctx):\n command_list = self.database.get_guild_commands(ctx.guild.id)\n if len(command_list) == 0:\n await ctx.send(\"This server has no custom commands\")\n return\n out = \"```\\nServer Commands:\\n\"\n for command in command_list:\n out += f\"{command.name}: {command.text}\\n\"\n out += \"```\"\n await ctx.send(out)",
"def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break",
"def sendCMDlist(self): \n\n if self.cmdlist:\n for cmd in self.cmdlist:\n try:\n tmp = self.FixLineEndingsForWindows(cmd)\n charssent= self.leicasocket.send(tmp)\n # we actually need to make sure\n # we sent the whole string by comparing charssent.\n if charssent != len(tmp):\n print \"Error sending commands\"\n raise CAMSendCharsError\n except:\n print \"error sending command\", cmd\n return False\n time.sleep(self.delay) # wait some time between sending each line\n self.emptyCMDlist()\n time.sleep(self.delay)",
"def cmd_list(args):",
"def receive(self, lines):\n if isinstance(lines, str):\n lines = [lines]\n for l in lines:\n self.client.line_received(l)"
] | [
"0.7666051",
"0.7365854",
"0.6984754",
"0.69481695",
"0.6923602",
"0.6804654",
"0.6704374",
"0.6583639",
"0.6574708",
"0.6483976",
"0.6443524",
"0.6436",
"0.64315933",
"0.64236295",
"0.6422955",
"0.63806427",
"0.63790524",
"0.6377476",
"0.63279116",
"0.6277306",
"0.62516063",
"0.625115",
"0.62159145",
"0.6208946",
"0.6208143",
"0.6181727",
"0.6179454",
"0.61721987",
"0.61287814",
"0.61091584"
] | 0.8273917 | 0 |
Return the number of control qubits of the command object cmd. | def get_control_count(cmd):
return len(cmd.control_qubits) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_commands(self):\n return len(self.commands)",
"def count(self):\n return len(self._commands)",
"def __len__(self):\n return len(self.commands)",
"def length(self):\n return len(self._commands)",
"def get_count_of_controls(self, recurse: bool) -> int:\n return len(list(self.get_all_controls(recurse)))",
"def ctrlqueue_num_actions(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(2), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)",
"def num_controls(self):\n return len(self._controls)",
"async def count(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n await self.bot.send_cmd_help(ctx)",
"def num_qubits(self) -> int:\n return self._circuit.num_qubits",
"def count_qubits(operator):\n # Handle FermionOperator.\n if isinstance(operator, FermionOperator):\n num_qubits = 0\n for term in operator.terms:\n for ladder_operator in term:\n if ladder_operator[0] + 1 > num_qubits:\n num_qubits = ladder_operator[0] + 1\n return num_qubits\n\n # Handle QubitOperator.\n elif isinstance(operator, QubitOperator):\n num_qubits = 0\n for term in operator.terms:\n if term:\n if term[-1][0] + 1 > num_qubits:\n num_qubits = term[-1][0] + 1\n return num_qubits\n\n # Handle MajoranaOperator.\n if isinstance(operator, MajoranaOperator):\n num_qubits = 0\n for term in operator.terms:\n for majorana_index in term:\n if numpy.ceil((majorana_index + 1) / 2) > num_qubits:\n num_qubits = int(numpy.ceil((majorana_index + 1) / 2))\n return num_qubits\n\n # Handle DiagonalCoulombHamiltonian\n elif isinstance(operator, DiagonalCoulombHamiltonian):\n return operator.one_body.shape[0]\n\n # Handle PolynomialTensor\n elif isinstance(operator, PolynomialTensor):\n return operator.n_qubits\n\n # Raise for other classes.\n else:\n raise TypeError('Operator of invalid type.')",
"def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return num_qubits <= 2",
"def subsystem_count(self):\n return len(self)",
"def __len__(self):\n return len(self._opts) + len(self._groups)",
"def command_ssize(self):\n self.print_out(\"Stack size: %s\" % (str(len(self.vm.stack))))",
"def stats(self):\n nqbits = self.operator.num_qubits",
"def cmd_num(self):\r\n return self._arm.cmd_num",
"def get_num_of_choices(self) -> int:\n return len(self._choices)",
"def num_qubits(self) -> int:\n return super().num_qubits",
"def num_qubits(self) -> int:\n raise NotImplementedError()",
"def count_qubits(operator):\n # Handle FermionOperator.\n valueable_type = (FermionOperator, QubitOperator, QubitExcitationOperator,\n ofops.FermionOperator, ofops.QubitOperator,\n pjops.QubitOperator)\n if isinstance(operator, valueable_type):\n num_qubits = 0\n for term in operator.terms:\n # a tuple compose of single (qubit_index,operator) subterms\n if term == ():\n qubit_index = (0,)\n else:\n qubit_index, _ = zip(*term)\n num_qubits = max(max(qubit_index) + 1,\n num_qubits) # index start with 0\n return num_qubits\n\n if isinstance(operator, PolynomialTensor):\n return operator.n_qubits\n\n raise TypeError(\"Unsupported type of operator {}\".format(operator))",
"def count(self):\n self._read_keypad()\n return len(self._current_events)",
"def cmd_size(args):",
"def qsize(self) -> int:\n pass",
"def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)",
"def qsize(self): \n return self.__db.llen(self.key)",
"def ctrlqueue_queue_size(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(10), ctypes.c_int32(0))",
"def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version",
"def __len__(self):\n\t\treturn len(self._idle) + len(self._running)",
"def get_num_slots(self):\n # Your code here\n return len(self.data)",
"def __len__(self):\n\t\treturn self.qsize()"
] | [
"0.71016645",
"0.7081369",
"0.659699",
"0.63224685",
"0.6274492",
"0.6267259",
"0.6065778",
"0.5953152",
"0.59503293",
"0.59498817",
"0.5925458",
"0.58802515",
"0.5851433",
"0.58106935",
"0.5802685",
"0.5787972",
"0.57744926",
"0.57360053",
"0.5724881",
"0.5709863",
"0.56957686",
"0.56817234",
"0.5676002",
"0.5670858",
"0.5658443",
"0.56537336",
"0.5653579",
"0.56528264",
"0.5639464",
"0.56366473"
] | 0.91298246 | 0 |