| query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4-10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Returns the signed difference between two angles (rad). The difference is calculated as target_angle - source_angle; it will thus be positive if target_angle > source_angle.
|
def delta_angle(source_angle, target_angle, hi=2 * np.pi):
diff = target_angle - source_angle
def mod(a, n): return (a % n + n) % n
return mod(diff + hi / 2, hi) - hi / 2
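A minimal usage sketch for the function above (an addition, not part of the dataset row): it assumes numpy is imported as np, which the snippet relies on for the default hi=2*np.pi but does not show.

import numpy as np  # required by the default argument of delta_angle above

print(delta_angle(0.1, 2 * np.pi - 0.1))  # ~ -0.2: signed difference wrapped into [-pi, pi)
print(delta_angle(2 * np.pi - 0.1, 0.1))  # ~  0.2
print(delta_angle(0.0, np.pi / 2))        # ~  1.5708 (positive, since target > source)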
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def angle_diff(self, a, b):\n a = self.angle_normalize(a)\n b = self.angle_normalize(b)\n d1 = a-b\n d2 = 2*math.pi - math.fabs(d1)\n if d1 > 0:\n d2 *= -1.0\n if math.fabs(d1) < math.fabs(d2):\n return d1\n else:\n return d2",
"def direction_diff(direction_a, direction_b):\n diff = abs(direction_a - direction_b)\n return diff if diff < math.pi else 2*math.pi - diff",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self, a, b):\n\n\t\td1 = a-b\n\t\td2 = 2*math.pi - math.fabs(d1)\n\t\tif d1 > 0:\n\t\t\td2 *= -1.0\n\t\tif math.fabs(d1) < math.fabs(d2):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d2",
"def angle_diff(self, a, b):\n\n\t\td1 = a-b\n\t\td2 = 2*math.pi - math.fabs(d1)\n\t\tif d1 > 0:\n\t\t\td2 *= -1.0\n\t\tif math.fabs(d1) < math.fabs(d2):\n\t\t\treturn d1\n\t\telse:\n\t\t\treturn d2",
"def angle_diff(a1, a2):\n a = a1 - a2\n if abs(a) > 180:\n return np.sign(a)*360 - a\n else:\n return a",
"def angle_difference(self, x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angle_difference(x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angle_to(self, other):\n return other.angle - self.angle",
"def __calc_target_angle(self, delta_angle, direction):\n if self.is_reverse:\n direction = not direction\n\n if direction:\n if self.current_angle - delta_angle < 0 or self.current_angle - delta_angle > pi:\n return self.current_angle\n return self.current_angle - delta_angle # this mines (-) for cw.\n else:\n if self.current_angle + delta_angle < 0 or self.current_angle + delta_angle > pi:\n return self.current_angle\n return self.current_angle + delta_angle",
"def get_signed_radians(grad1,grad2):\n g1 = flatten_layers(grad1)\n g2 = flatten_layers(grad2)\n if np.array_equal(g1, g2):\n return 0\n angle = -1 if g1[0]*g2[1] - g1[1]*g2[0] < 0 else 1\n unit_vector_1 = g1 / np.linalg.norm(g1) if np.linalg.norm(g1) != 0 else 0\n unit_vector_2 = g2 / np.linalg.norm(g2) if np.linalg.norm(g2) != 0 else 0\n dot_product = np.dot(unit_vector_1, unit_vector_2)\n radians = np.arccos(dot_product)\n if isinstance(radians,list):\n print(\"BUGGY angle: \",radians)\n return radians * angle",
"def angle_difference(ang1,ang2,units):\n ang1r = angle_to_radians(ang1,units)\n ang2r = angle_to_radians(ang2,units)\n y = np.sin(ang2r-ang1r)\n x = np.cos(ang2r-ang1r)\n angdiffr = np.arctan2(y,x)\n return radians_to_angle(angdiffr,units)",
"def angle_difference(a1, a2, deg=True, abs_val=False):\n\n if deg is False:\n a1 = rad2deg(a1)\n a2 = rad2deg(a2)\n\n d = (a2-a1+180.0)%360.0-180.0\n\n if abs_val:\n d = numpy.abs(d)\n\n if deg is False:\n return deg2rad(d)\n else:\n return d",
"def get_angle_degrees_between(self, other):\n return math.degrees(self.get_angle_between(other))",
"def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))",
"def angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))",
"def get_shortest_angle(target_angle, current_angle):\n a1 = target_angle\n a2 = current_angle\n return math.atan2(math.sin(a1-a2), math.cos(a1-a2))",
"def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta",
"def deltaAngle(x, y):\n return math.atan2(math.sin(x-y), math.cos(x-y))",
"def get_angle_rad_between_joints(joint_a: Joint2D, joint_b: Joint2D) -> float:\n return math.atan2(joint_a.y - joint_b.y, joint_a.x - joint_b.x)",
"def getSteeringDirection(self, targetAngle):\n\n\t\tepsilon = 2.5\n\t\tif abs(self.angle - targetAngle) < epsilon:\n\t\t\treturn\n\n\t\tif self.angle < targetAngle:\n\t\t\tif abs(self.angle - targetAngle) < 180:\n\t\t\t\treturn Direction.RIGHT\n\t\t\treturn Direction.LEFT\n\t\telif self.angle > targetAngle:\n\t\t\tif abs(self.angle - targetAngle) < 180:\n\t\t\t\treturn Direction.LEFT\n\t\t\treturn Direction.RIGHT",
"def get_vector(a, b):\n dx = float(b[0] - a[0])\n dy = float(b[1] - a[1])\n\n distance = math.sqrt(dx ** 2 + dy ** 2)\n\n if dy > 0:\n angle = math.degrees(math.atan(-dx / dy))\n elif dy == 0:\n if dx < 0:\n angle = 90.0\n elif dx > 0:\n angle = -90.0\n else:\n angle = 0.0\n else:\n if dx < 0:\n angle = 180 - math.degrees(math.atan(dx / dy))\n elif dx > 0:\n angle = -180 - math.degrees(math.atan(dx / dy))\n else:\n angle = 180.0\n\n return distance, angle",
"def angle_between_two(self, other):\n # angle = math.atan2(other.position.y - self.position.y,\n # other.position.x - self.position.x)\n minus = other.position - self.position\n angle = math.atan2(minus.y, minus.x)\n return angle",
"def ang_diff(self, theta1, theta2):\n\n return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def angle_between(x1: float, y1: float, x2: float, y2: float) -> float:\n dx = x2 - x1\n dy = y2 - y1\n\n # We return negative because pyglet and math treat rotation differently\n return -math.atan2(dy, dx)",
"def compute_angle_in_rad(location1, location2):\n return np.arctan2(location1[0] - location2[0], location1[1] - location2[1])",
"def angle_diff(ang):\n while ang > math.pi:\n ang -= 2*math.pi\n while ang < -math.pi:\n ang += 2*math.pi\n\n return ang",
"def calculate_bearing_difference(current_bearing, previous_bearing):\n\n difference = current_bearing - previous_bearing\n\n while difference < -180:\n difference += 360\n while difference > 180:\n difference -= 360\n\n return difference"
] |
[
"0.6784113",
"0.66581917",
"0.6620481",
"0.6620481",
"0.66021883",
"0.66021883",
"0.6544369",
"0.6539233",
"0.6524171",
"0.65140384",
"0.6370633",
"0.62325966",
"0.6179326",
"0.6162608",
"0.6084999",
"0.6075657",
"0.60453475",
"0.5996615",
"0.5986001",
"0.5958576",
"0.59364897",
"0.58070666",
"0.5774432",
"0.57490426",
"0.5719864",
"0.5715011",
"0.5713443",
"0.5699841",
"0.56475455",
"0.56449014"
] |
0.6755501
|
1
|
This method takes a dictionary of item ids to their respective properties, extracts the key data fields for each item, and returns a dictionary of item ids to their respective extracted data.
|
def extract_key_item_data(item_data):
extracted_item_data = {}
for item_id in item_data:
key_data = {}
key_data["id"] = item_id
key_data["name"] = item_data[item_id]["name"]
key_data["image"] = item_data[item_id]["image"]["full"]
key_data["gold"] = item_data[item_id]["gold"]["total"]
key_data["tags"] = item_data[item_id]["tags"]
extracted_item_data[item_id] = key_data
return extracted_item_data
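A small illustrative call (the sample data is hypothetical, not taken from the dataset) showing the nested fields the function reads: name, image.full, gold.total and tags.

sample_items = {
    "1001": {
        "name": "Long Sword",
        "image": {"full": "1001.png"},
        "gold": {"total": 350},
        "tags": ["Damage"],
    }
}
print(extract_key_item_data(sample_items))
# {'1001': {'id': '1001', 'name': 'Long Sword', 'image': '1001.png', 'gold': 350, 'tags': ['Damage']}}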
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def item_to_dict(dict_item):\n info = {}\n item_info = None\n\n for k, v in dict_item.items():\n if k == 'ItemType':\n info[k] = api.item_dict_inv[dict_item['ItemType']]\n elif k == 'Item':\n item_info = colectica.parse_xml(v, api.item_dict_inv[dict_item['ItemType']])\n else:\n info[k] = v\n d = {**info, **item_info}\n return d",
"def process_metadata_items(self):\n for item_id, item in self.metadata.items():\n assert item_id not in self.processed_metadata, 'Item {} presents twice'.format(item_id)\n self.processed_metadata[item_id] = {}\n for field, field_vals in item['metadata'].items():\n curr_field = ''\n # availability field is always empty\n if field == 'availability' or field == 'url':\n continue\n values = field_vals\n if field == 'availableSizes' and not isinstance(values, list,):\n values = self.repair_size_list(values)\n\n #field_tokens = tokenizer.tokenize(field)\n field_tokens = re.split('_|\\s', field)\n for tok in field_tokens:\n cleaned_tok = self._ATTR2STR[tok.lower()] if tok.lower() in self._ATTR2STR else tok.lower()\n curr_field += cleaned_tok + ' '\n curr_field = curr_field[:-1]\n \n curr_val = ''\n proc_values = []\n if isinstance(values, list,):\n for val in values:\n curr_val = ''\n #value_tokens = tokenizer.tokenize(val)\n value_tokens = re.split('_|\\s', val)\n proc_values.append(' '.join(value_tokens))\n else:\n value_tokens = re.split('_|\\s', values)\n proc_values.append(' '.join(value_tokens))\n\n #metadata JSON files contains different samples having hemLenght field twice.\n # In this case just discard the one with no values.\n if curr_field == 'hem length' and curr_field in self.processed_metadata[item_id]:\n if not len(self.processed_metadata[item_id][curr_field]):\n self.processed_metadata[item_id][curr_field] = proc_values\n continue\n assert curr_field not in self.processed_metadata[item_id], 'Field {} presents twice in item {}. Please remove one of them (preferably the empty one)'.format(curr_field, item_id)\n self.processed_metadata[item_id][curr_field] = proc_values",
"def _extract_subdict(self, rec, keys):\n d = {}\n d['msg_id'] = rec['msg_id']\n for key in keys:\n d[key] = rec[key]\n return deepcopy(d)",
"def _get_item_info(self, response):\n item_info = {\"keys\":[], \"values\":[]}\n for selector_action in self.item_selector.selectors_actions:\n if isinstance(selector_action, KeyValueSelector):\n # keys can be either strings or selectors. For the latter, obtain the key from the page\n key_selector = selector_action.key_selector\n if isinstance(key_selector, FieldSelector): #key_selector is a FieldSelector, use it to get the key from the response\n sel = Selector(response)\n if key_selector.type == FieldSelector.XPATH:\n key = sel.xpath(key_selector).extract()\n elif key_selector.type == FieldSelector.CSS:\n key = sel.css(key_selector).extract()\n if key: key = key[0]\n else: key = \"Invalid_Key_Selector\" #this may pack in all values with invalid keys with this key.\n else: \n key = key_selector\n value_selector = selector_action.value_selector\n item_info[\"keys\"].append(key)\n item_info[\"values\"].append(value_selector)\n return item_info",
"def _build_eitem_dict(self, eitem_json, document_pid):\n self._apply_url_login(eitem_json)\n self._set_record_import_source(eitem_json)\n dois = [\n doi\n for doi in self.json_data.get(\"identifiers\", [])\n if doi[\"scheme\"] == \"DOI\"\n ]\n eitem_json.update(\n dict(\n document_pid=document_pid,\n open_access=self.open_access,\n identifiers=dois,\n created_by={\n \"type\": \"import\",\n \"value\": self.metadata_provider,\n },\n urls=self.json_data[\"_eitem\"].get(\"urls\", []),\n description=self.json_data[\"_eitem\"].get(\"description\", \"\"),\n )\n )",
"def _format_primary_key_data(self, request):\n \n \n for index, item in enumerate(request.data['items']):\n try:\n request.data['items'][index]['item'] = {'id': item['id']}\n del request.data['items'][index]['id']\n except KeyError as e:\n logger.warn(e)\n \n return request",
"def get_item_info(self, item_id):\n request_name = \"get_shop_info\"\n\n items = self.make_request(request_name, url_id=item_id)\n try:\n item = items[0]\n item_dict = dict()\n item_dict[\"id\"] = item[\"@id\"].encode('utf-8')\n item_dict[\"name\"] = item[\"label\"].encode('utf-8')\n item_dict[\"shelf\"] = item[\"shelf\"].encode('utf-8')\n item_dict[\"slot\"] = item[\"slot\"].encode('utf-8')\n item_dict[\"quantity\"] = item[\"quantity\"]\n return item_dict\n except Exception as e:\n print(\"Encountered exception while getting item\", item_id, \"\\n\", str(e))\n return None",
"def pre_process_string_data(item: dict):\r\n try:\r\n result_item = {key: item[key] for key in KEYS + ['_id']}\r\n for prop in result_item:\r\n if type(result_item[prop]) is str and prop != '_id':\r\n result_item[prop] = re.sub(' +', ' ', item[prop])\r\n result_item[prop] = re.sub('\\n', ' ', item[prop])\r\n result_item[prop] = item[prop].strip().strip('\"').strip(\"'\").lower().strip()\r\n return result_item\r\n except KeyError:\r\n logging.warning(\"Wrong formed entity with id %s\", item['_id'])\r\n return None",
"def extract_data(product):\n if not isinstance(product, dict) and product:\n return\n image = product.get('mediumImageUrls', None)\n price = product.get('itemPrice', None)\n data = {\n 'service': 'rakuten',\n 'currency': None,\n 'price': price and int(price) or price,\n 'image': image[0] if image else 0,\n 'id': product.get('itemCode', None),\n # 'ProductId': product['itemCode', None],\n 'DetailPageURL': product.get('itemUrl', None),\n 'Label': product.get('itemCaption', None),\n 'EditorialReview': [\n {'name': 'Description',\n 'value': product.get('itemCaption', None)}],\n 'ProductGroup': product.get('genreId', None), # get it name to display\n 'Title': product.get('itemName', None),\n 'Manufacturer': product.get('shopName', None),\n 'CustomerReviews': product.get('itemUrl', None), # INFO: no such thing\n 'images': [\n {'SmallImage': small,\n 'LargeImage': small.rsplit('?', 1)[0]}\n for small in product.get('smallImageUrls', [])],\n 'ItemAttributes': [],\n }\n return data",
"def process_item(self, _item: dict):\n _item['coordinates'] = self.process_coordinates(\n _item['coordinates']\n )\n _item['countryName'] = self.process_country_name(\n _item['countryName']\n )\n _item['portName'] = self.process_port_name(\n _item['portName']\n )\n _item['unlocode'] = self.process_unlocode(\n _item['unlocode']\n )\n return _item",
"def map_items_to_dict(save_dict):\n # If item dictionary exists, load it to memory\n if not save_dict:\n item_dict_path = f'{path_dictionary[\"path_item_dictionary\"]}'\n item_dict_file = open(item_dict_path, 'rb')\n return pickle.load(item_dict_file)\n\n # Query all visited items and put into a dataframe\n visited_items = DbHelper.fetch_all_items()\n visited_items_columns = ['item_id']\n visited_items_df = pd.DataFrame(data=visited_items, columns=visited_items_columns)\n item_list = visited_items_df['item_id'].values.tolist()\n\n # Query all the items shown in the catalog page and put into a dataframe\n catalog_items = DbHelper.get_all_catalog_items_less_columns()\n catalog_items_columns = ['user_id', 'item_id', 'session_id', 'catalog_item_list']\n catalog_items_df = pd.DataFrame(data=catalog_items, columns=catalog_items_columns)\n catalog_items_df['catalog_item_list'] = catalog_items_df.apply(clean_objects_listed, axis=1)\n\n catalog_items_list = catalog_items_df['catalog_item_list'].values.tolist()\n\n # iterate all the catalog items and add them to a list.\n for item_information in catalog_items_list: # item information is a list contains ['itemID', 'x_coordinate', 'y_coordinate']\n if item_information != 'No Catalog Item':\n for item in item_information:\n item_list.append(item[0])\n\n # Remove the duplicates\n item_list = list(set(item_list))\n\n item_dict = {}\n counter = 0\n for item in item_list:\n item_dict[counter] = item\n counter += 1\n\n if save_dict:\n file_handler = open(f'{path_dictionary[\"path_item_dictionary\"]}', \"wb\")\n pickle.dump(item_dict, file_handler)\n file_handler.close()\n\n return item_dict",
"def extract_info(\n self,\n main_key:str,\n sub_key:str,\n data_key:str,\n ):\n\n extracted_info = {}\n for i in range(len(self.data)):\n try:\n gene_key = self.data[i]['gene'][0]['name']['value']\n if self.data[i][main_key][0][\"type\"] == sub_key:\n extracted_info[gene_key] = [self.data[i][main_key][0][data_key]]\n print(\"success\")\n except KeyError as e:\n print(f\"Could not find <{main_key}> and <{sub_key}>\\n{e}\")\n \n return extracted_info",
"def makeDict(self, item, nested=False, no_html=False):\n dictionary = dict([(field[\"external_id\"], self.getFieldValue(field, nested, no_html)) for field in item[\"fields\"]])\n return {'item': item[\"item_id\"], 'values':dictionary}",
"def _get_item(dic: dict, keys: list) -> dict:\n\tfor key in keys:\n\t\tdic = dic[key]\n\n\treturn dic",
"def get_item_data(item):\n\n return OnedriveItem(\n id=item.get('id'),\n name=item.get('name'),\n web_url=item.get('webUrl'),\n created_by=item.get('createdBy')\n ).__dict__",
"def getItemData(itemId):\n return Gw2Spidy._request('item', str(itemId))['result']",
"def get_elements_in_data_dimension(analytics_items, analytics_uids):\n for key in ['dataElement', 'indicator', 'programIndicator', 'attribute']:\n analytics_uids[key] = list(dict.fromkeys(analytics_uids[key] + json_extract_nested_ids(analytics_items, key)))\n\n return analytics_uids",
"def _item_to_dict(self, raw_response):\n\n if 'Item' not in raw_response:\n return {}\n\n return {\n field.name: raw_response['Item'][field.name][field.data_type] for field in self._available_fields\n }",
"def create_mapping(dico):\n sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))\n id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}\n #for i in sorted_items:\n #\tprint(i)\n item_to_id = {v: k for k, v in id_to_item.items()}\n return item_to_id, id_to_item",
"def _extract_input_dict(self, samples_data, keys, prefix=''):\n input_dict = OrderedDict()\n\n extracted_data = utils.extract(\n samples_data, *keys\n )\n\n # iterate over the desired data instances and corresponding keys\n for j, (data, key) in enumerate(zip(extracted_data, keys)):\n if isinstance(data, dict):\n # if the data instance is a dict -> iterate over the items of this dict\n for k, d in data.items():\n assert isinstance(d, np.ndarray)\n input_dict['%s_%s/%s' % (prefix, key, k)] = d\n\n elif isinstance(data, np.ndarray):\n input_dict['%s_%s' % (prefix, key)] = data\n else:\n raise NotImplementedError\n return input_dict",
"def read_item(data: DataModel) -> Dict:\n convertor = Convertor(data)\n return {'output': convertor.get_humanized_data()}",
"def item_dict():\n\n items = {'page': 'pages', 'table': 'tables',\n 'viz': 'vizualisation', 'column': 'columns'}\n return items",
"def secondary_keys_dicts(self):",
"def read_data(raw_data: list):\r\n cleaned_data = {}\r\n for data_item in raw_data:\r\n clean_data_item = pre_process_string_data(data_item)\r\n if clean_data_item is not None:\r\n cleaned_data[clean_data_item['_id']] = clean_data_item\r\n return cleaned_data",
"def get_menu_items_based_on_restaurant(restaurant_id):\n result_dictionary = dict()\n result_items_list = []\n restaurant = Restaurant.objects.get(pk=restaurant_id)\n result_dictionary['restaurant'] = {\n 'name': restaurant.name,\n 'location': restaurant.address,\n 'deliveryTime': restaurant.delivery_time\n }\n items = list(restaurant.menus.all().values())\n for item in items:\n item_instance = Item.objects.get(pk=item.get('item_id', None))\n result_items_list.append({\n 'name': item_instance.name,\n 'description': item_instance.description,\n 'price': item_instance.price,\n 'category': item_instance.category,\n 'sub_category': item_instance.sub_category\n })\n result_dictionary['itemsList'] = result_items_list\n return result_dictionary",
"def get_item_dict(self, item):\n item_values = [\n 'item-name', 'current-amount', 'item-price', 'item-cost']\n item_dict = {}\n for value in item_values:\n key = value.split('-')[1]\n item_dict[key] = item.find_element_by_class_name(value)\n item_dict['id'] = item_dict['amount'].get_attribute('data-item_id')\n\n ch_amount = item.find_elements_by_class_name('change-amount')\n for button in ch_amount:\n action = button.get_attribute('data-action')\n item_dict[action] = button\n\n return item_dict",
"def _parse_item(self, item):\n result = {}\n for f in self._invoice_report_item_fields:\n val = get_value_by_relation_path(item, f)\n # when it's function - call it! usefull for Choices\n # (get_<field_name>_display)\n if callable(val):\n val = val()\n elif isinstance(val, datetime.datetime):\n val = val.strftime(self._invoice_report_datetime_format)\n elif isinstance(val, Money):\n val_currency = '{}_currency'.format(self._price_field)\n result[val_currency] = str(val.currency) \\\n if val.currency else self._invoice_report_empty_value\n val = val.amount\n result[f] = str(val) if val else self._invoice_report_empty_value\n\n return result",
"def get_item(self, itemid: str, itemtypeid: str)->dict:\n self.__validate(itemid=itemid, itemtype=itemtypeid)\n url = build_uri_template('get_item').expand(type=itemtypeid, no=itemid)\n logger.info(\"Getting Item from: {}\".format(url))\n\n data = self._get_data(url)\n return data",
"def process_metadata(full_dict):\n reduced_dict = {}\n for key, param_obj in full_dict.items():\n if key[0] == '_':\n continue\n if is_numerical(param_obj):\n reduced_dict[key] = param_obj\n elif key == 'grid':\n grid_dict = param_obj._get_metadata_dict()\n reduced_dict.update(grid_dict)\n else:\n reduced_dict[key] = str(param_obj)\n return reduced_dict",
"def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n # if the key is in the dictionary\n if k == key:\n # the key points to a list eg, key is 'dataElements':\n # \"dataElements\" : [\n # { \"id\": \"UID1\", \"id\": \"UID2\", ... }\n # ]\n if isinstance(v, list):\n for item in v:\n arr.append(item[\"id\"]) if item[\"id\"] not in arr else arr\n # the key points to another dictionary eg, key is 'dataElement':\n # \"dataElement\" : { \"id\": UID }\n elif isinstance(v, dict):\n if 'id' in v:\n arr.append(v[\"id\"])\n # It is a dictionary but not containing the id\n # Fetch the key and keep on looking\n else:\n for _key in list(v.keys()):\n if isinstance(v[_key], dict):\n extract(v, arr, _key)\n # if it is not a list or a dict, we simply take the value eg, key is organisationUnit\n # \"organisationUnit\" : UID\n else:\n arr.append(v)\n # if key is not there but it is still a dict or a list,\n # call the extract function again to keep going down another level\n elif isinstance(v, (dict, list)):\n extract(v, arr, key)\n # if it is a list, loop each element and call the extract function\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr"
] |
[
"0.6092631",
"0.6067828",
"0.60617733",
"0.6028937",
"0.5863528",
"0.5696433",
"0.5608552",
"0.55642694",
"0.55462956",
"0.55306554",
"0.5478335",
"0.5476924",
"0.5460027",
"0.5456423",
"0.54379886",
"0.5426146",
"0.5422652",
"0.5422065",
"0.5379292",
"0.5375101",
"0.5372952",
"0.5319963",
"0.52672446",
"0.5257536",
"0.5234577",
"0.521859",
"0.5206634",
"0.5204263",
"0.52008545",
"0.5190824"
] |
0.7907337
|
0
|
returns the average of the local clustering coefficients over all vertices of the graph
|
def clustering_coefficient(graph):
count = 0
sumOfClusteringCoefficients = 0
for vertex in graph:
count += 1
sumOfClusteringCoefficients += local_clustering_coefficient(graph, vertex)
return sumOfClusteringCoefficients / count
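A sketch of how the function above could be exercised; the graph representation (dict of vertex -> set of neighbours) and the local_clustering_coefficient helper are assumptions, chosen to match the helper that also appears among the negatives below.

def local_clustering_coefficient(graph, vertex):
    # fraction of ordered pairs of distinct neighbours of `vertex` joined by an edge
    edge_count = 0
    for n1 in graph[vertex]:
        for n2 in graph[vertex]:
            if n1 in graph[n2]:
                edge_count += 1
    degree = len(graph[vertex])
    return edge_count / (degree * (degree - 1))

# In a triangle every vertex's neighbours are connected, so the average is 1.0.
triangle = {"a": {"b", "c"}, "b": {"a", "c"}, "c": {"a", "b"}}
print(clustering_coefficient(triangle))  # 1.0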
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GlobalClusteringCoefficient(graph):\n coef = np.mean(list(nx.clustering(graph).values()))\n return coef",
"def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def get_all_local_clustering_coef(g):\n local_cc = {}\n\n for n in nx.nodes(g):\n local_cc[n] = get_local_clustering_coef(g, n)\n\n return local_cc",
"def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk",
"def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)",
"def internal_global_clustering(self, node_list):\n clustering = self.local_clustering()\n internal_clustering = clustering[node_list].mean()\n return internal_clustering",
"def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means",
"def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points",
"def local_clustering_coefficient(graph, vertex):\r\n edge_count = 0\r\n for neighbour1 in graph[vertex]:\r\n for neighbour2 in graph[vertex]: #look at each pair of neighbours of vertex\r\n if neighbour1 in graph[neighbour2]: #if the neighbours are joined to each other by an edge\r\n edge_count += 1 #add one to the edge count\r\n degree = len(graph[vertex]) #count how many neighbours vertex has\r\n return edge_count / (degree * (degree - 1)) #note factor of 2 missing as each edge counted twice\r",
"def effective_cluster_weights(self):\n weights = np.array(\n [\n np.sum(\n self._subspace.function_ordering_multiplicities[\n self._subspace.function_orbit_ids == i\n ]\n * self.eci[self.eci_orbit_ids == i] ** 2\n )\n for i in range(len(self._subspace.orbits) + 1)\n ]\n )\n return weights",
"def get_cluster_average(cls, indices, dist_mat):\n distances = cls.get_all_distances(indices, dist_mat)\n return np.mean(distances)",
"def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())",
"def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver",
"def _compute_mean(self, C, mag, rjb, rake):\n mean = (C['a1'] +\n self._compute_linear_magnitude_term(C, mag) +\n self._compute_quadratic_magnitude_term(C, mag) +\n self._compute_logarithmic_distance_term(C, mag, rjb) +\n self._compute_faulting_style_term(C, rake))\n\n return mean",
"def cluster_means_scaled(self):\n if self.evaluate_by is not None:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().transpose())",
"def compute_centroid(data):\n return sum(data[:]) / len(data)",
"def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)",
"def assign_to_current_mean(img: np.ndarray, clustermask: np.ndarray) -> float:\n\n rows, cols = img.shape[:2]\n distances = np.zeros((numclusters, 1))\n overall_dist = 0\n\n for i in range(rows):\n for j in range(cols):\n distances = distance(img[i, j, :]) # returned shape: (numclusters, 1)\n \n k = np.argmin(distances) # closest cluster\n clustermask.itemset((i, j), k) # update cluster mask\n overall_dist += distances[k, 0] # sum distance\n\n return overall_dist",
"def explore_float_data(d, name, label):\n data = get_float_data(d, name)\n mean = np.mean(data)\n\n print('The mean {} of Cluster{}:'.format(name, label), mean,\n '(valid subject number: {})'.format(len(data)))\n return mean",
"def calculate_cost(data, centers, clusters):\n total = 0\n for i in range(len(centers)):\n total = total + np.sum(data[centers[i]][clusters[i]]) \n return total",
"def findK_centroids_average(self, features, clusters):\n\n class InnerFeatures:\n def __init__(self, kps, des, pos):\n self.kps = kps\n self.des = des\n self.pos = pos\n\n kmeans = KMeans(n_clusters=clusters)\n\n pts = np.array(features.pos)\n kps = np.array(features.kps)\n des = np.array(features.des)\n\n kmeans.fit(pts)\n m_clusters = np.array(kmeans.labels_.tolist())\n centers = np.array(kmeans.cluster_centers_)\n\n # KeyPoint(x,y,size) -required\n\n final_kps = []\n final_des = []\n final_pts = []\n\n for cluster in range(clusters):\n indices = np.where(m_clusters == cluster)\n cluster_kps_size = np.mean(np.array([x.size for x in kps[indices]]))\n cluster_des = des[indices]\n\n average_des = np.mean(cluster_des, axis=0)\n cluster_kps = cv2.KeyPoint(x=centers[cluster][0], y=centers[cluster][1], _size=cluster_kps_size)\n\n final_kps.append(cluster_kps)\n final_des.append(average_des)\n final_pts.append([centers[cluster][0], centers[cluster][1]])\n\n final_pts = np.array(final_pts)\n final_des = np.array(final_des)\n final_kps = np.array(final_kps)\n\n result = InnerFeatures(kps=final_kps, des=final_des, pos=final_pts)\n return result",
"def clust_strength(mat,groups):\n cluster_strengths = []\n for group in range(len(np.unique(groups))):\n this_cluster = mat[groups==group,:]\n this_cluster_mean = np.mean(this_cluster,axis=0)\n all_dists = mat - this_cluster_mean\n out_dists = np.linalg.norm(all_dists[groups!=group],axis=1)\n in_dists = np.linalg.norm(all_dists[groups==group],axis=1)\n this_strength = np.mean(out_dists)/np.mean(in_dists)\n cluster_strengths.append(this_strength)\n \n return np.mean(cluster_strengths)",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def local_density_mean(self):\n\n # the simulation units are msun / kpc ^3\n local = np.mean(self.dens)\n\n return local",
"def computeMeans(X, idx, K):\n\tm, n = X.shape\n\tcentroids = np.zeros((K, n))\n\tcount = np.zeros(K)\n\n\tfor j in range(m):\n\t\tcentroids[int(idx[j])] += X[j]\n\n\tfor i in range(m):\n\t\tcount[int(idx[i])] += 1\n\n\treturn centroids / np.tile(count.reshape((K, 1)), n)",
"def k_mean(vector_array, k):\n kmeans = KMeans(n_clusters=k, random_state=0)\n kmeans.fit(vector_array)\n labels = kmeans.labels_\n return labels",
"def getCentroid(cluster):\n try:\n return np.mean(cluster, axis = 0)\n except:\n return None"
] |
[
"0.74364084",
"0.6687801",
"0.65657526",
"0.6507998",
"0.6330402",
"0.6327586",
"0.62386185",
"0.60642046",
"0.6037065",
"0.59907097",
"0.5903807",
"0.58887535",
"0.584757",
"0.57932895",
"0.5768591",
"0.57648635",
"0.57550263",
"0.57174104",
"0.56907356",
"0.5685301",
"0.56717134",
"0.56625074",
"0.5652536",
"0.56499416",
"0.56499416",
"0.5644719",
"0.56253195",
"0.56189066",
"0.55894625",
"0.5569635"
] |
0.69418436
|
1
|
finds the distance (the length of the shortest path) from the source to every other vertex in the same component using breadth-first search, and returns the value of the largest distance found
|
def max_dist(graph, source):
q = queue.Queue()
found = {}
distance = {}
for vertex in graph:
found[vertex] = 0
distance[vertex] = -1
max_distance = 0
found[source] = 1
distance[source] = 0
q.put(source)
while q.empty() == False:
current = q.get()
for neighbour in graph[current]:
if found[neighbour] == 0:
found[neighbour] = 1
distance[neighbour] = distance[current] + 1
max_distance = distance[neighbour]
q.put(neighbour)
return max_distance
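A short usage sketch; the import and the example graph are assumptions (the snippet above uses queue.Queue without showing the import, and expects a dict of vertex -> iterable of neighbours).

import queue  # needed by max_dist above

# A path graph a-b-c-d: the farthest vertex from "a" is "d", three edges away.
path_graph = {"a": ["b"], "b": ["a", "c"], "c": ["b", "d"], "d": ["c"]}
print(max_dist(path_graph, "a"))  # 3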
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def std_bfs(graph, src_vertex):\n # this sssp yields (node, level) in a breadth first search\n res = nx.single_source_shortest_path_length(graph, src_vertex)\n\n return [dist+1 for _, dist in sorted(res.items())]",
"def shortest_combined_wire_path(grid):\n current_minimum = sys.maxsize\n\n for crossing in grid.crossings:\n crossing_wires = grid.get(crossing[0], crossing[1])\n \n crossing_total = 0\n for wire in crossing_wires:\n crossing_total += wire['distance']\n\n if crossing_total < current_minimum:\n current_minimum = crossing_total\n\n print(f\"Total length of wire at {crossing} is {crossing_total}\")\n return current_minimum",
"def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo",
"def find_best_path(self, root):\n number_vertices = len(self.states)\n distances = [-float(\"inf\")] * number_vertices\n distances[root] = 0\n predecessors = [None] * number_vertices\n\n for _ in range(number_vertices - 1):\n for origin in range(number_vertices):\n for (target, value) in self.transitions[origin]:\n if distances[target] < distances[origin] + value:\n distances[target] = distances[origin] + value\n predecessors[target] = origin\n\n # compute the vertices with the highest value, excluding the root\n distances[root] = -float(\"inf\")\n most_valued_vertices = np.nonzero(distances == np.max(distances))[0]\n # choose at *random* among the most valuable vertices\n most_valued_vertex = np.random.choice(most_valued_vertices)\n return most_valued_vertex, predecessors",
"def disktra2(self, source=None, destination=None):\n if not source:\n source = self.vertices()[0]\n source.d = 0\n q = simply_python.data_structures.MinPriorityQueue()\n q.add(source, source.d)\n visited = set()\n seen = set()\n while not q.isempty():\n source = q.pop()\n d = source.d\n if source == destination:\n return d\n visited.add(source)\n for out_vertex in self.out_vertices(source):\n if out_vertex in visited:\n continue\n if out_vertex in seen:\n out_vertex.d = min(out_vertex.d, source.d +\n self[source][out_vertex].distance)\n else:\n out_vertex.d = source.d + self[source][out_vertex].distance\n seen.add(out_vertex)\n q.add(out_vertex, out_vertex.d)\n return d",
"def dijkstra(self, source=None, destination=None):\n for vertex in self.vertices():\n vertex.d = sys.maxint\n if not source:\n source = self.vertices()[0]\n q = simply_python.data_structures.FIFO_dict()\n source.d = 0\n q.append(source)\n while not q.isempty():\n source = q.pop()\n print source\n print source.d\n d = source.d\n for out_vertex in self.out_vertices(source):\n if out_vertex.d == sys.maxint:\n out_vertex.d = d + 1\n q.append(out_vertex)\n if out_vertex == destination:\n return out_vertex.d\n return d",
"def bellman_fords_shortest_path(graph: Graph[T], source_vertex_data: T) -> \\\n Tuple[bool, Dict[Vertex[T], int], Dict[Vertex[T], Vertex[T]]]:\n\n vertex_distance_mapping: Dict[Vertex[T], int] = defaultdict(lambda: maxsize) # vertex_weight_mapping\n vertex_parent_mapping: Dict[Vertex[T], Vertex[T]] = dict()\n source_vertex: Vertex[T] = graph.get_vertex(source_vertex_data)\n\n vertex_distance_mapping[source_vertex] = 0\n vertex_parent_mapping[source_vertex] = None\n\n # Relax all the edges (V-1)th time.\n # Why (V-1) times? - https://www.youtube.com/watch?v=-mOEd_3gTK0&feature=youtu.be&list=PLrmLmBdmIlpu2f2g8ltqaaCZiq6GJvl1j&t=785\n for i in range(0, len(graph.vertices)-1): # run it (V-1) times... for i=0: i<(V-1); i++\n relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping)\n\n # Relax all the edges for one more time(Vth time) to check if there is any -ve weight cycle present.\n has_negative_weight_cycle: bool = relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping,\n check_negative_weight_cycle=True)\n if has_negative_weight_cycle:\n return has_negative_weight_cycle, dict(), dict()\n\n return has_negative_weight_cycle, vertex_distance_mapping, vertex_parent_mapping",
"def diameter(graph):\r\n max_distance = 0\r\n for vertex in graph:\r\n new_dist = max_dist(graph, vertex)\r\n if new_dist > max_distance:\r\n max_distance = new_dist\r\n return max_distance",
"def distances_bfs(self, start):\r\n from queue import deque\r\n\r\n assert start in self.graph\r\n\r\n distance = {vertex: None for vertex in self.vertices()}\r\n distance[start] = 0\r\n\r\n queue = deque()\r\n queue.append(start)\r\n\r\n while queue:\r\n current_vertex = queue.pop()\r\n for neighbour in self.neighbours(current_vertex):\r\n if distance[neighbour] is None:\r\n queue.append(neighbour)\r\n distance[neighbour] = distance[current_vertex] + 1\r\n\r\n return distance",
"def longest_flight(self):\r\n distance = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance > distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance",
"def bfs(self, source, target):\n source.color = TriColor.WHITE\n target.color = TriColor.WHITE\n\n Q = deque()\n Q.append(source)\n\n while len(Q) > 0:\n v = Q.popleft()\n if v.color == TriColor.BLACK:\n # a previously finished vertex\n # used when graph vertices (e.g. `self.neighbors_of()` is calculated dynamically)\n continue\n else:\n v.color = TriColor.BLACK # mark finished\n if v == target:\n # re-assign `target` in case `Vertex.__eq__` has been overridden\n target = v\n break\n\n for w, _ in self.neighbors_of(v, color=TriColor.WHITE):\n w.color = TriColor.GRAY # mark discovered\n w.bfs_parent = v\n Q.append(w)\n\n S = [] # holds the shortest path, or empty if None\n u = target\n if u.color == TriColor.BLACK:\n while u is not None:\n S.append(u)\n u = u.bfs_parent\n\n if len(S) > 0:\n path = S[::-1]\n distance = len(path)\n else:\n path = None\n distance = None\n return path, distance",
"def bfs_shortest_dist(self,\n origin: Tuple[int, int],\n destination: Tuple[int, int] = None) -> int:\n queue = deque([origin])\n visited_vertices = set()\n counter = -1\n\n while len(queue) > 0:\n counter += 1\n next_queue = deque()\n while len(queue) > 0:\n next_v = queue.popleft()\n visited_vertices.add(next_v)\n if destination != None and next_v == destination:\n # early return in the case where a destination was provided\n return counter\n next_queue.extend(set(self.graph[next_v]) - visited_vertices)\n queue = next_queue\n return counter",
"def min_path(vs, es, source, target):\n dijkstra(vs, es, source, stop = target)\n test = target\n result = []\n while test != source:\n e = test._ss_edge\n result.append(e)\n test = e.v1 if e.v1 != test else e.v2\n assert test == source and test._ss_edge is None\n return result[::-1]",
"def lovliest_path(G):\n m = 0\n ma = None\n mb = None\n for node in G.keys():\n for conn in G[node].keys():\n if G[node][conn] > m:\n m = G[node][conn]\n ma = node\n mb = conn\n print \"found lovliest_path of %s to %s with weight %s\" % (ma,mb,m)\n return (ma,mb)",
"def getShortestPath(self, src, dest):\n vertices = self.floorGraph.getVertList()\n unvisitedQueue = []\n srcPath = Path()\n srcPath.addNode(src)\n srcPath.pathValue = 0\n unvisitedQueue.append(srcPath)\n connections = self.floorGraph.getVertex(src).getConnections()\n #initialisez distances\n for vertex in vertices:\n newPath = Path()\n newPath.nodeList = list(srcPath.nodeList)\n newPath.addNode(vertex)\n if self.floorGraph.getVertex(vertex) in connections:\n newPath.pathValue = self.floorGraph.getVertex(src).getWeight(self.floorGraph.getVertex(vertex))\n unvisitedQueue.append(newPath)\n else:\n newPath.pathValue = math.inf\n self.shortestDistanceMap[src+vertex] = newPath\n # updates distances as per shorter routes\n while len(unvisitedQueue) is not 0:\n unvisitedQueue = sorted(unvisitedQueue, key=functools.cmp_to_key(compareNodes))\n chkPath = unvisitedQueue.pop(0)\n chkNode = chkPath.nodeList[len(chkPath.nodeList)-1]\n for vertex in vertices:\n if(self.floorGraph.getVertex(vertex) in self.floorGraph.getVertex(chkNode).getConnections()):\n newWeight = chkPath.pathValue + self.floorGraph.getVertex(chkNode).getWeight(self.floorGraph.getVertex(vertex))\n if(newWeight < self.shortestDistanceMap[src+vertex].pathValue):\n self.shortestDistanceMap[src+vertex].pathValue = newWeight\n self.shortestDistanceMap[src+vertex].nodeList = list(chkPath.nodeList)\n self.shortestDistanceMap[src+vertex].nodeList.append(vertex)\n newPath = Path()\n newPath.nodeList = list(self.shortestDistanceMap[src+vertex].nodeList)\n newPath.pathValue = newWeight\n unvisitedQueue.append(newPath)\n print(self.shortestDistanceMap[src+dest].nodeList)\n print(self.shortestDistanceMap[src+dest].pathValue)",
"def shortest_flight(self):\r\n distance = sys.maxsize\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance < distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance",
"def shortest_path_tree__bfs(self, start):\r\n from queue import deque\r\n\r\n assert start in self.graph\r\n\r\n distance = {vertex: None for vertex in self.vertices()}\r\n distance[start] = 0\r\n\r\n previous = {vertex: None for vertex in self.vertices()}\r\n\r\n queue = deque()\r\n queue.append(start)\r\n\r\n while queue:\r\n current_vertex = queue.pop()\r\n for neighbour in self.neighbours(current_vertex):\r\n if distance[neighbour] is None:\r\n queue.append(neighbour)\r\n distance[neighbour] = distance[current_vertex] + 1\r\n previous[neighbour] = current_vertex\r\n\r\n return previous",
"def getPath(\n self,\n source,\n dest,\n as_nodes=False,\n ):\n\n self.dist = {} # A map from nodes to their labels (float)\n self.predecessor = {} # A map from a node to a node\n\n # Initialize the distance labels to \"infinity\"\n\n vertices = self.g.nodes()\n for vertex in vertices:\n self.dist[vertex] = self.inf\n self.predecessor[vertex] = source\n\n # Further set up the distance from the source to itself and\n # to all one hops away.\n\n self.dist[source] = 0.0\n if self.g.is_directed():\n outEdges = self.g.out_edges([source])\n else:\n outEdges = self.g.edges([source])\n for edge in outEdges:\n self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt]\n\n s = set(vertices)\n s.remove(source)\n currentMin = self._findMinNode(s)\n if currentMin == None:\n return None\n s.remove(currentMin)\n while currentMin != dest and len(s) != 0 and currentMin != None:\n if self.g.is_directed():\n outEdges = self.g.out_edges([currentMin])\n else:\n outEdges = self.g.edges([currentMin])\n for edge in outEdges:\n opposite = edge[1]\n if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] \\\n < self.dist[opposite]:\n self.dist[opposite] = self.dist[currentMin] \\\n + self.g[edge[0]][edge[1]][self.wt]\n self.predecessor[opposite] = currentMin\n s.add(opposite)\n\n currentMin = self._findMinNode(s)\n\n # print \"Current min node {}, s = {}\".format(currentMin, s)\n\n if currentMin == None:\n return None\n s.remove(currentMin)\n\n # Compute the path as a list of edges\n\n currentNode = dest\n predNode = self.predecessor.get(dest)\n node_list = [dest]\n done = False\n path = []\n while not done:\n path.append((predNode, currentNode))\n currentNode = predNode\n predNode = self.predecessor[predNode]\n node_list.append(currentNode)\n done = currentNode == source\n node_list.reverse()\n if as_nodes:\n return node_list\n else:\n return path",
"def compute_most_near(self,node_src,alloc_DES,sim,DES_dst):\n #By Placement policy we know that:\n try:\n minLenPath = float('inf')\n minPath = []\n bestDES = []\n for dev in DES_dst:\n node_dst = alloc_DES[dev]\n path = list(nx.shortest_path(sim.topology.G, source=node_src, target=node_dst))\n if len(path)<minLenPath:\n minLenPath = len(path)\n minPath = path\n bestDES = dev\n\n return minPath,bestDES\n except nx.NetworkXNoPath as e:\n self.logger.warning(\"There is no path between two nodes: %s - %s \" % (node_src, node_dst))\n print(\"Simulation ends?. Time:\", sim.env.now)\n # sim.stop = True ## You can stop all DES process\n return [], None\n\n except nx.NodeNotFound as e:\n self.logger.warning(\"Node not found: %s - %s \"%(node_src,node_dst))\n print(\"Simulation ends?. Time:\",sim.env.now)\n # sim.stop = True ## You can stop all DES process\n return [],None",
"def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)",
"def bfs(self, vertex_s):\r\n nd_list = list(self.vertices())\r\n visited = dict((node, 0) for node in nd_list)\r\n\r\n nq = deque()\r\n pre_dict, dist = {}, {}\r\n nq.append(vertex_s)\r\n visited[vertex_s]=1\r\n dist[vertex_s] = 0\r\n\r\n loop_counts = 0\r\n while nq:\r\n s = nq.popleft()\r\n for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'\r\n loop_counts += 1\r\n \r\n #if not node in visited:\r\n if not visited[node]:\r\n nq.append(node) # let 'node' in queue\r\n pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'\r\n dist[node] = dist[s] + 1 # shortest path to 'root'\r\n visited[node]=1 # 'node' is visted\r\n #if node in visited and dist[node] == dist[s] + 1: # still within the shortest path\r\n if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path\r\n if s not in pre_dict[node]: # if this path have NOT been recorded, let's do that now\r\n pre_dict[node].append(s) \r\n \r\n if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance\r\n pre_dict[node] = [s]\r\n dist[node] = dist[s] + 1\r\n #print(\" #loops: %d\" %loop_counts)\r\n #current_bfs[vertex_s] = pre_dict\r\n \r\n return pre_dict",
"def max_flow(self, source, sink):\n\n path = self.valid_path(source, sink, [])\n\n while path:\n # get the maximum possible flow that can be taken from this path:\n max_flow = min([edge.capacity for edge in path])\n for edge in path:\n self.edges[edge] += max_flow\n path = self.valid_path(source, sink, [])\n\n # Compute all the flows from the neighbors of source:\n return sum([self.edges[edge] for edge in self.adjacents[source]])",
"def shortestPath(self, source, target):\n dist = {}\n prev = {}\n q = []\n for y,a in enumerate(self.sm):\n for x,b in enumerate(self.sm[y]):\n dist[(x,y)] = sys.maxint\n prev[(x,y)] = None\n q.append((x,y))\n dist[source] = 0\n\n while len(q) is not 0:\n # find the node with minimum value (u)\n d = deepcopy(dist)\n while True:\n b = dict(map(lambda item: (item[1],item[0]), d.items()))\n u = b[min(b.keys())]\n if u not in q:\n d.pop(u)\n else:\n break\n\n if dist[u] == sys.maxint: # remaining nodes are inaccessible\n break\n\n q.remove(u)\n\n\n if u == target: # target found\n break\n\n for v in self.getNeighbors(u):\n alt = dist[u] + 1\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n\n s = []\n u = target\n while prev[u] is not None:\n s.append(u)\n u = prev[u]\n s.reverse()\n\n return s",
"def shortest_path_lengths(self, g, src):\n d = {} # d[v] is upper bound from s to v\n cloud = {} # map reachable v to its d[v] value\n pq = AdaptableHeapPriorityQueue() # vertex v will have key d[v]\n pqlocator = {} # map from vertex to its pq locator\n\n # for each vertex v of the graph, add an entry to the priority queue, with\n # the source having distance 0 and all others having infinite distance\n for v in g.vertices():\n if v is src:\n d[v] = 0\n else:\n d[v] = float('inf') # syntax for positive infinity\n pqlocator[v] = pq.add(d[v], v) # save locator for future updates\n\n while not pq.is_empty():\n key, u = pq.remove_min()\n cloud[u] = key # its correct d[u] value\n del pqlocator[u] # u is no longer in pq\n for e in g.incident_edges(u): # outgoing edges (u,v)\n v = e.opposite(u)\n if v not in cloud:\n # perform relaxation step on edge (u,v)\n wgt = e.element()\n if d[u] + wgt < d[v]: # better path to v?\n d[v] = d[u] + wgt # update the distance\n pq.update(pqlocator[v], d[v], v) # update the pq entry\n\n return cloud # only includes reachable vertices",
"def one_way_path(most_important, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(most_important), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]",
"def distance(self, start, end):\n if start == end:\n return 0\n if self.adjR == None:\n self.build_reverse_graph()\n n = self.nodes\n m = self.edges\n adj = self.adj\n adjR = self.adjR\n # For forward search\n processed = set()\n dist = {}\n dist[start] = 0\n heap = []\n heapq.heappush(heap,(0, start))\n # For backward search\n processedB = set()\n distB = {}\n distB[end] = 0\n heapB = []\n heapq.heappush(heapB,(0, end))\n shortest = float('inf')\n while heap and heapB:\n # For forward search\n if heap:\n d, u = heapq.heappop(heap)\n if u not in processed:\n processed.add(u)\n for v, w in adj[u]:\n if dist.get(v, -1) == -1:\n dist[v] = dist[u] + w\n heapq.heappush(heap,(dist[v], v))\n elif dist[v] > dist[u] + w:\n dist[v] = dist[u] + w\n heapq.heappush(heap,(dist[v], v))\n if dist[u] < shortest:\n for v, w in adj[u]:\n if v in processedB:\n length = dist[u] + distB[v] + w\n if length < shortest:\n shortest = length\n if u in processedB:\n return shortest\n else:\n return shortest\n # For backward search\n if heapB:\n d, u = heapq.heappop(heapB)\n if u not in processedB:\n processedB.add(u)\n for v, w in adjR[u]:\n if distB.get(v, -1) == -1:\n distB[v] = distB[u] + w\n heapq.heappush(heapB,(distB[v], v))\n elif distB[v] > distB[u] + w:\n distB[v] = distB[u] + w\n heapq.heappush(heapB,(distB[v], v))\n if distB[u] < shortest:\n for v, w in adjR[u]:\n if v in processed:\n length = distB[u] + dist[v] + w\n if length < shortest:\n shortest = length\n if u in processed:\n return shortest\n else:\n return shortest\n return -1",
"def std_bellman_ford(graph, src_vertex):\n res = nx.single_source_bellman_ford_path_length(graph, src_vertex)\n\n return [dist for _, dist in sorted(res.items())]",
"def bellman_ford(graph, src):\n distances = {}\n for vertex in graph.vertices:\n distances[vertex] = math.inf\n distances[src] = 0\n for _ in range(len(graph.vertices)):\n for src, edges in graph.get_all_edges().items():\n for neigh, edge in edges.items():\n wt = edge[\"weight\"]\n distances[neigh] = min(distances[neigh], distances[src] + wt)\n\n # If there is no negative cycle, doing the relaxation one more time\n # will have no effect on any of the distances.\n for src, edges in graph.get_all_edges().items():\n for neigh, edge in edges.items():\n wt = edge[\"weight\"]\n if distances[src] + wt < distances[neigh]:\n # Note that this will be reported only if any of the\n # negative cycle vertices are reachable from src.\n raise Exception(\"Negative cycle exists!\")\n return distances",
"def Optimum_prun_based_routing(self, S, D, L):\n if self.has_path(S, D):\n \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n if path_cost <= L:\n \"\"\"go to concave cost\"\"\"\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n self.G = self.rm_edge_constraint(PathConcave_cost) # remove all links where the concave link is greater than PathConcave_cost\n \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n PathConcave_cost = 0\n Opt_path = []\n return PathConcave_cost, Opt_path",
"def euclid_destination_for_passenger(state, problem):\n unsatisfied = [p for p in state.passengers if not (p.is_arrived() or p.onboard)]\n if unsatisfied:\n max_dist = sum([p.opt for p in unsatisfied])\n return max_dist\n return 0"
] |
[
"0.6498419",
"0.6417174",
"0.6412791",
"0.639987",
"0.63898635",
"0.63288945",
"0.6257384",
"0.6251373",
"0.6233719",
"0.6225941",
"0.62100583",
"0.6209474",
"0.61876315",
"0.61588436",
"0.60992855",
"0.60745585",
"0.6067401",
"0.6062507",
"0.6048969",
"0.6018151",
"0.6005745",
"0.5970444",
"0.59685403",
"0.59513104",
"0.5928754",
"0.59283257",
"0.5927226",
"0.5924468",
"0.58988714",
"0.5896683"
] |
0.71585447
|
0
|
returns the diameter of a graph by finding greatest max distance
|
def diameter(graph):
    max_distance = 0
    for vertex in graph:
        new_dist = max_dist(graph, vertex)
        if new_dist > max_distance:
            max_distance = new_dist
    return max_distance
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def diameter(self):\n\n v = self.vertices()\n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_path(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_paths.append(smallest)\n\n smallest_paths.sort(key=len)\n\n # Print the list smallest_paths\n\n # Longest path is at the end of list\n # ie diameter corresponds to length of this path\n\n diameter = len(smallest_paths[-1]) -1\n return diameter",
"def get_diameter(node):\n if node is None:\n return 0\n else:\n diameter_root = get_max_path(node.left) + get_max_path(node.right) + 1\n #print 'max_path from {} is {}'.format(node.value, diameter_root)\n diameter_left = get_diameter(node.left)\n diameter_right = get_diameter(node.right)\n return max(diameter_left, diameter_right, diameter_root)",
"def find_diameter(self):\n all_ways = []\n for vertex1 in self.graph.keys():\n for vertex2 in self.graph.keys():\n if vertex2 != vertex1:\n result = self.pathFinder(vertex1, vertex2)\n for path in result:\n all_ways.append(len(path) - 1)\n self.diameter = max(all_ways)\n print(f\"Diameter of network is {self.diameter}\")",
"def topo_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n distance.append(len(pathlist[k]) - 1)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.topodiameter = Temp",
"def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density",
"def get_max_mid_diameter(self):\n max_min_mid_diam = 0\n\n for m in self.components:\n name = m.name\n diam_file = join(\n self.params['molec_dir'],\n name+'_size.csv'\n )\n\n if exists(diam_file.replace('.csv', '.TOOBIG')):\n max_min_mid_diam = 0\n print(f'{m.name} too big based on MW')\n break\n if exists(diam_file.replace(\n 'size.csv',\n 'unopt.ETKDGFAILED'\n )):\n max_min_mid_diam = 0\n print(f'{m.name} failed ETKDG')\n break\n results = pd.read_csv(diam_file)\n min_mid_diam = min(results['diam2'])\n max_min_mid_diam = max([min_mid_diam, max_min_mid_diam])\n\n self.max_min_mid_diam = max_min_mid_diam",
"def get_maxdist(self, pixel_size):\n\n total_area = self.minnpix_cluster*pixel_size**2.\n\n radius = ((np.sqrt(total_area)/2.))\n if radius > 1.0:\n radius = int(radius)\n else:\n radius = round_to_1(radius)\n dist = np.sqrt(2.*float(radius)**2.)\n dist = dist+(0.05*dist)\n\n return dist",
"def __find_max_distance(self):\n return utils.find_max_distance(self.__game)",
"def get_diameter(self, t):\r\n if not t:\r\n return 0\r\n left_diam = self.get_diameter(t.left)\r\n right_diam = self.get_diameter(t.right)\r\n left_ht = self.get_height(t.left)\r\n right_ht = self.get_height(t.right)\r\n return max(max(left_diam , right_diam) , left_ht+right_ht+1)",
"def countMaxDegree(self):\r\n max_degree = [0, 0] # le sommet, son nombre de connection \r\n for i_node, node_connections in enumerate(self.adjMatrix):\r\n connection = self.n - node_connections.count(0) # on compte le nombre de connections du sommet\r\n if connection > max_degree[1]:\r\n max_degree = max_degree[i_node, node_connections]\r\n return max_degree[0], max_degree[1] # C un tuple ! \r",
"def undirected_diameter(self) -> int:\n return nx.diameter(self.to_undirected())",
"def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):\n\n\n path = [[],0 , 0]\n best_path = get_best_path(digraph, start, end, path, max_dist_outdoors, max_total_dist, best_path = None)\n\n if best_path[0] is None:\n raise ValueError('No work')\n else :\n return best_path[0]",
"def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d",
"async def max_distance(self, *args):\n return await self._rpc.max_distance(*args)",
"def height(node): \n if node is None:\n return -1\n \n # select the top two heights:\n max_height_1, max_height_2 = -1, -1\n for child in node.children:\n h = height(child) + 1\n if h > max_height_1:\n max_height_1, max_height_2 = h, max_height_1\n elif h > max_height_2:\n max_height_2 = h\n \n self.diameter = max(self.diameter, max_height_1 + max_height_2 + 2)\n \n return max_height_1",
"def find_dist_max(ar_coorx,ar_coory):\n nb_cell=len(ar_coorx)\n max_dist=0.\n for i in range(nb_cell):\n for j in range(nb_cell):\n max_dist=max(max_dist,distance(ar_coorx[i],ar_coory[i],ar_coorx[j],ar_coory[j]))\n return max_dist",
"def maximumDominationCount(leaf):\n maximumDominationCount = np.nanmax(leaf.calDominationCount())\n return maximumDominationCount",
"def get_max_density_index(self, disturbed_car):\n return self.cars.get_max_density_index(disturbed_car)",
"def max_dist(graph, source):\r\n q = queue.Queue()\r\n found = {}\r\n distance = {}\r\n for vertex in graph: \r\n found[vertex] = 0\r\n distance[vertex] = -1\r\n max_distance = 0\r\n found[source] = 1\r\n distance[source] = 0\r\n q.put(source)\r\n while q.empty() == False:\r\n current = q.get()\r\n for neighbour in graph[current]:\r\n if found[neighbour] == 0:\r\n found[neighbour] = 1\r\n distance[neighbour] = distance[current] + 1\r\n max_distance = distance[neighbour]\r\n q.put(neighbour)\r\n return max_distance",
"def find_max(self):\n return max(self.nodes, key=int)",
"def max_cardinality():\r\n #create a list containing the number of each vertex involvement.\r\n array = []\r\n for i in adj:\r\n array += [i[0],i[1]]\r\n\r\n #compute the degree by counting the involment\r\n degree = Counter(array).most_common()\r\n\r\n #retrieve the degree only\r\n degree_ = [ i[1] for i in degree]\r\n\r\n degree_ = np.array(degree_)\r\n \r\n max_m = None\r\n \r\n #check if m is valid\r\n for i in range(degree[0][1]+2)[2:]:\r\n \r\n #valid if there are at least m vertex with degree equals to at least m-1 \r\n if i < len(np.where(degree_>=i-1)[0]):\r\n max_m = i\r\n else:\r\n break\r\n max_m += 1\r\n print(f'maximum possible clique cardinality :{max_m}')\r\n return max_m",
"def spatial_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n Temp2 = 0\n for m in range(len(pathlist[k]) - 1):\n Temp2 += self.Dismatrix[pathlist[k][m], pathlist[k][m+1]]\n distance.append(Temp2)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.diameter = Temp",
"def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)",
"def degree_graph(g):\n return max(degree_node(g, node) for node in g)",
"def diameter(self):\n return 2 * self.radius",
"def max_degree_node(g, d, connected):\n if not connected:\n deg = d\n else:\n deg = {k: v for k, v in d.items() if k in connected}\n if not deg:\n return None\n n = max(deg.keys(), key=lambda k: deg[k])\n d.pop(n)\n for n_ in g.neighbors(n):\n connected.add(n_)\n return n",
"def getMaximumDistances(self):\n pass",
"def edge_position_max(self) -> int:\n return int(self.graph_tuple_stats.edge_position_max or 0)",
"def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn",
"def diameter(self):\n return self.radius * 2"
] |
[
"0.73363787",
"0.7249815",
"0.7026748",
"0.666575",
"0.65678686",
"0.6497069",
"0.6471133",
"0.6458646",
"0.6384776",
"0.6333823",
"0.6312993",
"0.6240806",
"0.62322974",
"0.6172251",
"0.6165411",
"0.60725904",
"0.6060118",
"0.6053263",
"0.6025451",
"0.600312",
"0.59992427",
"0.59922516",
"0.5990146",
"0.5987237",
"0.59837097",
"0.59730595",
"0.5965644",
"0.5959623",
"0.59527683",
"0.59379"
] |
0.85918725
|
0
|
diameter and clustering coefficient vs rewiring prob with k trials
|
def diameter_clustering_vs_prob_ws(num_nodes, k):
    xdata = []
    ydata = []
    zdata = []
    prob = 0.0005
    while prob < 1:
        xdata += [prob]
        diameters = []
        coeffs = []
        for i in range(k):
            graph = make_ws_graph(num_nodes, 8, prob)
            diameters += [diameter(graph)]
            coeffs += [clustering_coefficient(graph)]
        ydata += [sum(diameters) / k / 19.0]  # divide by 19, the diameter of the circle lattice
        zdata += [sum(coeffs) / k / 0.7]  # divide by 0.7, the clustering coefficient of the circle lattice
        prob = 1.2 * prob
    return xdata, ydata, zdata
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calculate_purity(D, k):\n ti = np.array(D.groupby(by=\"cluster\").count()['x1'])\n ci = np.array(D.groupby(by=\"label\").count()['x1'])\n total_observations = 0\n for i in range(k):\n total_observations += min(ti[i], ci[i])\n purity = total_observations / D.shape[0]\n return purity",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def integrated_clustering(t_all,y_all,num_of_days=500,period = 1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n \n ''' filter the empty segments'''\n all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = {cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def compute_clusters(self, p: float):\n pass",
"def test_determine_k(self):\n test_dir_name = os.path.dirname(__file__)\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"four_clusters.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\"x\", \"y\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 4)\n\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"iris.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\n \"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\",\n \"Petal.Width\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 2)",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def clustering(df, mode):\n # split into list of dfs containing only one reference node\n df_list = [df.loc[i : i + 8 - 1, :] for i in range(0, len(df), 8)]\n\n df_coefficient = pd.DataFrame()\n\n # loop over every single node\n for df_single in df_list:\n df_single = df_single.reset_index()\n total_value = 0\n\n # loop over the weights of all connected nodes\n for j in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n total_value = total_value + math.sqrt(df_single.chi_sq[j] * df_single.chi_sq[j + 1])\n if mode == \"arithmetic\": \n # arithmetic\n total_value = total_value + ((df_single.chi_sq[j] * df_single.chi_sq[j + 1]) / 2)\n if mode == \"argmax\": \n # max\n total_value = total_value + max(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n if mode == \"argmin\":\n # min\n total_value = total_value + min(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n\n for i in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n triplet_value = math.sqrt(df_single.chi_sq[i] * df_single.chi_sq[i + 1])\n if mode == \"arithmetic\":\n # arithmetic\n triplet_value = (df_single.chi_sq[i] * df_single.chi_sq[i + 1]) / 2\n if mode == \"argmax\":\n # max\n triplet_value = max(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n if mode == \"argmin\": \n # min\n triplet_value = min(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n\n cluster_coefficient = triplet_value / total_value\n buffer = [\n [\n df_single.reference[i],\n df_single.comparison[i],\n df_single.comparison[i + 1],\n triplet_value,\n cluster_coefficient,\n ]\n ]\n df_coefficient = df_coefficient.append(buffer)\n\n df_coefficient = df_coefficient.reset_index()\n\n print(\"\\n\\n threshold 0.5*c_omega\")\n check_list = []\n # print out triangles that have a cluster coefficient bigger, than X\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.5) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.75*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.75) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.8*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n\n print(\"\\n\\n threshold 0.9*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n return",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings",
"def __cluster_simi(self, i, j):\n sum_ = 0.\n for si in self.__indexclusters[i]:\n for sj in self.__indexclusters[j]:\n simi = self.__sample_simi(si, sj)\n sum_ += simi\n return sum_ / (len(self.__indexclusters[i]) * len(self.__indexclusters[j]))",
"def main():\n n = 34\n # create the adjacency matrix\n stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines())\n string_rows = [line.split() for line in stripped_lines if line]\n assert len(string_rows) == n\n for row in string_rows:\n assert len(row) == n\n data_rows = [[float(x) for x in string_row] for string_row in string_rows]\n A = np.array(data_rows)\n # create the ordered module indices\n first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11]\n second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27]\n assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n\n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1)\n ordered_module_indices = []\n for i in range(n):\n if i+1 in first_cluster_one_based_indices:\n ordered_module_indices.append(0)\n else:\n ordered_module_indices.append(1)\n # print the modularity\n Q = get_modularity_other_b(A, ordered_module_indices)\n print 'modularity calculated using my interpretation of the method of the paper', Q\n Q = get_modularity_other_b2(A, ordered_module_indices)\n print 'modularity calculated using a modification of my interpretation of the method of the paper', Q\n Q = get_modularity_other_c(A, ordered_module_indices)\n print 'modularity calculated using the method on wikipedia', Q\n Q = get_eric_modularity(A, ordered_module_indices)\n print 'modularity calculated using the method eric used:', Q\n print 'expected modularity: .375 +/- .025'",
"def conductance(g,i,j):\r\n paths = find_all_paths(g, i, j)\r\n for path in list(paths):\r\n path_pairs = list(pairwise(path))\r\n path_eids = g.get_eids(pairs = path_pairs)\r\n Wkl = np.array(g.es[path_eids]['weight'])\r\n path_starts = [i[0] for i in path_pairs]\r\n Dk = np.array(g.vs[path_starts].degree(mode='OUT')) \r\n Conductance = sum(Wkl/Dk)\r\n# print(\"Conducatnace between {} & {} = \".format(i,j), Conductance)\r\n return Conductance",
"def computeClusterKSZY(d0):\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2",
"def test(dist_param, picker_param, iters):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(4, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n None,\n tfidf_name('merged.stem{}.tfidf', name_tag),\n 10,\n 0,\n None)\n execute(tf_conf)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n None,\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n None,\n None,\n None)\n execute(tf_conf)\n #input, out, picker, distance, iterations,\n clust_cfg = configs.ClusteringConfig(\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n tfidf_name('merged.stem{}.stop.clustered.t', name_tag),\n picker_param,\n dist_param,\n iters,\n None\n )\n execute(clust_cfg)\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n var, norm = variation_of_information(cl_orig, clust2)\n print(\"**** FOR var {} VOI is {}\".format(name_tag, norm))",
"def monte_carlo_estimation(n, dist_d, m, sig_level, eps, minpts, h=10, best_obs_cluster = np.inf, print_freq = 10, print_option = 1):\n\n if print_option == 1:\n print('Monte Carlo estimation started (may take some time to finish)...')\n print('Total trial number: ', m)\n\n monte_carlo_table = np.zeros(m)\n early_term_cnt = 0\n for i in range(m):\n h0_data = generate_H0_data(n, dist_d)\n clusterer = DBSCAN(eps, min_samples=minpts).fit(h0_data)\n monte_carlo_table[i] = get_max_cluster_size(clusterer.labels_)\n if monte_carlo_table[i] >= best_obs_cluster:\n early_term_cnt += 1\n if early_term_cnt >= np.ceil(m * sig_level):\n if print_option == 1:\n print(\"Terminated early: no significant clusters...\")\n return np.inf\n\n if i % print_freq == 0:\n if print_option == 1:\n # print('Trial ', i, ', ', m-i-1, ' trials to complete...')\n print(i, 'trials completed...')\n\n monte_carlo_table = np.sort(monte_carlo_table)#sort\n monte_carlo_table = monte_carlo_table[::-1]#reverse order --> descending\n idx = np.ceil(m * sig_level).astype(int)\n threshold = monte_carlo_table[idx]\n\n # print(monte_carlo_table)\n\n return threshold",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def make_doppelganger_vs_clusters(n_clusters_considered,X,X_occam,n_repeats):\n res = []\n for n_clusters in n_clusters_considered:\n res.append([])\n for _ in range(n_repeats):\n X_restricted,restricted_idxs = get_n_random_clusters(X_occam,n_clusters)\n print(X.val.shape)\n print(X_restricted.val.shape)\n evaluator_X = evaluators.EvaluatorWithFiltering(X,X_restricted,leave_out=True,fitter_class=standard_fitter,valid_idxs=valid_idxs[restricted_idxs])\n res[-1].append(evaluator_X.weighted_average) \n return res",
"def recclust(X, threshold = .01, mc_iters = 100, verbose = True, prefix = \"/\", IDS = np.arange(0)):\n if IDS.shape[0] == 0 :\n IDS = np.arange(X.shape[0])\n assert IDS.shape[0] == X.shape[0], \"\"\"Input data \\\n and tag list must have compatible dimensions \\\n (or tag list must be None).\n \"\"\"\n \n data = {\"prefix\" : prefix, \"pval\" : None,\n \"subclust0\" : None, \"subclust1\" : None,\n \"ids\" : None, \"tot\" : 1}\n if X.shape[0] == 1:\n data[\"ids\"] = IDS\n print(\"Cluster %s has exactly one element.\" %\n prefix)\n else:\n \n p, clust = sigclust(X, mc_iters = mc_iters,\n verbose = verbose)\n print(\"The p value for subcluster id %s is %f\" %\n (prefix, p))\n data[\"pval\"] = p\n \n if p >= threshold:\n data[\"ids\"] = IDS\n else:\n pref0 = prefix + \"0\"\n pref1 = prefix + \"1\"\n print(\"Examining sub-clusters %s and %s\" %\n (pref0, pref1))\n data_0 = X[clust == 0, :]\n data_1 = X[clust == 1, :]\n print(\"Computing RecClust data for first cluster.\\\n Please wait...\")\n dict0 = recclust(data_0,\n prefix = prefix + \"0\",\n IDS = IDS[clust == 0])\n print(\"Computing Recclust data for second cluster.\\\n Please wait...\") \n dict1 = recclust(data_1,\n prefix = prefix + \"1\",\n IDS = IDS[clust == 1])\n data[\"subclust0\"] = dict0\n data[\"subclust1\"] = dict1\n data[\"tot\"] = dict0[\"tot\"] + dict1[\"tot\"]\n \n return data\n\n\n\n def comp_sim_vars(eig_vals, noise, thresh):\n\n #First sort eig_vals\n args = np.argsort(eig_vals)\n rev_sorted_args = args[::-1]\n rev_sorted_vals = eig_vals[rev_sorted_args]\n \n if thresh == 0:\n print(\"Threshold parameter 0, ignoring background noise.\")\n return rev_sorted_vals\n elif thresh == 1:\n print(\"Threshold parameter 1, applying hard thresholding.\")\n return np.maximum(rev_sorted_vals,\n bg_noise_var * np.ones(num_features))\n else:\n assert thresh == 2, \"Threshold parameter must be one of {0, 1, 2}.\"\n print(\"Applying soft thresholding.\")",
"def cluster_testing_dist(agg1, agg2, partDiameter):\n agg2_temp = translate_aggregate(agg2, random_point_generator(calculate_LD(agg1), calculate_LD(agg2), calculate_COM(agg1), calculate_COM(agg2), partDiameter))\n agg2_temp = random_rotate_aggregate(agg2_temp)\n\n check = 1\n while check == 1:\n agg2_temp = translate_aggregate(agg2_temp, numpy.array((calculate_COM(agg1)-calculate_COM(agg2_temp))*0.01))\n check, index = test_collision(agg1, agg2_temp, partDiameter)\n \"\"\" Index from this part is not valid! Function returns '99' before collision happens.\n \"\"\"\n if (check == 2):\n # print(index)\n return numpy.linalg.norm(calculate_COM(agg1) - calculate_COM(agg2_temp)), numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[:,index])\n # return numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[0:3,index])\n break",
"def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist",
"def answer_q10():\n data_sources = [viz.DATA_111_URL, viz.DATA_290_URL, viz.DATA_896_URL]\n x_vals = range(6, 21)\n y_vals_hier = {}\n y_vals_kmean = {}\n for idx in range(len(data_sources)):\n # 0. Generate data_field & cluster_list\n clust_list, data_table = closest_pair.create_cluster_list(\n data_sources[idx])\n y_vals_hier[idx] = []\n # 1. calculate values for hierarchical - decreasing order\n for clust_size in reversed(x_vals):\n clust_list = closest_pair.hierarchical_clustering(clust_list,\n clust_size)\n clust_error = closest_pair.compute_distortions(clust_list,\n data_table)\n y_vals_hier[idx].insert(0, clust_error)\n # 2. calculate values for kmeans - decreasing order\n y_vals_kmean[idx] = []\n for clust_size in x_vals:\n clust_list, data_table = closest_pair.create_cluster_list(\n data_sources[idx])\n clust_list = closest_pair.kmeans_clustering(clust_list,\n clust_size, 5)\n clust_error = closest_pair.compute_distortions(clust_list,\n data_table)\n y_vals_kmean[idx].append(clust_error)\n return x_vals, y_vals_hier, y_vals_kmean",
"def approximateConvergence(d, k, t):\n #First get the random centroids from the data\n newCentroids = getRandomCentroids(d, k)\n #newCentroids = [[-2.0, 1.0], [-2.0, -2.0], [2.0, 2.0], [0.0, 0.0]]\n #Get the clusters from these random centroids\n clusters = initiateCentroid(d, newCentroids, k)\n oldCentroids = []\n\n #Counter is zero, this is the amount of iterations\n counter = 0\n #Check is true, the check will be put to false then the difference between\n #centroids is smaller than the tolerance\n check = True\n #While the old centroids are not equal to the new ones OR the check is true\n while oldCentroids != newCentroids or check == True:\n #Old centroids are set to the new centroids\n oldCentroids = newCentroids\n #Recalculation of the new centroids\n k, newCentroids = calcCentroids(d, clusters)\n #Recalculation of the clusters\n clusters = initiateCentroid(d, newCentroids, k)\n #Check if the difference is smaller than the tolerance\n check = tolerance(oldCentroids, newCentroids, t)\n counter += 1\n\n return counter, clusters",
"def toy_sbm2clusters_1Dinterpolation( graph_qt,graph_sizes, cluster_perturbation, intra_p, inter_p , seed):\n dataset = []\n np.random.seed(seed)\n \n def perturbate_size_vector(cluster_perturbation, sizes_vector, n_nodes):\n #We sample a cluster - as GW invariant with perturbation we keep with first cluster\n #Apply the random size perturbation based on cluster_perturbation parameter\n #Propagate the rest to keep the proper number of nodes n_nodes\n rest = n_nodes\n n = len(sizes_vector)\n size_rate= 1 - cluster_perturbation\n #make sure that a cluster keeps a size >= 2\n assert sizes_vector[0]>2\n max_perturbation = max(1, int(sizes_vector[0]*size_rate))\n \n perturbation0= np.random.choice(range(1,max_perturbation))\n sizes_vector[0]-= perturbation0\n rest-= sizes_vector[0]\n for i in range(1, n-1):\n max_perturbation = max(1, int(sizes_vector[i]*size_rate))\n assert sizes_vector[i]>2\n \n perturbation = np.random.choice(np.random.choice(range(1,max_perturbation)))\n sizes_vector[i]-=perturbation\n rest-=sizes_vector[i]\n sizes_vector[-1] = rest\n return sizes_vector\n \n bloc_qt=2\n stacked_rates= []\n for k in range(graph_qt):\n #number of nodes in the graph\n n_nodes=np.random.choice(graph_sizes)\n #Here if we have more than one cluster we had the perturbation\n #on cluster size depending on size_perturbation rate\n \n if n_nodes%bloc_qt ==0:\n \n sizes = [n_nodes//bloc_qt for _ in range(bloc_qt)]\n else:\n residuals = (n_nodes%bloc_qt)\n sizes =[n_nodes//bloc_qt for _ in range(bloc_qt)]\n for i in range(residuals):\n #pos= np.random.choice(len(sizes))\n #we delete this feature - boring for supervised analysis\n sizes[i]+=1\n \n probs = inter_p*np.ones((bloc_qt, bloc_qt))\n np.fill_diagonal(probs, intra_p)\n local_seed= np.random.choice(range(100))\n sizes = perturbate_size_vector(cluster_perturbation,sizes, n_nodes)\n local_rate = sizes[0]/n_nodes\n stacked_rates.append(local_rate)\n print('Graph %s - perturbated_size:%s / rate size C1: %s'%(k,sizes,local_rate))\n G=sbm(sizes,probs,seed=int(local_seed))\n dataset.append(nx.to_numpy_array(G))\n \n return dataset,stacked_rates",
"def clustering_coefficient(graph):\r\n count = 0\r\n sumOfClusteringCoefficients = 0\r\n for vertex in graph:\r\n count += 1\r\n sumOfClusteringCoefficients += local_clustering_coefficient(graph, vertex)\r\n return sumOfClusteringCoefficients / count",
"def estimate_label_proportion(source_loader,target_loader,feat_extract,cuda,n_clusters,cluster_param): \n feat_extract.eval()\n #n_clusters = 3\n from sklearn.cluster import AgglomerativeClustering\n \n \n X_s,y_s = extract_feature(source_loader,feat_extract,cuda) \n X_t,y_t = extract_feature(target_loader,feat_extract,cuda) \n \n \n \n cluster = AgglomerativeClustering(n_clusters=n_clusters,linkage=cluster_param)\n label_t = cluster.fit_predict(X_t)\n #print(np.unique(label_t))\n mean_mat_S, num_in_class_S = extract_prototypes(X_s,y_s,n_clusters)\n mean_mat_T, num_in_class_T = extract_prototypes(X_t,label_t,n_clusters)\n \n \"\"\"\n We assume that prototypes of classes have been transported in some in the feature\n space \n \"\"\"\n \n import ot\n M = ot.dist(mean_mat_S, mean_mat_T)\n M /= M.max()\n \n n_1 = n_clusters\n a = np.ones((n_1,)) / n_1\n b = np.ones((n_1,)) / n_1\n \n \n gamma = ot.emd(a,b,M)\n nb_sample_S = [ np.sum(y_s==i) for i in range(n_clusters) ]\n proportion_T = num_in_class_T/np.sum(num_in_class_T)\n assignement_source_to_target = gamma.argmax(axis=1)\n \n # proportions are arranged directly per class\n proportion_T = proportion_T[assignement_source_to_target]\n print(proportion_T,assignement_source_to_target)\n \n\n return proportion_T,nb_sample_S, assignement_source_to_target",
"def customNcuts(self):\n # computing neighboors graph\n A = kneighbors_graph(self.values, self.k, mode='distance', include_self=False).toarray()\n\n for i in range(self.values.shape[0]):\n for j in range(self.values.shape[0]):\n if A[i][j] > 0:\n\n v1 = (self.values[i][3], self.values[i][4], self.values[i][5])\n v2 = (self.values[j][3], self.values[j][4], self.values[j][5])\n\n magnitude1 = np.sqrt(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2])\n magnitude2 = np.sqrt(v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2])\n ang = np.arccos(np.dot(v1, v2) / (magnitude1 * magnitude2))\n\n A[i][j] = max(self.values[i][7], self.values[j][7]) * A[i][j]\n\n # init SpectralClustering\n sc = SpectralClustering(4, affinity='precomputed', n_init=10, assign_labels = 'discretize')\n\n # cluster\n labels = sc.fit_predict(A)\n\n return labels",
"def structure_coeffs(df):\n j_dist = get_coop_coop_neighbour_dist(df) \n # j_dist = j_dist.append([{'n':1,'k':k,'j':0, 'j_freq':1} for k in range(j_dist.k.min(),j_dist.k.max())])\n sigma_df = j_dist.groupby(['k','j'])['j_freq'].sum().reset_index(name='sigma')\n degree_dist = get_degree_distribution(df) \n for k,freq in zip(degree_dist.index, degree_dist.values):\n sigma_df.loc[sigma_df.k==k,'sigma'] *= freq\n return sigma_df",
"def lloyds_algorithm(X, k, T):\n n, d = X.shape\n\n # Initialize clusters random.\n clustering = np.random.randint(0, k, (n,))\n centroids = np.zeros((k, d))\n\n # Used to stop if cost isn't improving (decreasing)\n cost = 0\n oldcost = 0\n\n # Column names\n # print(\"Iterations\\tCost\")\n for i in range(T):\n\n # Update centroid\n centroids = np.zeros((k, d))\n # YOUR CODE HERE\n numberOfPointsInClusters = np.zeros((k,))\n for idx, point in enumerate(clustering):\n numberOfPointsInClusters[point] += 1\n centroids[point] += X[idx]\n for n in range(k):\n if numberOfPointsInClusters[n] == 0:\n numberOfPointsInClusters[n] = float('-inf')\n centroids = [centroid / numberOfPointsInClusters[idx] for idx, centroid in enumerate(centroids)]\n # END CODE\n\n # Update clustering\n\n # YOUR CODE HERE\n for idx, point in enumerate(X):\n clustering[idx] = np.argmin([np.linalg.norm(point - cluster) for cluster in centroids])\n # END CODE\n\n # Compute and print cost\n cost = 0\n for j in range(n):\n cost += np.linalg.norm(X[j] - centroids[clustering[j]]) ** 2\n # print(i + 1, \"\\t\\t\", cost)\n\n # Stop if cost didn't improve more than epislon (decrease)\n if np.isclose(cost, oldcost): break # TODO\n oldcost = cost\n\n return clustering, centroids, cost",
"def run_various_Ks(x, K):\n m = len(x) # length of data points\n min_list = [] # list that will contain minimum costs\n Ks = [i for i in range(1,K+1)] # values of K's\n\n for i in range(1, K+1):\n # runs algorithm with different values of K\n kmeans = KMeans(n_clusters=i, random_state=0).fit(x)\n minval = kmeans.inertia_\n print(minval)\n min_list.append(minval) # appends minimum cost \n\n # Plotting J vs. K to choose best value of K\n plt.plot(Ks, min_list)\n plt.plot(Ks, min_list, '-o')\n plt.xlabel('K (# of clusters)')\n plt.ylabel('Cost function J')\n plt.title('J vs. K plot')\n plt.show()",
"def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)"
] |
[
"0.6533492",
"0.6453961",
"0.6248546",
"0.6242631",
"0.6210361",
"0.61629087",
"0.6087399",
"0.6042287",
"0.6032177",
"0.6013144",
"0.6012423",
"0.5967039",
"0.59623754",
"0.5951409",
"0.5901097",
"0.58950704",
"0.58583415",
"0.5852265",
"0.5821665",
"0.5807835",
"0.5776957",
"0.5776392",
"0.5751672",
"0.57407564",
"0.57406986",
"0.5723726",
"0.57162",
"0.5706526",
"0.5700983",
"0.5694492"
] |
0.69475347
|
0
|
Match protein names of MS and Uniprot's proteome.
|
def MatchProtNames(ProteomeDict, MS_names, MS_seqs):
    matchedNames, seqs, Xidx = [], [], []
    counter = 0
    for i, MS_seq in enumerate(MS_seqs):
        MS_seqU = MS_seq.upper()
        MS_name = MS_names[i].strip()
        if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:
            Xidx.append(i)
            seqs.append(MS_seq)
            matchedNames.append(MS_name)
        else:
            try:
                newname = getKeysByValue(ProteomeDict, MS_seqU)[0]
                assert MS_seqU in ProteomeDict[newname]
                Xidx.append(i)
                seqs.append(MS_seq)
                matchedNames.append(newname)
            except BaseException:
                print(MS_name, MS_seqU)
                counter += 1
                continue
    assert counter == 0, "Proteome is missing %s peptides" % (counter)
    assert len(matchedNames) == len(seqs)
    return matchedNames, seqs, Xidx
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def findmotif(MS_seq, MS_name, ProteomeDict, motif_size):\n MS_seqU = MS_seq.upper()\n try:\n UP_seq = ProteomeDict[MS_name]\n assert MS_seqU in UP_seq, \"check \" + MS_name + \" with seq \" + MS_seq + \". Protein sequence found: \" + UP_seq\n regexPattern = re.compile(MS_seqU)\n MatchObs = list(regexPattern.finditer(UP_seq))\n if \"y\" in MS_seq:\n pY_idx = list(re.compile(\"y\").finditer(MS_seq))\n assert len(pY_idx) != 0\n center_idx = pY_idx[0].start()\n y_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pY_idx) > 1:\n DoS_idx = pY_idx[1:]\n assert len(DoS_idx) != 0\n elif \"t\" in MS_seq or \"s\" in MS_seq:\n DoS_idx = list(re.compile(\"y|t|s\").finditer(MS_seq))\n assert len(DoS_idx) != 0\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, y_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n if \"y\" not in MS_seq:\n pTS_idx = list(re.compile(\"t|s\").finditer(MS_seq))\n assert len(pTS_idx) != 0\n center_idx = pTS_idx[0].start()\n ts_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pTS_idx) > 1:\n DoS_idx = pTS_idx[1:]\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, ts_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n except BaseException:\n print(MS_name + \" not in ProteomeDict.\")\n raise\n\n return pos, mappedMotif",
"def DictProteomeNameToSeq(X, n):\n DictProtToSeq_UP = {}\n for rec2 in SeqIO.parse(X, \"fasta\"):\n UP_seq = str(rec2.seq)\n if n == \"full\":\n UP_name = rec2.description.split(\"HUMAN \")[1].split(\" OS\")[0]\n DictProtToSeq_UP[UP_name] = str(UP_seq)\n if n == \"gene\":\n try:\n UP_name = rec2.description.split(\" GN=\")[1].split(\" \")[0]\n DictProtToSeq_UP[UP_name] = str(UP_seq)\n except BaseException:\n continue\n return DictProtToSeq_UP",
"def extract_uniprot4protein_keys(self, proteins_dict):\n\t\treturn {key.split(\"|\")[1]: value for (key, value) in proteins_dict.items()}",
"def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r",
"def check_names(treat, control, error_stream):\n tchrnames = set(treat.get_chr_names())\n cchrnames = set(control.get_chr_names())\n commonnames = tchrnames.intersection(cchrnames)\n if len(commonnames)==0:\n error_stream(\"No common chromosome names can be found from treatment and control!\")\n error_stream(\"Please make sure that the treatment and control alignment files were generated by using the same genome assembly!\")\n error_stream(\"Chromosome names in treatment: %s\" % \",\".join(sorted(tchrnames)))\n error_stream(\"Chromosome names in control: %s\" % \",\".join(sorted(cchrnames)))\n sys.exit()",
"def protein_from_orfs(dna):\n rna = dna.replace(\"T\", \"U\")\n reverse_complement_rna = complement_strand(dna).replace(\"T\", \"U\")\n\n candidate_proteins = set()\n\n for strand in [rna, reverse_complement_rna]:\n for index in [m.start() for m in re.finditer('AUG', strand)]:\n codons_list = codons(strand[index:])\n protein = \"\"\n\n if any(rna_codon_dict[codon] == \"Stop\" for codon in codons_list):\n for codon in codons_list:\n symbol = rna_codon_dict[codon]\n\n if symbol != \"Stop\":\n protein += symbol\n else:\n candidate_proteins.add(protein)\n break\n\n return candidate_proteins",
"def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict",
"def bonenamematch(name1, name2):\n if name1 == name2:\n return True\n if name1.startswith(\"Bip01 L \"):\n name1 = \"Bip01 \" + name1[8:] + \".L\"\n elif name1.startswith(\"Bip01 R \"):\n name1 = \"Bip01 \" + name1[8:] + \".R\"\n if name2.startswith(\"Bip01 L \"):\n name2 = \"Bip01 \" + name2[8:] + \".L\"\n elif name2.startswith(\"Bip01 R \"):\n name2 = \"Bip01 \" + name2[8:] + \".R\"\n if name1 == name2:\n return True\n return False",
"def get_matching_names(understat_names, fpl_names): \n understat_names, fpl_names = understat_names['player_name'].unique(), fpl_names['player_name'].unique()\n seq = difflib.SequenceMatcher()\n understat_similar = []\n fpl_similar = []\n ratio = []\n for i in range(len(understat_names)):\n for j in range(len(fpl_names)):\n seq.set_seqs(understat_names[i].lower(), fpl_names[j].lower())\n ratio_similar = seq.ratio()\n understat_similar.append(understat_names[i])\n fpl_similar.append(fpl_names[j])\n ratio.append(ratio_similar)\n similarity_matched_df = pd.DataFrame({'understat':understat_similar, 'fpl':fpl_similar, 'similarity': ratio}).copy()\n similarity_matched_df_final = similarity_matched_df.loc[similarity_matched_df.groupby('understat')['similarity'].idxmax()].copy()\n # print(similarity_matched_df_final.sort_values('similarity',ascending=False).to_latex())\n return similarity_matched_df_final",
"def test_cases_different_prefix_return_both(self):\n self.create_testdata()\n res = self.filter([u\"pre\", u\"moz\"])\n\n self.assertEqual(\n Set([x.name for x in res.all()]),\n Set([\"CV 1\", \"CV 3\", \"CV 4\"]),\n )",
"def test_legal_names(self):\n product_list = generate_products()\n for prod in product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)",
"def lookup_phenotype_by_name( name, greent ):\n logger=logging.getLogger('application')\n #This performs a case-insensitive exact match, and also inverts comma-ed names\n hpo_ids = greent.hpo.search( name )\n if len(hpo_ids) == 0:\n logger.error('Could not convert phenotype name: {}.'.format(name))\n else:\n logger.debug('Found ids for phenotype name: {} {}.'.format(name,' '.join(hpo_ids)))\n return hpo_ids",
"def match_IAU_names(name1, name2, template):\n if diag:\n print \"match_IAU_names:\",name1,name2\n flag1,x1,y1 = parse_IAU_name(name1)\n flag2,x2,y2 = parse_IAU_name(name2)\n if flag1 != flag2:\n return False\n string1 = template % (x1,y1)\n string2 = template % (x2,y2)\n if string1 == string2:\n return True\n else:\n return False",
"def match_invert_skills(item):\n text = item.text\n\n if any([skill in text for skill in other_skill_names]):\n if any([skill in text for skill in class_skills]): # double check\n print('found a wizard skill', [skill for skill in class_skills if skill in text])\n print(item)\n return True\n return False\n return True",
"def test_legal_names(self):\n names = [prod.name for prod in generate_products()]\n sep = [(name.split()[0], name.split()[1]) for name in names]\n for name in sep:\n self.assertIn(name[0], ADJS)\n self.assertIn(name[1], NOUNS)",
"def verif_similar_names(sv):\r\n ok=True\r\n names=[os.path.normcase(n) for n in sv.Object_list] # list names without case\r\n names.sort() # facilitate compare one to the next\r\n for i, n in enumerate(names[:-1]): # scan whole list\r\n a,b=n[:-1], names[i+1][:-1] # names minus last char\r\n c=names[i+1][-1] # last char in full name\r\n d=n[-1] # last char in full name\r\n if len(a)>1 and (c <\"0\" or c>\"9\") and (d <\"0\" or d>\"9\") and a[-1]!=Underscore and b in [a, n]:\r\n if ok:\r\n print(\"\")\r\n ok=False\r\n warn(\"\\n\"+Warn_typing_risk+\"\\n'\"+n+\"' / '\"+names[i+1]+\"'\") # *** Warning: risk of typing error in '\"+n+\"' or '\"+names[i+1]+\"' *** \r\n \r\n if not ok: print(\"\")",
"def test_legal_names(self):\n names = [i[0] for i in generate_products()]\n\n for n in names:\n name = str(n).split()\n name1 = name[0]\n name2 = name[1]\n self.assertIn(name1, ADJECTIVES)\n self.assertIn(name2, NOUNS)",
"def open_uniprotsite(prot_names):\n fasta_dict = {}\n for prot_id in prot_names:\n \n uniprot_link = \"https://www.uniprot.org/uniprot/\" + prot_id + \".fasta\"\n\n uniprot_fasta = urllib.request.urlopen(uniprot_link)\n fasta_sequence = uniprot_fasta.readlines()#.decode('utf-8')\n fasta_sequence = fasta_sequence[1:]\n fasta_sequence = list(f.decode('utf-8') for f in fasta_sequence)\n fasta_sequence = ''.join(fasta_sequence)\n fasta_sequence = fasta_sequence.replace('\\n','')\n\n fasta_dict[prot_id] = fasta_sequence\n uniprot_fasta.close()\n\n return fasta_dict",
"def homophones():\n pron = pronounce.read_dictionary('c06d')\n words = mkwrddct('words.txt')\n\n for word in words:\n phone1 = word[1:]\n phone2 = word[0] + word[2:]\n if phone1 in pron and phone2 in pron and word in pron:\n if pron[word] == pron[phone1] and pron[word] == pron[phone2]:\n print word, phone1, phone2",
"def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)",
"def test_legal_names(self):\n prods = generate_products()\n for obj in prods:\n self.assertRegexpMatches(\n '(\\w{2,10} \\w{0,12}|\\?{0,3}){1}', obj.name)",
"def GeneratingKinaseMotifs(names, seqs):\n motif_size = 5\n proteome = open(os.path.join(path, \"./data/Sequence_analysis/proteome_uniprot2019.fa\"), \"r\")\n ProteomeDict = DictProteomeNameToSeq(proteome, n=\"gene\")\n protnames, seqs, Xidx = MatchProtNames(ProteomeDict, names, seqs)\n MS_names, mapped_motifs, uni_pos, = [], [], []\n\n for i, MS_seq in enumerate(seqs):\n pos, mappedMotif = findmotif(MS_seq, protnames[i], ProteomeDict, motif_size)\n MS_names.append(protnames[i])\n mapped_motifs.append(mappedMotif)\n uni_pos.append(pos)\n\n proteome.close()\n return MS_names, mapped_motifs, uni_pos, Xidx",
"def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)",
"def test_cases_same_prefix_return_both(self):\n self.create_testdata()\n res = self.filter([u\"moz\"])\n\n self.assertEqual(\n Set([x.name for x in res.all()]),\n Set([\"CV 3\", \"CV 4\"]),\n )",
"def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')",
"def find_pseudonyms(original_name, gender, topk):\n firstnames = load_firstnames(gender)\n model = load_model()\n whitelist = LetterBag(slugify.slugify(\n WORD_SPLIT_PATTERN.sub(\"\", original_name)))\n for firstname in firstnames:\n if not whitelist.includes(firstname):\n continue\n for lastname, proba in generate_word(model, whitelist.sub(firstname), topk):\n yield firstname.surface, lastname, proba",
"def rename_proteins(names_csv):\n\n names_frame = pd.read_csv(names_csv)\n\n for _, row in names_frame.iterrows():\n mol_target = row['name']\n alternate_name = row['alternate_name']\n # Remove the replacement of '_0' - this was inconsistently applied as some folders are '_1'\n # The Protein code will be modified to be of format 'xtal_directory:alternate_name'\n new_name = str(mol_target).strip() + ':' + str(alternate_name).strip()\n\n prots = Protein.objects.filter(code=mol_target)\n for prot in prots:\n logger.debug(\"Changing prot.code to '%s'\", new_name)\n prot.code = new_name\n prot.save()",
"def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)",
"def test_titles_do_not_match(self):\r\n gm_title = 'Zhao Hua'\r\n sp_title = 'MMXXX (ft Moor Mother)'\r\n self.assertFalse(gmspotify.titles_match(gm_title, sp_title))",
"def partialMatch(self, studentName):\n if studentName.lower() in self._name.lower():\n return True\n return False"
] |
[
"0.5665962",
"0.5651878",
"0.53951806",
"0.5392965",
"0.539256",
"0.5364423",
"0.5346171",
"0.52936655",
"0.5278408",
"0.5257746",
"0.52572376",
"0.52282745",
"0.52014524",
"0.5189774",
"0.5179715",
"0.516316",
"0.5145174",
"0.51316696",
"0.5128949",
"0.5123912",
"0.5123605",
"0.5121676",
"0.5118218",
"0.50946265",
"0.50652814",
"0.50443894",
"0.5032971",
"0.50212425",
"0.50092673",
"0.49926722"
] |
0.73574924
|
0
|
Loads the specified song.
|
def load(self, song):
    self.currentSongName = song
    # pygame.mixer.music.load() streams the file into the music module and
    # returns None, so currentSong only records that a load was attempted.
    self.currentSong = pygame.mixer.music.load(song)
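
# A minimal, self-contained usage sketch of the pattern above; the
# MinimalPlayer class and the file path are assumptions for illustration,
# while the pygame calls are the standard mixer API.
import pygame


class MinimalPlayer:
    def load(self, song):
        self.currentSongName = song
        pygame.mixer.music.load(song)   # the track is now queued for playback


pygame.mixer.init()
player = MinimalPlayer()
player.load("example.ogg")              # placeholder path
pygame.mixer.music.play()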
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_song(self, path):\n self._menu_select('File->Open')\n self._open_file(path)\n try:\n # Get the annoying Comments window out of the way\n self._app.Comments.minimize()\n except MatchError:\n pass",
"def loadSong(fileName):\n with open (fileName, 'r') as f:\n testSong = ast.literal_eval(f.read())\n\n return testSong",
"def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"",
"def loadTestSong (filename):\n testSong = {}\n #information of analysed song stored in dictionary testSong\n testSong[\"spectrogram\"] = STFTsignal.getSTFTofFile(filename)\n testSong[\"name\"] = filename\n return testSong",
"def play_song(self):\r\n path = input('Give path to wanted song: ') # Request path to song\r\n path = path.replace('\\\\', '/')\r\n if not self.path_storage_re.match(path): # Check if the wanted song is from the storage directory\r\n print(\"Give a valid path\")\r\n else:\r\n p = vlc.MediaPlayer(path) # Create VLC instance and play the song\r\n p.play()\r\n self.playSong.append(p)\r\n self.isPlaying = True",
"async def get_song(self, song_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getSong\", extra_query={\"id\": song_id})",
"def get_song(self, song_id):\n url = get_song_url(song_id)\n result = self.get_request(url)\n\n return result['songs'][0]",
"def load_music(self, filename):\n self.music = filename\n self.music_playing = False\n if self.is_running:\n if filename is not None:\n cocos.audio.music.control.load(filename)\n else:\n cocos.audio.music.control.stop()",
"def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])",
"def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break",
"def get_song(self, song_id):\n url = get_song_url(song_id)\n result = self.common_get_request(url,headers)\n\n return result['songs'][0]",
"def read_song_by_song_id(song_id):\n logging.debug('{CRUD_operations} BEGIN function read_song_by_song_id()')\n logging.debug('{CRUD_operations} Data received: song_id: %s', song_id)\n song = Song.query.filter_by(id=song_id).first()\n logging.debug('{CRUD_operations} END function read_song_by_song_id()')\n return song",
"async def play(self, ctx, *, song: str):\n state = self.get_voice_state(ctx.message.server)\n opts = {\n 'default_search': 'ytsearch',\n 'quiet': True,\n }\n\n if state.voice is None:\n success = await ctx.invoke(self.summon)\n if not success:\n return\n if state.voice.channel != ctx.message.author.voice_channel:\n await self.bot.say('You can only modify the queue if you\\'re in the same channel as me!')\n return\n if len(state.songs._queue) >= 6:\n await self.bot.say('There can only be up to 6 items in queue!')\n return\n\n status = await self.bot.say('Loading... 🌚')\n pg_task = self.loop.create_task(self.progress(status, 'Loading'))\n state.voice.encoder_options(sample_rate=48000, channels=2)\n try:\n player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)\n except Exception as e:\n if type(e).__name__.endswith('DownloadError') or type(e).__name__.endswith('IndexError'):\n pg_task.cancel()\n await self.bot.delete_message(status)\n await self.bot.say('**That video couldn\\'t be found!**')\n return False\n else:\n raise e\n\n player.volume = 0.7\n entry = VoiceEntry(ctx.message, player, False)\n was_empty = state.songs.empty()\n await state.songs.put(entry)\n if state.current:\n await self.bot.say('Queued ' + str(entry))\n pg_task.cancel()\n await self.bot.delete_message(status)",
"def load(self, media):\n path = self.check_media(media)\n if path is False:\n self._log(\"warning\", \"Unknown media {0} => aborting\".format(media))\n #return False prevent continue to play or play last media, force send unknow file to load to vlc\n # self.stdin_queue.put_nowait()\n self._direct_stdin_writer(\"load {0}\".format(path))",
"def selectPlay(id):\n\tsong = music.song()\n\tsql = \"SELECT id, title, path, filename, hash, base FROM songs \" \\\n\t\t+ \"WHERE id = \" + str(id) + \";\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tsinfo = c.fetchone()\n\t\n\tif sinfo[0]:\n\t\tsong.id = sinfo[0]\n\tif sinfo[1]:\n\t\tsong.name = sinfo[1]\n\tif sinfo[2]:\n\t\tsong.path = sinfo[2]\n\tif sinfo[3]:\n\t\tsong.filename = sinfo[3]\n\tif sinfo[4]:\n\t\tsong.hash = sinfo[4]\n\tif sinfo[5]:\n\t\tsong.base = sinfo[5]\n\t\n\treturn song",
"def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()",
"def load_sound(self, name):\n class NoneSound:\n def play(self): pass\n if not pygame.mixer or not pygame.mixer.get_init():\n sound = NoneSound()\n else:\n fullname = os.path.join('TeddyLevel','data', name)\n try:\n sound = pygame.mixer.Sound(fullname)\n except pygame.error, message:\n print 'Cannot load sound:', fullname\n raise SystemExit, message\n dictname = name[0:name.find('.')]\n self.dict[dictname] = sound",
"def play_music(sid):\n # Get the parameters for the get_song_id request\n artist = None\n album = None\n title = None\n if not request.json:\n # If no JSON parameters were given, just resume playing the song\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.resume(host['ip'])\n return jsonify({})\n else:\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n artist = None\n album = None\n track = None\n if request.json.has_key('track') and request.json.get('track'):\n track = request.json.get('track')\n elif request.json.has_key('album') and request.json.get('album'):\n album = request.json.get('album')\n elif request.json.has_key('artist') and request.json.get('artist'):\n artist = request.json.get('artist')\n else:\n spotify.resume(host['ip'])\n return jsonify({})\n spotify.compound_play(host['ip'], artist=artist, album=album, song=track)\n return jsonify({})\n except:\n abort(400)",
"def load_sound(self, filename):\n return mixer.Sound(os.path.join(\"sounds\", filename))",
"def getSong(self):\n queue = self.instantiate_queue()\n song_data = queue.pop(0)\n\n history = self.instantiate_history()\n history_song_data = deepcopy(song_data)\n history_song_data['time_played'] = time() + 5\n history.append(history_song_data)\n\n if len(queue) < 5:\n self.addImplicit(queue, history)\n \n self.ageSongs(queue)\n self.calculateScore(queue)\n queue = self.sortSongs(queue)\n\n self.cache.set('queue', queue)\n self.cache.set('history', history)\n\n keys = ['name', 'track_id', 'artist', 'album_uri', 'album_name', 'duration', 'explicit', 'valence', 'energy']\n args = [song_data[key] for key in keys]\n return Song(*args)",
"def retrieve(self, request, pk=None):\n song = get_object_or_404(Song, pk=pk)\n\n serializer = SongSerializer(song)\n return Response(serializer.data)",
"def importsong(fpath):\n result = \"\"\n\n tags = checkid3(fpath)\n if tags is not None:\n sig = sigfile(fpath)\n exsong = Song.objects.filter(uniq=sig)\n\n if len(exsong) > 0:\n if exsong[0].filename != fpath:\n result = updatesong(exsong[0], fpath)\n else:\n result = \"[X] %s\" % exsong[0].title\n else:\n result = createsong(tags, sig, fpath, songminplay())\n else:\n logger.error('No tags found in [%s]' % fpath)\n\n return result",
"def load(self):\n Logger.info(\"VLCPlayer: Entering load\")\n self._load_player(self.source)\n self._set_volume(self.volume)",
"def load(self, uri):\n if self.p:\n self.p.stop()\n self.p = self.vlc.media_player_new(uri)\n Player._finished = False\n e = self.p.event_manager()\n e.event_attach(vlc.EventType.MediaPlayerEndReached, self.__end_reached, None)\n if (not '://' in uri or uri.startswith('file://')) and os.stat(uri).st_size < 100:\n self._finished = True\n else:\n self.p.play()",
"def loadSound(path,name):\n\n class NoneSound:\n def play(self): pass\n if not pygame.mixer:\n return NoneSound()\n fullname = os.path.join(path,name)\n try:\n sound = mixer.Sound(fullname)\n except error, message:\n print \"Cannot load sound:\", name\n raise SystemExit, message\n return sound",
"def _load_player(self, filename):\n self._unload_player()\n\n Logger.info(\"VLCPlayer: Loading player\")\n SoundVLCPlayer.player = player = self.instance.media_player_new()\n media = player.set_mrl(filename)\n player.event_manager().event_attach(\n EventType.MediaPlayerEndReached, self._track_finished)\n media.parse() # Determine duration\n self._length = media.get_duration() / 1000.0\n media.release()",
"def load(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n return Sound(name, Waveform.load(path))",
"def load(self, playlist, start=None, end=None):\n track_range = _format_range(start, end)\n yield from self.command('load \"{}\" {}'.format(playlist, track_range))\n return True",
"def fetch_song_data(url):\r\n response = requests.get(url)\r\n return response.text",
"def load(name):\n with pyglet.resource.file(f'sounds/{name}.wav', 'rb') as f:\n return pygame.mixer.Sound(f)"
] |
[
"0.6981721",
"0.6755066",
"0.6409486",
"0.6405645",
"0.63767195",
"0.62946016",
"0.6293204",
"0.62171227",
"0.6199908",
"0.6159191",
"0.61466426",
"0.6089316",
"0.60279983",
"0.6003873",
"0.6002119",
"0.60020953",
"0.59768283",
"0.59588265",
"0.5956575",
"0.5947801",
"0.5932302",
"0.590208",
"0.58681613",
"0.584776",
"0.58475816",
"0.5810272",
"0.58094895",
"0.5784046",
"0.5780316",
"0.577395"
] |
0.82048696
|
0
|
Mark an existing task in the heap as REMOVED and delete it from the entry_finder. Raise KeyError if not found.
|
def remove(self, task):
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
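
# A compact sketch of the lazy-deletion bookkeeping the method above relies
# on, following the standard heapq recipe; the LazyPriorityQueue name and the
# add() helper are assumptions made for illustration.
import heapq
import itertools


class LazyPriorityQueue:
    REMOVED = '<removed-task>'              # sentinel marking deleted entries

    def __init__(self):
        self.pq = []                        # heap of [priority, count, task]
        self.entry_finder = {}              # task -> heap entry
        self.counter = itertools.count()    # tie-breaker for equal priorities

    def add(self, task, priority=0):
        if task in self.entry_finder:
            self.remove(task)
        entry = [priority, next(self.counter), task]
        self.entry_finder[task] = entry
        heapq.heappush(self.pq, entry)

    def remove(self, task):
        # entry_finder.pop() raises KeyError when the task was never added
        entry = self.entry_finder.pop(task)
        entry[-1] = self.REMOVED            # leave the stale entry in the heap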
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_task(self, task):\n for i, item in enumerate(self.tasks):\n if item is task:\n del self.tasks[i]",
"def delete(self, node):\n\n # logger_cagada.debug(\"norrando nodo %s\" % (type(node)))\n entry = self.entry_finder.pop(node)\n # logger_cagada.debug(\"la entry q c borra %s\" % entry)\n entry[-1] = self.REMOVED\n # logger_cagada.debug(\"el heap es %s\" % self.heap)\n return entry[0]",
"def remove(self, task):\n pass",
"def remove_entry(self, pos: int) -> None:\n del self.entries[pos]",
"def remove(self, item) -> None:\n entry = self.entry_finder.pop(item)\n entry[-1][0] = None",
"def delete(self, entry): # Hashmap.delete\n\n entry.delete()\n\n # remove the entry from the hashmap\n list=self.contentHash[entry.hexdigest]\n newlist = []\n for e in list:\n if e != entry:\n newlist.append(e)\n\n # if there are no more entries for this hashval, remove\n # it from the dictionary m\n if len(newlist):\n self.contentHash[entry.hexdigest] = newlist\n else:\n del self.contentHash[entry.hashval]\n\n # also remove all the deleted children from the hashmap\n self.prune()",
"def move_task(self, task, where):\n for key in self.application.keys():\n if task in self.application[key]:\n self.application[key].remove(task)\n\n self.application[where].append(task)",
"def remove_task(self, task_id):\n with self.lock:\n self.task_map.pop(task_id)",
"def remove(self, task):\n queue = self.__get_task_queue(task)\n queue.remove(task)\n if self._store:\n try:\n del self._tasks_by_id[task.persistent_id]\n except AttributeError:\n # already removed\n pass\n task.detach()\n self.__update_task_counts(task, task.execution.state, -1)",
"def del_task(self, tid: str):\n if tid in self.__tasks:\n task = self.__tasks.pop(tid)\n task.detach()\n self.__dump()",
"def delete(self, name, task):\n assert name, \"Must input a valid dataset name.\"\n assert task, \"Must input a valid task name.\"\n self._assert_dataset_exists_in_cache(name)\n self._assert_task_exists_in_dataset_in_cache(name, task)\n\n self.manager.data[\"dataset\"][name][\"tasks\"].pop(task)\n\n self._update_cache_data()",
"def del_task(self, task_name):\n if task_name not in self.task_list:\n raise Exception(\"Task not in list.\")\n del self.task_list[task_name]\n ii = [i for i, t in enumerate(self.task_order) if t == task_name][::-1]\n for i in ii:\n del self.task_order[i]",
"def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")",
"def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1",
"def remove_entry(self, number: int) -> None:\n raise NotImplementedError",
"def remove(self, task: Task) -> None:\n try:\n self._tasks.remove(task)\n except ValueError as e:\n raise e",
"def delete(self):\n return super(Task, self).delete(None)",
"def deleteTask():\n\tmarkOff(isdelete = 1)",
"def fs_remove_entry(self, path):\n\t\treturn Job(SDK.PrlSrv_FsRemoveEntry(self.handle, path)[0])",
"def remove(self, key):\n ndx = self._findPosition(key)\n assert ndx, 'Invalid map key'\n self._entryList.pop(key)",
"def detach_task(self, task):\n self.tasks.remove(task)",
"def delete_entry(self, arg):\n nums = self.selector.select_by_index(arg)\n if nums is None or not nums:\n self.visual.error(\"Need a selection to delete.\")\n return\n to_delete = [self.reference_entry_id_list[n] for n in nums]\n old_len, del_len = len(self.reference_entry_id_list), len(to_delete)\n for entry_id in to_delete:\n self.entry_collection.remove(entry_id)\n self.visual.log(\"Deleted entry {}\".format(entry_id))\n remaining = [x for x in self.reference_entry_id_list if x not in to_delete]\n self.visual.log(\"Deleted {}/{} entries, left with {}\".format(del_len, old_len, len(remaining)))\n self.push_reference_list(remaining, \"deletion\", force=True)\n self.unselect()",
"def __delitem__(self, key):\n\t\ttry:\n\t\t\tdel self.heap[[item == key for _, item in self.heap].index(True)]\n\t\texcept ValueError:\n\t\t\traise KeyError(str(key) + \" is not in the priority queue\")\n\t\theapq.heapify(self.heap)",
"def rm_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could remove.\" % label)\n return\n # the task exists, so remove it\n query_no_results(\"delete from task where label = ?\", [label]) \n # remove all person associations\n query_no_results(\"delete from task_person_pair where task = ?\", [label])\n print(\"Task with label '%s' removed.\" % label)",
"def remove_task(self, container):\n raise NotImplementedError()",
"def remove_task(self, id):\n raise NotImplementedError()",
"def delete(self, entry_id: str):\n try:\n return self.entries.pop(entry_id)\n except KeyError as error:\n raise errors.NoEntryFound(f'No entries found which match {entry_id}') from error",
"def remove_entry(self, entry: Union[int, str, Entry]) -> Optional[Entry]:\n if isinstance(entry, Entry):\n target = entry\n elif isinstance(entry, int):\n target = self.__entries[entry]\n else:\n target = None\n for e in self.__entries:\n if e.get_name() == entry:\n target = e\n if target is None:\n return None\n if isinstance(target, Directory) and target.is_populated():\n return None\n self.__entries.remove(target)\n return target",
"def remove(self, key):\n node, parent = Treap._find_node(key, self.root)\n if not node:\n raise KeyError(key)\n\n node.n -= 1\n if node.n > 0:\n self._prioritize(node)\n return\n\n if not parent and not (node.child[False] and node.child[True]):\n self.root = node.child[False] or node.child[True]\n if self.root:\n self.root.parent = None\n else:\n while node.child[False] and node.child[True]:\n # Pivot a child node up while the node to be deleted has\n # both left and right children.\n is_right = node.child[False].heap_id <= node.child[True].heap_id\n self._pivot_up(node.child[is_right])\n\n child = node.child[False] or node.child[True]\n parent = node.parent\n Treap._set_child(parent, node, child)\n self._prioritize(parent)\n\n node.parent = None\n node.child[False] = None\n node.child[True] = None",
"def _remove_expired_task(self):\n with self.lock:\n curr_time = datetime.utcnow()\n tasks_list = self.tasks_to_remove\n for task_id, t in tasks_list:\n time_elapsed = curr_time - t\n if (time_elapsed.total_seconds() < TASK_EXPIRE_DURATION_SEC):\n break\n self.tasks_to_remove.remove((task_id, t))\n self.task_map.pop(task_id)"
] |
[
"0.66694623",
"0.66056",
"0.64539456",
"0.64364725",
"0.6317414",
"0.6294466",
"0.6164615",
"0.6152803",
"0.6139779",
"0.60942066",
"0.604369",
"0.6039398",
"0.6034774",
"0.60207504",
"0.6006805",
"0.6004575",
"0.59795797",
"0.5877806",
"0.58768666",
"0.5874761",
"0.58690476",
"0.5858955",
"0.584746",
"0.58325005",
"0.5759314",
"0.57255715",
"0.57232744",
"0.5690486",
"0.5645906",
"0.56419045"
] |
0.7819699
|
0
|
Remove and return the lowest priority task from the heap. Raise KeyError if empty.
|
def pop(self):
while self.pq:
priority, count, task = heapq.heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
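
# Usage sketch, assuming a hypothetical TaskQueue class that combines the
# pop() above with the standard heapq add()/remove() helpers (as in the
# LazyPriorityQueue sketch after the remove() snippet): entries already marked
# REMOVED are skipped transparently, and popping an empty queue raises KeyError.
#
#     pq = TaskQueue()
#     pq.add('write report', priority=2)
#     pq.add('fix bug', priority=1)
#     pq.remove('write report')
#     pq.pop()    # -> 'fix bug'
#     pq.pop()    # -> KeyError: 'pop from an empty priority queue'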
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r",
"def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def peek(self):\n heaps = self.priorities\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n if len(heap_list) == 0:\n self._remove_key()\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n return heap_list[0]",
"def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest",
"def remove_min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty')\n self._swap(0, len(self) - 1)\n item = self._data.pop()\n self._down_heap(0)\n return (item._key, item._value)",
"def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n item = self._data.delete(self._data.first())\n return (item._key, item._value)",
"def pop(self):\n\n while self.heap:\n# #logger_cagada.debug(\"elem de heap %s\" % self.heap)\n priority, node = self.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n return priority, node\n raise KeyError('pop from an empty priority queue')",
"def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r",
"def remove_min(self) -> Tuple[K, V]:\n while self.queue:\n # pylint: disable=unused-variable\n value, count, key = heapq.heappop(self.queue)\n if value is not REMOVED:\n del self.__key_map__[key]\n return (key, value)\n return None",
"def remove_min(self):\r\n if self.is_empty():\r\n raise Empty(\"Priority queue is empty.\")\r\n item = self.data.delete(self.data.first())\r\n return item.key, item.value",
"def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val",
"def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...",
"def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')",
"def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]",
"def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min",
"def pop(self) -> T:\n while self.priority_queue:\n _, _, (item,) = heapq.heappop(self.priority_queue)\n if item is not None:\n del self.entry_finder[item] # type: ignore\n return cast(T, item)\n raise KeyError('pop from an empty priority queue')",
"def deleteMin(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n # grab last node in PQ to root and sink it down appropriately\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key] # delete index from position dict\n return node.key, node.value",
"def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)",
"def extract_min(self):\n if self.is_empty():\n raise ValueError(\"Priority queue is empty\")\n\n edge_tuple = heapq.heappop(self.__heap)\n ew = edge_tuple[1]\n return ew.edge()",
"def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")",
"def pop(self):\n return heappop(self.priority_queue)[1]",
"def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top",
"def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')",
"def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i",
"def pop(self):\n priority, key = self.__heap[0]\n self.__swap(0, len(self.__heap) - 1)\n del self.__position[key]\n del self.__heap[-1]\n\n if self:\n self.__bubble_down(0)\n\n return priority, key",
"def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum"
] |
[
"0.7668255",
"0.7574757",
"0.74667835",
"0.7422379",
"0.7402254",
"0.7376742",
"0.73750573",
"0.7343455",
"0.7297022",
"0.72484124",
"0.7206744",
"0.7189073",
"0.71608317",
"0.71471757",
"0.7085695",
"0.70759726",
"0.7072348",
"0.7043073",
"0.70376694",
"0.7022937",
"0.699216",
"0.6978453",
"0.6974346",
"0.6908654",
"0.68824446",
"0.6870222",
"0.6850202",
"0.6807385",
"0.67999053",
"0.6798774"
] |
0.79744154
|
0
|
Check if device is online and add the entity.
|
def add_entity(device: SmartPlug, async_add_entities):
# Attempt to get the sysinfo. If it fails, it will raise an
# exception that is caught by async_add_entities_retry which
# will try again later.
device.get_sysinfo()
async_add_entities([SmartPlugSwitch(device)], update_before_add=True)
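
# A rough sketch of the retry idea the comment above alludes to: keep probing
# the device and only register the entity once the probe succeeds. The helper
# name, the attempts/delay parameters and the add_entities callback signature
# are assumptions for illustration, not the Home Assistant API.
import time


def add_entity_when_online(device, add_entities, attempts=3, delay=5.0):
    for attempt in range(attempts):
        try:
            device.get_sysinfo()        # raises while the plug is unreachable
        except Exception:
            if attempt == attempts - 1:
                raise                   # give up after the last attempt
            time.sleep(delay)
        else:
            add_entities([device])
            return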
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def async_added_to_hass(self):\n self.hass.data[DOMAIN].add_entity_id(self.entity_id)\n self.hass.data[DOMAIN].add_sensor(self)",
"def is_online(self, device):\n # TODO: Add info for the device if it is actually ONLINE\n return device in self.backends",
"def poll_device(self):\n #self.logger.info(\"poll_device: Checking online status\")\n for tasmota_topic in self.tasmota_devices:\n if self.tasmota_devices[tasmota_topic].get('online', None) is not None:\n if self.tasmota_devices[tasmota_topic]['online_timeout'] < datetime.now():\n self.tasmota_devices[tasmota_topic]['online'] = False\n self.set_item_value(tasmota_topic, 'item_online', False, 'poll_device')\n self.logger.info(f\"poll_device: {tasmota_topic} is not online any more - online_timeout={self.tasmota_devices[tasmota_topic]['online_timeout']}, now={datetime.now()}\")",
"async def async_added_to_hass(self):\n self.hass.data[DOMAIN][\"entities\"][\"fan\"].append(self)",
"def is_cloud_device_already_added(self):\n for entry in self._async_current_entries():\n if entry.unique_id is not None and entry.unique_id == f\"{DOMAIN}Cloud\":\n return True\n return False",
"async def async_added_to_hass(self):\n await super().async_added_to_hass()\n self.coordinator.entities.append(self)",
"def available(self) -> bool:\n return self._device.is_online",
"async def async_new_entities(device_info):\n system_id = device_info[\"system_id\"]\n device_id = device_info[\"device_id\"]\n device = device_info[\"device\"]\n\n device_name = f\"{device['friendlyName']}\"\n\n if device.get(\"friendlyType\"):\n device_name = device_name + f\" ({device['friendlyType']})\"\n\n entity = GoogleWifiDeviceTracker(\n coordinator=coordinator,\n name=device_name,\n icon=DEFAULT_ICON,\n system_id=system_id,\n item_id=device_id,\n )\n entities = [entity]\n async_add_entities(entities)",
"def online(self):\n return False",
"def is_online(self) -> bool:\n return self.data[Attribute.ONLINE]",
"def setup(hass, config):\n\n _LOGGER.info(\"Connected Devices loading.\")\n \n component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)\n\n component.setup(config)\n\n descriptions = load_yaml_config_file(\n os.path.join(os.path.dirname(__file__), 'services.yaml'))\n\n def update_connected_device(service):\n \"\"\"Update a connected device.\"\"\"\n\n update_obj = service.data.get('value')\n\n connected_devices = hass.states.get('connected_devices.connected_devices').as_dict()\n \n attributes = connected_devices[\"attributes\"]\n\n for obj in update_obj:\n # _LOGGER.info(\"update value: %s\", obj[\"value\"])\n # _LOGGER.info(\"target: %s\", obj[\"target\"])\n # _LOGGER.info(\"previous value: %s\", attributes[\"devices\"][obj[\"device\"]][obj[\"target\"]])\n\n attributes[\"devices\"][obj[\"device\"]][obj[\"target\"]] = obj[\"value\"]\n # _LOGGER.info(\"after update: %s\", attributes[\"devices\"][obj[\"device\"]][obj[\"target\"]])\n \n connected_devices[\"attributes\"] = attributes\n\n hass.states.set('connected_devices.connected_devices', 'On', attributes, True)\n\n hass.services.register(\n DOMAIN,\n 'update_connected_device',\n update_connected_device,\n descriptions['update_connected_device'])\n\n return True",
"def update(self):\n if self._skip_update:\n self._skip_update = False\n return\n\n try:\n for prop in AIRER_PROPS:\n self.status[prop] = self.send('get_prop', [prop])[0]\n _LOGGER.debug(\"MiioDevice update: %s\", self.status)\n self.available = True\n self._retry = 0\n except Exception as exc:\n _LOGGER.error(\"Error on update: %s\", exc)\n self._retry += 1\n if self._retry > 3:\n self.available = False\n\n for entity in self.update_entities:\n entity.async_schedule_update_ha_state()",
"def online_check(self):\n self.online = False\n online_topic = '{t_topic}/INFO2'.format(**self)\n print('{BLUE}Watching for {}{NC}'.format(online_topic, **colors))\n try:\n self.mqtt.connect(self.mqtt_host)\n except Exception:\n print('MQTT broker not online')\n return False\n\n self.mqtt.message_callback_add(online_topic, lambda *args: \\\n setattr(self, 'online', True))\n self.mqtt.subscribe(online_topic)\n startTime = dt.datetime.now()\n while not self.online and not too_old(startTime, wait_time):\n self.mqtt.loop(timeout=loop_time)\n time_waited = (dt.datetime.now() - startTime).total_seconds()\n # If we did not see device publish INFO2, sometimes platformio causes\n # a delay by checking for updates and we miss seeing this message.\n # To check for that case, query the device for its build timestamp and\n # check if it was built in the last couple minutes.\n if not self.online:\n self.query_tas_status()\n if 'build_time' in self.reported:\n build_time = dt.datetime.strptime(self.reported['build_time'],\n '%Y-%m-%dT%H:%M:%S')\n if dt.datetime.now() - build_time < dt.timedelta(minutes=2):\n self.online = True\n\n if not self.online:\n print('{RED}{f_name} did not come online within {wait_time} '\n 'seconds{NC}'.format(f_name=self.f_name,\n wait_time=str(wait_time),\n **colors))\n elif self.online:\n print('{GREEN}{f_name} came online in {time_waited} '\n 'seconds{NC}'.format(f_name=self.f_name,\n time_waited=time_waited,\n **colors))\n self.mqtt.unsubscribe(online_topic)\n self.mqtt.message_callback_remove(online_topic)\n self.mqtt.disconnect()\n return self.online",
"async def _discovered_wemo(device):\n async_add_entities([WemoBinarySensor(device)])",
"async def update_or_create_entity(dev, tern):\n model = dev[\"model\"] if \"model\" in dev else \"\"\n version = dev[\"version\"] if \"version\" in dev else \"\"\n available = dev[\"online\"] if \"online\" in dev else False\n if \"services\" not in dev:\n return []\n _LOGGER.info(dev)\n for svc in dev[\"services\"]:\n isLight = False\n profile = svc[\"profile\"]\n features = -1\n if profile == PROFILE_ONOFF_LIGHT:\n features = SUPPORT_TERNCY_ON_OFF\n isLight = True\n elif profile == PROFILE_COLOR_LIGHT:\n features = SUPPORT_TERNCY_COLOR\n isLight = True\n elif profile == PROFILE_EXTENDED_COLOR_LIGHT:\n features = SUPPORT_TERNCY_EXTENDED\n isLight = True\n elif profile == PROFILE_COLOR_TEMPERATURE_LIGHT:\n features = SUPPORT_TERNCY_CT\n isLight = True\n elif profile == PROFILE_DIMMABLE_COLOR_TEMPERATURE_LIGHT:\n features = SUPPORT_TERNCY_CT\n isLight = True\n elif profile == PROFILE_DIMMABLE_LIGHT:\n features = SUPPORT_TERNCY_DIMMABLE\n isLight = True\n elif profile == PROFILE_DIMMABLE_LIGHT2:\n features = SUPPORT_TERNCY_DIMMABLE\n isLight = True\n elif profile == PROFILE_COLOR_DIMMABLE_LIGHT:\n features = SUPPORT_TERNCY_EXTENDED\n isLight = True\n elif profile == PROFILE_EXTENDED_COLOR_LIGHT2:\n features = SUPPORT_TERNCY_EXTENDED\n isLight = True\n elif profile == PROFILE_PLUG:\n features = SUPPORT_TERNCY_ON_OFF\n elif profile == PROFILE_CURTAIN:\n features = SUPPORT_TERNCY_ON_OFF\n elif profile == PROFILE_DOOR_SENSOR:\n features = SUPPORT_TERNCY_ON_OFF\n elif profile == PROFILE_HA_TEMPERATURE_HUMIDITY:\n features = SUPPORT_TERNCY_ON_OFF\n elif profile == PROFILE_PIR:\n features = SUPPORT_TERNCY_ON_OFF\n else:\n _LOGGER.info(\"unsupported profile %d\", profile)\n return []\n\n devid = svc[\"id\"]\n devidTemp = devid + DEVID_EXT_TEMP\n if profile == PROFILE_HA_TEMPERATURE_HUMIDITY:\n devid = devidTemp\n\n disableRelay = get_attr_value(svc[\"attributes\"], \"disableRelay\")\n if disableRelay is not None and disableRelay == 1:\n _LOGGER.info(\"%s is disabled, skip it\", devid)\n return []\n temperature = get_attr_value(svc[\"attributes\"], \"temperature\")\n _LOGGER.info(temperature)\n\n name = svc[\"name\"]\n if name == \"\":\n name = devid\n device = None\n deviceTemp = None\n if devid in tern.hass_platform_data.parsed_devices:\n device = tern.hass_platform_data.parsed_devices[devid]\n if temperature is not None:\n deviceTemp = tern.hass_platform_data.parsed_devices[devidTemp]\n deviceTemp.update_state(svc[\"attributes\"])\n deviceTemp.is_available = available\n else:\n if profile == PROFILE_PLUG:\n device = TerncySmartPlug(tern, devid, name, model, version, features)\n elif profile == PROFILE_CURTAIN:\n device = TerncyCurtain(tern, devid, name, model, version, features)\n elif profile == PROFILE_DOOR_SENSOR:\n device = TerncyDoorSensor(tern, devid, name, model, version, features)\n elif profile == PROFILE_HA_TEMPERATURE_HUMIDITY:\n device = TerncyTemperatureSensor(tern, devid, name + \" temperature\", model, version, features)\n elif profile == PROFILE_PIR:\n device = TerncyMotionSensor(tern, devid, name, model, version, features)\n else:\n device = TerncyLight(tern, devid, name, model, version, features)\n\n if profile != PROFILE_HA_TEMPERATURE_HUMIDITY and temperature is not None:\n _LOGGER.info(\"create temperature sensor\")\n deviceTemp = TerncyTemperatureSensor(tern, devidTemp, name + \" temperature\", model, version, features)\n deviceTemp.update_state(svc[\"attributes\"])\n deviceTemp.is_available = available\n tern.hass_platform_data.parsed_devices[devidTemp] = deviceTemp\n 
device.update_state(svc[\"attributes\"])\n device.is_available = available\n if devid in tern.hass_platform_data.parsed_devices:\n device.schedule_update_ha_state()\n else:\n for platform in async_get_platforms(tern.hass_platform_data.hass, DOMAIN):\n _LOGGER.info(platform.domain)\n if platform.config_entry.unique_id == tern.dev_id:\n if profile == PROFILE_PLUG and platform.domain == \"switch\":\n await platform.async_add_entities([device])\n elif profile == PROFILE_CURTAIN and platform.domain == \"cover\":\n await platform.async_add_entities([device])\n elif (\n profile == PROFILE_DOOR_SENSOR\n and platform.domain == \"binary_sensor\"\n ):\n await platform.async_add_entities([device])\n elif profile == PROFILE_PIR and platform.domain == \"binary_sensor\":\n await platform.async_add_entities([device])\n elif deviceTemp is not None and platform.domain == \"sensor\":\n await platform.async_add_entities([deviceTemp])\n elif profile == PROFILE_HA_TEMPERATURE_HUMIDITY and platform.domain == \"sensor\":\n await platform.async_add_entities([device])\n elif isLight and platform.domain == \"light\":\n await platform.async_add_entities([device])\n tern.hass_platform_data.parsed_devices[devid] = device",
"async def async_added_to_hass(self):\n await super().async_added_to_hass()\n if DOMAIN not in self.hass.data:\n self.hass.data[DOMAIN] = {}\n if SENSOR_PLATFORM not in self.hass.data[DOMAIN]:\n self.hass.data[DOMAIN][SENSOR_PLATFORM] = {}\n self.hass.data[DOMAIN][SENSOR_PLATFORM][self.entity_id] = self\n\n if self._calendar:\n if CALENDAR_PLATFORM not in self.hass.data[DOMAIN]:\n self.hass.data[DOMAIN][CALENDAR_PLATFORM] = EntitiesCalendarData(self.hass)\n _LOGGER.debug(\"Creating fkfgarbage_collection calendar \" + self._name)\n self.hass.async_create_task(\n async_load_platform(\n self.hass,\n CALENDAR_PLATFORM,\n DOMAIN,\n {\"name\": CALENDAR_NAME},\n {\"name\": CALENDAR_NAME},\n )\n )\n self.hass.data[DOMAIN][CALENDAR_PLATFORM].add_entity(self.entity_id)",
"async def async_setup_entry(hass, config_entry, async_add_entities):\n async_add_entities([SmartSystemWebsocketStatus(hass.data[DOMAIN][GARDENA_SYSTEM].smart_system)], True)",
"async def async_added_to_hass(self):\n await super().async_added_to_hass()\n\n def on_state_changed():\n self.schedule_update_ha_state()\n\n for service in self._device.device_services:\n service.subscribe_callback(self.entity_id, on_state_changed)",
"async def async_init_single_device(dev: Device) -> None:\n await dev.async_added_to_hass()\n dev.async_write_ha_state()",
"async def async_added_to_hass(self):\n\n def gpio_edge_listener(port):\n \"\"\"Update GPIO when edge change is detected.\"\"\"\n self.schedule_update_ha_state(True)\n\n def setup_entity():\n setup_input(self._port)\n edge_detect(self._port, gpio_edge_listener)\n self.schedule_update_ha_state(True)\n\n await self.hass.async_add_executor_job(setup_entity)",
"def availability_message_received(msg):\n payload = msg.payload\n\n if payload == \"online\":\n self._available = True\n elif payload == \"offline\":\n self._available = False\n else:\n _LOGGER.info(f\"Invalid payload received for {self.name}\")\n return\n\n self.async_write_ha_state()",
"def update(self):\n try:\n self.homestatus = pyatmo.HomeStatus(self.auth, home_id=self.home_id)\n except pyatmo.exceptions.NoDevice:\n _LOGGER.error(\"No device found\")\n return\n except TypeError:\n _LOGGER.error(\"Error when getting homestatus\")\n return\n except requests.exceptions.Timeout:\n _LOGGER.warning(\"Timed out when connecting to Netatmo server\")\n return\n for room in self.homestatus.rooms:\n try:\n roomstatus = {}\n homestatus_room = self.homestatus.rooms[room]\n homedata_room = self.homedata.rooms[self.home_id][room]\n\n roomstatus[\"roomID\"] = homestatus_room[\"id\"]\n if homestatus_room[\"reachable\"]:\n roomstatus[\"roomname\"] = homedata_room[\"name\"]\n roomstatus[\"target_temperature\"] = homestatus_room[\n \"therm_setpoint_temperature\"\n ]\n roomstatus[\"setpoint_mode\"] = homestatus_room[\"therm_setpoint_mode\"]\n roomstatus[\"current_temperature\"] = homestatus_room[\n \"therm_measured_temperature\"\n ]\n roomstatus[\"module_type\"] = self.homestatus.thermostatType(\n home_id=self.home_id, rid=room, home=self.home_name\n )\n roomstatus[\"module_id\"] = None\n roomstatus[\"heating_status\"] = None\n roomstatus[\"heating_power_request\"] = None\n batterylevel = None\n for module_id in homedata_room[\"module_ids\"]:\n if (\n self.homedata.modules[self.home_id][module_id][\"type\"]\n == NA_THERM\n or roomstatus[\"module_id\"] is None\n ):\n roomstatus[\"module_id\"] = module_id\n if roomstatus[\"module_type\"] == NA_THERM:\n self.boilerstatus = self.homestatus.boilerStatus(\n rid=roomstatus[\"module_id\"]\n )\n roomstatus[\"heating_status\"] = self.boilerstatus\n batterylevel = self.homestatus.thermostats[\n roomstatus[\"module_id\"]\n ].get(\"battery_level\")\n elif roomstatus[\"module_type\"] == NA_VALVE:\n roomstatus[\"heating_power_request\"] = homestatus_room[\n \"heating_power_request\"\n ]\n roomstatus[\"heating_status\"] = (\n roomstatus[\"heating_power_request\"] > 0\n )\n if self.boilerstatus is not None:\n roomstatus[\"heating_status\"] = (\n self.boilerstatus and roomstatus[\"heating_status\"]\n )\n batterylevel = self.homestatus.valves[\n roomstatus[\"module_id\"]\n ].get(\"battery_level\")\n\n if batterylevel:\n batterypct = interpolate(\n batterylevel, roomstatus[\"module_type\"]\n )\n if roomstatus.get(\"battery_level\") is None:\n roomstatus[\"battery_level\"] = batterypct\n elif batterypct < roomstatus[\"battery_level\"]:\n roomstatus[\"battery_level\"] = batterypct\n self.room_status[room] = roomstatus\n except KeyError as err:\n _LOGGER.error(\"Update of room %s failed. Error: %s\", room, err)\n self.away_temperature = self.homestatus.getAwaytemp(home_id=self.home_id)\n self.hg_temperature = self.homestatus.getHgtemp(home_id=self.home_id)\n self.setpoint_duration = self.homedata.setpoint_duration[self.home_id]",
"def __init__(self, hass, name, device, should_poll=False):\n self.hass = hass\n self._name = name\n self._device = device\n self._should_poll = should_poll\n if not should_poll:\n self._device.update_entities.append(self)",
"def _availability_message_received(self, msg: ReceiveMessage) -> None:\n self._available = msg.payload == \"online\"\n self.async_write_ha_state()",
"def ping(self):\n self.last_seen = datetime.utcnow()\n db.session.add(self)",
"def is_connected(self):\n try:\n if self.coordinator.data[self._system_id][\"devices\"][self._item_id].get(\n \"connected\"\n ):\n connected_ap = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"apId\")\n if connected_ap:\n connected_ap = self.coordinator.data[self._system_id][\n \"access_points\"\n ][connected_ap][\"accessPointSettings\"][\"accessPointOtherSettings\"][\n \"roomData\"\n ][\n \"name\"\n ]\n self._attrs[\"connected_ap\"] = connected_ap\n else:\n self._attrs[\"connected_ap\"] = \"NA\"\n\n self._attrs[\"ip_address\"] = self.coordinator.data[self._system_id][\n \"devices\"\n ][self._item_id].get(\"ipAddress\", \"NA\")\n\n self._mac = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"macAddress\")\n\n self._attrs[\"mac\"] = self._mac if self._mac else \"NA\"\n\n self._is_connected = True\n else:\n self._is_connected = False\n except TypeError:\n pass\n except KeyError:\n pass\n # self.hass.async_create_task(\n # self.hass.config_entries.async_reload(self.coordinator.entry.entry_id)\n # )\n\n return self._is_connected",
"async def async_setup_entry(hass, config_entry, async_add_entities):\n sensor_entities = []\n bridge = hass.data[DOMAIN]\n\n for device in bridge.devices[\"sensor\"]:\n entity = get_sensor_entity(device, bridge)\n sensor_entities.append(entity)\n _LOGGER.info(\n f\"Added HomeSeer sensor-type device: {entity.name} ({entity.device_state_attributes})\"\n )\n\n if sensor_entities:\n async_add_entities(sensor_entities)",
"async def async_update(self) -> None:\n all_states = [self.hass.states.get(x) for x in self._entity_ids]\n states: list[State] = list(filter(None, all_states))\n on_states = [state for state in states if state.state == STATE_ON]\n\n self._state = len(on_states) > 0\n self._available = any(state.state != STATE_UNAVAILABLE for state in states)",
"async def async_setup_entry(hass, entry, async_add_entities):\n api_key = entry.data[CONF_API_KEY]\n coordinator = hass.data[DOMAIN][entry.entry_id]\n async_add_entities(\n Device(hass, api_key, device, coordinator)\n for device in coordinator.data\n if device[\"type\"] in SUPPORTED_SENSORS\n )",
"async def async_setup_entry(hass, entry, async_add_entities):\n\n coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]\n entities = []\n\n for system_id, system in coordinator.data.items():\n for dev_id, device in system[\"devices\"].items():\n device_name = f\"{device['friendlyName']}\"\n\n if device.get(\"friendlyType\"):\n device_name = device_name + f\" ({device['friendlyType']})\"\n\n entity = GoogleWifiDeviceTracker(\n coordinator=coordinator,\n name=device_name,\n icon=DEFAULT_ICON,\n system_id=system_id,\n item_id=dev_id,\n )\n entities.append(entity)\n\n async_add_entities(entities)\n\n async def async_new_entities(device_info):\n \"\"\"Add new entities when they connect to Google Wifi.\"\"\"\n system_id = device_info[\"system_id\"]\n device_id = device_info[\"device_id\"]\n device = device_info[\"device\"]\n\n device_name = f\"{device['friendlyName']}\"\n\n if device.get(\"friendlyType\"):\n device_name = device_name + f\" ({device['friendlyType']})\"\n\n entity = GoogleWifiDeviceTracker(\n coordinator=coordinator,\n name=device_name,\n icon=DEFAULT_ICON,\n system_id=system_id,\n item_id=device_id,\n )\n entities = [entity]\n async_add_entities(entities)\n\n async_dispatcher_connect(hass, SIGNAL_ADD_DEVICE, async_new_entities)"
] |
[
"0.6015471",
"0.5891063",
"0.5770823",
"0.5731657",
"0.57188225",
"0.56620145",
"0.5643109",
"0.56374156",
"0.5606649",
"0.55505234",
"0.55300385",
"0.551577",
"0.5509076",
"0.5490396",
"0.54673177",
"0.54582417",
"0.5448342",
"0.5441688",
"0.54276526",
"0.5424822",
"0.54099214",
"0.5394539",
"0.5383611",
"0.5378633",
"0.5377535",
"0.53561133",
"0.532333",
"0.5311033",
"0.530507",
"0.53034216"
] |
0.61210483
|
0
|
Turn the switch off.
|
def turn_off(self, **kwargs):
self.smartplug.turn_off()
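
# A minimal sketch of the delegation pattern above: the switch entity simply
# forwards on/off calls to the wrapped plug object. The PlugSwitch name and
# the plug's turn_on()/turn_off() methods are assumptions for illustration.
class PlugSwitch:
    def __init__(self, smartplug):
        self.smartplug = smartplug

    def turn_on(self, **kwargs):
        self.smartplug.turn_on()

    def turn_off(self, **kwargs):
        self.smartplug.turn_off()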
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def off_switch(self):\n self._switch_callback = None",
"def turn_off(self):\n self.handleCommand(1)\n self._state = STATE_OFF",
"def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False",
"def turn_off(self):\n print(\"Turning the lights off\")\n self.led.all_off()\n self.client.publish(STATE_TOPIC, OFF) #publish",
"def turn_off(self, **kwargs):\n self._is_on = False",
"def turn_off(self):\n GPIO.output(self.gpio, False) # turn off light",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"def _turn_off(self):\n self._turn_display('OFF')",
"def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)",
"def turnOff(self):\n self.write(\"E;O0;E;\")\n return self.output()",
"def turn_off(self, **kwargs):\n self._send_command(\"turn_off\")",
"def turn_off(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn off\"):\n self.wemo.off()",
"def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)",
"def turn_off(self) -> None:\n self._monoprice.set_power(self._zone_id, False)",
"def off(self):\n self._set_state(on=False)",
"def lightning_turnoff(self):\n self.turnOff()",
"def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)",
"def turn_off(self, **kwargs: Any) -> None:\n self._light.turn_off()",
"def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)",
"def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()",
"def turn_off(self, **kwargs: Any) -> None:\n self._set_light(OFF_STATE)",
"def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)",
"def turn_off(self, **kwargs):\n #self._light.turn_off()\n self._brightness = 0\n self._state = 'off'\n _LOGGER.info(\"turn_off() is called\")",
"def turn_off(self):\n self._interrupt_flash()\n if self.on:\n GPIO.output(self.pin, GPIO.LOW)\n self.on = False",
"def turn_off(self, **kwargs):\n self.heater.turn_off()",
"def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()",
"def turn_off(self, **kwargs):\n if (CommandSwitch._switch(self._command_off) and\n not self._command_state):\n self._state = False\n self.schedule_update_ha_state()",
"def switch_off(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def turn_off(self):\n self.robot.stop_simulation()",
"def turn_off(self, **kwargs):\n if self.is_on:\n _LOGGER.debug(\"Sending STOP command to: %s\", self._name)\n self._api.control('STOP')\n self._mower_status = STATUS_EXECUTING_STOP\n self.schedule_update_ha_state()"
] |
[
"0.867637",
"0.85906535",
"0.8523712",
"0.84588856",
"0.84180963",
"0.8404447",
"0.8398465",
"0.8375364",
"0.83491224",
"0.83192796",
"0.82949907",
"0.8277566",
"0.81680954",
"0.8155346",
"0.8112255",
"0.8091075",
"0.8072859",
"0.8058921",
"0.8058517",
"0.8041678",
"0.8036265",
"0.80006665",
"0.80002415",
"0.7977953",
"0.7969991",
"0.79489374",
"0.7926788",
"0.79186",
"0.7879116",
"0.7863223"
] |
0.870139
|
0
|
Return the plug from the context.
|
def _plug_from_context(self):
children = self.smartplug.sys_info["children"]
return next(c for c in children if c["id"] == self.smartplug.context)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_plug(self, name):\n return self.plug_dict[name]",
"def get_plugin(self, name):",
"def plugin_instance(self):\n return self.__plugin_instance",
"def default_context(plugin, context):\n return {\"plugin\": plugin}",
"def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin",
"def get_context(self):\n return self.context.generate()",
"def context(self):\n return self.parent.context",
"def context(self):\n return self._context",
"def context(self):\n return self._context",
"def context(self):\n return self._context",
"def context(self):\n return self._context",
"def context(self):\n return self._context",
"def context(self):\n return self._context",
"def context(self):\n return self._context",
"def context(self) -> InjectionContext:\n return self._context",
"def context(self) -> InjectionContext:\n return self._context",
"def get(self, integrate_with, uid, default=None):\n item = self._registry[integrate_with].get(uid, default)\n\n if not item:\n err_msg = self.plugin_not_found_error_message.format(\n uid, self.__class__\n )\n if self.fail_on_missing_plugin:\n logger.error(err_msg)\n raise self.plugin_not_found_exception_cls(err_msg)\n else:\n logger.debug(err_msg)\n\n return item",
"def _get_device():\n return context.get_context('device_target')",
"def context(self) -> CONTEXT:",
"def plugh():",
"def get_plugin_interface(self):",
"def getPlugin(self, *args):\n return _libsbml.SBase_getPlugin(self, *args)",
"def get_context(self):\n uuid = self.data.get('uuid', None)\n if uuid is None:\n return\n item = ploneapi.content.get(UID=uuid)\n return item",
"def render_plugin_in_context(self, plugin, context=None):\n if plugin.__class__ not in self._renderers:\n raise PluginNotRegistered(\n \"Plugin %s is not registered\" % plugin._meta.label_lower\n )\n template, local_context = self._renderers[plugin.__class__]\n\n if template is None:\n # Simple string renderer\n return local_context(plugin) if callable(local_context) else local_context\n\n if context is None:\n context = Context()\n\n if callable(template):\n template = template(plugin)\n if callable(local_context):\n local_context = local_context(plugin, context)\n\n return render_in_context(context, template, local_context)",
"def context(self):\n LOGGER.debug('Getting context: %s', self._context)\n return self._context",
"def get_current():\n return getattr(_request_store, 'context', None)",
"def current_context():\n return _current.get()",
"def get_context(self):\n\n return self._context",
"def context(self) -> Any:\n ...",
"def __current_object__(self):\n return self.__lookup()"
] |
[
"0.72836846",
"0.66230196",
"0.62327284",
"0.61789984",
"0.60942245",
"0.5989904",
"0.59802115",
"0.5973542",
"0.5973542",
"0.5973542",
"0.5973542",
"0.5973542",
"0.5973542",
"0.5973542",
"0.5966448",
"0.5966448",
"0.5947939",
"0.59386843",
"0.5848881",
"0.5837955",
"0.5817569",
"0.5720945",
"0.5701404",
"0.5698415",
"0.5679895",
"0.56763726",
"0.56351334",
"0.5634283",
"0.56152606",
"0.5576284"
] |
0.7972013
|
0
|
Takes a string (e.g., `'0.17603'`) or a float (e.g., `0.17603`) and returns a tuple whose first element is the approximating Fraction and whose second element is the relative difference: the difference from the original value expressed as a proportion of the original value.
|
def frac(amount, limit=100):
frac = Fraction(amount).limit_denominator(limit)
frac_double = frac.numerator / frac.denominator
try:
frac_diff = frac_double - amount
except TypeError: # amount is a string
amt = float(amount)
frac_diff = frac_double - amt
relative_diff = frac_diff / amt
else:
relative_diff = frac_diff / amount
return (frac, relative_diff)
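A minimal usage sketch for frac(), assuming `from fractions import Fraction` is available in the same module (the function above relies on it); the exact fraction returned for the string input depends on the denominator limit, so it is only printed here, not asserted.
from fractions import Fraction  # frac() above assumes this import is in scope

approx, rel_diff = frac('0.17603', limit=100)
print(approx, rel_diff)   # a Fraction with denominator <= 100 and its (small) relative error

approx, rel_diff = frac(0.5, limit=10)
print(approx, rel_diff)   # Fraction(1, 2) 0.0 -- float input, so the TypeError branch is skipped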
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def percentage_to_fraction(percentage):\n return float(percentage / 100.0)",
"def _fraction_string_to_decimal(fraction: str) -> Decimal:\n parts = fraction.split(\"/\")\n numerator = int(parts[0])\n denominator = int(parts[1])\n return Decimal(numerator / denominator)",
"def change_to_rational(number: float) -> Tuple[int, int]:\n f = Fraction(str(number))\n return (f.numerator, f.denominator)",
"def fraction_to_percentage(fraction):\n return fraction * 100.0",
"def ratio(self, string='') -> float:\n try:\n return(self.find(string)/self.total)\n except Exception as error:\n print(f\"Error: self.ratio({string}) -> {error}\")",
"def parseFraction(f):\n p = f.find(\"/\")\n if p < 1:\n return None\n s1 = f[:p]\n s2 = f[p+1:]\n try:\n v1 = int(s1)\n v2 = int(s2)\n except ValueError:\n return None\n if v2:\n return 1.0 * v1 / v2\n else:\n return None",
"def get_diff_and_percentage(self, first, second, state):\n difference = first - second\n per_difference = (difference / second) * 100\n total_percentage = (first / self.populations[state]) * 100\n return [difference, per_difference, total_percentage]",
"def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation",
"def fraction_to_proper_fraction(rational):\n assert isinstance(rational, Fraction), repr(rational)\n quotient = int(rational)\n residue = rational - quotient\n return quotient, residue",
"def convert_to_float(frac_str):\n try:\n return float(frac_str)\n except ValueError:\n num, denom = frac_str.split('/')\n try:\n leading, num = num.split(' ')\n whole = float(leading)\n except ValueError:\n whole = 0\n frac = float(num) / float(denom)\n result = whole - frac if whole < 0 else whole + frac\n\n return result",
"def computeFraction( poi_messages, all_messages ):\n \n fraction = 0.\n \n poi_messages = float(poi_messages)\n all_messages = float(all_messages)\n \n if isnan(poi_messages) or poi_messages == 0:\n fraction = 0.\n else: fraction = poi_messages / all_messages\n \n return round(fraction, 3)",
"def test_div(self):\n newvalues= Fraction(7,10)/Fraction(4,5)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"35/40\")",
"def extract_float(self, s: str) -> float:\n f = re.findall(r'([0-9]*[.]*[0-9]+)', s)\n return float(f[0]) if len(f) > 0 else None",
"def percent_parse(pstring):\n if pstring.strip().endswith('%'):\n return int(pstring.strip()[:-1]) / 100\n else:\n return np.nan",
"def calculate_vote_fractions():\n return _calculate_vote_fractions(models.get_candidate_to_vote_count())",
"def _float_or_percent(val: Union[float, str]) -> float:\n if isinstance(val, float):\n return val\n\n if not isinstance(val, str) or not val.endswith(\"%\"):\n raise ValueError(f\"{val} is neither a float of a percent value\")\n\n return float(val[:-1]) / 100",
"def computeFraction( poi_messages, all_messages ):\n fraction=0\n if poi_messages != 'NaN' and all_messages != 'NaN':\n\t\tfraction = poi_messages/float(all_messages)\n return fraction",
"def computeFraction( poi_messages, all_messages ):\n fraction = 0.\n poi_messages = float(poi_messages)\n all_messages = float(all_messages)\n if not (math.isnan(poi_messages) or math.isnan(all_messages)):\n fraction = poi_messages/all_messages\n return fraction",
"def get_tuple(self, string):\n a = re.search('\\((\\d+\\.\\d+), (\\d+\\.\\d+)\\)', string)\n if not a:\n return None\n else:\n return (float(a.group(1)), float(a.group(2)))",
"def computeFraction( poi_messages, all_messages ):\n fraction = 0.\n \n if all_messages == 'NaN':\n return fraction\n \n if poi_messages == 'NaN':\n poi_messages = 0\n \n fraction = 1.0*poi_messages/all_messages\n\n return fraction",
"def percent_of(part, whole):\n return part * 100 / whole",
"def nearest_fration(value):\r\n try:\r\n from fraction import Fraction\r\n return str(Fraction(value))\r\n except ImportError:\r\n return '%i/100' % int(float(value) * 100)",
"def percent_rating(value):\n value = Decimal(value)\n value = round(value / 3, 2) * 100\n return value",
"def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)",
"def compare_strings(string1: str, string2: str) -> float:\n return SequenceMatcher(None, string1, string2).ratio()",
"def computeFraction(poi_messages, all_messages):\n fraction = 0.\n if all_messages != \"NaN\":\n fraction = float(poi_messages)/float(all_messages)\n else:\n fraction = 0\n return fraction",
"def fractions():\n\n pi = 22 / 7\n print(\"22/7\\n====\")\n print_as_text(pi)\n\n pi = 333 / 106\n print(\"333/106\\n=======\")\n print_as_text(pi)\n\n pi = 355 / 113\n print(\"355/113\\n=======\")\n print_as_text(pi)\n\n pi = 52163 / 16604\n print(\"52163/16604\\n===========\")\n print_as_text(pi)\n\n pi = 103993 / 33102\n print(\"103993/33102\\n============\")\n print_as_text(pi)\n\n pi = 245850922 / 78256779\n print(\"245850922/78256779\\n==================\")\n print_as_text(pi)",
"def compute_fraction( poi_messages, all_messages ):\n import math\n if poi_messages == 0 or all_messages == 0 or math.isnan(float(poi_messages)) or math.isnan(float(all_messages)) :\n return 0.\n fraction = 0.\n fraction = float(poi_messages) / float(all_messages) \n return fraction",
"def percentage_change(old_value, new_value):\n\n result = float(100 * (new_value - old_value) / old_value)\n\n return result",
"def calculate_fraction(delta, lgHSP, pid, pos):\n\n # Calculate new score, id and positive\n # Calculation: initial_value * (franction of length that has been preserved)\n fraction = 1 - (delta / lgHSP)\n\n new_id = np.floor(pid * fraction)\n new_pos = np.floor(pos * fraction)\n\n # Calculate new length\n new_length = lgHSP - delta\n\n # Set expect value to -1 : this value should not be used after\n # having changed HSPs boundaries\n # new_evalue = -1\n\n return new_id, new_pos, new_length"
] |
[
"0.6407023",
"0.6283856",
"0.61930007",
"0.6166878",
"0.61665225",
"0.61538124",
"0.59310925",
"0.5849572",
"0.584075",
"0.56524926",
"0.5635948",
"0.56213564",
"0.55592436",
"0.5557316",
"0.55544513",
"0.5553573",
"0.5552682",
"0.5548242",
"0.5509264",
"0.5495675",
"0.5476972",
"0.5464214",
"0.545332",
"0.54509133",
"0.54474646",
"0.54268724",
"0.5422697",
"0.5415931",
"0.54026335",
"0.5369886"
] |
0.6342941
|
1
|
Computes the simplest (smallest-denominator) fraction that approximates `amount` to within `max_diff`, measured as a proportion of `amount`. By default the denominator limit is increased by 1 on each iteration (slow), but the search is guaranteed to stop at the first, and therefore simplest, result that is within tolerance. Alternatively, pass `stepfunc`, a function of type `(int) -> int`, to grow the limit faster.
|
def simplest_frac(amount, max_diff=0.01, stepfunc=None, debug=DEBUG_MODE):
current_diff = max_diff + 1
current_result = None
current_limit = 1
while abs(current_diff) > max_diff:
current_result = frac(amount, current_limit)
if debug:
print(f'max diff: {max_diff}')
print(f'current_diff: {current_diff}')
print(f'current_limit: {current_limit}')
print(f'current_result: {current_result}')
current_diff = current_result[1]
if stepfunc:
if debug:
print(f'stepfunc provided')
current_limit = stepfunc(current_limit)
else:
if debug:
print(f'no stepfunc provided')
current_limit += 1
if debug:
print(f'\n\nFinal result:')
print(f'{current_result} (limit: {current_limit})')
return current_result
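A hedged usage sketch, assuming frac() above is in the same module and that a module-level DEBUG_MODE flag exists (it is read as a default argument, so it must be defined before simplest_frac itself):
DEBUG_MODE = False  # must precede the def above, since defaults are evaluated at definition time

# Linear search over denominator limits: stops at the first fraction whose
# relative error is within max_diff (default 0.01).
print(simplest_frac(0.333))   # (Fraction(1, 3), approx. 0.001)

# A stepfunc of type (int) -> int grows the limit faster, e.g. by doubling it each step.
print(simplest_frac(0.1428, max_diff=0.001, stepfunc=lambda n: n * 2))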
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def step(self) -> float:\n step = DEFAULT_STEP\n value_range = abs(self.max_value - self.min_value)\n if value_range != 0:\n while value_range <= step:\n step /= 10.0\n return step",
"def do_work(limit):\n no_of_fractions = 0\n\n # First denominator and numerator\n den = 3\n num = 2\n\n # Note that the next denominator and numerator can be found by formula\n # den_k+1 = den_k + num_k\n # num_k+1 = num_k + 2 * den_k = num_k + 2 *(den_k+1 - num_k)\n # = 2 * den_k+1 - num_k\n\n for _ in range(1, limit + 1):\n den += num\n num = 2 * den - num\n # Check the number of digits of num and en using log10\n if (int)(math.log10(num)) > (int)(math.log10(den)):\n no_of_fractions += 1\n\n return no_of_fractions",
"def calculate_slider_step(\n min_value: float, max_value: float, steps: int = 100\n) -> float:\n\n return 10 ** math.floor(math.log10((max_value - min_value) / steps))",
"def frac(amount, limit=100):\n frac = Fraction(amount).limit_denominator(limit)\n frac_double = frac.numerator / frac.denominator\n\n try:\n frac_diff = frac_double - amount\n except TypeError: # amount is a string\n amt = float(amount)\n frac_diff = frac_double - amt\n relative_diff = frac_diff / amt\n else:\n relative_diff = frac_diff / amount\n\n return (frac, relative_diff)",
"def limitReal(x, max_denominator=1000000):\n f = Fraction(x).limit_denominator(max_denominator)\n return Real((f.numerator, f.denominator))",
"def get_fract(self, var, max_var):\n\t\treturn (var % max_var) / float(max_var)",
"def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def _compute_general_continued_fraction(\n max_iterations,\n numerator_denominator_args_list,\n tolerance=None,\n partial_numerator_fn=None,\n partial_denominator_fn=None,\n dtype=tf.float32,\n name=None):\n with tf.name_scope(name or 'continued_fraction'):\n dtype = dtype_util.common_dtype(\n numerator_denominator_args_list, dtype)\n\n if (partial_numerator_fn is None) and (partial_denominator_fn is None):\n raise ValueError('Expect one of `partial_numerator_fn` and '\n '`partial_denominator_fn` to be set.')\n\n def _continued_fraction_one_step(\n unused_should_stop,\n numerator,\n previous_numerator,\n denominator,\n previous_denominator,\n iteration_count):\n partial_denominator = 1.\n if partial_denominator_fn:\n partial_denominator = partial_denominator_fn(\n iteration_count, *numerator_denominator_args_list)\n new_numerator = partial_denominator * numerator\n new_denominator = partial_denominator * denominator\n\n partial_numerator = 1.\n if partial_numerator_fn:\n partial_numerator = partial_numerator_fn(\n iteration_count, *numerator_denominator_args_list)\n new_numerator = new_numerator + partial_numerator * previous_numerator\n new_denominator = (\n new_denominator + partial_numerator * previous_denominator)\n\n should_stop_next = iteration_count > max_iterations\n\n if tolerance is not None:\n # We can use a more efficient computation when the partial numerators\n # are 1.\n if partial_numerator_fn is None:\n # We now want to compute to relative error between the fraction at\n # this iteration, vs. the previous iteration.\n # Let h_i be the numerator and k_i the denominator, and a_i be the\n # i-th term.\n # h_i / k_i - h_{i-1} / k_{i-1} =\n # (h_i * k_{i - 1} - h_{i - 1} * k_i) / (k_i * k_{i - 1}) =\n # ((a_i h_{i - 1} + h_{i - 2}) * k_{i - 1} -\n # (a_i k_{i - 1} + k_{i - 2}) * h_{i - 1}) / (k_i * k_{i - 1}) =\n # -(h_{i - 1} * k_{i - 2} - h_{i - 2} * k_{i - 1}) / (k_i * k_{i - 1})\n # This suggests we should prove something about the numerator\n # inductively, and indeed\n # (h_i * k_{i - 1} - h_{i - 1} * k_i) = (-1)**i\n delta = tf.math.reciprocal(new_denominator * denominator)\n # We actually need to compute the difference of fractions.\n else:\n delta = new_numerator / new_denominator - numerator / denominator\n\n converged = tf.math.abs(delta) <= tolerance\n should_stop_next = tf.reduce_all(converged) | should_stop_next\n return (should_stop_next,\n new_numerator,\n numerator,\n new_denominator,\n denominator,\n iteration_count + 1.)\n\n # This is to infer the correct shape of tensors\n if partial_denominator_fn:\n term = partial_denominator_fn(1., *numerator_denominator_args_list)\n else:\n term = partial_numerator_fn(1., *numerator_denominator_args_list)\n\n zeroth_numerator = tf.ones_like(term, dtype=dtype)\n zeroth_denominator = tf.zeros_like(term, dtype=dtype)\n first_numerator = tf.zeros_like(term, dtype=dtype)\n first_denominator = tf.ones_like(term, dtype=dtype)\n\n results = tf.while_loop(\n cond=lambda stop, *_: ~stop,\n body=_continued_fraction_one_step,\n loop_vars=(\n False,\n first_numerator,\n zeroth_numerator,\n first_denominator,\n zeroth_denominator,\n tf.cast(1., dtype=dtype)))\n return results[1] / results[3]",
"def est_generator(limit=1000):\n last_guess = ZERO\n for i in range(limit):\n yield 1 + last_guess\n denom = last_guess + 2\n last_guess = 1 / denom",
"def make_divisible(value, divisor, min_value=None, min_ratio=0.9):\n if min_value is None:\n min_value = divisor\n new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)\n if new_value < min_ratio * value:\n new_value += divisor\n return new_value",
"def _getsteps(num_of_steps, limit):\n steps = []\n current = 0.0\n for i in range(0, num_of_steps):\n if i == num_of_steps - 1:\n steps.append(int(round(limit)))\n else:\n steps.append(int(round(current)))\n current += float(limit) / float(num_of_steps - 1)\n return steps",
"def get_optimal_step(self, num_min):\r\n if self.pmax <= self.pmin:\r\n return None\r\n stepex = float(self.pmax - self.pmin) / num_min\r\n step1 = math.pow(10, math.floor(math.log(stepex, 10)))\r\n step2 = step1 * 2\r\n step5 = step1 * 5\r\n if step5 <= stepex:\r\n return step5\r\n if step2 <= stepex:\r\n return step2\r\n return step1",
"def make_divisible(value, divisor, min_value=None, min_ratio=0.9):\n\n if min_value is None:\n min_value = divisor\n new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than (1-min_ratio).\n if new_value < min_ratio * value:\n new_value += divisor\n return new_value",
"def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)",
"def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v",
"def max_multiple(divisor: int, bound: int) -> int:\n\n while bound > 0:\n if bound % divisor == 0:\n return bound\n bound -= 1\n return 0",
"def dectofr(x):\n # n = int(floor(x))\n # x -= n\n # if x < error:\n # # return (n, 1)\n # return Fraction(n, 1)\n # elif 1 - error < x:\n # # return (n+1, 1)\n # return Fraction(n + 1, 1)\n #\n # # The lower fraction is 0/1\n # lower_n = 0\n # lower_d = 1\n # # The upper fraction is 1/1\n # upper_n = 1\n # upper_d = 1\n # while True:\n # # The middle fraction is (lower_n + upper_n) / (lower_d + upper_d)\n # middle_n = lower_n + upper_n\n # middle_d = lower_d + upper_d\n # # If x + error < middle\n # if middle_d * (x + error) < middle_n:\n # # middle is our new upper\n # upper_n = middle_n\n # upper_d = middle_d\n # # Else If middle < x - error\n # elif middle_n < (x - error) * middle_d:\n # # middle is our new lower\n # lower_n = middle_n\n # lower_d = middle_d\n # # Else middle is our best fraction\n # else:\n # # return (n * middle_d + middle_n, middle_d)\n # # return \"{0}/{1}\".format(n*middle_d+middle_n,middle_d)\n # return Fraction(n * middle_d + middle_n, middle_d)\n\n n = str(x)\n d = 1\n dig = digitsafterdecimal(x)\n multiplier = 10 ** dig\n n = n.replace(\".\", \"\")\n return Fraction(int(n), int(d * multiplier))",
"def first_loop_fraction(self) -> int:\n return self.__first_loop_fraction",
"def make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def compute_gdd(tmin, tmax, base):\r\n gdd = (tmin + tmax) / 2 - base\r\n if gdd < 0:\r\n return 0\r\n else:\r\n return gdd",
"def ceil_div_offline(value, factor):\n return ((value) + (factor)-1) // (factor)",
"def find_count_divisor(this_list):\n max_found = this_list[0][1]\n count = 0\n\n while max_found/50 > 0:\n max_found -= 50\n count += 1\n\n return count",
"def percentage_limiter(percentage: float):\n if percentage < 0:\n return 0\n elif 0 <= percentage <= 1:\n return percentage\n else:\n return 1",
"def factorPR(n):\r\n\tfor slow in [2,3,4,6]:\r\n\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n))); fast=slow; i=1\r\n\t\twhile i<numsteps:\r\n\t\t\tslow = (slow*slow + 1) % n\r\n\t\t\ti = i + 1\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tg = gcd(fast-slow,n)\r\n\t\t\tif (g != 1):\r\n\t\t\t\tif (g == n):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn g\r\n\treturn 1",
"def fraction(self, value: int) -> 'Size':\n raise_not_number(value)\n self.maximum = '{}fr'.format(int(value))\n return self",
"def problem26(limit):\n result = 0\n result_d = 0\n\n for d in range(2, limit):\n d_cycle = calc_rec_cycle(d)\n if d_cycle > result:\n result = d_cycle\n result_d = d\n\n return result_d"
] |
[
"0.63305366",
"0.62399054",
"0.6199106",
"0.5956308",
"0.58406585",
"0.5658276",
"0.56511635",
"0.55913764",
"0.5573046",
"0.5540557",
"0.5475563",
"0.54754543",
"0.54716456",
"0.5400629",
"0.53267694",
"0.5317078",
"0.5296209",
"0.528592",
"0.52608585",
"0.5249198",
"0.5249198",
"0.5249198",
"0.5249198",
"0.523269",
"0.5228407",
"0.5220835",
"0.52204514",
"0.5209016",
"0.51549476",
"0.51019406"
] |
0.8158577
|
0
|
Instantiates generator and discriminator with parameters.
|
def instantiate_network_objects(params):
# Instantiate generator.
generator = generators.Generator(
input_shape=(params["latent_size"]),
kernel_regularizer=tf.keras.regularizers.l1_l2(
l1=params["generator_l1_regularization_scale"],
l2=params["generator_l2_regularization_scale"]
),
bias_regularizer=None,
name="generator",
params=params
)
# Instantiate discriminator.
discriminator = discriminators.Discriminator(
input_shape=(
params["height"] * params["width"] * params["depth"]
),
kernel_regularizer=tf.keras.regularizers.l1_l2(
l1=params["discriminator_l1_regularization_scale"],
l2=params["discriminator_l2_regularization_scale"]
),
bias_regularizer=None,
name="discriminator",
params=params
)
return {"generator": generator, "discriminator": discriminator}
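A sketch of the params dict this helper reads, inferred only from the keys used above; the numeric values are placeholders, the generators/discriminators modules are assumed to be the project's own, and their constructors may require further keys via the params argument.
params = {
    # generator input
    "latent_size": 128,
    # flattened image size used as the discriminator input shape
    "height": 32, "width": 32, "depth": 3,
    # L1/L2 regularization scales for each network
    "generator_l1_regularization_scale": 0.0,
    "generator_l2_regularization_scale": 0.0,
    "discriminator_l1_regularization_scale": 0.0,
    "discriminator_l2_regularization_scale": 0.0,
}
networks = instantiate_network_objects(params)  # {"generator": ..., "discriminator": ...}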
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, generator:Model,\n discriminator:Model,\n latent_dim:Optional[Union[int, Tuple]]=None,\n n_disc:int=3,\n epochs:int=100, \n batch_size:int=32,\n optimizer:Optional[Union[str, Dict]]=None,\n optimizer_kwargs:Optional[Dict]=None,\n name:str='QGAN',\n random_state:Optional[int]=None,\n checkpoint_dir:Optional[str]=None,\n checkpoint_interval:int=10,\n checkpoint_max_to_keep:Optional[int]=None):\n super().__init__(generator=generator,\n discriminator=discriminator,\n latent_dim=latent_dim,\n n_disc=n_disc,\n epochs=epochs,\n batch_size=batch_size,\n optimizer=optimizer,\n optimizer_kwargs=optimizer_kwargs,\n name=name,\n random_state=random_state,\n checkpoint_dir=checkpoint_dir,\n checkpoint_interval=checkpoint_interval,\n checkpoint_max_to_keep=checkpoint_max_to_keep)",
"def __init__(self, model_name, logger=None, gpu_ids=None):\n super().__init__(model_name, 'generator', logger, gpu_ids)",
"def __init__(self,**params):\n super(Dynamic,self).__init__(**params)\n\n if callable(self.default):\n self._set_instantiate(True)\n self._initialize_generator(self.default)",
"def GeneratorAndDiscriminator():\n\n # Baseline (G: DCGAN, D: DCGAN)\n return ResnetGenerator, DCGANDiscriminator\n\n # No BN and constant number of filts in G\n # return WGANPaper_CrippledDCGANGenerator, DCGANDiscriminator\n\n # 512-dim 4-layer ReLU MLP G\n # return FCGenerator, DCGANDiscriminator\n\n # No normalization anywhere\n # return functools.partial(DCGANGenerator, bn=False), functools.partial(DCGANDiscriminator, bn=False)\n\n # Gated multiplicative nonlinearities everywhere\n # return MultiplicativeDCGANGenerator, MultiplicativeDCGANDiscriminator\n\n # tanh nonlinearities everywhere\n # return functools.partial(DCGANGenerator, bn=True, nonlinearity=tf.tanh), \\\n # functools.partial(DCGANDiscriminator, bn=True, nonlinearity=tf.tanh)\n\n # 101-layer ResNet G and D\n # return ResnetGenerator, ResnetDiscriminator\n\n raise Exception('You must choose an architecture!')",
"def build(self):\n # add ops for generator (A->B) to graph\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,\n init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='G')\n\n if self.training:\n # add ops for other generator (B->A) and discriminators to graph\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')\n\n # generate fake images\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n\n # generate reconstructed images\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n\n # generate identity mapping images\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))\n\n # add loss ops to graph\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,\n reconstructedB, identA, identB)\n\n # add optimizer ops to graph\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else: # only need generator from A->B during testing\n fakeB = self.G(self.realA)\n return fakeB",
"def __init__(self, generator, discriminator, noise_dim, save_path):\n self.generator = generator\n self.discriminator = discriminator\n self.noise_dim = noise_dim\n self.save_path = save_path\n self.check_points_path = os.path.join(save_path, 'check_points')\n self.output_image_path = os.path.join(save_path, 'images_during_training')\n self.generator.generate()",
"def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self",
"def __init__(self, gen):\n self.gen = gen",
"def create(self):\n # Create a graph and add all layers\n self.graph = tf.Graph()\n with self.graph.as_default():\n # Define variable learning rate and dis_noise\n self.relative_lr = tf.placeholder_with_default([1.],[1],name=\"relative_lr\")\n self.relative_lr = self.relative_lr[0]\n \n self.rel_dis_noise = tf.placeholder_with_default([1.],[1],name=\"rel_dis_noise\")\n self.rel_dis_noise = self.rel_dis_noise[0]\n self.dis_noise = self.rel_dis_noise * self.dis_noise_0\n \n \n # Create the generator and discriminator\n if self.architecture == 'Res6':\n gen_dim = [64, 128,256, 256,256,256,256,256,256, 128,64 ]\n kernel_size =[7, 3,3, 3,3,3,3,3,3, 3,3, 7]\n elif self.architecture == 'Res9':\n gen_dim= [64, 128,256, 256,256,256,256,256,256,256,256,256, 128,64 ]\n kernel_size=[7, 3,3, 3,3,3,3,3,3,3,3,3, 3,3, 7]\n else:\n print('Unknown generator architecture')\n return None\n \n self.genA = Res_Gen.ResGen('BtoA',self.a_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n self.genB = Res_Gen.ResGen('AtoB',self.b_chan,gen_dim=gen_dim,kernel_size=kernel_size,deconv=self.deconv,verbose=self.verbose)\n \n if self.patchgan == 'Patch34':\n self.disA = PatchGAN34.PatchGAN34('A',noise=self.dis_noise)\n self.disB = PatchGAN34.PatchGAN34('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch70':\n self.disA = PatchGAN70.PatchGAN70('A',noise=self.dis_noise)\n self.disB = PatchGAN70.PatchGAN70('B',noise=self.dis_noise)\n elif self.patchgan == 'Patch142':\n self.disA = PatchGAN142.PatchGAN142('A',noise=self.dis_noise)\n self.disB = PatchGAN142.PatchGAN142('B',noise=self.dis_noise)\n elif self.patchgan == 'MultiPatch':\n self.disA = MultiPatch.MultiPatch('A',noise=self.dis_noise)\n self.disB = MultiPatch.MultiPatch('B',noise=self.dis_noise)\n else:\n print('Unknown Patch discriminator type')\n return None\n \n self.disA_His = HisDis.HisDis('A',noise=self.dis_noise,keep_prob=1.)\n self.disB_His = HisDis.HisDis('B',noise=self.dis_noise,keep_prob=1.)\n \n # Create a placeholder for the input data\n self.A = tf.placeholder(tf.float32,[None, None, None, self.a_chan],name=\"a\")\n self.B = tf.placeholder(tf.float32,[None, None, None, self.b_chan],name=\"b\")\n \n if self.verbose:\n print('Size A: ' +str(self.a_chan)) # Often 1 --> Real\n print('Size B: ' +str(self.b_chan)) # Often 3 --> Syn\n \n # Create cycleGAN \n \n self.fake_A = self.genA.create(self.B,False)\n self.fake_B = self.genB.create(self.A,False)\n \n \n \n # Define the histogram loss\n t_A = tf.transpose(tf.reshape(self.A,[-1, self.a_chan]),[1,0])\n t_B = tf.transpose(tf.reshape(self.B,[-1, self.b_chan]),[1,0])\n t_fake_A = tf.transpose(tf.reshape(self.fake_A,[-1, self.a_chan]),[1,0])\n t_fake_B = tf.transpose(tf.reshape(self.fake_B,[-1, self.b_chan]),[1,0])\n\n self.s_A,_ = tf.nn.top_k(t_A,tf.shape(t_A)[1])\n self.s_B,_ = tf.nn.top_k(t_B,tf.shape(t_B)[1])\n self.s_fake_A,_ = tf.nn.top_k(t_fake_A,tf.shape(t_fake_A)[1])\n self.s_fake_B,_ = tf.nn.top_k(t_fake_B,tf.shape(t_fake_B)[1])\n \n self.m_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_A = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_A,[self.a_chan, self.imsize, -1]),axis=2),[1, -1])\n self.m_fake_B = tf.reshape(tf.reduce_mean(tf.reshape(self.s_fake_B,[self.b_chan, self.imsize, -1]),axis=2),[1, -1])\n \n # Define generator loss functions\n self.lambda_c = 
tf.placeholder_with_default([self.lambda_c],[1],name=\"lambda_c\")\n self.lambda_c = self.lambda_c[0]\n self.lambda_h = tf.placeholder_with_default([self.lambda_h],[1],name=\"lambda_h\")\n self.lambda_h = self.lambda_h[0]\n \n self.dis_real_A = self.disA.create(self.A,False)\n self.dis_real_Ah = self.disA_His.create(self.m_A,False)\n self.dis_real_B = self.disB.create(self.B,False)\n self.dis_real_Bh = self.disB_His.create(self.m_B,False)\n self.dis_fake_A = self.disA.create(self.fake_A,True)\n self.dis_fake_Ah = self.disA_His.create(self.m_fake_A,True)\n self.dis_fake_B = self.disB.create(self.fake_B,True)\n self.dis_fake_Bh = self.disB_His.create(self.m_fake_B,True)\n \n self.cyc_A = self.genA.create(self.fake_B,True)\n self.cyc_B = self.genB.create(self.fake_A,True)\n \n \n # Define cycle loss (eq. 2)\n self.loss_cyc_A = tf.reduce_mean(tf.abs(self.cyc_A-self.A))\n self.loss_cyc_B = tf.reduce_mean(tf.abs(self.cyc_B-self.B))\n \n self.loss_cyc = self.loss_cyc_A + self.loss_cyc_B\n \n # Define discriminator losses (eq. 1)\n self.loss_dis_A = (tf.reduce_mean(tf.square(self.dis_real_A)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_A)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Ah)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Ah)))*0.5*self.lambda_h\n \n \n self.loss_dis_B = (tf.reduce_mean(tf.square(self.dis_real_B)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_B)))*0.5 +\\\n (tf.reduce_mean(tf.square(self.dis_real_Bh)) +\\\n tf.reduce_mean(tf.square(1-self.dis_fake_Bh)))*0.5*self.lambda_h\n \n self.loss_gen_A = tf.reduce_mean(tf.square(self.dis_fake_A)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Ah)) +\\\n self.lambda_c * self.loss_cyc/2.\n self.loss_gen_B = tf.reduce_mean(tf.square(self.dis_fake_B)) +\\\n self.lambda_h * tf.reduce_mean(tf.square(self.dis_fake_Bh)) +\\\n self.lambda_c * self.loss_cyc/2.\n \n # Create the different optimizer\n with self.graph.as_default():\n # Optimizer for Gen\n self.list_gen = []\n for var in tf.trainable_variables():\n if 'gen' in str(var):\n self.list_gen.append(var)\n optimizer_gen = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_gen = optimizer_gen.minimize(self.loss_gen_A+self.loss_gen_B,var_list=self.list_gen)\n \n # Optimizer for Dis\n self.list_dis = []\n for var in tf.trainable_variables():\n if 'dis' in str(var):\n self.list_dis.append(var)\n optimizer_dis = tf.train.AdamOptimizer(learning_rate=self.relative_lr*0.0002,beta1=0.5)\n self.opt_dis = optimizer_dis.minimize(self.loss_dis_A + self.loss_dis_B,var_list=self.list_dis)",
"def __init__(self, generator: FormulaGenerator = NumberGenerator(),\n power_generator: FormulaGenerator = TokenGenerator(\"2\")):\n super().__init__([generator])\n self.power_generator = power_generator",
"def __init__(self, opt: argparse.Namespace) -> None:\n super().__init__(opt)\n\n self.gpu_ids = opt.gpu_ids\n self.is_train = opt.is_train\n self.output_nch = opt.output_nch\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n\n # generator module\n self._generator_module = generator_modules[opt.generator_module_name](opt)\n apply_init_weight(self._generator_module, opt, init_weight=init_weights[opt.init_weight_name])\n if self.is_train:\n # discriminator module\n self._discriminator_module = discriminator_modules[opt.discriminator_module_name](opt)\n apply_init_weight(self._discriminator_module, opt, init_weight=init_weights[opt.init_weight_name])\n # generator optimizer\n self._generator_optimizer = optimizers[opt.generator_optimizer_name](self._generator_module.parameters(), opt)\n # discriminator optimizer\n self._discriminator_optimizer = optimizers[opt.discriminator_optimizer_name](self._discriminator_module.parameters(), opt)\n # generator scheduler\n self._generator_scheduler = schedulers[opt.generator_scheduler_name](self._generator_optimizer, opt)\n # discriminator scheduler\n self._discriminator_scheduler = schedulers[opt.discriminator_scheduler_name](self._discriminator_optimizer, opt)\n\n # register\n if not self.is_train:\n self.modules['generator'] = self._generator_module\n else:\n self.modules['generator'] = self._generator_module\n self.modules['discriminator'] = self._discriminator_module\n self.optimizers['generator'] = self._generator_optimizer\n self.optimizers['discriminator'] = self._discriminator_optimizer\n self.schedulers['generator'] = self._generator_scheduler\n self.schedulers['discriminator'] = self._discriminator_scheduler\n\n self.module_transfer_to_device()",
"def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)",
"def create_model(opts):\n # G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.conv_dim)\n # D = DCDiscriminator(conv_dim=opts.conv_dim)\n G = DCGenerator()\n D = DCDiscriminator()\n\n return G, D",
"def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])",
"def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)",
"def vanilla_gan_model(params):\n # Instantiate generator and discriminator objects.\n network_dict = instantiate_network_objects(params)\n\n # Instantiate generator optimizer.\n generator_optimizer = instantiate_optimizer(params, scope=\"generator\")\n\n # Instantiate discriminator optimizer.\n discriminator_optimizer = instantiate_optimizer(\n params, scope=\"discriminator\"\n )\n\n return (\n network_dict,\n {\n \"generator\": generator_optimizer,\n \"discriminator\": discriminator_optimizer\n }\n )",
"def build_gan(self):\n\n # Specify te generators used to build various components.\n optimizer_generator = Adam(0.0002, 0.5)\n optimizer_discriminator = Adam(0.0002, 0.5)\n optimizer_GAN = Adam(0.0002, 0.5)\n\n loss_measure_generator = \"binary_crossentropy\"\n loss_measure_discriminator = \"binary_crossentropy\"\n loss_measure_GAN = \"binary_crossentropy\"\n\n metrics = [\"accuracy\", \"mae\", \"mse\", \"mape\", \"cosine\"]\n\n # See if the specified model paths exist, if they don't then we start training new models\n if (\n hasattr(self, \"discriminator_path\")\n and hasattr(self, \"generator_path\")\n and self.discriminator_path.is_file()\n and self.generator_path.is_file()\n ):\n self.discriminator = load_model(self.discriminator_path)\n self.generator = load_model(self.generator_path)\n print(\"Loaded models...\")\n else: # training new model.\n print(\"Training models...\")\n\n # Generate the tensorboard and its call back\n callback_tensorboard = TensorBoard(\n log_dir=path_log_run, histogram_freq=0, write_images=True\n )\n\n # self.callbacks_list = [callback_tensorboard]\n\n # Build discriminator and compile it.\n self.discriminator = self.build_discriminator()\n\n # Training discriminator!\n self.discriminator.compile(\n loss=loss_measure_discriminator,\n optimizer=optimizer_discriminator,\n # metrics=metrics,\n # callbacks=self.callbacks_list,\n )\n\n # Build generator and compile it.\n self.generator = self.build_generator()\n\n # Training generator!\n self.generator.compile(\n loss=loss_measure_generator,\n optimizer=optimizer_generator,\n # callbacks=self.callbacks_list,\n )\n\n # These next few lines setup the training for the GAN, which the input Vector has a shape of noise_parameters\n z = Input(shape=(self.dimensions_noise,))\n img = self.generator(z)\n\n self.discriminator.trainable = False\n\n # Call the discriminator on the image generated by the generator.\n # Store the output\n valid = self.discriminator(img)\n\n # Form a model that combine both the input and the output pair.\n self.combined = Model(z, valid)\n\n # Compile the model using binary_crossentropy with the\n self.combined.compile(loss=loss_measure_GAN, optimizer=optimizer_GAN)",
"def create(self, **kwargs):\n\n if kwargs['profession'] == FIGHTER:\n profession = FighterGenerator.create(**kwargs)\n if kwargs['race'] == DWARF:\n race = DwarfGenerator.create(st=profession.st(),\n generator=kwargs.get('generator'))\n m = PngModel(race, profession)\n m.create()\n return m",
"def make_gan(generator, discriminator):\n\n # Disable training of discriminator as default\n discriminator.trainable = False\n\n # Create GAN\n model = tf.keras.Sequential()\n model.add(generator)\n model.add(discriminator)\n\n # Compile the GAN\n model.compile(loss='binary_crossentropy', optimizer='adam')\n\n return model",
"def __build_generators(self, x, y, split=0.9):\n\n # Sanity check\n assert len(x) == len(y)\n\n # Split dataset into train and validation sets\n cut = int(split * len(x))\n x_train = x[:cut]\n x_valid = x[cut:]\n y_train = y[:cut]\n y_valid = y[cut:]\n\n if self.input_type == \"mols\":\n self.__train_gen = HetSmilesGenerator(\n x_train,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = HetSmilesGenerator(\n x_valid,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n else:\n self.__train_gen = DescriptorGenerator(\n x_train,\n y_train,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = DescriptorGenerator(\n x_valid,\n y_valid,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n # Calculate number of batches per training/validation epoch\n train_samples = len(x_train)\n valid_samples = len(x_valid)\n self.__steps_per_epoch = train_samples // self.batch_size\n self.__validation_steps = valid_samples // self.batch_size\n\n print(\n \"Model received %d train samples and %d validation samples.\"\n % (train_samples, valid_samples)\n )",
"def __init__(self, metadata, force = False):\n mi.idk.driver_generator.DriverGenerator.__init__(self, metadata, force)\n\n self.metadata = metadata\n self.force = force",
"def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model",
"def create_generators(cfg, backbone):\n if cfg.anchor_params:\n if 'small' in cfg.anchor_params:\n anchor_params = AnchorParameters.small\n else:\n anchor_params = None\n else:\n anchor_params = None\n\n common_args = {\n 'batch_size': cfg.batchsize,\n 'config': None,\n 'image_min_side': cfg.image_size[0],\n 'image_max_side': cfg.image_size[1],\n 'filter_annotations_enabled': False,\n 'preprocess_image': backbone.preprocess_image,\n 'normalize_radar': cfg.normalize_radar,\n 'camera_dropout': cfg.dropout_image,\n 'radar_dropout': cfg.dropout_radar,\n 'channels': cfg.channels,\n 'distance': cfg.distance_detection,\n 'sample_selection': cfg.sample_selection,\n 'only_radar_annotated': cfg.only_radar_annotated,\n 'n_sweeps': cfg.n_sweeps,\n 'noise_filter': cfg.noise_filter_cfg,\n 'noise_filter_threshold': cfg.noise_filter_threshold,\n 'noisy_image_method': cfg.noisy_image_method,\n 'noise_factor': cfg.noise_factor,\n 'perfect_noise_filter': cfg.noise_filter_perfect,\n 'radar_projection_height': cfg.radar_projection_height,\n 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),\n 'inference': cfg.inference,\n 'anchor_params': anchor_params,\n }\n\n # create random transform generator for augmenting training data\n if cfg.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.0,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n category_mapping = cfg.category_mapping\n\n if 'nuscenes' in cfg.data_set:\n # import here to prevent unnecessary dependency on nuscenes\n from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator\n from nuscenes.nuscenes import NuScenes\n\n if 'mini' in cfg.data_set:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n else:\n try:\n nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)\n except ValueError:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n\n\n if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:\n scenes = Scenes.debug\n else:\n scenes = Scenes.default\n\n train_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.train,\n transform_generator=transform_generator,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n shuffle_groups=True,\n group_method='random',\n **common_args\n )\n\n # no dropouts in validation\n common_args['camera_dropout'] = 0\n common_args['radar_dropout'] = 0\n\n validation_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.val,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_night_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_night,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_rain_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_rain,\n category_mapping=category_mapping,\n 
compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator\n else:\n raise ValueError('Invalid data type received: {}'.format(cfg.data_set))",
"def generator(self, random, args):\n\t\traise NotImplementedError",
"def build_discriminator():\n\n #Slope and weight initializer are chosen to match parmeters in the paper\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.2\n inputs = keras.Input(shape=(64,64,3))\n x = preprocessing.Rescaling(scale=1./127.5, offset=-1.)(inputs)\n\n # First conv layer\n x = Conv2D(\n 64,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Second conv layer\n x = Conv2D(\n 128,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third conv layer\n x = Conv2D(\n 256,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth conv layer\n x = Conv2D(\n 512,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Predictions. Note that we use logits so thhere is no activation at the end. \n x = layers.Flatten()(x)\n x = layers.Dense(1,kernel_initializer=weight_initializer)(x)\n \n model = keras.Model(inputs=inputs, outputs=x)\n return model",
"def generator(T):\n \n class generator_interposer(T):\n \n def __init__(self, *args, **kwargs):\n gen_i = self._get_int()\n \n # Capture the instantiation location\n frame = inspect.stack()[1]\n gen_i.srcinfo_inst = SourceInfo(frame.filename, frame.lineno)\n\n # Call the user's constructor \n with gen_i:\n super().__init__(*args, **kwargs)\n\n self._int_field_info = field_info() \n if gen_i.ctor_level == 0:\n self.build_model()\n \n pass\n\n # Add the interposer class \n ret = type(T.__name__, (generator_interposer,), dict())\n\n if not hasattr(T, \"_gen_init\"):\n def __getattribute__(self, a):\n ret = object.__getattribute__(self, a)\n \n if isinstance(ret, type_base) and not is_raw_mode():\n # We're not in an expression, so the user\n # wants the value of this field\n ret = ret.get_val()\n \n return ret\n \n def __setattr__(self, field, val):\n try:\n # Retrieve the field object so we can check if it's \n # a type_base object. This will throw an exception\n # if the field doesn't exist\n fo = object.__getattribute__(self, field)\n except:\n object.__setattr__(self, field, val)\n else:\n if isinstance(fo, type_base):\n if not is_raw_mode():\n # We're not in an expression context, so the \n # user really wants us to set the actual value\n # of the field\n fo.set_val(val)\n else:\n raise Exception(\"Attempting to use '=' in a constraint\")\n else:\n object.__setattr__(self, field, val) \n \n def randomize(self):\n model = self.get_model()\n Randomizer.do_randomize([model])\n \n def build_field_model(self, name):\n if self._int_field_info.model is None:\n model = FieldCompositeModel(name, self._int_field_info.is_rand, self)\n self._int_field_info.model = model\n \n # Iterate through the fields and constraints\n # First, assign IDs to each of the randomized fields\n with expr_mode():\n for f in dir(self):\n if not f.startswith(\"__\") and not f.startswith(\"_int\"):\n fo = getattr(self, f)\n \n if hasattr(fo, \"_int_field_info\"):\n if fo._int_field_info.model is None:\n fo._int_field_info.model = fo.build_field_model(f)\n\n model.add_field(fo._int_field_info.model)\n \n # Now, elaborate the constraints\n for f in dir(self):\n if not f.startswith(\"__\") and not f.startswith(\"_int\"):\n fo = getattr(self, f)\n if isinstance(fo, constraint_t):\n clear_exprs()\n push_constraint_scope(ConstraintBlockModel(f))\n try:\n fo.c(self)\n except Exception as e:\n print(\"Exception while processing constraint: \" + str(e))\n raise e\n fo.set_model(pop_constraint_scope())\n model.add_constraint(fo.model)\n clear_exprs()\n \n self._int_field_info.model.name = name\n return self._int_field_info.model\n \n def get_model(self):\n with expr_mode():\n if self._int_field_info.model is None:\n self._int_field_info.model = self.build_field_model(None)\n \n return self._int_field_info.model\n \n def _get_int(self):\n if not hasattr(self, \"_gen_int\"):\n self._gen_int = GeneratorInt()\n return self._gen_int\n \n setattr(T, \"__getattribute__\", __getattribute__)\n setattr(T, \"__setattr__\", __setattr__)\n setattr(T, \"randomize\", randomize)\n# setattr(T, \"randomize_with\", randomize_with)\n setattr(T, \"build_field_model\", build_field_model)\n setattr(T, \"get_model\", get_model)\n# setattr(T, \"__enter__\", __enter__)\n# setattr(T, \"__exit__\", __exit__)\n# setattr(T, \"do_pre_randomize\", do_pre_randomize)\n# setattr(T, \"do_post_randomize\", do_post_randomize)\n setattr(T, \"_int_field_info\", field_info(True))\n setattr(T, \"_get_int\", _get_int)\n setattr(T, \"_ro_init\", True)\n \n \n \n \n return 
ret",
"def generate(cls):\n raise NotImplementedError()",
"def generator(self, random, args):\r\n raise NotImplementedError",
"def _build(self, generation):\n with tf.variable_scope ('discriminator') as scope:\n \n image_unflatten = unflatten_layer ( self.images )\n gen_unflatten = unflatten_layer ( generation )\n\n # Conv Layer 1 - image\n conv1_out_image, params = conv_2d_layer (\n input = image_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'conv_1_img',\n visualize = True ) \n pool1_out_img = max_pool_2d_layer ( input = conv1_out_image, name = 'pool_1_img')\n lrn1_out_img = local_response_normalization_layer (pool1_out_img, name = 'lrn_1_img' ) \n \n # Conv Layer 1 - gen\n conv1_out_gen, params = conv_2d_layer (\n input = gen_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n params = params,\n name = 'conv_1_gen',\n visualize = False )\n\n pool1_out_gen = max_pool_2d_layer ( input = conv1_out_gen, name = 'pool_1_gen')\n lrn1_out_gen = local_response_normalization_layer (pool1_out_gen, name = 'lrn_1_gen' ) \n process_params(params, name = self.name)\n c1_params = params\n\n\n\n\n\n # Conv Layer 2 - image\n conv2_out_image, params = conv_2d_layer (\n input = lrn1_out_img,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'conv_2_img' )\n\n pool2_out_img = max_pool_2d_layer ( input = conv2_out_image, name = 'pool_2_img')\n lrn2_out_img = local_response_normalization_layer (pool2_out_img, name = 'lrn_2_img' ) \n\n\n # Conv Layer 2 - gen\n conv2_out_gen, params = conv_2d_layer (\n input = lrn1_out_gen,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n params = params,\n name = 'conv_2_gen' )\n\n pool2_out_gen = max_pool_2d_layer ( input = conv2_out_gen, name = 'pool_2_gen')\n lrn2_out_gen = local_response_normalization_layer (pool2_out_gen, name = 'lrn_2_gen' ) \n process_params(params, name = self.name)\n c2_params = params\n\n # Dropout Layer\n flat_gen = flatten_layer(lrn2_out_gen)\n flat_img = flatten_layer(lrn2_out_img)\n\n flat_gen_dropout = dropout_layer ( input = flat_gen,\n prob = self.dropout_prob,\n name = 'dropout_1_gen') \n\n flat_img_dropout = dropout_layer ( input = flat_img,\n prob = self.dropout_prob,\n name = 'dropout_1_img') \n\n\n\n # Dot Product Layer 1 -img\n fc1_out_img, params = dot_product_layer ( input = flat_img_dropout,\n neurons = HIDDEN_1,\n name = 'image_disc_dot_1')\n # Dot Product Layer 1 - gen\n fc1_out_gen, params = dot_product_layer ( input = flat_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_1')\n\n process_params(params, name = self.name)\n d1_params = params\n \n ##\n fc1_out_gen_dropout = dropout_layer ( input = fc1_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_2_gen') \n fc1_out_img_dropout = dropout_layer ( input = fc1_out_img,\n prob = self.dropout_prob,\n name = 'dropout_2_img')\n\n # Dot Product Layer 2 -img\n fc2_out_img, params = dot_product_layer ( input = fc1_out_img_dropout,\n neurons = HIDDEN_2,\n name = 'image_disc_dot_2')\n # Dot Product Layer 2 - gen\n fc2_out_gen, params = dot_product_layer ( input = fc1_out_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_2')\n process_params(params, name = self.name)\n d2_params = params\n\n ##\n fc2_out_gen_dropout = dropout_layer ( input = fc2_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_3_gen') \n fc2_out_img_dropout = dropout_layer ( input = fc2_out_img,\n prob = self.dropout_prob,\n name = 'dropout_3_img')\n\n # Dot Product Layer 1 -img\n self.real, params = dot_product_layer ( input = fc2_out_img_dropout,\n neurons = 1,\n activation = 'sigmoid',\n name = 'real')\n # Dot Product Layer 1 -gen\n 
self.fake, params = dot_product_layer ( input = fc2_out_gen_dropout,\n params = params,\n neurons = 1,\n activation = 'sigmoid',\n name = 'fake')\n\n process_params(params, name = self.name)\n d3_params = params\n self.params = [c1_params, c2_params, d1_params, d2_params, d3_params] \n\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + 'discriminator_obj') as scope: \n # discriminator_obj = - 0.5 * tf.reduce_mean(log(self.real)) - \\\n # 0.5 * tf.reduce_mean(log(1-self.fake))\n discriminator_obj = 0.5 * tf.reduce_mean ((self.real-1)**2) + \\\n 0.5 * tf.reduce_mean ((self.fake)**2)\n tf.summary.scalar('discriminator_obj', discriminator_obj)\n tf.add_to_collection( self.name + '_objectives', discriminator_obj ) \n\n with tf.variable_scope (self.name + '_probabilites') as scope: \n tf.summary.scalar('fake_probability', tf.reduce_mean(self.fake))\n tf.summary.scalar('real_probability', tf.reduce_mean(self.real))\n \n self._cook_optimizer( \n lr = DIS_GAN_LR, \n optimizer = DIS_GAN_OPTIMIZER,\n l1_coeff = DIS_GAN_L1_COEFF,\n l2_coeff = DIS_GAN_WEIGHT_DECAY_COEFF)",
"def __init__(self, gen_model, disc_model, loss_type=\"orig\", disc_lr=1e-3, gen_lr=1e-3):\n if gen_model.get_output_shape_at(0) != disc_model.get_input_shape_at(0):\n raise Exception(\"Generator output and discriminator shape not correct.\")\n\n self.gen_model = gen_model\n self.disc_model = disc_model\n\n self.disc_optimizer = tf.keras.optimizers.RMSprop(learning_rate=disc_lr)\n self.gen_optimizer = tf.keras.optimizers.RMSprop(learning_rate=gen_lr)\n\n self.loss_type = loss_type\n if self.loss_type == \"orig\":\n self.disc_loss = self.disc_loss_orig\n self.gen_loss = self.gen_loss_orig\n elif self.loss_type == \"wasserstein\":\n self.disc_loss = self.disc_loss_wasserstein\n self.gen_loss = self.gen_loss_wasserstein\n else:\n raise Exception(\"Not a valid loss type.\")"
] |
[
"0.6810049",
"0.6650364",
"0.66342735",
"0.6625637",
"0.6547958",
"0.647228",
"0.64604884",
"0.6410235",
"0.63588136",
"0.6335196",
"0.6238652",
"0.61622006",
"0.6111083",
"0.6055124",
"0.6044253",
"0.6030763",
"0.60140043",
"0.59965104",
"0.5979334",
"0.5970905",
"0.59499454",
"0.59496117",
"0.59336805",
"0.591862",
"0.5892282",
"0.5886236",
"0.5883516",
"0.588094",
"0.58808404",
"0.58511794"
] |
0.7294676
|
0
|
Vanilla GAN custom Estimator model function.
|
def vanilla_gan_model(params):
# Instantiate generator and discriminator objects.
network_dict = instantiate_network_objects(params)
# Instantiate generator optimizer.
generator_optimizer = instantiate_optimizer(params, scope="generator")
# Instantiate discriminator optimizer.
discriminator_optimizer = instantiate_optimizer(
params, scope="discriminator"
)
return (
network_dict,
{
"generator": generator_optimizer,
"discriminator": discriminator_optimizer
}
)
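The helpers instantiate_network_objects and instantiate_optimizer are not shown in this record. A minimal sketch of what they might look like, assuming a params dict with hypothetical keys hidden_units, output_dim, generator_lr and discriminator_lr, and a plain tf.keras backend rather than the TF1-style Estimator setup the record implies:

import tensorflow as tf

def instantiate_network_objects(params):
    # Hypothetical builder: two small dense networks keyed by role.
    generator = tf.keras.Sequential([
        tf.keras.layers.Dense(params["hidden_units"], activation="relu"),
        tf.keras.layers.Dense(params["output_dim"]),
    ])
    discriminator = tf.keras.Sequential([
        tf.keras.layers.Dense(params["hidden_units"], activation="relu"),
        tf.keras.layers.Dense(1),
    ])
    return {"generator": generator, "discriminator": discriminator}

def instantiate_optimizer(params, scope):
    # Hypothetical: one Adam optimizer per scope, learning rate keyed by scope.
    return tf.keras.optimizers.Adam(learning_rate=params[scope + "_lr"])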
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def define_gan(g_model, d_model):\r\n # make weights in the discriminator (some shared with the q model) as not trainable\r\n d_model.trainable = False\r\n # connect g outputs to d inputs\r\n d_output = d_model(g_model.output)\r\n # define composite model\r\n model = Model(g_model.input, d_output)\r\n # compile model\r\n opt = Adam(lr=0.0001, beta_1=0.5)\r\n model.compile(loss=['binary_crossentropy'], optimizer=opt)\r\n return model",
"def gan(ident=None, gan=None):\n\n model = Sequential()\n\n model.add(gan)\n model.add(ident)\n\n model.compile(loss='binary_crossentropy', optimizer=op2,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model",
"def make_gan(generator, discriminator):\n\n # Disable training of discriminator as default\n discriminator.trainable = False\n\n # Create GAN\n model = tf.keras.Sequential()\n model.add(generator)\n model.add(discriminator)\n\n # Compile the GAN\n model.compile(loss='binary_crossentropy', optimizer='adam')\n\n return model",
"def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)",
"def define_gan(g_model, d_model, image_shape):\n\n # make weights in the discriminator not trainable\n for layer in d_model.layers:\n if not isinstance(layer, BatchNormalization):\n layer.trainable = False\n # define the source image\n in_src = Input(shape=image_shape)\n # connect the source image to the generator input\n gen_out = g_model(in_src)\n # connect the source input and generator output to the discriminator input\n dis_out = d_model([in_src, gen_out])\n # src image as input, generated image and classification output\n\n model = Model(in_src, [dis_out, gen_out, gen_out])\n\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss=['binary_crossentropy', 'mae', negDSC], optimizer=opt,\n loss_weights=[1, 100, 100])\n return model",
"def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.gan_mode = gan_mode\n self.loss = nn.MSELoss()",
"def __init__(self, n_in, n_hidden, n_layers=2):\n super(GatedGN, self).__init__()\n\n self.E_n = nn.Sequential(nn.Linear(n_in, n_hidden),\n nn.ReLU(),\n nn.Linear(n_hidden, n_layers*n_hidden),\n nn.ReLU())\n self.E_e = nn.Sequential(nn.Linear(2*n_in, n_hidden),\n nn.ReLU(),\n nn.Linear(n_hidden, n_layers*n_hidden),\n nn.ReLU())\n self.E_g = nn.Linear(n_hidden, n_hidden)\n\n # Takes in features from nodes, node states, edge state, and global state.\n self.U_gru = nn.GRU(input_size=n_hidden+n_in,\n hidden_size=n_hidden,\n num_layers=n_layers,\n batch_first=True)\n self.M_gru = nn.GRU(input_size=2*(n_in+n_hidden),\n hidden_size=n_hidden,\n num_layers=n_layers,\n batch_first=True)\n self.G_gru = nn.GRU(input_size=2*n_hidden,\n hidden_size=n_hidden,\n num_layers=n_layers,\n batch_first=True)\n \n # Output function that predicts stability.\n self.O = nn.Linear(n_hidden, 1)\n \n self.n_in, self.n_hidden, self.n_layers = n_in, n_hidden, n_layers",
"def __call__(self, prediction, target_is_real):\n if self.gan_mode in ['lsgan', 'vanilla','l1']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean() # self.relu(1-prediction.mean())\n else:\n loss = prediction.mean() # self.relu(1+prediction.mean())\n \n return loss",
"def get_xception_based_model() -> nn.Module:\n \"\"\"INSERT YOUR CODE HERE, overrun return.\"\"\"\n custom_network = build_xception_backbone()\n\n base_params = sum(p.numel() for p in custom_network.parameters() if p.requires_grad)\n custom_network.fc = nn.Sequential(\n nn.Linear(2048,1000),\n nn.ReLU(), \n nn.Linear(1000,256),\n nn.ReLU(), \n nn.Linear(256,64),\n nn.ReLU(), \n nn.Linear(64,2),\n )\n s_params = sum(p.numel() for p in custom_network.parameters() if p.requires_grad)\n params_diff = s_params - base_params\n return custom_network",
"def __init__(self, args, target_real_label=1.0, target_fake_label=0.0):\r\n super(GANLoss, self).__init__()\r\n\r\n self.register_buffer('real_label', torch.tensor(target_real_label)) #real_label\r\n self.register_buffer('fake_label', torch.tensor(target_fake_label)) #fake_label\r\n self.gan_mode = args.gan_mode\r\n if self.gan_mode == 'lsgan':\r\n self.loss = nn.MSELoss() # Least Square loss in the original paper\r\n elif self.gan_mode == 'vanilla':\r\n self.loss = nn.BCEWithLogitsLoss() # include Sigmoid+BCELoss, BCELoss: cross entropy\r\n else:\r\n raise NotImplementedError('gan mode %s not implemented' % self.gan_mode)",
"def build_gan(self):\n\n # Specify te generators used to build various components.\n optimizer_generator = Adam(0.0002, 0.5)\n optimizer_discriminator = Adam(0.0002, 0.5)\n optimizer_GAN = Adam(0.0002, 0.5)\n\n loss_measure_generator = \"binary_crossentropy\"\n loss_measure_discriminator = \"binary_crossentropy\"\n loss_measure_GAN = \"binary_crossentropy\"\n\n metrics = [\"accuracy\", \"mae\", \"mse\", \"mape\", \"cosine\"]\n\n # See if the specified model paths exist, if they don't then we start training new models\n if (\n hasattr(self, \"discriminator_path\")\n and hasattr(self, \"generator_path\")\n and self.discriminator_path.is_file()\n and self.generator_path.is_file()\n ):\n self.discriminator = load_model(self.discriminator_path)\n self.generator = load_model(self.generator_path)\n print(\"Loaded models...\")\n else: # training new model.\n print(\"Training models...\")\n\n # Generate the tensorboard and its call back\n callback_tensorboard = TensorBoard(\n log_dir=path_log_run, histogram_freq=0, write_images=True\n )\n\n # self.callbacks_list = [callback_tensorboard]\n\n # Build discriminator and compile it.\n self.discriminator = self.build_discriminator()\n\n # Training discriminator!\n self.discriminator.compile(\n loss=loss_measure_discriminator,\n optimizer=optimizer_discriminator,\n # metrics=metrics,\n # callbacks=self.callbacks_list,\n )\n\n # Build generator and compile it.\n self.generator = self.build_generator()\n\n # Training generator!\n self.generator.compile(\n loss=loss_measure_generator,\n optimizer=optimizer_generator,\n # callbacks=self.callbacks_list,\n )\n\n # These next few lines setup the training for the GAN, which the input Vector has a shape of noise_parameters\n z = Input(shape=(self.dimensions_noise,))\n img = self.generator(z)\n\n self.discriminator.trainable = False\n\n # Call the discriminator on the image generated by the generator.\n # Store the output\n valid = self.discriminator(img)\n\n # Form a model that combine both the input and the output pair.\n self.combined = Model(z, valid)\n\n # Compile the model using binary_crossentropy with the\n self.combined.compile(loss=loss_measure_GAN, optimizer=optimizer_GAN)",
"def train_naive(): # add arguments as needed\n pass",
"def build_gan(\n optimizer,\n timesteps,\n vocab_sizes,\n latlon_dense_units=64,\n concat_dense_units=100,\n lstm_units=100,\n latent_dim=100,\n lstm_reg=0.02,\n):\n gen = build_generator(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n lstm_units,\n latent_dim,\n lstm_reg,\n vocab_sizes,\n )\n dis = build_discriminator(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n lstm_units,\n latent_dim,\n lstm_reg,\n vocab_sizes,\n )\n # Compile discriminator with masked BCE loss. Mask is last output of generator\n dis.compile(optimizer=optimizer, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n dis.trainable = False\n\n # The trajectory generator takes real trajectories and noise as inputs\n # inputs = [layers.Input(shape=(timesteps, 2), name=\"input_latlon\")]\n # for key, val in vocab_sizes.items():\n # inputs.append(layers.Input(shape=(timesteps, val), name=\"input_\" + key))\n # inputs.append(layers.Input(shape=(latent_dim,), name=\"input_noise\"))\n # inputs.append(layers.Input(shape=(timesteps, 1), name=\"input_mask\"))\n # gen_trajs = gen(inputs)\n # y_pred = dis(gen_trajs[:-1])\n # mask = inputs[-1]\n # gan = Model(inputs, y_pred)\n # gan.add_loss(traj_loss(inputs[:-2], gen_trajs[:-1], mask))\n ##\n y_pred = dis(gen.outputs[:-1])\n gan = Model(gen.inputs, y_pred)\n mask = gen.inputs[-1]\n gan.add_loss(traj_loss(gen.inputs[:-2], gen.outputs[:-1], mask))\n gan.compile(optimizer=optimizer, loss=\"binary_crossentropy\")\n return gen, dis, gan",
"def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode == 'l1':\n self.loss = nn.L1Loss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n self.relu = nn.ReLU()\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)",
"def __init__(self,\n sess,\n output_shape,\n processing_dtype=tf.float32,\n conditional_input_shapes=None,\n noise_shape=(100,),\n generator_network_fn=gen_lib.mnist_generator_gan,\n discriminator_network_fn=gen_lib.mnist_discriminator_gan,\n tf_device='/cpu:*',\n max_tf_checkpoints_to_keep=4,\n g_optimizer=tf.train.AdamOptimizer(),\n d_optimizer=tf.train.AdamOptimizer(),\n k=1,\n weights_clip=0.01,\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n assert weights_clip > 0\n self.weights_clip = weights_clip\n gan.VanillaGAN.__init__(self,\n sess,\n output_shape,\n processing_dtype=processing_dtype,\n conditional_input_shapes=conditional_input_shapes,\n noise_shape=noise_shape,\n generator_network_fn=generator_network_fn,\n discriminator_network_fn=discriminator_network_fn,\n tf_device=tf_device,\n max_tf_checkpoints_to_keep=max_tf_checkpoints_to_keep,\n g_optimizer=g_optimizer,\n d_optimizer=d_optimizer,\n k=k,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency,\n allow_partial_reload=allow_partial_reload)\n tf.logging.info('\\t weights_clip: %d', weights_clip)",
"def __init__(self, opt):\n BaseModel.__init__(self, opt) # call the initialization method of BaseModel\n\n self.opt = opt\n if opt.d_loss_mode == 'wgan' and not opt.use_gp:\n raise NotImplementedError('using wgan on D must be with use_gp = True.')\n\n self.loss_names = ['G_real', 'G_fake', 'D_real', 'D_fake', 'D_gp', 'G', 'D']\n self.visual_names = ['real_visual', 'gen_visual']\n\n if self.isTrain: # only defined during training time\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n if self.opt.cgan:\n probs = np.ones(self.opt.cat_num)/self.opt.cat_num \n self.CatDis = Categorical(torch.tensor(probs))\n\n # define networks \n self.netG = networks.define_G(opt.z_dim, opt.output_nc, opt.ngf, opt.netG,\n opt.g_norm, opt.cgan, opt.cat_num, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n self.netD = networks.define_D(opt.input_nc, opt.ndf, opt.netD,\n opt.d_norm, opt.cgan, opt.cat_num, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # only defined during training time\n # define G mutations \n self.G_mutations = []\n for g_loss in opt.g_loss_mode: \n self.G_mutations.append(networks.GANLoss(g_loss, 'G', opt.which_D).to(self.device))\n # define loss functions\n self.criterionD = networks.GANLoss(opt.d_loss_mode, 'D', opt.which_D).to(self.device)\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, opt.beta2))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, opt.beta2))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n \n # Evolutinoary candidatures setting (init) \n\n self.G_candis = [] \n self.optG_candis = [] \n self.last_evaly = []\n self.last_evalimgs = []\n self.min_Fq = 100.0\n self.max_Fq = -100.0\n self.min_Fd = 100.0\n self.max_Fd = -100.0\n self.normFq = lambda f : (f-self.min_Fq) / (self.max_Fq-self.min_Fq)\n self.normFd = lambda f : (f-self.min_Fd) / (self.max_Fd-self.min_Fd)\n for i in range(opt.candi_num): \n self.G_candis.append(copy.deepcopy(self.netG.state_dict()))\n self.optG_candis.append(copy.deepcopy(self.optimizer_G.state_dict()))\n \n # visulize settings \n self.N =int(np.trunc(np.sqrt(min(opt.batch_size, 64))))\n if self.opt.z_type == 'Gaussian': \n self.z_fixed = torch.randn(self.N*self.N, opt.z_dim, 1, 1, device=self.device) \n elif self.opt.z_type == 'Uniform': \n self.z_fixed = torch.rand(self.N*self.N, opt.z_dim, 1, 1, device=self.device)*2. - 1. \n if self.opt.cgan:\n yf = self.CatDis.sample([self.N*self.N])\n self.y_fixed = one_hot(yf, [self.N*self.N, self.opt.cat_num])\n\n # the # of image for each evluation\n self.eval_size = max(math.ceil((opt.batch_size * opt.D_iters) / opt.candi_num), opt.eval_size)",
"def __init__(self,\n in_node_dim: int = 39,\n hidden_node_dim: int = 64,\n heads: int = 4,\n dropout: float = 0.0,\n num_conv: int = 3,\n predictor_hidden_feats: int = 32,\n n_tasks: int = 1,\n **kwargs):\n model = GAT(\n in_node_dim,\n hidden_node_dim,\n heads,\n dropout,\n num_conv,\n predictor_hidden_feats,\n n_tasks,\n )\n super(GATModel, self).__init__(model, **kwargs)",
"def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode == 'hinge':\n pass\n elif gan_mode in ['wgangp']:\n self.loss = None\n elif gan_mode in ['softwgan']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)",
"def FeatLinModel(VGG, layername='features_20', type=\"weight\", weight=None, chan=0, pos=(10, 10)):\n layers_all = get_model_layers(VGG)\n if 'features' in layername:\n layeridx = layers_all.index(layername) - 1 + 1 # -1 for the \"features\" layer\n VGGfeat = VGG.features[:layeridx]\n else:\n VGGfeat = VGG\n hooks, feat_dict = hook_model(VGG, layerrequest=(layername,))\n layernames = list(feat_dict.keys())\n print(layernames)\n if type == \"weight\":\n def weight_objective(img, scaler=True):\n VGGfeat.forward(img.cuda())\n feat = hooks(layername)\n if scaler:\n return -(feat * weight.unsqueeze(0)).mean()\n else:\n batch = img.shape[0]\n return -(feat * weight.unsqueeze(0)).view(batch, -1).mean(axis=1)\n\n return weight_objective\n elif type == \"neuron\":\n def neuron_objective(img, scaler=True):\n VGGfeat.forward(img.cuda())\n feat = hooks(layername)\n if len(feat.shape) == 4:\n if scaler:\n return -(feat[:, chan, pos[0], pos[1]]).mean()\n else:\n batch = img.shape[0]\n return -(feat[:, chan, pos[0], pos[1]]).view(batch, -1).mean(axis=1)\n elif len(feat.shape) == 2:\n if scaler:\n return -(feat[:, chan]).mean()\n else:\n batch = img.shape[0]\n return -(feat[:, chan]).view(batch, -1).mean(axis=1)\n return neuron_objective",
"def _construct_gan(self):\n self.critic.trainable = False\n gan = Model(self.encoder.input, self.critic(self.encoder.output))\n gan.compile(optimizer=self.critic_opt(lr=self.critic_learning_rate),\n loss='binary_crossentropy')\n return gan",
"def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)",
"def all_param_AN(ds, myloss='mean_squared_error'):\n wr = ds[0]\n wl = ds[1]\n V = ds[2]\n omega = ds[3]\n input = np.zeros((len(wl),2))\n input[:,0] = wr\n input[:,1] = wl\n output = np.zeros((len(wl),2))\n output[:,0] = V\n output[:,1] = omega\n input_layer = keras.layers.Input((2,),name=\"input\") #wr et wl\n hidden_layer = keras.layers.Dense(2, activation='linear', kernel_initializer='uniform',\n input_shape=(2,), use_bias=False, name=\"output\") #V et omega\n output_layer = hidden_layer(input_layer)\n ann = keras.models.Model(inputs=input_layer, outputs=output_layer)\n opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n ann.compile(loss=myloss, optimizer=opt)\n ann_in, ann_out = input, output\n history = ann.fit(ann_in, ann_out, epochs=40, batch_size=64, verbose=0,\n shuffle=True, validation_split=0.1)#, callbacks=callbacks)\n\n \"\"\"plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\"\"\"\n\n weights = hidden_layer.get_weights()[0]\n Rr_est = weights[0][0]*2\n Rl_est = weights[1][0]*2\n L_est1 = 1/(weights[0][1]/Rr_est)\n L_est2 = -1/(weights[1][1]/Rr_est)\n return Rr_est, Rl_est, (L_est2+L_est1)/2 #moyenne des deux longueurs obtenues",
"def WGAN_model():\n \n # Generator model\n generator = generator_model()\n generator.compile(optimizer=RMSprop(lr=GEN_LEARNING_RATE), loss=wasserstein_loss, metrics=None)\n\n # Discriminator model\n discriminator = discriminator_model()\n discriminator.compile(optimizer=RMSprop(lr=DIS_LEARNING_RATE), loss=wasserstein_loss, metrics=None)\n\n # GAN model\n GAN = Sequential([generator, discriminator])\n GAN.compile(optimizer=RMSprop(lr=GEN_LEARNING_RATE), loss=wasserstein_loss, metrics=None)\n\n return GAN, generator, discriminator",
"def test_Gaussian_NB_estimators():",
"def __call__(self, prediction, target_is_real, for_discriminator=True):\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'hinge':\n if for_discriminator:\n if target_is_real:\n loss = nn.ReLU()(1 - prediction).mean()\n else:\n loss = nn.ReLU()(1 + prediction).mean() \n else:\n assert target_is_real, \"The generator's hinge loss must be aiming for real\"\n loss = - prediction.mean()\n return loss\n\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n elif self.gan_mode == 'softwgan':\n if target_is_real:\n loss = F.softplus(-prediction).mean()\n else:\n loss = F.softplus(prediction).mean()\n return loss",
"def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)",
"def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()",
"def __init__(self, target_real_label=1.0, target_fake_label=0.0):\n super(GANLocalLoss, self).__init__()\n # self.pooling = nn.MaxPool2d(kernel_size=4, stride=2, padding=1)\n self.adaptivepooling = nn.AdaptiveAvgPool2d(64)",
"def optim_func(params, model):\n if model.model == 'ARD':\n model.alpha, model.beta = params\n lik = model.pruning_algorithm()\n\n else:\n model.alpha = params[0]\n lik = model.pruning_algorithm()\n \n return -lik",
"def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model"
] |
[
"0.6353647",
"0.6304419",
"0.62466717",
"0.61459804",
"0.6136398",
"0.61236703",
"0.60471606",
"0.6017594",
"0.5973777",
"0.5947182",
"0.5872416",
"0.58470273",
"0.58395267",
"0.5826521",
"0.58180445",
"0.5812097",
"0.58079636",
"0.5790119",
"0.5785252",
"0.57568234",
"0.57366437",
"0.57232094",
"0.57015246",
"0.56748426",
"0.5654916",
"0.56291807",
"0.56267375",
"0.5621707",
"0.5593429",
"0.5582136"
] |
0.63768333
|
0
|
Return URL for the HITRAN2012 parfile directory.
|
def url_HITRAN12():
url=u"https://www.cfa.harvard.edu/HITRAN/HITRAN2012/HITRAN2012/By-Molecule/Uncompressed-files/"
return url
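A hedged usage sketch: the returned base URL is a directory, so a caller would append a molecule parfile name and download it. The filename below is illustrative only, and requests is assumed to be available.

import requests

base = url_HITRAN12()
filename = "01_hit12.par"  # hypothetical HITRAN2012 parfile name, for illustration
resp = requests.get(base + filename, timeout=60)
resp.raise_for_status()
with open(filename, "wb") as f:
    f.write(resp.content)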
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def url_HITEMP():\n url=u\"https://hitran.org/hitemp/data/bzip2format/\"\n return url",
"def file_url(self, fname):\n gs_url = f\"{self.gs_base_url}/{fname}\"\n return f\"{gs_url}\"",
"def url_HITRANCIA():\n url=u\"https://hitran.org/data/CIA/\"\n return url",
"def url(self):\n if not os.path.exists(self.path):\n self.save()\n return self.uset.url(os.path.join(self.folder, self.get_filename()))",
"def get_public_url(self,project,filename):\n pass",
"def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename",
"def _file_url(self, fid):\n base = self.tq.threatq_host + '/files/'\n return base + str(fid) + '/details'",
"def getFileURL(filename:str)->str:\n if '--develop' in sys.argv:\n return settings.REACT_DEV_PATH + filename\n\n return settings.EEL_PATH + filename",
"def get_file_url(self):\n return ('/user-media/addons/3615/delicious_bookmarks-2.1.072-fx.xpi?'\n 'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'\n 'e09fef55f2673458bc31f')",
"def url():\n ...",
"def fileUrl(self) -> str:\n if self.urls is None or len(self.urls) == 0:\n raise InputOutputError('Chart version does not have file urls')\n\n if is_absolute_url(self.urls[0]):\n return self.urls[0]\n return posixpath.join(self.chart.repository.url, self.urls[0])",
"def main(url, localfile):\n ph.download_file(url, localfile)",
"def generatePdsPath(filePrefix):\n \n # File prefix looks like this: hHXXX_DDDD_SSS\n fileType = '.img'\n \n # Extract the run number --> HXXX\n runNum = filePrefix[1:5]\n \n filename = filePrefix + fileType\n baseUrl = \"http://pds-geosciences.wustl.edu/mex/mex-m-hrsc-5-refdr-mapprojected-v2/mexhrsc_1001/data/\"\n fullUrl = baseUrl + runNum +\"/\"+ filename\n\n #print filePrefix + fileType + ' -> ' + fullUrl\n return fullUrl",
"def pr_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pr_url\")",
"def Url(self) -> str:",
"def prog_url(self):\n # type: () -> string_types\n return self._prog_url",
"def generateUrl(line, stn):\n return predictionDetailed.format(\n line=urllib.parse.quote(line, safe=''),\n station=urllib.parse.quote(stn, safe=''))",
"def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)",
"def _get_url(self, absolute):",
"def get_url(file):\n try:\n with open(file, 'r') as f:\n return URL + f.readlines()[0].rstrip()\n except IOError as err:\n print(\"Failed with error: %s\" % err)\n sys.exit(1)",
"async def _landing_url(self, responses: SourceResponses) -> URL:\n if not responses:\n return await super()._landing_url(responses)\n web_url = (await responses[0].json())[\"web_url\"]\n branch = self._parameter(\"branch\", quote=True)\n file_path = self._parameter(\"file_path\", quote=True)\n return URL(f\"{web_url}/blob/{branch}/{file_path}\")",
"def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"",
"def url(self, name):\n if self.base_url is None:\n raise ValueError(\"This file is not accessible via a URL.\")\n url = filepath_to_uri(name)\n if url is not None:\n url = url.lstrip('/')\n return urljoin(self.base_url, url)",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")"
] |
[
"0.6452669",
"0.63440126",
"0.63233215",
"0.6274853",
"0.6237435",
"0.6148489",
"0.60967404",
"0.6084041",
"0.6079517",
"0.60351443",
"0.60044044",
"0.593875",
"0.59196824",
"0.591958",
"0.58905053",
"0.58827263",
"0.58819574",
"0.58723444",
"0.5845488",
"0.5835844",
"0.5817721",
"0.5762659",
"0.5761291",
"0.57548827",
"0.57548827",
"0.57548827",
"0.57548827",
"0.57548827",
"0.57548827",
"0.57548827"
] |
0.7261343
|
0
|
Return URL for the HITRAN CIA ciafile directory.
|
def url_HITRANCIA():
url=u"https://hitran.org/data/CIA/"
return url
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def url_HITRAN12():\n url=u\"https://www.cfa.harvard.edu/HITRAN/HITRAN2012/HITRAN2012/By-Molecule/Uncompressed-files/\"\n return url",
"def _file_url(self, fid):\n base = self.tq.threatq_host + '/files/'\n return base + str(fid) + '/details'",
"def remote_url(self) -> str:\n return f\"https://api.figma.com/v1/files/{self.file_id}\"",
"def get_aia_uri(self):\n\n return first_rsync_uri(self.get_POW().getAIA())",
"def get_url():\r\n content = get_creds(CREDS_FILE)\r\n url = content[0]\r\n # get rid of trailing slash\r\n if url[len(url) - 1] == \"/\":\r\n return url[:len(url) - 1]\r\n return url",
"def retrieve_ipac_file(url):\n \n request = urllib2.Request(url)\n \n # Encode the username and password to send for authorization\n base64string = base64.encodestring('%s:%s' % (IPAC_USER, IPAC_PASSWORD)).replace('\\n', '')\n request.add_header(\"Authorization\", \"Basic %s\" % base64string)\n \n # Retrieve the response\n try:\n response = urllib2.urlopen(request)\n except urllib2.HTTPError, e:\n print \"HTTPError: Authorization failed or request invalid.\\n\\t->HTTP Response returned error code {}\".format(e.code)\n raise\n except urllib2.URLError, e:\n print \"URLError: {}\".format(e.reason)\n raise\n \n file = StringIO.StringIO(response.read())\n return file",
"def file_url(self, fname):\n gs_url = f\"{self.gs_base_url}/{fname}\"\n return f\"{gs_url}\"",
"def get_download_url(self, ha):\n return create_ipa_url(ha)",
"def compose_url(base_url, anno, chimico):\n \n return base_url + chimico + '_' + anno + '.txt'",
"def get_public_url(self,project,filename):\n pass",
"def get_cadd_result_url(self):\n if self.cadd_job_id:\n return \"https://cadd.gs.washington.edu/check_avail/%s_anno_%s.tsv.gz\" % (\n self.cadd_version,\n self.cadd_job_id,\n )\n else:\n return None",
"def url(self):\n if not os.path.exists(self.path):\n self.save()\n return self.uset.url(os.path.join(self.folder, self.get_filename()))",
"def getFileURL(filename:str)->str:\n if '--develop' in sys.argv:\n return settings.REACT_DEV_PATH + filename\n\n return settings.EEL_PATH + filename",
"def _assets_url(self):\r\n return \"/assets/\" + self._course_key + \"/\"",
"def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)",
"def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'",
"def core_cdn_file(request, source):\n\n file_path = settings.CENTIPAIR_TEMPLATE_DIR + \"/cdn/\" + source\n source_file_url = settings.TEMPLATE_STATIC_URL + \"/\" + file_path\n return source_file_url",
"def get_remote_url(self, alias):\n url = self.url_base + 'download/current/'\n if 'interactions' in alias:\n url += \"interactors/\" + alias + '.txt'\n else:\n url += alias + '.txt'\n return url",
"def getOrtURL(self, result):\n pcat = self.portal_catalog\n raw_webcode = result.get('webcode')\n if isinstance(raw_webcode, float):\n webcode = str(int(raw_webcode))\n elif isinstance(raw_webcode, int):\n webcode = str(raw_webcode)\n else:\n webcode = raw_webcode\n brains = pcat(Webcode = webcode)\n if len(brains) == 1:\n return brains[0].getURL()\n return ''",
"def get_url(file):\n try:\n with open(file, 'r') as f:\n return URL + f.readlines()[0].rstrip()\n except IOError as err:\n print(\"Failed with error: %s\" % err)\n sys.exit(1)",
"def _get_cora_filepath():\n # type: () -> Tuple[str, str]\n cache_root = download.get_dataset_directory(_root)\n feat_cache_path = os.path.join(cache_root, feat_file_name)\n edge_cache_path = os.path.join(cache_root, edge_file_name)\n return feat_cache_path, edge_cache_path",
"def Url(self) -> str:",
"def file_url(category, event_id=None, train_or_test=\"train\"):\n if category == 'hit_orders':\n folder = 'particles-in-order'\n elif category in ('sample_submission', 'detectors'):\n return '/home/ec2-user/SageMaker/efs/codalab_dataset/{0}.csv'.format(category)\n else:\n folder = 'codalab_dataset/' + train_or_test\n \n return '/home/ec2-user/SageMaker/efs/{0}/event{1:09d}-{2}.csv'.format(\n folder, event_id, category)",
"async def _landing_url(self, responses: SourceResponses) -> URL:\n if not responses:\n return await super()._landing_url(responses)\n web_url = (await responses[0].json())[\"web_url\"]\n branch = self._parameter(\"branch\", quote=True)\n file_path = self._parameter(\"file_path\", quote=True)\n return URL(f\"{web_url}/blob/{branch}/{file_path}\")",
"def _get_url(self, absolute):",
"def getURL(date):\n\tbase_url = \"https://www.gpo.gov/fdsys/pkg/CREC-\"+date+\"/pdf/CREC-\"+date+\".pdf\"\n\tprint base_url",
"def getUrl(self): #$NON-NLS-1$\r",
"def getUrl(self): #$NON-NLS-1$\r",
"def crl_url(self):\n\n if self._crl_distribution_points is None:\n return None\n\n return self._get_crl_url(self._crl_distribution_points)",
"def getProjectURL():"
] |
[
"0.70408565",
"0.6138763",
"0.60811234",
"0.60591507",
"0.60352266",
"0.60036784",
"0.5966616",
"0.5932678",
"0.59245706",
"0.5885469",
"0.58848125",
"0.58844715",
"0.58674806",
"0.58364475",
"0.58210564",
"0.5757505",
"0.573669",
"0.571453",
"0.5692179",
"0.5679866",
"0.56782615",
"0.567615",
"0.56559324",
"0.5637427",
"0.5636076",
"0.5622444",
"0.5605161",
"0.5605161",
"0.5576803",
"0.5570329"
] |
0.7466736
|
0
|
Return URL for the HITEMP bz2 parfile directory.
|
def url_HITEMP():
url=u"https://hitran.org/hitemp/data/bzip2format/"
return url
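Because this endpoint serves bzip2-compressed parfiles, a usage sketch (the filename is an assumption) would decompress the payload after downloading:

import bz2
import requests

base = url_HITEMP()
filename = "05_HITEMP2019.par.bz2"  # illustrative name only
resp = requests.get(base + filename, timeout=300)
resp.raise_for_status()
par_text = bz2.decompress(resp.content).decode("ascii")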
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def url_HITRAN12():\n url=u\"https://www.cfa.harvard.edu/HITRAN/HITRAN2012/HITRAN2012/By-Molecule/Uncompressed-files/\"\n return url",
"def get_file_url(self):\n return ('/user-media/addons/3615/delicious_bookmarks-2.1.072-fx.xpi?'\n 'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'\n 'e09fef55f2673458bc31f')",
"def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename",
"def svn_client_commit_item2_t_url_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def __get_url_addr(self):\n request = urlopen(self.url)\n version = request.readline()\n request.close()\n request = urlparse.urlparse(self.url)\n unparsed_url = urlparse.urlunparse((request.scheme, request.netloc,\n request.path, '', '', ''))\n updated_url = urlparse.urljoin(unparsed_url, version + '/' +\n self.file_name)\n return updated_url",
"def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"",
"def fileUrl(self) -> str:\n if self.urls is None or len(self.urls) == 0:\n raise InputOutputError('Chart version does not have file urls')\n\n if is_absolute_url(self.urls[0]):\n return self.urls[0]\n return posixpath.join(self.chart.repository.url, self.urls[0])",
"def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)",
"def _makeWGSurl(WGSline) :\n if not WGSline.startswith(\"WGS \") :\n raise Exception(\"Line does not start with \\\"WGS \\\"\")\n accession = WGSline.split(\" \")[-1]\n accRoot = accession.split(\"-\")[0][0:6]\n url = \"http://www.ncbi.nlm.nih.gov/Traces/wgs/?download=\" + accRoot + \".1.gbff.gz\"\n return url",
"def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def getMpcorb(url='https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT.gz', fname='MPCORB.DAT.gz', verbose=True):\n\n #filename = wget.download(url)\n try:\n r = requests.get(url, allow_redirects=True)\n open(fname, 'wb').write(r.content)\n if (verbose):\n print('Download complete:', url)\n except:\n print(\"Error in getMpcorb: could not download \", fname, \" at \", url)\n raise\n return",
"def get_url(name, details):\n opsys, machine = get_platform()\n _platform = f\"{opsys}_{machine}\"\n\n try:\n version = details[\"version\"]\n except KeyError:\n raise KeyError(f\"version must be specified for plugin {name}\")\n\n # set the file name, allow it to be overridden with key \"filename\"\n default_file_name = f\"terraform-provider-{name}_{version}_{_platform}.zip\"\n file_name = details.get(\"filename\", default_file_name)\n\n # set the base url, allow it to be overridden with key \"baseURL\"\n default_base_url = (\n f\"https://releases.hashicorp.com/terraform-provider-{name}/{version}\"\n )\n base_uri = details.get(\"baseURL\", default_base_url).rstrip(\"/\")\n\n return f\"{base_uri}/{file_name}\"",
"def getFileURL(filename:str)->str:\n if '--develop' in sys.argv:\n return settings.REACT_DEV_PATH + filename\n\n return settings.EEL_PATH + filename",
"def _get_url(self, absolute):",
"def _build_url(self, path):\n return \"{0}/blazing-jdbc/{1}\".format(self.baseurl, path)",
"def replay_url(self):\n if (\n self.cluster is None\n or self.steam_id is None\n or self.replay_salt is None\n ):\n return None\n else:\n return \"http://replay{0}.valve.net/570/{1}_{2}.dem.bz2\".format(\n self.cluster, self.steam_id, self.replay_salt\n )",
"def get_url() -> str:\n parser = ArgumentParser()\n\n parser.add_argument('--url',\n type=str,\n help='Url to download log file')\n\n args = parser.parse_args()\n url = args.url\n return url",
"def url_ExoMol():\n url=u\"http://www.exomol.com/db/\"\n return url",
"def get_download_url(self):\n # return self.file.url # This returns the path where file is stored\n return reverse('products:download', kwargs={\n 'slug': self.product.slug, 'pk': self.pk\n }) # This returns the endpoint where file download is handled",
"def main(url, localfile):\n ph.download_file(url, localfile)",
"def __baseurl(self):\n return \"http://\" + self._host + \\\n \"/cgi-bin/hi3510/param.cgi?cmd={}&-usr=\" + \\\n self._username + \"&-pwd=\" + self._password",
"def get_download_url(self, version=\"latest\", os_name=None, bitness=None):\n raise NotImplementedError",
"def download_url(self, fname):\n if not fname in self.data:\n return ''\n url = '/'.join([\n self.context.absolute_url(),\n '@@download-file',\n self.id + ':' + fname\n ])\n return url",
"def _downloadWGS(WGSurl) :\n gzipContent = urllib2.urlopen(WGSurl).read()\n gzipFile = StringIO.StringIO(gzipContent)\n o = gzip.GzipFile(fileobj = gzipFile)\n output = None\n try :\n output = o.read()\n except IOError as e:\n print(e)\n o.close()\n return output",
"def _get_file_url_from_dropbox(dropbox_url, filename):\n return dropbox_url + '?dl=1'",
"def get_url(self, path):\n\n log(\"Getting URL for path {}\".format(path))\n return 'http://www.bom.gov.au/{}'.format(path)",
"def BPM_PROVHISTORY():\n return download_from_archive(\"bpm_20220128_gmos-s_Ham_11_full_12amp.fits\")",
"def file_url(self, fname):\n gs_url = f\"{self.gs_base_url}/{fname}\"\n return f\"{gs_url}\"",
"def resolved_url(self):\n # '{year}/{release}-Year/csv_{record_type}(state}.zip'\n us = self.url_proto.format(year=self._year, release=self._release,\n record_type=self.record_type.lower(), state = self._state.lower())\n\n return parse_app_url(us)",
"def url(self) -> str:\n if self._download_path is None:\n raise ValueError(\"No download URL given.\")\n return urljoin(self._download_path, self.filename)"
] |
[
"0.5839622",
"0.5803024",
"0.565694",
"0.54593",
"0.5396386",
"0.5365379",
"0.535061",
"0.5326004",
"0.5297789",
"0.5255205",
"0.52542067",
"0.52349323",
"0.52242327",
"0.52028143",
"0.51905954",
"0.5163859",
"0.5162223",
"0.5153333",
"0.5113982",
"0.51062846",
"0.5091156",
"0.5080328",
"0.5068667",
"0.5066664",
"0.5060946",
"0.50587946",
"0.5049801",
"0.5041185",
"0.5037902",
"0.50140977"
] |
0.69981384
|
0
|
Return URL for the ExoMol database.
|
def url_ExoMol():
url=u"http://www.exomol.com/db/"
return url
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def Url(self) -> str:",
"def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename",
"def url():\n ...",
"def url(self):\n if not os.path.exists(self.path):\n self.save()\n return self.uset.url(os.path.join(self.folder, self.get_filename()))",
"def url(self):\n ...",
"def url(self):\n return self.full()",
"def url (self):\n return Links.createURL('/')",
"def _get_url(self, absolute):",
"def __url(self, object):\n return '/'.join(object.getPhysicalPath())",
"def oed_url(self):\n return 'http://www.oed.com/view/th/class/%d' % self.id",
"def getUrl(self): #$NON-NLS-1$\r",
"def getUrl(self): #$NON-NLS-1$\r",
"def url(self):\n url = self.url\n return url",
"def get_uri(self):\n return self.url",
"def show_orion_url(self, obj):\n return obj.orion_url",
"def _uri(self):\n raise NotImplementedError",
"def uri(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uri\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> str:\n if \"main\" not in self._resources:\n self._initialize()\n return self._resources[\"main\"].url",
"def url(self) -> str:\n return self.url_as()",
"def url(self) -> str:\n return pulumi.get(self, \"url\")",
"def url(self) -> str:\n return pulumi.get(self, \"url\")",
"def GetURL(self, rel_url):\n return 'http://localhost:%d/%s' % (self.port, rel_url)",
"def url(self):\n\n if not hasattr(self, \"_url\"):\n query = db.Query(\"query_term u\", \"u.value\")\n query.join(\"query_term t\", \"t.doc_id = u.doc_id\")\n query.where(f\"u.path = '{self.URL_PATH}'\")\n query.where(f\"t.path = '{self.TERM_PATH}'\")\n query.where(query.Condition(\"t.int_val\", self.id))\n rows = query.execute(self.loader.cdr_cursor).fetchall()\n self._url = rows[0].value if rows else \"\"\n return self._url"
] |
[
"0.7125014",
"0.6848426",
"0.6735453",
"0.66766655",
"0.6581823",
"0.65701824",
"0.65248775",
"0.64309263",
"0.63389313",
"0.630634",
"0.6301364",
"0.6301364",
"0.6278326",
"0.62764597",
"0.62506",
"0.6248224",
"0.62394494",
"0.6173976",
"0.6173976",
"0.6173976",
"0.6173976",
"0.6173976",
"0.6173976",
"0.6173976",
"0.6147105",
"0.61357254",
"0.611985",
"0.611985",
"0.61019665",
"0.6096575"
] |
0.8471234
|
0
|
Construct nonlocal game object from a binary constraint system game.
|
def from_bcs_game(cls, constraints: list[np.ndarray], reps: int = 1) -> "NonlocalGame":
num_constraints = len(constraints)
if num_constraints == 0:
raise ValueError("At least 1 constraint is required")
num_variables = constraints[0].ndim
# Retrieve dependent variables for each constraint
dependent_variables = np.zeros((num_constraints, num_variables))
# `v_i` is _not_ a dependent variable of `c_j` if all entries in
# the `i`-th dimension of `constraints[j]` are equal, i.e.:
    # c_j[:, ..., :, 0, :, ..., :] == c_j[:, ..., :, 1, :, ..., :]
for j in range(num_constraints):
for i in range(num_variables):
dependent_variables[j, i] = np.diff(constraints[j], axis=i).any()
# Calculate probability matrix P(x, y) where:
# x: uniformly randomly-selected constraint `c_x`
# y: uniformly randomly-selected variable `v_y` in `c_x`
prob_mat = np.zeros((num_constraints, num_variables))
for j in range(num_constraints):
p_x = 1.0 / num_constraints
num_dependent_vars = dependent_variables[j].sum()
p_y = dependent_variables[j] / num_dependent_vars
prob_mat[j] = p_x * p_y
# Compute prediction matrix of outcomes given questions and answer pairs:
# a: Alice's truth assignment to all variables in `c_x`
# b: Bob's truth assignment for `v_y` in `c_x`
pred_mat = np.zeros((2**num_variables, 2, num_constraints, num_variables))
for x_ques in range(num_constraints):
for a_ans in range(pred_mat.shape[0]):
# Convert to binary representation
bin_a = [int(x) for x in np.binary_repr(a_ans)]
truth_assignment = np.zeros(num_variables, dtype=np.int8)
truth_assignment[-len(bin_a) :] = bin_a
truth_assignment = tuple(truth_assignment)
for y_ques in range(num_variables):
# The verifier can only accept the answer if Bob's truth assignment
# is consistent with Alice's
b_ans = truth_assignment[y_ques]
pred_mat[a_ans, b_ans, x_ques, y_ques] = constraints[x_ques][truth_assignment]
return cls(prob_mat, pred_mat, reps)
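A hedged usage sketch, assuming the surrounding class is toqito's NonlocalGame (the import path and the classical_value call below are assumptions, not shown above): the CHSH game is the two-variable binary constraint system v1 XOR v2 = 0 and v1 XOR v2 = 1, with each constraint given as a 2 x 2 truth table indexed by (v1, v2).

import numpy as np
from toqito.nonlocal_games.nonlocal_game import NonlocalGame  # assumed location

c_1 = np.array([[1, 0], [0, 1]])  # satisfied when v1 == v2
c_2 = np.array([[0, 1], [1, 0]])  # satisfied when v1 != v2
chsh = NonlocalGame.from_bcs_game([c_1, c_2])
print(chsh.classical_value())  # 0.75 for the CHSH constraint system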
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def new_game(self, req):\n return models.BattleShip.create(req.left, req.right)",
"def __init__(self, game, current=None, gameInner=None, pieceInner=None, enemyEnv=None):\n\n self.game = game\n self.gameEnv = GameEnvironment(self.game, self)\n if Q_USE_CONVOLUTIONAL_LAYERS:\n self.gameNetwork = ConvNetwork(self.game.area(), self.gameEnv, self.game, Q_GAME_NUM_GRIDS,\n inner=[] if gameInner is None else gameInner)\n\n self.internalNetwork = ConvNetwork(Q_PIECE_NUM_ACTIONS, self, self.game, Q_PIECE_NUM_GRIDS,\n inner=[] if pieceInner is None else pieceInner)\n else:\n self.gameNetwork = Network(self.game.area(), self.gameEnv,\n inner=[] if gameInner is None else gameInner)\n\n self.internalNetwork = Network(Q_PIECE_NUM_ACTIONS, self,\n inner=[] if pieceInner is None else pieceInner)\n\n self.enemyEnv = enemyEnv\n\n self.current = current",
"def __init__(self, objective):\n self.objective = objective\n\n # Initialize players\n # We use three dummy player for the target position\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n\n # Initialize the internal environment\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None",
"def create_gurobi_model_standard_problem() -> gb.Model:\n model = gb.Model()\n x, y, z = model.addVar(vtype=GRB.CONTINUOUS, name=\"x\"), model.addVar(vtype=GRB.CONTINUOUS, name=\"y\"), model.addVar(\n vtype=GRB.CONTINUOUS, name=\"z\")\n constraint1 = x + y + z <= 10\n constraint2 = x + y + z <= 20\n constraint3 = 1 * x <= 5\n model.addConstr(constraint1, \"constr1\")\n model.addConstr(constraint2, \"constr2\")\n model.addConstr(constraint3, \"constr3\")\n model.addConstr(1 * x >= 0)\n model.addConstr(1 * y >= 0)\n model.addConstr(1 * z >= 0)\n model.setObjective(10 * x + y + z)\n return model",
"def create(game):\r\n ## Create Garbage\r\n game.garbage = deque([])\r\n\r\n ## Create Stars\r\n game.create_stars()\r\n\r\n ## Create Millenium Falcon\r\n game.falcons = MilleniumFalcon.init(game)\r\n game.millenium_falcon = MilleniumFalcon()\r\n \r\n ## Create TIE Fighters\r\n game.fighters = Fighter.init(game)\r\n\r\n ## Create Asteroids\r\n game.rocks = Group.mesh(Rock1.init(game), Rock2.init(game))\r\n\r\n ## Create Lasers\r\n game.pro_lasers = ProLaser.init(game)\r\n game.con_lasers = ConLaser.init(game)\r\n\r\n ## Setup collision groups\r\n Group.bind(game.pro_lasers, game.rocks, game.act_laser_void)",
"def __init__(self, affinity, game_type, game_space, opponent=None):\n \n super().__init__(affinity, game_type, game_space, opponent)",
"def __init__(self, game: models.Game):\n self.game = game",
"def create_instance(raw_game, game_acbid, season, competition_phase,round_phase=None):\n # There are two different statistics table in acb.com. I assume they created the new one to introduce the +/- stat.\n estadisticas_tag = '.estadisticasnew' if re.search(r'<table class=\"estadisticasnew\"', raw_game) else '.estadisticas'\n\n doc = pq(raw_game)\n game_dict = dict()\n\n \"\"\"\n Each game has an unique id in acb.com. The id has 5 digits, where the first two digits are the season code (the\n oldest season in 1956 has code 1) and the three last are the number of the game (a simple counter since the beginning\n of the season).\n\n This id can be used to access the concrete game within the link 'http://www.acb.com/fichas/LACBXXYYY.php'\n \"\"\"\n game_dict['game_acbid'] = game_acbid\n game_dict['season'] = season.season\n game_dict['competition_phase'] = competition_phase\n game_dict['round_phase'] = round_phase\n\n\n # Information about the teams.\n info_teams_data = doc(estadisticas_tag).eq(1)\n home_team_name = None\n away_team_name = None\n\n \"\"\"\n We only have the names of the teams (text) within the doc. We will look for its associated id by looking in our teamname table, where\n we have all the historical official names for each team and season. However the ACB sometimes doesn't agree in the names\n and writes them in different ways depending on the game (sometimes taking older names or making small changes).\n For instance VALENCIA BASKET CLUB instead of VALENCIA BASKET.\n So if there is not such a direct correspondance we will take the closest match.\n \"\"\"\n for i in [0, 2]:\n team_data = info_teams_data('.estverde').eq(i)('td').eq(0).text()\n team_name = re.search(\"(.*) [0-9]\", team_data).groups()[0]\n\n try: ## In case the name of the team is exactly the same as one stated in our database for a season\n team_acbid = TeamName.get(TeamName.name == team_name).team_id.team_acbid\n team = Team.get(Team.team_acbid == team_acbid)\n\n except TeamName.DoesNotExist: ## In case there is not an exact correspondance within our database, let's find the closest match.\n query = TeamName.select(TeamName.team_id, TeamName.name)\n teams_names_ids = dict()\n for q in query:\n teams_names_ids[q.name] = q.team_id.id\n\n most_likely_team = difflib.get_close_matches(team_name, teams_names_ids.keys(), 1, 0.4)[0]\n team = Team.get(Team.id == teams_names_ids[most_likely_team])\n\n if most_likely_team not in season.mismatched_teams: # debug info to check the correctness.\n season.mismatched_teams.append(most_likely_team)\n logger.info('Season {} -> {} has been matched to: {}'.format(season.season, team_name, most_likely_team))\n\n # TeamName.get_or_create(**{'team': team, 'name': team_name, 'season': season.season})\n game_dict['team_home_id' if i == 0 else 'team_away_id'] = team\n home_team_name = team_name if i == 0 else home_team_name\n away_team_name = team_name if i != 0 else away_team_name\n\n # Information about the game.\n info_game_data = doc(estadisticas_tag).eq(0)\n\n scheduling_data = info_game_data('.estnegro')('td').eq(0).text()\n scheduling_data = scheduling_data.split(\"|\")\n journey, date, time, venue, attendance = list(map(lambda x: x.strip(), scheduling_data)) # Remove extra spaces.\n\n if date and time:\n day, month, year = list(map(int, date.split(\"/\")))\n hour, minute = list(map(int, time.split(\":\")))\n game_dict['kickoff_time'] = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)\n\n if attendance:\n try:\n game_dict['attendance'] = 
int(attendance.split(\":\")[1])\n except ValueError:\n pass\n\n if venue:\n game_dict['venue'] = venue\n\n if journey:\n game_dict['journey'] = journey.split(\" \")[1]\n\n if competition_phase=='cup':\n if int(journey.split(\" \")[1])==1:\n game_dict['round_phase'] =\"quarter_final\"\n elif int(journey.split(\" \")[1])==2:\n game_dict['round_phase'] =\"semi_final\"\n elif int(journey.split(\" \")[1])==3:\n game_dict['round_phase'] =\"final\"\n\n for i in range(2, 7):\n score_home_attribute = ''\n score_away_attribute = ''\n if i == 2:\n score_home_attribute = 'score_home_first'\n score_away_attribute = 'score_away_first'\n elif i == 3:\n score_home_attribute = 'score_home_second'\n score_away_attribute = 'score_away_second'\n elif i == 4:\n score_home_attribute = 'score_home_third'\n score_away_attribute = 'score_away_third'\n elif i == 5:\n score_home_attribute = 'score_home_fourth'\n score_away_attribute = 'score_away_fourth'\n elif i == 6:\n score_home_attribute = 'score_home_extra'\n score_away_attribute = 'score_away_extra'\n\n quarter_data = info_game_data('.estnaranja')('td').eq(i).text()\n if quarter_data:\n try:\n game_dict[score_home_attribute], game_dict[score_away_attribute] = list(\n map(int, quarter_data.split(\"|\")))\n except ValueError:\n pass\n\n referees_data = info_game_data('.estnaranja')('td').eq(0).text()\n if referees_data:\n referees = referees_data.split(\":\")[1].strip().split(\",\")\n referees = list(filter(None, referees))\n referees = list(map(lambda x: x.strip(), referees))\n n_ref = 1\n for referee in referees:\n game_dict['referee_'+str(n_ref)] = referee\n n_ref+=1\n\n try:\n game = Game.get(Game.game_acbid == game_dict['game_acbid'])\n except:\n game = Game.create(**game_dict)\n return game",
"def new_game(cls, user):\n game = Game(user=user,\n game_state=\".........\",\n game_over=False)\n game.put()\n return game",
"def decode_game(obj):\n try:\n players = obj['players']\n jacks = obj['jacks']\n library = obj['library']\n pool = obj['pool']\n stack = obj['stack']\n _current_frame = obj['_current_frame']\n except KeyError as e:\n raise GTREncodingError(e.message)\n\n game_dict = copy.deepcopy(obj)\n\n game_dict['players'] = [decode_player(p) for p in players]\n\n game_dict['jacks'] = decode_zone(jacks, 'jacks')\n game_dict['library'] = decode_zone(library, 'library')\n game_dict['pool'] = decode_zone(pool, 'pool')\n\n game_dict['_current_frame'] = decode_frame(_current_frame)\n game_dict['stack'] = decode_stack(stack)\n\n # Revert the ['Player', <n>] refs to Game.players[<n>]\n for i, frame in enumerate(game_dict['stack'].stack):\n new_args = []\n for arg in frame.args:\n if type(arg) is list and arg[0] == 'Player':\n player = game_dict['players'][arg[1]]\n new_args.append(player)\n else:\n new_args.append(arg)\n\n game_dict['stack'].stack[i].args = tuple(new_args)\n\n if game_dict['_current_frame'] is not None:\n new_args = []\n for arg in game_dict['_current_frame'].args:\n if type(arg) is list and arg[0] == 'Player':\n player = game_dict['players'][arg[1]]\n new_args.append(player)\n else:\n new_args.append(arg)\n\n game_dict['_current_frame'].args = tuple(new_args)\n\n\n\n #TODO: Why are we doing this and not Game(**game_dict), checking for TypeError?\n game_obj = Game()\n for k, v in game_dict.items():\n setattr(game_obj, k, v)\n\n return game_obj",
"def create_one_game(self):\n return Game2048(task_name=self.result_path, game_mode=False)",
"def new_game(cls, x_user, o_user, game_moves):\n game = Game(x_user=x_user,\n o_user=o_user,\n game_moves=game_moves,\n moves_count=0,\n game_over=False)\n game.put()\n usergame = UserGame(user=x_user, \n game_key=game.key,\n moves_count=0,\n game_over=False)\n usergame.put()\n usergame = UserGame(user=o_user, \n game_key=game.key,\n moves_count=0,\n game_over=False)\n usergame.put()\n return game",
"def get_new_game(game_config):\n _type = game_config[\"game_type\"]\n if _type == \"hex\":\n game = Hex(game_config[\"hex\"], verbose=game_config[\"verbose\"])\n else:\n raise ValueError(\"Game type is not supported\")\n return game",
"def make_game(self):\n game = Game(self.data['gamename'])\n self.game = game\n return game",
"def __init__(self, player):\r\n other_player = \"lower\" if player == \"upper\" else \"upper\"\r\n \r\n #set our sides \r\n self.player_information[\"us\"][\"player_side\"] = player\r\n self.player_information[\"them\"][\"player_side\"] = other_player\r\n\r\n #create our board edge and board representation\r\n self.board_edge = hex_boundary_getter((0,0), 4, [])\r\n self.board_array = generate_board()",
"def __init__(self, game, user, *, data=None, **kwargs):\r\n self.game = game\r\n self.user = user\r\n \r\n if data:\r\n self.from_data(data)\r\n else:\r\n self.initialize(**kwargs)",
"def __init__(self):\n self.board = Board()\n self.__root = BinaryNode(Board(), None, Board.PLAYER_1)\n self.player = Board.PLAYER_0\n self.win = False",
"def __init__(self, vocab, game_board, p_id):\r\n pass",
"def copy(self):\n new_game = Game(self.name, *self.agents, independent_update=self.independent_update, default_run_kwargs=self.default_run_kwargs, _set_defaults=False)\n new_game.i = self.i\n new_game.env = self.env_copy()\n new_game.env[\"game\"] = new_game\n return new_game",
"def create_game_internal(black, white,\n sgf_or_stones=None,\n stones=None, sgf=None):\n assert sum(1 for x in [sgf_or_stones, stones, sgf]\n if x is not None) <= 1, \\\n \"can't supply more than one initial state to create_game_internal\"\n if sgf_or_stones:\n if isinstance(sgf_or_stones, str):\n assert sgf_or_stones[0] == '(', \\\n \"invalid SGF passed to create_game_internal; if you meant \" \\\n \"a text map, make it a list\"\n sgf = sgf_or_stones\n else:\n stones = sgf_or_stones\n if not sgf:\n if not stones:\n stones = []\n sgf = sgf_from_text_map(stones)\n game = Game(black=black, white=white, sgf=sgf,\n last_move_time=datetime.now())\n db.session.add(game)\n db.session.commit()\n return game",
"def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()",
"def __init__(self, mass, jsa, length):\n RigidBody.counter += 1\n self.length = length\n self.ID = RigidBody.counter\n self.mass_matrix = None\n self.constraints = []\n self.symbolic_variables = []\n self.r_i = BaseObject('r_i', self.ID)\n self.r_j = BaseObject('r_j', self.ID)\n self.u = BaseObject('u', self.ID)\n self.v = BaseObject('v', self.ID)\n self.base_points = [self.r_i,\n self.r_j]\n self.base_vectors = [self.u,\n self.v]\n self.r_i.local_coordinates = np.array([0, 0, 0])\n self.r_j.local_coordinates = np.array([0, 0, length])\n self.r_i.global_coordinates = np.array([0, 0, 0])\n self.r_j.global_coordinates = np.array([0, 0, length])\n self.u.local_coordinates = np.array([1, 0, 0])\n self.v.local_coordinates = np.array([0, 1, 0])\n self.u.global_coordinates = np.array([1, 0, 0])\n self.v.global_coordinates = np.array([0, 1, 0])\n self.r_g_loc = np.array([0, 0, 0.5 * length])\n self.make_symbolic_variables()\n self.calculate_mass_matrix(mass, jsa)\n self.rigid_body_constraints()",
"def __init__(self,game):\r\n self.game=game\r\n self.can=game.can\r\n self.vrai=False\r\n self.select=-1\r\n self.img=game.img_bto\r\n self.bateau()\r\n self.ima()\r\n self.aléatoire(5,self.game.j1)",
"def __init__(\n self,\n space,\n bounds,\n params,\n objective,\n generations,\n n_particles,\n n_dimensions,\n archive_size,\n restart_freq=np.inf\n ):\n self.space = space\n self._bounds = bounds\n self._objective = objective\n self._generations = generations\n self._n_particles = n_particles\n self._n_dimensions = n_dimensions\n self._archive_size = archive_size\n self._restart_freq = restart_freq\n self._vmax = -bounds[0] + bounds[1]\n\n assert generations >= 0\n assert n_particles >= 0\n assert n_dimensions >= 0\n assert restart_freq >= 1\n assert n_particles >= archive_size and archive_size >= 0\n\n try:\n self.w = params['w']\n self.c1 = params['c1']\n self.c2 = params['c2']\n self.c3 = params['c3']\n self._w = self.w\n except KeyError:\n print('Params: Missing parameters')\n raise KeyError",
"def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._acted = False\n self._dash_x = 0\n self._dash_y = 0\n self._energy = 0\n self._genarium = 0\n self._is_busy = False\n self._job = None\n self._legendarium = 0\n self._moves = 0\n self._mythicite = 0\n self._owner = None\n self._protector = None\n self._rarium = 0\n self._shield = 0\n self._x = 0\n self._y = 0",
"def __init__(self, game, player):\n self.game = game\n self.player = player",
"def __init__(self, game_name, sim=None):\n if sim is None:\n sim = Game(game_name)\n self.__sim = sim\n # sim should be a pointer\n self.game_name = game_name",
"def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file",
"def create_game(cls, user, misses_allowed, secret_word, current_solution):\n game = cls(parent=user,\n user=user,\n misses_allowed=misses_allowed,\n misses_remaining=misses_allowed,\n secret_word=secret_word,\n current_solution=current_solution)\n game.put()\n return game",
"def create_instance(c_instance):\n return MonoPedal(c_instance)"
] |
[
"0.5283068",
"0.52795315",
"0.5256074",
"0.52275246",
"0.5195833",
"0.5193166",
"0.5190906",
"0.51309246",
"0.5112733",
"0.5102261",
"0.50863975",
"0.50775015",
"0.50729334",
"0.5063152",
"0.50593895",
"0.50592047",
"0.5057185",
"0.5038618",
"0.5004604",
"0.49999434",
"0.4995234",
"0.4983892",
"0.49768654",
"0.4969392",
"0.49633813",
"0.49515453",
"0.4921588",
"0.49136752",
"0.4911741",
"0.49114537"
] |
0.5664861
|
0
|
r""" Compute a lower bound on the quantum value of a nonlocal game [LD07]_. Calculates a lower bound on the maximum value that the specified nonlocal game can take on in quantum mechanical settings where Alice and Bob each have access to `dim`dimensional quantum system. This function works by starting with a randomlygenerated POVM for Bob, and then optimizing Alice's POVM and the shared entangled state. Then Alice's POVM and the entangled state are fixed, and Bob's POVM is optimized. And so on, back and forth between Alice and Bob until convergence is reached. Note that the algorithm is not guaranteed to obtain the optimal local bound and can get stuck in local minimum values. The alleviate this, the `iter` parameter allows one to run the algorithm some prespecified number of times and keep the highest value obtained. The algorithm is based on the alternating projections algorithm as it can be applied to Bell inequalities as shown in [LD07]_. The alternating projection algorithm has also been referred to as the "seesaw" algorithm as it goes back and forth between the following two
|
def quantum_value_lower_bound(
self,
dim: int = 2,
iters: int = 5,
tol: float = 10e-6,
):
# Get number of inputs and outputs.
_, num_outputs_bob, _, num_inputs_bob = self.pred_mat.shape
best_lower_bound = float("-inf")
for _ in range(iters):
# Generate a set of random POVMs for Bob. These measurements serve
# as a rough starting point for the alternating projection
# algorithm.
bob_tmp = random_povm(dim, num_inputs_bob, num_outputs_bob)
bob_povms = defaultdict(int)
for y_ques in range(num_inputs_bob):
for b_ans in range(num_outputs_bob):
bob_povms[y_ques, b_ans] = bob_tmp[:, :, y_ques, b_ans]
# Run the alternating projection algorithm between the two SDPs.
it_diff = 1
prev_win = -1
best = float("-inf")
while it_diff > tol:
# Optimize over Alice's measurement operators while fixing
# Bob's. If this is the first iteration, then the previously
# randomly generated operators in the outer loop are Bob's.
# Otherwise, Bob's operators come from running the next SDP.
alice_povms, lower_bound = self.__optimize_alice(dim, bob_povms)
bob_povms, lower_bound = self.__optimize_bob(dim, alice_povms)
it_diff = lower_bound - prev_win
prev_win = lower_bound
# As the SDPs keep alternating, check if the winning probability
# becomes any higher. If so, replace with new best.
best = max(best, lower_bound)
best_lower_bound = max(best, best_lower_bound)
return best_lower_bound
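A minimal usage sketch follows. It assumes the method above is part of toqito's `NonlocalGame` class, constructed from a question probability matrix and a predicate matrix; the import path and the CHSH construction below are illustrative assumptions, not part of this row's source.
import numpy as np
from toqito.nonlocal_games.nonlocal_game import NonlocalGame  # assumed import path

# CHSH game: questions x, y are uniform; players win when a XOR b == x AND y.
prob_mat = np.full((2, 2), 0.25)
pred_mat = np.zeros((2, 2, 2, 2))  # indexed as [a_out, b_out, x_in, y_in]
for a_out in range(2):
    for b_out in range(2):
        for x_in in range(2):
            for y_in in range(2):
                if (a_out ^ b_out) == (x_in & y_in):
                    pred_mat[a_out, b_out, x_in, y_in] = 1

chsh = NonlocalGame(prob_mat, pred_mat)
# A successful run should approach cos^2(pi/8) ~= 0.8536 (the Tsirelson bound).
print(chsh.quantum_value_lower_bound(dim=2, iters=5))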
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def minimaxLocalSearch(gamestate, depth, timeTotal, alpha, beta, maxEntity):\n bonus = 0\n isTerminalState = gamestate.board.checkTerminalState(gamestate.currentPlayer.noPlayer)\n # Basis Rekursif\n if ((depth == 0) or (time.time() > timeTotal) or (isTerminalState)):\n if (isTerminalState) and (gamestate.currentPlayer.noPlayer == maxEntity):\n bonus = 10\n elif (isTerminalState) and (gamestate.currentPlayer.noPlayer != maxEntity):\n bonus = -10\n return gamestate, U_Function(gamestate.currentPlayer, gamestate.oppositePlayer, gamestate.board.size, maxEntity) + bonus\n\n # Rekurens\n if (gamestate.currentPlayer.noPlayer == maxEntity):\n # Choose the maximum utility of the state\n # Iterate all pion and its possible moves\n maxGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n maxValue = -math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Choose the best move from local search heuristic\n if (len(all_possible_moves) > 0):\n move = getBestMove(all_possible_moves, gamestate)\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n \n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimaxLocalSearch(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old max value\n if (utility > maxValue):\n maxValue = utility\n maxGameState = newGameState\n \n alpha = max(alpha, maxValue)\n if (beta <= alpha):\n return maxGameState, maxValue\n\n return maxGameState, maxValue\n\n else:\n # Choose the minimum utility of the state\n minGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n minValue = math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n if (len(all_possible_moves) > 0):\n # Choose the best move from local search heuristic\n move = getBestMove(all_possible_moves, gamestate)\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimaxLocalSearch(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old min value\n if (utility < minValue):\n minValue = utility\n minGameState = newGameState\n \n beta = min(beta, minValue)\n if (beta <= alpha):\n return minGameState, minValue\n \n return minGameState, minValue",
"def localMin0(R, L, W):\n fo = costFunction(R, W)\n vacantL = vacantPoint(L)\n beta = None\n q = None\n\n while True:\n fmin = fo\n\n for alpha in range(0, len(R)):\n for p in range(0, len(vacantL)):\n TxpR = transpositionMatrix(R, vacantL, alpha, p)\n ftrial = costFunction(TxpR, W)\n if ftrial < fmin:\n fmin = ftrial\n beta = alpha\n q = p\n\n if (beta != None) and (q != None):\n TaqR = transpositionMatrix(R, vacantL, beta, q)\n vacantL[q] = R[beta].copy()\n R = TaqR.copy()\n beta = None\n q = None\n\n if fmin <= fo:\n return fmin, R",
"def get_GP_optimum(obj):\n\n # Define space\n space = Design_space(obj.domain, obj.constraints)\n bounds = space.get_bounds()\n\n # Get function to optimize + gradients\n # Also mask by everything that is allowed by the constraints\n # fun = lambda d: fun_dfun(obj, space, d)[0]\n # f_df = lambda d: fun_dfun(obj, space, d)\n # def fun(d):\n # return fun_dfun(obj, space, d)[0]\n # Specify Optimizer --- L-BFGS\n optimizer = OptLbfgs(space.get_bounds(), maxiter=1000)\n\n # Do the optimisation\n x, _ = optimizer.optimize(\n x0=obj.x_opt,\n f=lambda d: fun_dfun(obj, space, d)[0],\n f_df=lambda d: fun_dfun(obj, space, d))\n # TODO: MULTIPLE RE-STARTS FROM PREVIOUS BEST POINTS\n\n # Round values if space is discrete\n xtest = space.round_optimum(x)[0]\n\n if space.indicator_constraints(xtest):\n opt = xtest\n else:\n # Rounding mixed things up, so need to look at neighbours\n\n # Compute neighbours to optimum\n idx_comb = np.array(\n list(itertools.product([-1, 0, 1], repeat=len(bounds))))\n opt_combs = idx_comb + xtest\n\n # Evaluate\n GP_evals = list()\n combs = list()\n for idx, d in enumerate(opt_combs):\n\n cons_check = space.indicator_constraints(d)[0][0]\n bounds_check = indicator_boundaries(bounds, d)[0][0]\n\n if cons_check * bounds_check == 1:\n pred = obj.model.predict(d)[0][0][0]\n GP_evals.append(pred)\n combs.append(d)\n else:\n pass\n\n idx_opt = np.where(GP_evals == np.min(GP_evals))[0][0]\n opt = combs[idx_opt]\n\n return opt",
"def minimize(self, grid):\n self.deep += 1\n cells = grid.getAvailableCells()\n if cells == [] or self.deep > self.maxDeep:\n self.deep -= 1\n return self.evaluate(grid)\n\n ab_value = MiniMaxAlgorithm.infinity\n for cell in cells:\n for cell_value in self.possibleNewTiles:\n next_grid = grid.clone()\n next_grid.setCellValue(cell, cell_value)\n next_value = self.maximize(next_grid)\n ab_value = min(ab_value, next_value)\n if ab_value <= next_value:\n self.deep -= 1\n return ab_value\n\n self.deep -= 1\n return ab_value",
"def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):\n x = g / np.linalg.norm(g) * d_max\n for _ in range(max_iter):\n # gradient ascent\n x = x + stepsize * (M @ x + g)\n # projection to sphere\n x = x / np.linalg.norm(x) * d_max\n ## debug\n #loss = 0.5 * x.T @ M @ x + g.T @ x\n #print(f'Loss: {loss}')\n return x",
"def calculate_minimal_vector_bound(self, large_constant, LLL_matrix, GS_matrix, prec=100):\n R = RealField(prec)\n n = len(self.constants.primes) + 1\n\n # First, setup the vector vy.\n vy = [0 for _ in range(n)]\n eta_0 = R(self.constants.w * sqrt(self.constants.delta) / self.constants.a)\n vy[-1] = -R(large_constant * log(eta_0)).floor() \n vy = vector(ZZ, vy)\n print('vec',vy)\n # Second, calculate the constants needed.\n sigma = self.calculate_sigma(LLL_matrix, vy, prec)\n print('sigma',sigma)\n c2 = max([LLL_matrix.column(0).norm()**2 / GS_matrix.column(i).norm()**2 for i in range(n)]) \n # Lastly, calculate the lower-bound.\n print('c2',c2)\n minimal_vector_bound = (1 / c2) * sigma * LLL_matrix.column(0).norm()**2\n print('c1',minimal_vector_bound)\n return minimal_vector_bound",
"def prob2():\n # Define the function to be optimized and the initial condition.\n def multimin(x):\n r = np.sqrt((x[0]+1)**2 + x[1]**2)\n return r**2 *(1+ np.sin(4*r)**2)\n x0 = np.array([-2, -2])\n \n info = {}\n info[.5] = opt.basinhopping(multimin, x0, stepsize=0.5,\n minimizer_kwargs={'method':'nelder-mead'})\n info[.2] = opt.basinhopping(multimin, x0, stepsize=0.2,\n minimizer_kwargs={'method':'nelder-mead'})\n\n # Print the results.\n for step in info:\n print(\"Stepsize:\\t{}\\nMinimum:\\t{}\\n\".format(step, info[step].fun))\n\n # Answer the problem question and return the minimum value.\n print(\"0.2 is too small a stepsize to escape the basin of a local min.\")\n return info[.2].fun",
"def minimax(state, depth, player):\r\n if player == COMP:\r\n best = [-1, -1, -infinity]\r\n else:\r\n best = [-1, -1, +infinity]\r\n\r\n if depth == 0 or game_over(state):\r\n score = evaluate(state)\r\n return [-1, -1, score]\r\n\r\n for cell in empty_cells(state):\r\n x, y = cell[0], cell[1]\r\n state[x][y] = player\r\n score = minimax(state, depth - 1, -player)\r\n state[x][y] = 0\r\n score[0], score[1] = x, y\r\n\r\n if player == COMP:\r\n if score[2] > best[2]:\r\n best = score # max value\r\n else:\r\n if score[2] < best[2]:\r\n best = score # min value\r\n\r\n return best",
"def test_v_bounds(self):\n n = 50\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n G.i_ext_init = np.linspace(-1.0, 1.0, n)\n\n class BoundsChecker(object):\n def __init__(self, target):\n self.target = target\n self.small = None\n self.large = None\n self.order = 1\n\n def evolve(self, t, dt):\n small = np.min(self.target.v)\n large = np.max(self.target.v)\n if self.small is None or self.small > small:\n self.small = small\n if self.large is None or self.large < large:\n self.large = large\n \n M = BoundsChecker(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n self.assertLess(M.large, G.v_th)",
"def minimum_spanning_arborescence(sol):",
"def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)",
"def local_soft_argmin(cost_volume, sigma):\n # type: (torch.Tensor, int) -> torch.Tensor\n if cost_volume.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(cost_volume.dim()))\n\n if not isinstance(sigma, int):\n raise TypeError('argument \\'sigma\\' must be int, not {}'.format(type(sigma)))\n\n # grab max disparity\n max_disp = cost_volume.shape[1]\n N = cost_volume.size()[0]\n H = cost_volume.size()[2]\n W = cost_volume.size()[3]\n\n # d':|d'-d|<=sigma, d' = argmax( P(d) for d in 1:maxDisp ), (BatchSize, 1, Height, Width)\n index = torch.argmax(cost_volume, dim=1, keepdim=True)\n interval = torch.linspace(-sigma, sigma, 2 * sigma + 1).type_as(index).to(cost_volume.device)\n interval = interval.repeat(N, H, W, 1).permute(0, 3, 1, 2).contiguous()\n # (BatchSize, 2*sigma+1, Height, Width)\n index_group = (index + interval)\n\n # get mask in [0, max_disp)\n mask = ((index_group >= 0) & (index_group < max_disp)).detach().type_as(cost_volume)\n index_group = index_group.clamp(0, max_disp - 1)\n\n # gather values in the index_group\n disp_map = torch.gather(cost_volume, dim=1, index=index_group)\n\n # convert index_group from torch.LongTensor to torch.FloatTensor\n index_group = index_group.type_as(cost_volume)\n\n # d * P(d), and mask out index out of [0, max_disp), (BatchSize, 1, Height, Width)\n # if index in [0, max_disp), keep the original disparity value, otherwise -10000.0, as e(-10000.0) approximate 0.0\n disp_map = F.softmax((disp_map * mask + (1 - mask) * (-10000.0)), dim=1)\n disp_map = (disp_map * index_group).sum(dim=1, keepdim=True)\n\n return disp_map",
"def minimax(state, depth, player):\n\n if player == COMP:\n best = [-1, -1, inf]\n else:\n best = [-1, -1, -inf]\n\n if depth == 0 or game_over(state):\n score = evaluate(state)\n return [-1, -1, score]\n\n for cell in empty_cells(state):\n x, y = cell[0], cell[1]\n state[x][y] = player\n score = minimax(state, depth - 1, -player)\n state[x][y] = 0\n score[0], score[1] = x, y\n\n if player == COMP:\n if score[2] < best[2]:\n best = score\n else:\n if score[2] > best[2]:\n best = score\n\n return best",
"def best_allowed(self, base):\n x = base.copy()\n var_opt = None\n dim = x.shape[0]\n for i in range(dim):\n # Plus increment\n x[i] += self.step\n curr_obj = self.obj_wrap(x)\n # Check update feasible, obj improved\n # new point in STM, before accepting\n if (curr_obj and \n not np.isclose(x.T, self.STM).all(axis=1).any()):\n if var_opt is None:\n var_opt = (i, self.step, curr_obj)\n elif var_opt[2] > curr_obj:\n var_opt = (i, self.step, curr_obj)\n\n \n # Minus increment\n x[i] -= 2 * self.step\n curr_obj = self.obj_wrap(x)\n # Check update feasible, obj improved\n # new point in STM, before accepting\n if (curr_obj and \n not np.isclose(x.T, self.STM).all(axis=1).any()):\n if var_opt is None:\n var_opt = (i, -self.step, curr_obj)\n elif var_opt[2] > curr_obj:\n var_opt = (i, -self.step, curr_obj)\n \n # Restore to original value\n x[i] += self.step\n \n if var_opt:\n x[var_opt[0]] += var_opt[1]\n return x, var_opt[2]\n else:\n return None",
"def main(N,N_p,T,lb,ub,prob,N_vars,F_min,F_const,P_c_min,P_c_max):\n\n lb,ub,f,fu,D,U,P = initDE(N_p,lb,ub,prob)\n if N_p < 4:\n raise Exception(\"Sorry, there must be atleast a population of 4. Reccomended 20\")\n for t in np.arange(T):\n for i in np.arange(N_p):\n V = mutation(i,N_p,t,T,P,N_vars,F_min,F_const)\n\n U=crossover(f,P_c_min,P_c_max,i,D,V,P,U)\n\n for j in np.arange(N_p): \n N,f,P = boundgreed(N,j,U,P,f,fu,ub,lb,prob)\n\t\n\t\t#if N == 500:\n\t\t\t#break\n best_of_f= min(f)\n globopt = P[f.argmin()]\n return N,best_of_f, globopt[:N_vars]",
"def minimize(self, evaluate, constrainToLower=False, constrainToUpper=False):\n improved = array([0,0,0])\n #------------------------------------------------\n for index, member in enumerate(self.population):\n #------------------------------------------------\n source = self.population[randrange(len(self.population))]\n x = member.copyAndModify(self.maxMutations, self.scale, source, self.maxIndexes)\n if constrainToLower:\n x = maximum(self.lowerDomain, x)\n if constrainToUpper:\n x = minimum(self.upperDomain, x)\n #------------------------------------------------\n loss = evaluate(x)\n #------------------------------------------------\n if index == self.diversityIndex:\n self.diversity.update(x, loss)\n self.diversityLoss = loss\n #------------------------------------------------\n if loss < self.eliteLoss:\n member.update(x, loss)\n self.eliteIndex = index\n self.eliteLoss = loss\n improved[0] += 1\n else:\n slot = randrange(len(self.population))\n slotMember = self.population[slot]\n if (slot != self.diversityIndex) and (loss <= slotMember.loss):\n # --------------------------------------------------\n slotMember.update(x, loss)\n improved[1] += 1\n # --------------------------------------------------\n elif (index != self.diversityIndex) and (loss <= member.loss):\n # --------------------------------------------------\n member.update(x, loss)\n improved[2] += 1\n # --------------------------------------------------\n #------------------------------------------------\n # --------------------------------------------------\n # reduce the scale if there were less than 'self.minImprovements' \n # improved members in the population.\n if sum(improved) < self.minImprovements:\n self.scale *= self.gamma\n # --------------------------------------------------\n self.improvements += improved",
"def optimize(self, maxiter):\n for iteration in range(maxiter):\n self.sortParticles()\n self.phi = int(phiMin + iteration *((phiMax - phiMin) / float(maxiter)))\n self.cluster()\n #self.ConnectClusters()\n for i in range(self.n_particles):\n x = self.particles_pos[i]\n v = self.velocities[i]\n p_best = self.p_best[i]\n self.velocities[i] = self.update_velocity(x, v, p_best , self.g_best , self.getLbestOfCluster(self.getClusterOfParticle(i)) , i)\n self.particles_pos[i] = self.update_position(x, v)\n # Update the best position for particle i\n if self.func(self.particles_pos[i]) < self.func(p_best):\n self.p_best[i] = self.particles_pos[i]\n # Update the best position overall\n if self.func(self.particles_pos[i]) < self.func(self.g_best):\n \n self.g_best = self.particles_pos[i]\n return self.g_best, self.func(self.g_best)",
"def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)",
"def optimize(self, maxiter=200):\n for _ in range(maxiter):\n for i in range(self.n_particles):\n x = self.particles_pos[i]\n v = self.velocities[i]\n p_best = self.p_best[i]\n self.velocities[i] = self.update_velocity(x, v, p_best, self.g_best)\n self.particles_pos[i] = self.update_position(x, v)\n # Update the best position for particle i\n if self.func(self.particles_pos[i]) < self.func(p_best):\n self.p_best[i] = self.particles_pos[i]\n # Update the best position overall\n if self.func(self.particles_pos[i]) < self.func(self.g_best):\n self.g_best = self.particles_pos[i]\n return self.g_best, self.func(self.g_best)",
"def gilpin_ne(A, epsilon=1e-4, max_iter=np.inf):\n A = A.T # make compatible with nesterov_ne\n m, n = A.shape\n R = np.vstack((np.hstack((np.zeros((m, m)), -A)),\n np.hstack((A.T, np.zeros((n, n))))))\n norm_R = linalg.norm(R, 2)\n u_ = (1. / m) * np.ones(m)\n v_ = (1. / n) * np.ones(n)\n x = u_.copy()\n y = v_.copy()\n D = 1. - .5 * (1. / m + 1. / n)\n gamma = np.e\n eps = 1.\n gradx_acc = np.zeros(m)\n grady_acc = np.zeros(n)\n values = []\n gaps = []\n k = 0.\n while k < max_iter and eps >= epsilon:\n mu = eps / (2. * D)\n\n # misc\n L = norm_R ** 2 / mu\n stepsize = 1. / L\n\n gradx_acc *= 0.\n gradx_acc *= 0.\n while True:\n # make call to oracle\n aux1 = A.dot(y)\n aux = -aux1 / mu\n aux += u_\n u = proj_simplex(aux)\n aux2 = A.T.dot(x)\n aux = aux2 / mu\n aux += v_\n v = proj_simplex(aux)\n gradx, grady = A.dot(v), -A.T.dot(u)\n gradx_acc += .5 * (k + 1.) * gradx\n grady_acc += .5 * (k + 1.) * grady\n\n value = x.dot(aux1)\n values.append(value)\n gap = aux2.max() - aux1.min()\n gaps.append(gap)\n assert gap + 1e-10 >= 0., \"The world is a weird place!\"\n print (\"%03i: game value <Ax, u> = %g, primal-dual \"\n \"gap=%g\") % (k + 1, value, gap)\n\n # check convergence\n if gap < eps:\n print \"Converged (primal-dual gap < %g).\" % eps\n break\n\n # y update\n yx = proj_simplex(x - stepsize * gradx)\n yy = proj_simplex(y - stepsize * grady)\n\n # z update\n zx = proj_simplex(u_ - stepsize * gradx_acc)\n zy = proj_simplex(v_ - stepsize * grady_acc)\n\n # x update\n factor = 2. / (k + 3.)\n x = factor * zx\n x += (1. - factor) * yx\n y = factor * zy\n y += (1. - factor) * yy\n\n k += 1\n if k >= max_iter: break\n\n # decrease eps\n eps /= gamma\n print \"Decreasing epsilon to %g\" % eps\n\n return x, y, values, gaps",
"def randomized_range(A, rank, power_iter=2, block_krylov=False, l=0):\n\n Q = np.random.randn(A.shape[1], rank + l)\n K = Q = A.dot(Q)\n\n for i in range(power_iter):\n Q, _ = la.lu(Q, permute_l=True)\n Q, _ = la.lu(A.T.dot(Q), permute_l=True)\n Q = A.dot(Q)\n K = np.hstack((K, Q))\n Q, _ = la.qr(K, mode='economic')\n Q = Q[:, :(rank + l) * (power_iter + 1)]\n\n if block_krylov:\n K = Q\n for i in range(power_iter):\n Q, _ = la.lu(Q, permute_l=True)\n Q, _ = la.lu(A.T.dot(Q), permute_l=True)\n Q = A.dot(Q)\n K = np.hstack((K, Q))\n Q, _ = la.qr(K, mode='economic')\n Q = Q[:, :(rank + l) * (power_iter + 1)]\n else:\n # Power iterations.\n for i in range(power_iter):\n Q, _ = la.lu(Q, permute_l=True)\n Q, _ = la.lu(A.T.dot(Q), permute_l=True)\n Q = A.dot(Q)\n Q, _ = la.qr(Q, mode='economic')\n Q = Q[:, :rank + l]\n return Q",
"def _solve(self, job_server=None):\n\n # loop over generations\n for g in xrange(1,self.max_generations):\n # set the generation\n self.generation = g\n\n # update the population\n self._evolve_population()\n \n # evaluate the population\n self._eval_population(job_server)\n\n # decide what stays\n ind = self.population_errors > self.old_population_errors\n self.population[ind,:] = self.old_population[ind,:]\n self.population_errors[ind] = self.old_population_errors[ind]\n\n # set the index of the best individual\n best_ind = self.population_errors.argmin()\n\n # update what is best\n if self.population_errors[best_ind] < self.best_error:\n self.best_error = self.population_errors[best_ind]\n self.best_individual = numpy.copy(self.population[best_ind,:])\n self.best_generation = self.generation\n\n if self.verbose:\n print \"Best generation: %g\" % (self.best_generation)\n print \"Best Error: %g\" % (self.best_error)\n print \"Best Indiv: \" + str(self.best_individual)\n print\n \n # see if done\n if self.best_error < self.goal_error:\n break\n\n # see if polish with fmin search after the first generation\n if self.polish:\n if self.verbose:\n print \"Polishing best result: %g\" % (self.population_errors[best_ind])\n iprint = 1\n else:\n iprint = -1\n # polish with bounded min search\n polished_individual, polished_error, details = \\\n scipy.optimize.fmin_l_bfgs_b(self.error_func,\n self.population[best_ind,:],\n args=self.args,\n bounds=self.param_ranges,\n approx_grad=True,\n iprint=iprint)\n if self.verbose:\n print \"Polished Result: %g\" % (polished_error)\n print \"Polished Indiv: \" + str(polished_individual)\n if polished_error < self.population_errors[best_ind]:\n # it's better, so keep it\n self.population[best_ind,:] = polished_individual\n self.population_errors[best_ind] = polished_error\n\n # update what is best\n self.best_error = self.population_errors[best_ind]\n self.best_individual = numpy.copy(self.population[best_ind,:])\n \n\n if job_server:\n self.pp_stats = job_server.get_stats()",
"def solve_ilp(self):\n\n ilp_solution = pylp.Solution()\n self.ilp_solver.set_constraints(self.constraints)\n message = self.ilp_solver.solve(ilp_solution)\n print(\"ILP solved with minimal value \" + str(ilp_solution.get_value()) + \" and status \" + message)\n\n solution = lil_matrix(self.graph.shape)\n for i in range(self.num_variables):\n print(\"value of var \" + str(i) + \" is \" + str(ilp_solution.get_vector()[i]))\n if ilp_solution.get_vector()[i] < 0.5:\n continue\n (u,v) = self.var_to_edge[i]\n solution[u,v] = self.graph[u,v] - self.min_cost + 1\n\n return solution",
"def hill_climbing(\n search_prob,\n find_max: bool = True,\n max_x: float = math.inf,\n min_x: float = -math.inf,\n max_y: float = math.inf,\n min_y: float = -math.inf,\n visualization: bool = False,\n max_iter: int = 10000,\n) -> SearchProblem:\n current_state = search_prob\n scores = [] # list to store the current score at each iteration\n iterations = 0\n solution_found = False\n visited = set()\n while not solution_found and iterations < max_iter:\n visited.add(current_state)\n iterations += 1\n current_score = current_state.score()\n scores.append(current_score)\n neighbors = current_state.get_neighbors()\n max_change = -math.inf\n min_change = math.inf\n next_state = None # to hold the next best neighbor\n for neighbor in neighbors:\n if neighbor in visited:\n continue # do not want to visit the same state again\n if (\n neighbor.x > max_x\n or neighbor.x < min_x\n or neighbor.y > max_y\n or neighbor.y < min_y\n ):\n continue # neighbor outside our bounds\n change = neighbor.score() - current_score\n if find_max: # finding max\n # going to direction with greatest ascent\n if change > max_change and change > 0:\n max_change = change\n next_state = neighbor\n else: # finding min\n # to direction with greatest descent\n if change < min_change and change < 0:\n min_change = change\n next_state = neighbor\n if next_state is not None:\n # we found at least one neighbor which improved the current state\n current_state = next_state\n else:\n # since we have no neighbor that improves the solution we stop the search\n solution_found = True\n\n if visualization:\n from matplotlib import pyplot as plt\n\n plt.plot(range(iterations), scores)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Function values\")\n plt.show()\n\n return current_state",
"def minimize_cost_golden(f, vmin, offset=0, step=1, maxiter=1000):\n # type: (Callable[[int], float], float, int, int, Optional[int]) -> MinCostResult\n fib2 = fib1 = fib0 = 0\n cur_idx = 0\n nfev = 0\n xmax = vmax = v_prev = None\n while maxiter is None or nfev < maxiter:\n v_cur = f(step * fib0 + offset)\n nfev += 1\n\n if v_cur >= vmin:\n # found upper bound, use binary search to find answer\n stop = step * fib0 + offset\n return minimize_cost_binary(f, vmin, start=step * (fib1 + 1) + offset,\n stop=stop, save=stop, step=step, nfev=nfev)\n else:\n if vmax is not None and v_cur <= vmax:\n if cur_idx <= 3:\n # special case: 0 <= xmax < 3, and we already checked all possibilities, so\n # we know vmax < vmin. There is no solution and just return.\n return MinCostResult(x=None, xmax=step * xmax + offset, vmax=vmax, nfev=nfev)\n else:\n # we found the bracket that encloses maximum, perform golden section search\n a, x, b = fib2, fib1, fib0\n fx = v_prev\n while x > a + 1 or b > x + 1:\n u = a + b - x\n fu = f(step * u + offset)\n nfev += 1\n\n if fu >= fx:\n if u > x:\n a, x = x, u\n fx = fu\n else:\n x, b = u, x\n fx = fu\n\n if fx >= vmin:\n # found upper bound, use binary search to find answer\n stop = step * x + offset\n return minimize_cost_binary(f, vmin, start=step * (a + 1) + offset,\n stop=stop, save=stop, step=step,\n nfev=nfev)\n else:\n if u > x:\n b = u\n else:\n a = u\n\n # golden section search terminated, the maximum is less than vmin\n return MinCostResult(x=None, xmax=step * x + offset, vmax=fx, nfev=nfev)\n else:\n # still not close to maximum, continue searching\n vmax = v_prev = v_cur\n xmax = fib0\n cur_idx += 1\n if cur_idx <= 3:\n fib2, fib1, fib0 = fib1, fib0, cur_idx\n else:\n fib2, fib1, fib0 = fib1, fib0, fib1 + fib0\n\n raise ValueError('Maximum number of iteration achieved')",
"def findStableState(L, boundaryConditions, Minv = None):\n\tn = L.shape[0]\n\tm = len(boundaryConditions)\n\tVb = np.zeros(m)\n\tpositions = {}\n\tfor i in range(m):\n\t\tcondition = boundaryConditions[i]\n\t\tVb[i] = condition[0]\n\t\tpositions[condition[0]] = condition[1]\n\tVb = np.sort(Vb)\n\tBPrime = np.zeros((m, n))\n\tYPrime = np.zeros((m, 3))\n\tfor i in range(m):\n\t\tBPrime[i][int(Vb[i])] = 1\n\t\tYPrime[i] = positions[Vb[i]]\n\n\tif Minv is None:\n\t\tzeroCorner = np.zeros((m, m))\n\t\tM = np.array(np.bmat([[L, -BPrime.T], [BPrime, zeroCorner]]))\n\t\tMinv = np.linalg.inv(M)\n\n\tXT = np.zeros((3, n))\n\t# find x coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[0]\n\tx = np.dot(Minv, y)\n\tXT[0] = x[:n]\n\t# find y coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[1]\n\tx = np.dot(Minv, y)\n\tXT[1] = x[:n]\n\t# find z coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[2]\n\tx = np.dot(Minv, y)\n\tXT[2] = x[:n]\n\n\treturn XT.T",
"def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best",
"def global_optimum(self):\r\n x = [random.uniform(0, 1) for _ in range(self.objectives - 1)]\r\n x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)])\r\n return x",
"def global_optimum(self):\r\n x = [random.uniform(0, 1) for _ in range(self.objectives - 1)]\r\n x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)])\r\n return x",
"def global_optimum(self):\r\n x = [random.uniform(0, 1) for _ in range(self.objectives - 1)]\r\n x.extend([0.5 for _ in range(self.dimensions - self.objectives + 1)])\r\n return x"
] |
[
"0.57273936",
"0.5695391",
"0.56469387",
"0.5637769",
"0.5607225",
"0.5496913",
"0.54709166",
"0.54532737",
"0.54181194",
"0.5404801",
"0.53838634",
"0.53466105",
"0.532478",
"0.52901936",
"0.52881795",
"0.52795726",
"0.5252258",
"0.5250768",
"0.5213736",
"0.5203388",
"0.5202855",
"0.51943505",
"0.5193815",
"0.5185704",
"0.51690733",
"0.5156425",
"0.51270306",
"0.5121306",
"0.5121306",
"0.5121306"
] |
0.66929513
|
0
|
Compute an upper bound on the commuting measurement value of the nonlocal game. This function calculates an upper bound on the commuting measurement value by using k levels of the NPA hierarchy [NPA]_. The NPA hierarchy is a uniform family of semidefinite programs that converges to the commuting measurement value of any nonlocal game. The level of the hierarchy can be given either as a positive integer or as a string of the form '1+ab+aab', which indicates that an intermediate level of the hierarchy should be used; this example uses all products of one measurement, all products of one Alice and one Bob measurement, and all products of two Alice and one Bob measurements. References ========== .. [NPA] Miguel Navascues, Stefano Pironio, Antonio Acin, "A convergent hierarchy of semidefinite programs characterizing the set of quantum correlations."
|
def commuting_measurement_value_upper_bound(self, k: int | str = 1) -> float:
alice_out, bob_out, alice_in, bob_in = self.pred_mat.shape
mat = defaultdict(cvxpy.Variable)
for x_in in range(alice_in):
for y_in in range(bob_in):
mat[x_in, y_in] = cvxpy.Variable(
(alice_out, bob_out), name=f"M(a, b | {x_in}, {y_in})"
)
p_win = cvxpy.Constant(0)
for a_out in range(alice_out):
for b_out in range(bob_out):
for x_in in range(alice_in):
for y_in in range(bob_in):
p_win += (
self.prob_mat[x_in, y_in]
* self.pred_mat[a_out, b_out, x_in, y_in]
* mat[x_in, y_in][a_out, b_out]
)
npa = npa_constraints(mat, k)
objective = cvxpy.Maximize(p_win)
problem = cvxpy.Problem(objective, npa)
cs_val = problem.solve()
return cs_val
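A hedged sketch of calling the method above on the same (assumed) `NonlocalGame` wrapper, showing both forms of the level argument that the docstring describes; the class name and import path are assumptions carried over from the earlier sketch.
import numpy as np
from toqito.nonlocal_games.nonlocal_game import NonlocalGame  # assumed import path

# Same CHSH game as in the earlier sketch.
prob_mat = np.full((2, 2), 0.25)
pred_mat = np.zeros((2, 2, 2, 2))
for a_out in range(2):
    for b_out in range(2):
        for x_in in range(2):
            for y_in in range(2):
                if (a_out ^ b_out) == (x_in & y_in):
                    pred_mat[a_out, b_out, x_in, y_in] = 1
chsh = NonlocalGame(prob_mat, pred_mat)

# Integer level: level-1 NPA relaxation; for CHSH this is close to the Tsirelson bound ~0.8536.
print(chsh.commuting_measurement_value_upper_bound(k=1))
# String level: intermediate level built from products of one Alice and one Bob operator.
print(chsh.commuting_measurement_value_upper_bound(k="1+ab"))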
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def max_pp(level):\n base_pp = 6\n level_pp = 2 * level\n return base_pp + (level_pp - 2)",
"def calc_level(xp, dominion):\n if xp < 3:\n xp_potential = 1\n if xp >= 3 and xp < 6:\n xp_potential = 2\n if xp >= 6 and xp < 12:\n xp_potential = 3\n if xp >= 12 and xp < 24:\n xp_potential = 4\n if xp >= 24 and xp < 48:\n xp_potential = 5\n if xp >= 48 and xp < 72:\n xp_potential = 6\n if xp >= 72 and xp < 96:\n xp_potential = 7\n if xp >= 96 and xp < 130:\n xp_potential = 8\n if xp >= 130 and xp < 170:\n xp_potential = 9\n if xp >= 170:\n xp_potential = 10\n if dominion < 2:\n dom_potential = 1\n if dominion >= 2 and dominion < 4:\n dom_potential = 2\n if dominion >= 4 and dominion < 10:\n dom_potential = 3\n if dominion >= 10 and dominion < 22:\n dom_potential = 4\n if dominion >= 22 and dominion < 38:\n dom_potential = 5\n if dominion >= 38 and dominion < 57:\n dom_potential = 6\n if dominion >= 57 and dominion < 76:\n dom_potential = 7\n if dominion >= 76 and dominion < 95:\n dom_potential = 8\n if dominion >= 95 and dominion < 124:\n dom_potential = 9\n if dominion >= 124:\n dom_potential = 10\n return min(xp_potential, dom_potential)",
"def calculate_upper_boundary(self, divisor):\n\n # see how high you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n highest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = highest_divisor + estimator\n else:\n highest_divisor = divisor\n divisor = prev_divisor + estimator\n if highest_divisor == divisor:\n break\n counter += 1\n return math.floor(highest_divisor * 1000) / 1000",
"def maxlevel(self, N, c=1):\n return int(np.floor(np.log(N/self.nfreq/c)/np.log(self.scaling))) + 1",
"def level_k_policy_player_2_func(self, y_max=1747, iteration=1, gp_samples=None):\n if self.reasoning_level_player_2 % 2 == 0: \n # if the reasoning level is an even number, we need to start by calculating the level-1 policy of the opponent (Agent 1)\n k = 2\n \n x_1, _ = self.level_1_policy_player_1_func(iteration=iteration)\n \n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_1), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n \n while self.reasoning_level_player_2 > k:\n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n\n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_2), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n \n k += 2\n\n else: \n # if the reasoning level is an odd number, we need to start by calculating the level-1 policy of Agent 2\n k = 3\n \n x_2, _ = self.level_1_policy_player_2_func(iteration=iteration)\n \n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n\n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_2), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n \n while self.reasoning_level_player_2 > k:\n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = 
self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n\n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_2), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n \n k += 2\n\n return x_2, all_ucb",
"def level_k_policy_player_1_func(self, y_max=1747, iteration=1, gp_samples=None):\n\n if self.reasoning_level_player_1 % 2 == 0: \n # if the reasoning level is an even number, we need to start by calculating the level-1 policy of the opponent (Agent 2)\n k = 2\n \n x_2, _ = self.level_1_policy_player_2_func(iteration=iteration)\n\n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n\n while self.reasoning_level_player_1 > k:\n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_2), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n\n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n \n k += 2\n\n else:\n # if the reasoning level is an odd number, we need to start by calculating the level-1 policy of Agent 1\n k = 3\n\n x_1, _ = self.level_1_policy_player_1_func(iteration=iteration)\n \n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_2), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n\n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n \n while self.reasoning_level_player_1 > k:\n #### Agent 2 best-responds\n x_1 = np.squeeze(x_1)\n rep = np.tile(x_1, (self.sub_domain_player_2.shape[0], 1))\n sub_domain = np.concatenate((rep, self.sub_domain_player_2), axis=1)\n para_dict = {\"gp\":self.gp_2, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_2 = 
self.sub_domain_player_2[np.argmax(all_ucb)]\n x_2 = x_2.reshape(1, -1)\n\n #### Agent 1 best-responds\n x_2 = np.squeeze(x_2)\n rep = np.tile(x_2, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n \n k += 2\n\n return x_1, all_ucb",
"def MICRO_upper_bound(reported, Wp, Lp, Sw=None, Sl=None):\n if not Sw:\n Sw = {w: 0 for w in Wp}\n\n if not Sl:\n Sl = {l: 0 for l in Lp}\n\n u = 0\n for w in Wp:\n for l in Lp:\n if w != l:\n curr_u = (d(Sl[l]) + d(Sw[w])) / (d(Sl[l]) * reported[w] - d(Sw[w]) * reported[l])\n u = max(u, curr_u)\n\n return u",
"def lvl_algo(next_level):\n total_xp_needed = (next_level * next_level)\n return total_xp_needed",
"def growth_factor_CDM_baryons_neutrinos(self, k, z):\n return self.growth_cbnu_unnormalized(k, z)/self.growth_cbnu_unnormalized(k, 0.)",
"def growth_cbnu_unnormalized(self, k, z):\n LCDM = self.growth_factor_scale_independent(z)\n \n # Same of LCDM if no massive neutrinos\n if self.M_nu_tot == 0.:\n LCDM = np.array([LCDM for i in range(len(np.atleast_1d(k)))])\n return np.transpose(LCDM)\n else:\n K, Z = np.meshgrid(k,z)\n f_cb = self.f_cb\n f_nu = np.sum(np.atleast_1d(self.f_nu))\n\n # Normalize at z initial\n LCDM = np.transpose([LCDM for i in range(len(np.atleast_1d(k)))])/self.growth_factor_scale_independent(self.z_drag_EH())\n\n # exponent\n p_cb = 1./4.*(5.-np.sqrt(1.+24.*f_cb))\n\n # Growth rate for TOTAL MATTER (cdm+b+nu) for nuLCDM Universe (approx!)\n growth_cbnu = (f_cb**(0.7/p_cb) + (LCDM/(1. + self.y_fs(K)))**0.7)**(p_cb/0.7)*LCDM**(1.-p_cb)\n\n return growth_cbnu",
"def calc_max_level(num_point):\n return int(numpy.ceil(numpy.log2(num_point)))",
"def maxoccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n occupancy_list = []\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n occupancy = 1\n occupancy_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n occupancy *= 0.25\n occupancy_rc *= 0.25\n elif seq[j + i] not in [\"A\", \"C\", \"G\", \"T\"]:\n occupancy *= 0.25\n occupancy_rc *= 0.25\n else:\n occupancy *= pwm_dictionary[seq[j + i]][j]\n occupancy_rc *= pwm_dictionary_rc[seq[j + i]][j]\n occupancy_list.append(occupancy)\n occupancy_list.append(occupancy_rc)\n max_occupancy = max(occupancy_list)\n return max_occupancy",
"def u_crit(state, sys):\n s = state[0]\n i = state[1]\n tau = scipy.interpolate.interp1d(sys.tau.s, sys.tau.i, kind = \"cubic\")\n phi = scipy.interpolate.interp1d(sys.phi.s, sys.phi.i, kind = \"cubic\")\n cc = scipy.interpolate.interp1d(sys.commutation_curve[0],\n sys.commutation_curve[1],\n kind = \"cubic\")\n if i > sys.imax:\n return sys.umax\n if s <= sys.commutation_curve[0][-1]:\n #print(\"Case 1\")\n if s < sys.sbar or i < tau(s):\n return 0\n return sys.umax\n elif s > sys.commutation_curve[0][-1] and s < sys.commutation_curve[0][0]:\n #print(\"Case 2\")\n if ((i > tau(s)) and (i < cc(s))) or (i > sys.imax):\n return sys.umax\n elif i > cc(s) and i < sys.imax:\n return 0\n else:\n return 0\n else:\n #print(\"Case 3\")\n if i > sys.imax:\n return sys.umax\n elif s > sys.sstar and i > phi(s):\n return sys.umax\n return 0",
"def upper_bound(self) -> float:\n ...",
"def _compute_kl(self, lvl):\n kl = [] # kernal length\n for n in range(lvl):\n fct = self.scaling**n # up-sampling factor\n kl.append(fct*(self.nfreq-1)+1)\n kl.append(kl[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return kl[::-1]",
"def max_level(board):\n acc_board = accum_board(board)\n for row in acc_board:\n row.append(0)\n acc_board.append([0]*len(acc_board[0]))\n m, n = len(board), len(board[0])\n max_level_sum = float('-inf')\n top_left = None\n for i in range(m):\n for j in range(n):\n for k in range(min(m-i, n-j)):\n level = (acc_board[i+k][j+k] +\n acc_board[i-1][j-1] -\n acc_board[i-1][j+k] -\n acc_board[i+k][j-1])\n if level > max_level_sum:\n max_level_sum = level\n top_left = (j+1, i+1, k+1)\n return top_left",
"def limited_information_privacy_approximate_upper_lb(P0: np.ndarray,\r\n P1: np.ndarray):\r\n P0, P1 = sanity_check_probabilities(P0, P1)\r\n na = P0.shape[0]\r\n ns = P1.shape[1]\r\n gamma = cp.Variable(1, nonneg=True)\r\n pi0 = cp.Variable((ns, na), nonneg=True)\r\n pi1 = cp.Variable((ns, na), nonneg=True)\r\n\r\n constraint = []\r\n constraint_pi0 = [cp.sum(pi0[s, :]) == 1 for s in range(ns)]\r\n constraint_pi1 = [cp.sum(pi1[s, :]) == 1 for s in range(ns)]\r\n for s in range(ns):\r\n Ds = 0.\r\n for y in range(ns):\r\n P1_pi1 = P1[:, s, y] @ pi1[s, :]\r\n P0_pi0 = P0[:, s, y] @ pi0[s, :]\r\n Ds += cp.kl_div(P1_pi1, P0_pi0) + P1_pi1 - P0_pi0\r\n constraint += [Ds <= gamma]\r\n\r\n constraints = constraint + constraint_pi0 + constraint_pi1\r\n problem = cp.Problem(cp.Minimize(gamma), constraints)\r\n\r\n result = problem.solve()\r\n return result, pi0.value, pi1.value",
"def compute_corollary_7a_gibbs_bound(empirical_gibbs_risk, m, N, KLQP, delta=0.05):\n m_over_N = float(m)/N \n complexity_term = 3 * log(m) * sqrt( m*(1.-m_over_N) ) \n return compute_general_transductive_gibbs_bound(new_transductive_divergence, empirical_gibbs_risk, m, N, KLQP, delta, complexity_term)",
"def langmuir_occ(p, k):\n\n intermediate = k * p\n\n occupancy = intermediate / (intermediate + 1)\n\n return occupancy",
"def maximum_level(self, question_type):\n\t\treturn 2",
"def get_level(k):\r\n return int(log2(k))",
"def max_self_consumption(parameter, ppv, pl, pvmod=True, ideal=False):\r\n # Maximize self consumption for AC-coupled systems\r\n if parameter['Top'] == 'AC':\r\n\r\n # DC power output of the PV generator\r\n if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp \r\n if ideal:\r\n Ppv = np.maximum(0, ppv ) * parameter['P_PV'] * 1000\r\n else:\r\n Ppv = np.minimum(ppv * parameter['P_PV'], parameter['P_PV2AC_in']) * 1000\r\n else: # ppv: DC power output of the PV generator in W\r\n \r\n if ideal:\r\n Ppv = np.maximum(0, ppv)\r\n else:\r\n Ppv = np.minimum(ppv, parameter['P_PV2AC_in'] * 1000)\r\n\r\n\r\n # Normalized input power of the PV inverter\r\n ppvinvin = Ppv / parameter['P_PV2AC_in'] / 1000\r\n\r\n # AC power output of the PV inverter taking into account the conversion losses and maximum\r\n # output power of the PV inverter\r\n Ppvs = np.minimum(np.maximum(0, Ppv-(parameter['PV2AC_a_in'] * ppvinvin * ppvinvin + parameter['PV2AC_b_in'] * ppvinvin + parameter['PV2AC_c_in'])), parameter['P_PV2AC_out'] * 1000)\r\n\r\n # 3.2 Residual power\r\n\r\n # Additional power consumption of other system components (e.g. AC power meter) in W\r\n Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']\r\n\r\n # Adding the standby consumption of the PV inverter in times without any AC power output of the PV system\r\n # to the additional power consumption\r\n Pperi[Ppvs == 0] += parameter['P_PVINV_AC']\r\n\r\n # Residual power\r\n if ideal:\r\n Pr = Ppv - pl\r\n else:\r\n Pr = Ppvs - pl - Pperi\r\n\r\n return Pr, Ppv, Ppvs, Pperi\r\n\r\n # Maximize self consumption for DC-coupled systems\r\n elif parameter['Top'] == 'DC':\r\n # Initialization and preallocation\r\n Ppv2ac_in_ac = np.zeros_like(ppv)\r\n Ppv = np.empty_like(ppv) # DC power output of the PV generator\r\n\r\n if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp\r\n Ppv = ppv * parameter['P_PV'] * 1000\r\n else:\r\n Ppv = ppv\r\n\r\n # DC power output of the PV generator taking into account the maximum\r\n # DC input power of the PV2AC conversion pathway\r\n Ppv = np.minimum(Ppv, parameter['P_PV2AC_in'] * 1000)\r\n\r\n # Residual power\r\n\r\n # Power demand on the AC side\r\n Pac = pl + parameter['P_PERI_AC']\r\n\r\n # Normalized AC output power of the PV2AC conversion pathway to cover the AC\r\n # power demand\r\n ppv2ac = np.minimum(\r\n Pac, parameter['P_PV2AC_out'] * 1000) / parameter['P_PV2AC_out'] / 1000\r\n\r\n # Target DC input power of the PV2AC conversion pathway\r\n Ppv2ac_in_ac = np.minimum(Pac, parameter['P_PV2AC_out'] * 1000) + (\r\n parameter['PV2AC_a_out'] * ppv2ac**2 + parameter['PV2AC_b_out'] * ppv2ac + parameter['PV2AC_c_out'])\r\n\r\n # Normalized DC input power of the PV2AC conversion pathway TODO 1\r\n ppv2ac = Ppv / parameter['P_PV2AC_in'] / 1000\r\n\r\n # Target AC output power of the PV2AC conversion pathway\r\n Ppv2ac_out = np.maximum(\r\n 0, Ppv - (parameter['PV2AC_a_in'] * ppv2ac**2 + parameter['PV2AC_b_in'] * ppv2ac + parameter['PV2AC_c_in']))\r\n\r\n # Residual power for battery charging\r\n Prpv = Ppv - Ppv2ac_in_ac\r\n\r\n # Residual power for battery discharging\r\n Pr = Ppv2ac_out - Pac\r\n\r\n return Pr, Prpv, Ppv, ppv2ac, Ppv2ac_out\r\n\r\n # Maximize self consumption for PV-coupled systems\r\n elif parameter['Top'] == 'PV':\r\n # Preallocation\r\n # Pbat = np.zeros_like(ppv) # DC power of the battery in W\r\n # soc = np.zeros_like(ppv) # State of charge of the battery\r\n # Ppv2ac_out = np.zeros_like(ppv) # Output power of the PV2AC conversion pathway in W\r\n # 
Ppv2bat_in = np.zeros_like(ppv) # Input power of the PV2BAT conversion pathway in W\r\n # Pbat2pv_out = np.zeros_like(ppv) # Output power of the BAT2PV conversion pathway in W\r\n # Ppvbs = np.zeros_like(ppv) # AC power of the PV-battery system in W\r\n Ppv = np.empty_like(ppv) # DC power output of the PV generator\r\n # Additional power consumption of other system components (e.g. AC power meter) in W\r\n Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']\r\n # dt = 1 # Time increment in s\r\n # th = 0 # Start threshold for the recharging of the battery\r\n # soc0 = 0 # State of charge of the battery in the first time step\r\n\r\n # DC power output of the PV generator\r\n if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp\r\n Ppv = ppv * parameter['P_PV'] * 1000\r\n\r\n else: # ppv: DC power output of the PV generator in W\r\n Ppv = ppv\r\n\r\n # Power demand on the AC side\r\n Pac = pl + Pperi\r\n\r\n return Pac, Ppv, Pperi",
"def est_maxlevel(dims,bandwidth):\n lev = math.floor((math.log(min(dims))/math.log(2)-2)/bandwidth)\n lev=int(lev)\n return lev",
"def _G_to_km_on_basis_single_level(self, w, m):\n kB = self._sym.kBoundedSubspace(self.k,t=1)\n g = kB.K_kschur()\n mon = self.km()\n if m < w.length():\n return 0\n ans = self.zero()\n for la in Partitions(m, max_part = self.k):\n ans += g.homogeneous_basis_noncommutative_variables_zero_Hecke((la)).coefficient(w)*mon(la)\n return ans",
"def get_l_n_u_ramping(ppc, lower_bound, upper_bound, Nhrs=2):\n \"\"\"lower_bound must be neagtiva, upper_bound must be positive\"\"\"\n gens_hrs = ppc['gen'][:, 0]\n gens_hrs = np.sort(gens_hrs)\n \n n_buses = set_n_buses(ppc, Nhrs)\n n_gens = len(gens_hrs) // 2\n l = np.zeros(n_buses)\n u = np.zeros(n_buses)\n for i in range(len(l)):\n if (i+1) in gens_hrs:\n l[i] = lower_bound\n u[i] = upper_bound\n else:\n l[i] = -np.inf\n u[i] = np.inf\n return l, u",
"def gomeroccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n gomer_occupancy = 1\n area_pwm_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(pwm_length - 1, 1, -1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length):\n if j <= i:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n elif (j + i) > len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n # print \"got to else\"\n s = seq[j + i]\n prod_gomer *= pwm_dictionary[s][j]\n prod_gomer_rc *= area_pwm_rc[s][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n for i in range(len(seq) - 1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n prod_gomer *= pwm_dictionary[seq[j + i]][j]\n prod_gomer_rc *= area_pwm_rc[seq[j + i]][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n gomer_occupancy = 1 - gomer_occupancy\n\n return gomer_occupancy",
"def knapval_norep(W, wt):\n # choose to use item.weight and get item.value + optimal from what's left\n # last_item = items[-1]\n # other_items = items[:-1]\n # options = list(\n # knpaval_norep(capacity, other_items))\n # if last_item.weight <= capacity:\n # options.append(last_item.value +\n # knapval_norep(capacity-last_item.weight, other_items),\n # )\n #\n\n \"\"\"Find max weight that can fit in knapsack size W.\"\"\"\n # Create n nested arrays of 0 * (W + 1)\n max_vals = [[0] * (W + 1) for x in range(len(wt))]\n # Set max_vals[0] to wt[0] if wt[0] <= j\n max_vals[0] = [wt[0] if wt[0] <= j else 0 for j in range(W + 1)]\n for i in range(1, len(wt)):\n for j in range(1, W + 1):\n value = max_vals[i - 1][j] # previous i @ same j\n if wt[i] <= j:\n val = (max_vals[i - 1][j - wt[i]]) + wt[i]\n if value < val:\n value = val\n max_vals[i][j] = value\n else:\n max_vals[i][j] = value # set to [i - 1][j]\n else:\n max_vals[i][j] = value # set to [i - 1][j]\n\n return max_vals[-1][-1]",
"def _uppLim(self):\n if self.getResult(param='TS value')[0] >= self.tsmin:\n print(\"\\t=== TS value {} is above TSmin {}, no need to compute an upperlimit ===\"\n .format(self.getResult(param='TS value')[0], self.tsmin))\n return\n\n from UpperLimits import UpperLimits\n import UnbinnedAnalysis as UA\n \n like = UA.unbinnedAnalysis(evfile=self.outmktime, scfile=self.ft2, expmap=self.outexpmap,\n expcube=self.outltcube, irfs=self.irf, optimizer=\"NewMinuit\", srcmdl=self.model)\n like.fit(0)\n ul = UpperLimits(like)\n\n try:\n upp, norm=ul['TARGET'].bayesianUL(emin=self.emin, emax=self.emax, cl=0.95) \n except:\n upp = -1\n wf = open(self.outgtlike, 'a')\n wf.write(\"\\nUpper limit on source 'TARGET': {} ph/cm2/s.\".format(upp))\n wf.close()\n return",
"def get_no_strat_levels(no_vert_levels_atmos):\n try:\n if float(no_vert_levels_atmos) > 60:\n no_strat_levels = '20'\n else:\n no_strat_levels = '10'\n except:\n no_strat_levels = '10'\n\n return no_strat_levels",
"def bernul(n, k, p):\n return comb(n, k) * p ** k * (1 - p) ** (n-k)"
] |
[
"0.56043136",
"0.5423251",
"0.5390842",
"0.5316683",
"0.52712446",
"0.5253936",
"0.5244817",
"0.52361995",
"0.5169396",
"0.514784",
"0.50897783",
"0.5058986",
"0.5043447",
"0.5019133",
"0.50130534",
"0.4990135",
"0.4977293",
"0.49693656",
"0.49640068",
"0.49362233",
"0.49251145",
"0.49091205",
"0.49017766",
"0.4899975",
"0.4892911",
"0.48928753",
"0.48892134",
"0.48785493",
"0.48725733",
"0.48693272"
] |
0.61479115
|
0
|
Return True if word starts or ends with a word from wordlist.
|
def isWordPartOf(self, word, wordlist):
    # True if any wordlist entry starts or ends with `word`
    # (i.e. `word` is a prefix or suffix of a known bad word).
    for w in wordlist:
        # entries that matched in an earlier call are cached in self._part_of_badword
        if w in self._part_of_badword:
            return True
        if w.startswith(word) or w.endswith(word):
            self._part_of_badword[w] = True
            return True
    return False
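
A minimal usage sketch of the method above, assuming it lives on a filter class that initialises `self._part_of_badword` as an empty dict used as a match cache; the enclosing class, its name and the sample words below are illustrative assumptions, not part of the original record. Note the direction of the check as written: it tests whether a wordlist entry starts or ends with `word`, and remembers entries that have matched once.

class BadWordFilter:
    def __init__(self):
        self._part_of_badword = {}  # wordlist entries that matched at least once

    def isWordPartOf(self, word, wordlist):
        for w in wordlist:
            if w in self._part_of_badword:  # cached hit from an earlier call
                return True
            if w.startswith(word) or w.endswith(word):
                self._part_of_badword[w] = True
                return True
        return False

f = BadWordFilter()
print(f.isWordPartOf("damn", ["damnation", "hellfire"]))  # True: "damnation" starts with "damn"
print(f.isWordPartOf("fire", ["hellfire"]))               # True: "hellfire" ends with "fire"
print(f.isWordPartOf("xyz", ["foobar"]))                  # False: no prefix/suffix match, nothing cached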
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_word(wordlist, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n return word in wordlist",
"def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList",
"def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList",
"def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList",
"def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False",
"def search(self, word):\n now = self.tree\n for i in word:\n if i in now:\n now = now[i]\n else:\n return False\n return True if 'end' in now else False",
"def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False",
"def start_with_a_end_with_b(words):\n return [word for word in words if re.match(r\"a\\w*b$\", word)]",
"def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True",
"def check_words(title, wordlist, verbose=False):\n\tfor word in wordlist:\n\t\tif title.find(word) >= 0:\n\t\t\tif verbose:\n\t\t\t\tprint(\"\\t\\tFOUND '\"+word+\"' IN:\", title)\n\t\t\treturn True\n\treturn False",
"def search(self, word: str) -> bool:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return self.end_of_word in curr_chars",
"def is_stop_word(word):\n return word in final_stop_words",
"def isAlphabeticalWord(word, wordList):\n \n if (len(word) > 0):\n curr = word[0]\n for letter in word:\n if (curr > letter):\n return False\n else:\n curr = letter\n if wordList is None:\n return True\n return word in wordList",
"def startswith(list, prefix):\n\n return list[:len(prefix)] == prefix",
"def check_the_list_for_matching(checked_list: list, phrase_to_match: str) -> bool:\n for word in checked_list:\n if phrase_to_match.startswith(word):\n return True\n return False",
"def valid(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(series_of_words.pop())\n for word in series_of_words:\n if word in words:\n return False\n words.append(word)\n return True",
"def search(self, word: str, starts_with: bool = False) -> bool:\n current_node = self.trie\n\n for i in word:\n if i in current_node.get_child():\n current_node = current_node.children[i]\n continue\n return False\n\n if not starts_with and not current_node.is_end():\n return False\n\n return True",
"def in_bisect(word_list, word):\n\ti = bisect_left(word_list, word)\n\tif i != len(word_list) and word_list[i] == word:\n\t\treturn True\n\telse:\n\t\treturn False",
"def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff",
"def split_precondition(\n tokens: Sequence[str], words: Sequence[str], word_ends: Sequence[str]\n) -> bool:\n duplicated_word_ends = []\n for end1, end2 in zip(word_ends, word_ends[1:]):\n if end1 == end2:\n duplicated_word_ends.append(end1)\n\n if not duplicated_word_ends:\n return False\n\n duplicate_not_word = False\n for duplicate in duplicated_word_ends:\n if duplicate not in words:\n duplicate_not_word = True\n break\n\n if not duplicate_not_word:\n return False\n\n return True",
"def search(self, word: str) -> bool:\n # Checking if the word is present in the list.\n return word in self.mylist",
"def win(word_list):\n\n if '_' not in word_list:\n\n return True\n else:\n return False",
"def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False",
"def filter1(word):\n if not word: return False\n w = word.lower()\n if w in STOPWORDS: return False\n return True",
"def search(self, word: str) -> bool:\n node = self\n for c in word:\n node = node.d.get(c)\n if not node:\n return False\n return node.end",
"def is_valid_word(word, hand, word_list):\n failure=True\n word=word.lower()\n if word not in word_list:\n failure=False\n for i in word:\n w=hand.get(i,0)\n if w==0:\n failure=False\n break\n return failure",
"def is_prefix(prefix: str, word: str):\n return word.startswith(prefix)",
"def words_in_dictionary(word_list):\n for word in word_list:\n word = word.lower()\n raw_word = word.replace(\"'\", '').replace('.', '')\n if word not in DICTIONARY_LOWER and raw_word not in DICTIONARY_LOWER:\n return False\n return True",
"def isPresent(self, word):\n\t\treturn word in self.link_words",
"def has_word(self, word)->bool:\n if len(word) == 1:\n chars = word + GNode.CHAR_EOW\n else:\n chars = word[0] + GNode.CHAR_REV + word[1:] + GNode.CHAR_EOW\n cursor = self.root\n for c in chars.lower():\n if c not in cursor.children:\n return False\n else:\n cursor = cursor.children[c]\n return True"
] |
[
"0.71524596",
"0.704898",
"0.704898",
"0.704898",
"0.68477887",
"0.68353873",
"0.68219405",
"0.6771139",
"0.67498654",
"0.67202973",
"0.66723657",
"0.6666607",
"0.66264325",
"0.6624651",
"0.6590032",
"0.6564156",
"0.6558892",
"0.6540614",
"0.65204513",
"0.64893687",
"0.6488182",
"0.6468986",
"0.64261603",
"0.6425225",
"0.64036804",
"0.63894117",
"0.6388996",
"0.63809085",
"0.6368127",
"0.63648665"
] |
0.7566571
|
0
|
Returns the result of detect intent with an audio file as input. Using the same `session_id` between requests allows continuation of the conversation.
|
def detect_intent_audio():
audio_file_path = "/home/gal/toibot_ws/src/ToiBot1/src/speech_to_text/speech_wavs/filename.wav"
session_client = dialogflow.SessionsClient()
# Note: hard coding audio_encoding and sample_rate_hertz for simplicity.
audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16
sample_rate_hertz = 16000
session = session_client.session_path("toibot-1549026967633", "gal1")
print('Session path: {}\n'.format(session))
with open(audio_file_path, 'rb') as audio_file:
input_audio = audio_file.read()
audio_config = dialogflow.types.InputAudioConfig(
audio_encoding=audio_encoding, language_code="en",
sample_rate_hertz=sample_rate_hertz)
query_input = dialogflow.types.QueryInput(audio_config=audio_config)
response = session_client.detect_intent(
session=session, query_input=query_input,
input_audio=input_audio)
print('=' * 20)
print('Query text: {}'.format(response.query_result.query_text))
print('Detected intent: {} (confidence: {})\n'.format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(
response.query_result.fulfillment_text))
    return response.query_result.fulfillment_text
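
A sketch of the same call pattern with the hard-coded project ID, session ID and file path lifted into parameters; it assumes the legacy `dialogflow` 0.x client that this sample already imports (`dialogflow.enums` / `dialogflow.types`) and a 16 kHz LINEAR16 WAV file. The names below are illustrative.

import dialogflow

def detect_intent_from_wav(project_id, session_id, wav_path, language_code="en"):
    # Same detect-intent flow as above, without hard-coded values.
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)

    with open(wav_path, 'rb') as audio_file:
        input_audio = audio_file.read()

    audio_config = dialogflow.types.InputAudioConfig(
        audio_encoding=dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
        language_code=language_code,
        sample_rate_hertz=16000)  # assumes 16 kHz LINEAR16 audio
    query_input = dialogflow.types.QueryInput(audio_config=audio_config)

    response = session_client.detect_intent(
        session=session, query_input=query_input, input_audio=input_audio)
    return response.query_result.fulfillment_text

# Reusing the same session_id across calls keeps the conversation context on the
# Dialogflow side, e.g.:
# reply = detect_intent_from_wav("my-project-id", "session-1", "utterance.wav")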
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recognize(self, session_id):\n session_client = dialogflow.SessionsClient()\n\n # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.\n audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16\n sample_rate_hertz = 16000\n\n session = session_client.session_path(self.project_id, session_id)\n print('Session path: {}\\n'.format(session))\n\n self.audio_stream.reset()\n self._recorder.add_processor(self.audio_stream)\n\n audio_config = dialogflow.types.InputAudioConfig(\n audio_encoding=audio_encoding, language_code=self.language_code,\n sample_rate_hertz=sample_rate_hertz)\n\n requests = self.audio_stream.request_stream(audio_config, session)\n responses = session_client.streaming_detect_intent(requests)\n\n print('=' * 20)\n try:\n for response in responses:\n print('Intermediate transcript: \"{}\".'.format(\n response.recognition_result.transcript))\n print('is_final?: \"{}\".'.format(\n response.recognition_result.is_final))\n if response.recognition_result.is_final == True:\n self._recorder.remove_processor(self.audio_stream)\n self.audio_stream.end_audio()\n # Note: The result from the last response is the final transcript along\n # with the detected content.\n query_result = response.query_result\n\n print('=' * 20)\n print('Query text: {}'.format(query_result.query_text))\n print('Detected intent: {} (confidence: {})\\n'.format(\n query_result.intent.display_name,\n query_result.intent_detection_confidence))\n print('Fulfillment text: {}\\n'.format(\n query_result.fulfillment_text))\n return query_result\n\n except:\n print ('May be timeout')\n print('=' * 20)\n self._recorder.remove_processor(self.audio_stream)\n self.audio_stream.end_audio()\n return \n # [END dialogflow_detect_intent_streaming]",
"def detect_intent_audio():\n audio_file_path = home+\"catkin_ws/src/robot_ears/speech_wavs/normalized.wav\"\n session_client = dialogflow.SessionsClient()\n\n # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.\n audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16\n sample_rate_hertz = 16000\n session = session_client.session_path(\"toibot-1549026967633\", \"gal1\")\n print('Session path: {}\\n'.format(session))\n\n with open(audio_file_path, 'rb') as audio_file:\n input_audio = audio_file.read()\n\n audio_config = dialogflow.types.InputAudioConfig(\n audio_encoding=audio_encoding, language_code=\"en\",\n sample_rate_hertz=sample_rate_hertz)\n\n query_input = dialogflow.types.QueryInput(audio_config=audio_config)\n response = session_client.detect_intent(\n session=session, query_input=query_input,\n input_audio=input_audio)\n\n print('=' * 20)\n # save query.txt \n write_to_file(home+\"catkin_ws/src/robot_ears/text_files/query.txt\", response.query_result.query_text)\n print(\"query: \" + response.query_result.query_text)\n # save intent.txt \n write_to_file(home+\"catkin_ws/src/robot_ears/text_files/intent.txt\", response.query_result.intent.display_name)\n print(\"response: \" + response.query_result.intent.display_name)\n # save response.txt \n write_to_file(home+\"catkin_ws/src/robot_ears/text_files/response.txt\", response.query_result.fulfillment_text)\n print(\"intent: \" + response.query_result.fulfillment_text)\n print('=' * 20)",
"def detect(self, audio_file: str) -> SpeechDetectedResponse:\n pass",
"def intent_detection(project_id, session_id, credentials_file_path, text, language_code):\n\n try:\n\n credentials = service_account.Credentials.from_service_account_file(credentials_file_path)\n session_client = dialogflow.SessionsClient(credentials=credentials)\n\n session = session_client.session_path(project_id, session_id)\n print('Session path: {}\\n'.format(session))\n\n text_input = dialogflow.types.TextInput(\n text=text, language_code=language_code)\n\n query_input = dialogflow.types.QueryInput(text=text_input)\n\n response = session_client.detect_intent(\n session=session, query_input=query_input)\n\n print('=' * 20)\n print('Query text: {}'.format(response.query_result.query_text))\n print('Detected intent: {} (confidence: {})\\n'.format(\n response.query_result.intent.display_name,\n response.query_result.intent_detection_confidence))\n\n prediction = response.query_result.intent.display_name\n\n if prediction == 'Default Fallback Intent':\n prediction = 'None'\n\n except:\n\n prediction = 'Fail'\n\n return prediction",
"def get_intent(session_id, text=None, input_audio=None) -> IntentResult:\n if text is None and input_audio is None:\n raise ValueError(\"Either text or input_audio must be provided\")\n\n project_id = \"dojochatbot-gcietn\"\n language_code = \"en\"\n session_client = dialogflow.SessionsClient.from_service_account_file(\"keyfile.json\")\n session = session_client.session_path(project_id, session_id)\n\n if text is not None:\n text_input = dialogflow.types.TextInput(text=text, language_code=language_code)\n query_input = dialogflow.types.QueryInput(text=text_input)\n else:\n audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16\n audio_config = dialogflow.types.InputAudioConfig(\n audio_encoding=audio_encoding, language_code=language_code,\n )\n query_input = dialogflow.types.QueryInput(audio_config=audio_config)\n\n response = session_client.detect_intent(\n session=session, query_input=query_input, input_audio=input_audio\n )\n query_result = response.query_result\n intent = query_result.intent.display_name\n params = None\n all_params_present = True\n\n if intent == consts.SCHEDULE_MEETING:\n params = get_schedule_meeting_params(query_result.parameters)\n elif intent in {\n consts.STORE_AGENDA,\n consts.GET_AGENDA,\n consts.STORE_NOTES,\n consts.GET_NOTES,\n consts.CHANGE_REMIND,\n consts.CANCEL_MEETING,\n consts.DATE_INTENT,\n }:\n params = {\"datetime\": get_datetime(query_result.parameters)}\n\n if params is not None:\n all_params_present = check_params_present(params)\n\n is_mentioned = query_result.query_text.lower().startswith(\n f\"hey {consts.BOT_NAME.lower()}\"\n )\n result = IntentResult(\n intent, params, all_params_present, query_result.fulfillment_text, is_mentioned\n )\n\n return result",
"async def recognize_intent(\n self, intent_input: str, conversation_id: str | None\n ) -> str:\n if self.intent_agent is None:\n raise RuntimeError(\"Recognize intent was not prepared\")\n\n self.process_event(\n PipelineEvent(\n PipelineEventType.INTENT_START,\n {\n \"engine\": self.intent_agent,\n \"language\": self.pipeline.conversation_language,\n \"intent_input\": intent_input,\n },\n )\n )\n\n try:\n conversation_result = await conversation.async_converse(\n hass=self.hass,\n text=intent_input,\n conversation_id=conversation_id,\n context=self.context,\n language=self.pipeline.conversation_language,\n agent_id=self.intent_agent,\n )\n except Exception as src_error:\n _LOGGER.exception(\"Unexpected error during intent recognition\")\n raise IntentRecognitionError(\n code=\"intent-failed\",\n message=\"Unexpected error during intent recognition\",\n ) from src_error\n\n _LOGGER.debug(\"conversation result %s\", conversation_result)\n\n self.process_event(\n PipelineEvent(\n PipelineEventType.INTENT_END,\n {\"intent_output\": conversation_result.as_dict()},\n )\n )\n\n speech: str = conversation_result.response.speech.get(\"plain\", {}).get(\n \"speech\", \"\"\n )\n\n return speech",
"def send_audio_file(\n self, audio_file, device_state, authentication_headers\n ) -> bytes:\n\n payload = {\n 'context': device_state,\n 'event': {\n 'header': {\n 'namespace': 'SpeechRecognizer',\n 'name': 'Recognize',\n 'messageId': self.generate_message_id(),\n 'dialogRequestId': self.generate_dialogue_id(),\n },\n 'payload': {\n 'profile': 'CLOSE_TALK',\n 'format': 'AUDIO_L16_RATE_16000_CHANNELS_1'\n }\n }\n }\n multipart_data = MultipartEncoder(\n fields=[\n (\n 'request', (\n 'request',\n json.dumps(payload),\n 'application/json;',\n {'Content-Disposition': \"form-data; name='request'\"}\n ),\n ),\n (\n 'audio', (\n 'audio',\n audio_file,\n 'application/octet-stream',\n {'Content-Disposition': \"form-data; name='audio'\"}\n )\n ),\n ],\n boundary='boundary',\n )\n headers = {\n **authentication_headers,\n 'Content-Type': multipart_data.content_type\n }\n stream_id = self.connection.request(\n 'POST',\n '/v20160207/events',\n headers=headers,\n body=multipart_data,\n )\n response = self.connection.get_response(stream_id)\n return self.parse_response(response)",
"def request_endpoint(audio, speech_config, output_directory, lexical):\n audio_config = speechsdk.audio.AudioConfig(filename = audio)\n speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)\n result = speech_recognizer.recognize_once()\n filename = audio[audio.rindex('\\\\')+1:]\n text = process_recognition(result, filename, output_directory, lexical)\n return text, filename",
"def act(self, audio_file=None):\n #file as source\n if self.src == 'file':\n if audio_file is None:\n raise ValueError(\"Please provide a audio_file\")\n return None\n elif not os.path.exists(audio_file):\n raise FileNotFoundError(\"Specified file not found\")\n return None\n else:\n file = speech_recognition.AudioFile(audio_file)\n with file:\n speech = self.recog_obj.record(file)\n \n #mic as source\n elif self.src == 'mic':\n if audio_file is not None:\n print(\"WARNING: source is set to device microphone. Audio file will be ignored\\n\")\n \n try:\n with self.mic_obj:\n print(\"Speak into the mic....\\n\")\n self.recog_obj.adjust_for_ambient_noise(self.mic_obj)\n speech = self.recog_obj.listen(self.mic_obj)\n #if microphone is not detected\n except OSError:\n print(\"Error: Microphone not detected\")\n return None\n \n \n try:\n print(\"Please wait while we transcribe...\\n\")\n text = self.recog_obj.recognize_google(speech, language='en', show_all=self.debug)\n \n #if audio is not detected\n except speech_recognition.UnknownValueError:\n print(\"Error: Sorry audio not detected by device microphone\")\n return None\n \n #if there is connection issue or api issue\n except speech_recognition.RequestError:\n print(\"Error: API for transcription is not reachable. There may be some connection issue or server side issue\")\n return None\n \n #for imposing various rules to text \n #But if debug mode is enabled, transcript variable will store a dictionary of various transcriptions \n #along with their confidence probabilities, so conversion rules are disabled meanwhile \n transcript = self.tcr.deconcat(text) if not self.debug else text\n return transcript",
"def recognize_sync_audio_file(self, file, language_code = \"en-US\", is_long_recording = False):\n with io.open(file, \"rb\") as audio_file:\n content = audio_file.read()\n audio = speech.RecognitionAudio(content=content)\n\n if language_code not in self.languages:\n print('\\\"{}\\\" is not a supported language code. Make sure it\\'s supported by Google and try adding it to the languages list.\\n'.format(language_code))\n return -1\n\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=self.microphone_handler.RATE,\n language_code=language_code,\n )\n\n try:\n response = self.client.long_running_recognize(config=config, audio=audio, timeout=500).result() \n except:\n return -1\n\n return self._get_message_from_proto(response)['transcript']",
"def predict(uploaded_file):\n loc = AudioPredict.return_image(uploaded_file)\n return loc",
"def _recognize_audio(self, frames: io.BytesIO) -> str:\n audio = sr.AudioData(\n frames.getvalue(),\n self.framerate,\n pyaudio.get_sample_size(self.SAMPLE_FORMAT))\n\n recognizer = sr.Recognizer()\n response = None\n try:\n response = recognizer.recognize_google(audio)\n except sr.UnknownValueError:\n print(\"UnknownValueError: Apparently unrecognizable\")\n\n return response",
"def voice_recognizer():\n while dr.ttsIsSpeaking().result or dr.mediaIsPlaying().result:\n time.sleep(1)\n return dr.recognizeSpeech().result",
"def recognize_async_audio_stream(self, language_code = \"en-US\"): \n if language_code not in self.languages:\n print('\\\"{}\\\" is not a supported language code. Make sure it\\'s supported by Google and try adding adding it to the languages list.\\n'.format(language_code))\n return\n\n self.final_result_queue.queue.clear() # Clear all items in queue for new stream.\n\n config_stream = speech.StreamingRecognitionConfig(\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=self.microphone_handler.RATE,\n language_code=language_code,\n enable_automatic_punctuation=True,\n ),\n interim_results=True \n )\n\n self.microphone_handler.start_recording(streaming=True)\n while self.microphone_handler.streaming:\n data = self.microphone_handler.stream_generator()\n requests = (speech.StreamingRecognizeRequest(audio_content=content) for content in data)\n\n try:\n responses = self.client.streaming_recognize(config_stream, requests)\n for response in responses:\n self.final_result_queue.put(response.results[0])\n if response.results[0].is_final:\n return # Stops more recordings than one. Doesn't halt after recording is done. (temp)\n if self.debug:\n print(response.results[0].alternatives[0].transcript + '\\n') # Print all non final results in terminal(debug).\n except:\n print('Failed to get response.')",
"def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')",
"def speech(self, audio_file, verbose=None, headers=None):\n params = {}\n headers = headers or {}\n if verbose:\n params['verbose'] = True\n resp = req(self.logger, self.access_token, 'POST', '/speech', params,\n data=audio_file, headers=headers)\n return resp",
"def transcribe_file_ret(speech_file):\n from google.cloud import speech\n from google.cloud.speech import enums\n from google.cloud.speech import types\n client = speech.SpeechClient()\n\n # [START migration_sync_request]\n # [START migration_audio_config_file]\n with io.open(speech_file, 'rb') as audio_file:\n content = audio_file.read()\n\n audio = types.RecognitionAudio(content=content)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=8000,\n language_code='en-US',\n speech_contexts=[types.SpeechContext(\n phrases=phrases,\n )])\n #use_enhanced=True,\n # model='phone_call',)\n # [END migration_audio_config_file]\n\n # [START migration_sync_response]\n response = client.recognize(config, audio)\n # [END migration_sync_request]\n # Each result is for a consecutive portion of the audio. Iterate through\n # them to get the transcripts for the entire audio file.\n return(response)",
"def detect_intent_texts(project_id, session_id, text, language_code):\r\n from google.cloud import dialogflow\r\n\r\n session_client = dialogflow.SessionsClient()\r\n\r\n session = session_client.session_path(project_id, session_id)\r\n # print(\"Session path: {}\\n\".format(session))\r\n\r\n \r\n text_input = dialogflow.TextInput(text=text, language_code=language_code)\r\n\r\n query_input = dialogflow.QueryInput(text=text_input)\r\n\r\n response = session_client.detect_intent(\r\n request={\"session\": session, \"query_input\": query_input}\r\n )\r\n\r\n # print(\"=\" * 20)\r\n # print(\"Query text: {}\".format(response.query_result.query_text))\r\n # print(\r\n # \"Detected intent: {} (confidence: {})\\n\".format(\r\n # response.query_result.intent.display_name,\r\n # response.query_result.intent_detection_confidence,\r\n # )\r\n # )\r\n # print(\"Fulfillment text: {}\\n\".format(response.query_result.fulfillment_text))\r\n # print('#####')\r\n # print(type(response.query_result))\r\n # para = result[\"parameters\"][\"coinname\"]\r\n # result = goo.to_value(response.query_result)\r\n # print(type(result))\r\n # new_dict = json.loads(result)\r\n # print('parameter: ',result.struct_value.fields['parameters'])\r\n return response.query_result.fulfillment_text",
"def audio_recognized(evt: speechsdk.SpeechRecognitionEventArgs):\n if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:\n if evt.result.properties.get(\n speechsdk.PropertyId.SpeechServiceConnection_AutoDetectSourceLanguageResult) is None:\n print(\"Unable to detect any language\")\n else:\n detected_src_lang = evt.result.properties[\n speechsdk.PropertyId.SpeechServiceConnection_AutoDetectSourceLanguageResult]\n json_result = evt.result.properties[speechsdk.PropertyId.SpeechServiceResponse_JsonResult]\n detail_result = json.loads(json_result)\n start_offset = detail_result['Offset']\n duration = detail_result['Duration']\n if duration >= 0:\n end_offset = duration + start_offset\n else:\n end_offset = 0\n print(\"Detected language = \" + detected_src_lang)\n print(f\"Start offset = {start_offset}, End offset = {end_offset}, \"\n f\"Duration = {duration} (in units of hundreds of nanoseconds (HNS))\")\n global language_detected\n language_detected = True",
"def test_get_audio_fulfillment_file(self):\n self.api.queue_response(200, content=\"A license\")\n response = self.api.get_audio_fulfillment_file(\"patron id\", \"bib id\")\n\n [[method, url, args, kwargs]] = self.api.requests\n eq_(\"POST\", method)\n assert url.endswith('GetItemAudioFulfillment')\n eq_('<AudioFulfillmentRequest><ItemId>bib id</ItemId><PatronId>patron id</PatronId></AudioFulfillmentRequest>', kwargs['data'])\n\n eq_(200, response.status_code)\n eq_(\"A license\", response.content)",
"def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n bus.emit(message.reply(ident, data={\"error\": repr(e)}))",
"def get_audio(path):\n return send_from_directory('audio', path)",
"def recognize(self, audio):\n response = self.service.Recognize(cloud_speech_extended_pb2.RecognizeRequest(\n config=cloud_speech_extended_pb2.RecognitionConfig(\n # There are a bunch of config options you can specify. See https://goo.gl/KPZn97 for the full list.\n encoding='LINEAR16', # one of LINEAR16, FLAC, MULAW, AMR, AMR_WB\n sample_rate_hertz=audio.frame_rate, # the rate in hertz\n # See https://g.co/cloud/speech/docs/languages for a list of supported languages.\n language_code=self.settings.language, # a BCP-47 language tag\n enable_word_time_offsets=self.settings.time_offsets, # if true, return recognized word time offsets\n max_alternatives=self.settings.max_alternatives, # maximum number of returned hypotheses\n ),\n audio=cloud_speech_extended_pb2.RecognitionAudio(\n uri=None,\n content=audio.raw_data\n )\n ), self.settings.deadline)\n\n # Print the recognition result alternatives and confidence scores.\n results = []\n\n # for result in response.results:\n if len(response.results) > 0:\n result = response.results[0] # TODO: check why here we have list of results ?, when it is possible ?\n\n alternative = result.alternatives[0]\n alignment = [] #\n confirmed_results = []\n if self.settings.time_offsets:\n word_indices = [j for j in range(len(alternative.words)) if\n alternative.words[j].word != '<eps>']\n\n if len(word_indices) > 0:\n confirmed_results.append([alternative.words[i].word for i in word_indices])\n else: # alignment was not returned\n confirmed_results.append(alternative.transcript)\n\n alignment.append(\n [[alternative.words[i].start_time, alternative.words[i].end_time] for i in\n word_indices])\n results.append({\n 'transcript': ' '.join(confirmed_results),\n 'confidence': alternative.confidence,\n 'alignment': alignment,\n })\n else:\n results.append({\n 'transcript': alternative.transcript,\n 'confidence': alternative.confidence\n })\n\n return results",
"def sample_recognize(local_file_path):\n\n client = speech_v1p1beta1.SpeechClient()\n\n # local_file_path = 'resources/brooklyn_bridge.flac'\n\n # The language of the supplied audio. Even though additional languages are\n # provided by alternative_language_codes, a primary language is still required.\n language_code = \"fr-FR\"\n\n # Specify up to 3 additional languages as possible alternative languages\n # of the supplied audio.\n alternative_language_codes_element = \"en-GB\"\n alternative_language_codes_element_2 = \"ja-JP\"\n alternative_language_codes = [\n alternative_language_codes_element,\n alternative_language_codes_element_2,\n ]\n config = {\n \"language_code\": language_code,\n \"alternative_language_codes\": alternative_language_codes,\n }\n with io.open(local_file_path, \"rb\") as f:\n content = f.read()\n audio = {\"content\": content}\n\n response = client.recognize(config, audio)\n for result in response.results:\n # The language_code which was detected as the most likely being spoken in the audio\n # print(u\"Detected language: {}\".format(result.language_code))\n # First alternative is the most probable result\n alternative = result.alternatives[0]\n print(u\"Transcript: {}\".format(alternative.transcript))\n return(result.language_code)\n # return(response.results.language_code)",
"def read_utterances_from_files(session_folder, voice_sample_folder):\n utterance_content = []\n dialog_act = []\n\n with open('../test/data/' + session_folder + '/' + voice_sample_folder + '/label.json') as label_data:\n label = json.load(label_data)\n\n for j in range(len(label['turns'])):\n utterance_content += [label['turns'][j]['transcription']]\n dialog_act += [label['turns'][j]['semantics']['cam']]\n\n session_id = label['session-id']\n return [dialog_act, utterance_content, session_id]",
"def recognize_intent_once_from_file():\n # <IntentRecognitionOnceWithFile>\n try:\n intent_config = speechsdk.SpeechConfig(subscription=config.intent_key, region=config.intent_service_region)\n audio_config = speechsdk.audio.AudioConfig(filename=lampfilename)\n\n intent_recognizer = speechsdk.intent.IntentRecognizer(speech_config=intent_config, audio_config=audio_config)\n\n model = speechsdk.intent.LanguageUnderstandingModel(app_id=config.language_understanding_app_id)\n intents = [\n (model, \"HomeAutomation.TurnOn\"),\n (model, \"HomeAutomation.TurnOff\"),\n (\"This is a test.\", \"test\"),\n (\"Switch to the channel 34.\", \"34\"),\n (\"what's the weather like\", \"weather\"),\n (\"Please turn off the light\", \"turn off\" )\n ]\n intent_recognizer.add_intents(intents)\n\n intent_result = intent_recognizer.recognize_once()\n print(intent_result.text)\n except:\n print(sys.exc_info()[0])\n raise",
"def transcribe_streaming_voice_activity_timeouts(\n project_id: str,\n speech_start_timeout: int,\n speech_end_timeout: int,\n audio_file: str,\n) -> cloud_speech.StreamingRecognizeResponse:\n # Instantiates a client\n client = SpeechClient()\n\n # Reads a file as bytes\n with open(audio_file, \"rb\") as f:\n content = f.read()\n\n # In practice, stream should be a generator yielding chunks of audio data\n chunk_length = len(content) // 20\n stream = [\n content[start : start + chunk_length]\n for start in range(0, len(content), chunk_length)\n ]\n audio_requests = (\n cloud_speech.StreamingRecognizeRequest(audio=audio) for audio in stream\n )\n\n recognition_config = cloud_speech.RecognitionConfig(\n auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n language_codes=[\"en-US\"],\n model=\"long\",\n )\n\n # Sets the flag to enable voice activity events and timeout\n speech_start_timeout = duration_pb2.Duration(seconds=speech_start_timeout)\n speech_end_timeout = duration_pb2.Duration(seconds=speech_end_timeout)\n voice_activity_timeout = (\n cloud_speech.StreamingRecognitionFeatures.VoiceActivityTimeout(\n speech_start_timeout=speech_start_timeout,\n speech_end_timeout=speech_end_timeout,\n )\n )\n streaming_features = cloud_speech.StreamingRecognitionFeatures(\n enable_voice_activity_events=True, voice_activity_timeout=voice_activity_timeout\n )\n\n streaming_config = cloud_speech.StreamingRecognitionConfig(\n config=recognition_config, streaming_features=streaming_features\n )\n\n config_request = cloud_speech.StreamingRecognizeRequest(\n recognizer=f\"projects/{project_id}/locations/global/recognizers/_\",\n streaming_config=streaming_config,\n )\n\n def requests(config: cloud_speech.RecognitionConfig, audio: list) -> list:\n yield config\n for message in audio:\n sleep(0.5)\n yield message\n\n # Transcribes the audio into text\n responses_iterator = client.streaming_recognize(\n requests=requests(config_request, audio_requests)\n )\n\n responses = []\n for response in responses_iterator:\n responses.append(response)\n if (\n response.speech_event_type\n == cloud_speech.StreamingRecognizeResponse.SpeechEventType.SPEECH_ACTIVITY_BEGIN\n ):\n print(\"Speech started.\")\n if (\n response.speech_event_type\n == cloud_speech.StreamingRecognizeResponse.SpeechEventType.SPEECH_ACTIVITY_END\n ):\n print(\"Speech ended.\")\n for result in response.results:\n print(f\"Transcript: {result.alternatives[0].transcript}\")\n\n return responses",
"def listen2(self):\n with self.m as source:\n self.r.adjust_for_ambient_noise(source, duration=0.5)\n audio = self.r.listen(source)\n flac_data = audio.get_flac_data(\n convert_rate=None if audio.sample_rate >= 16000 else 16000,\n # audio samples should be at least 16 kHz\n convert_width=None if audio.sample_width >= 2 else 2 # audio samples should be at least 16-bit\n )\n\n try:\n print(json.dumps(self.speech_to_text.recognize(flac_data, content_type='audio/flac',\n customization_id=\"3a2e04c0-5346-11e7-aeaf-57afcb850a3a\",\n model=None), indent=4))\n except sr.UnknownValueError:\n print(str(-1))\n except sr.RequestError:\n print(str(404))",
"def capture_voice(bot, update):\n user_id = update.message.from_user.id\n audio_file = update.message.voice.get_file() # telegram.File object\n\n with open(handle_path(user_id), \"wb\") as f:\n logger.warning(audio_file.download(out=f))",
"def recognize_speech_from_file(recognizer, path, start, duration, recognize):\n # check that recognizer and microphone arguments are appropriate type\n if not isinstance(recognizer, sr.Recognizer):\n raise TypeError(\"`recognizer` must be `Recognizer` instance\")\n\n # adjust the recognizer sensitivity to ambient noise and record audio\n # from the microphone\n with sr.AudioFile(path) as source:\n print(\"before adjust, the energy=\", recognizer.energy_threshold)\n recognizer.adjust_for_ambient_noise(source)\n duration = duration if duration > 0 else None\n audio = recognizer.record(source, offset=start, duration=duration)\n print(\"after adjust, the energy=\", recognizer.energy_threshold)\n\n\n # set up the response object\n response = {\n \"success\": True,\n \"error\": None,\n \"transcription\": None\n }\n\n try:\n response[\"transcription\"] = recognize(audio)\n except sr.RequestError:\n # API was unreachable or unresponsive\n response[\"success\"] = False\n response[\"error\"] = \"API unavailable\"\n except sr.UnknownValueError:\n # speech was unintelligible\n response[\"error\"] = \"Unable to recognize speech\"\n\n return response"
] |
[
"0.78198",
"0.695255",
"0.6603663",
"0.6244179",
"0.60823435",
"0.6044113",
"0.5958468",
"0.5925653",
"0.5858983",
"0.58178073",
"0.5726882",
"0.5590196",
"0.5583141",
"0.5570892",
"0.55702007",
"0.5562698",
"0.55581576",
"0.5537515",
"0.55369747",
"0.5535112",
"0.5524328",
"0.55229324",
"0.55225664",
"0.54434216",
"0.54342103",
"0.54100996",
"0.5397489",
"0.53725684",
"0.53721464",
"0.5367369"
] |
0.7353236
|
1
|
Create a VirtualNetworkAppliance resource with the given unique name, props, and options.
|
def __init__(__self__,
resource_name: str,
args: VirtualNetworkApplianceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
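
A call-shape sketch only, based on the signature above and on the `name` / `virtual_hub_id` properties visible in the accompanying `get` overload; the provider import path and the hub reference are assumptions left as placeholders, so this is not runnable as-is.

import pulumi
# from <provider module> import VirtualNetworkAppliance, VirtualNetworkApplianceArgs  # actual module not shown in this record

appliance = VirtualNetworkAppliance(
    "example-appliance",                 # unique Pulumi resource name
    args=VirtualNetworkApplianceArgs(
        name="example-appliance",        # assumed prop, mirrors the `get` overload
        virtual_hub_id=example_hub.id,   # assumed reference to an existing virtual hub resource
    ),
    opts=pulumi.ResourceOptions(protect=False))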
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n virtual_hub_id: Optional[pulumi.Input[str]] = None) -> 'VirtualNetworkAppliance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VirtualNetworkApplianceState.__new__(_VirtualNetworkApplianceState)\n\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"virtual_hub_id\"] = virtual_hub_id\n return VirtualNetworkAppliance(resource_name, opts=opts, __props__=__props__)",
"def create():\n\n # remember what is created or not\n vpc = False\n igw = False\n sg = False\n sub = False\n vm = False\n\n vpc = _create_resource('vpc', CidrBlock=args.cidr, InstanceTenancy='default')\n igw = _create_resource('igw')\n\n if vpc and igw:\n _attach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n else:\n print('Cannot attach an igw to a vpc as at least one of them could not be created.')\n\n if vpc:\n sg = _create_resource(\n 'sg',\n GroupName=args.role,\n Description='SG for ' + args.role,\n VpcId=getattr(_existing.vpc, 'id', None)\n )\n else:\n print('Cannot create a sg as the vpc to attach it to could not be created.')\n\n if sg:\n _add_ingress_rules()\n else:\n print('Cannot create ingress rule as the sg could not be created.')\n\n if vpc:\n sub = _create_resource(\n 'sub',\n VpcId=getattr(_existing.vpc, 'id', None),\n CidrBlock=args.cidr\n )\n else:\n print('Cannot create a subnet as the vpc to attach it to could not be created.')\n\n if vpc and sub:\n _link_route_table()\n else:\n print('Cannot link subnet and VPC in the route table as vpc or sub not created.')\n\n if sub and sg:\n vm = _create_resource(\n 'vm',\n ImageId=args.ami,\n MinCount=1,\n MaxCount=1,\n KeyName=args.keypair,\n InstanceType=args.instance,\n # Note that there will be no internal name.\n # To get one, create first a DHCP options set and associate it with the VPC.\n NetworkInterfaces=[{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0, # needs to be 0 to get a public IP\n 'SubnetId': getattr(_existing.sub, 'id', None),\n 'Groups': [getattr(_existing.sg, 'id', None)],\n }],\n )\n else:\n print('Cannot create an instance as the sub or sg to use could not be created.')\n\n if vm:\n if not dry:\n print('Waiting for the instance to be up and running, usually done in less than 45 seconds...')\n _existing.vm.wait_until_running()\n _tag_volume()\n print('you can reach your VM at ' + _existing.vm.public_ip_address)\n\n else:\n print('VM not created for some reason.')",
"def __init__(self, version=None):\n super(VirtualNetworkApplianceService, self).__init__(\n service_type='virtual-network-appliance',\n version=version\n )",
"def create(ctx, iface, resource_config, **_):\n resource_id = \\\n utils.get_resource_id(\n ctx.node,\n ctx.instance,\n resource_config.get(VPN_CONNECTION_ID),\n use_instance_id=True\n )\n utils.update_resource_id(ctx.instance, resource_id)\n # Actually create the resource\n create_response = iface.create(resource_config)\n ctx.instance.runtime_properties['create_response'] = \\\n utils.JsonCleanuper(create_response).to_dict()\n ctx.instance.runtime_properties['VPN_CONNECTION_ID'] = \\\n resource_config.get(VPN_CONNECTION_ID)\n ctx.instance.runtime_properties['DESTINATION_CIDR_BLOCK'] = \\\n resource_config.get(DESTINATION_CIDR_BLOCK)",
"def _create_vlan(self, conn, vlan_id, vlan_name):\n\n req_js = {}\n req_js['vlan_id'] = vlan_id\n req_js['vlan_name'] = vlan_name\n req_js['admin_state'] = 'up'\n\n resp = conn.post(self.VLAN_REST_OBJ, req_js)\n self._check_process_resp(resp)",
"def create_vm(args):\n if not args.disk and not args.pool:\n print(\"Either --disk or --pool option must be specified\", file=sys.stderr)\n return 1\n\n if args.disk and args.pool:\n print(\"--disk and --pool options are exclusive\", file=sys.stderr)\n return 1\n if args.pool and not args.disk_size:\n print(\"You must specify a disk size\", file=sys.stderr)\n return 1\n\n if args.net and args.virtual_network:\n print(\"--net and --virtual_network option are exclusive\", file=sys.stderr)\n return 1\n\n # insure unicity in networking options in BM case\n\n _all_net_names = set()\n if args.net:\n for n_name in args.net:\n if n_name not in _all_net_names:\n _all_net_names.add(n_name)\n else:\n print('Duplicate virtual network name [%s], ignore it', n_name)\n\n if '--network' in args.virt:\n sys.stderr.write(\"--network is not a supported option. Please retry without --network option.\\n\")\n return 1\n\n # sanity on extra arguments passed to virt-install(1)\n # some options do not create the guest but display information\n # this is wrongly interpreted as a succcess by underlying layers and we\n # may setup things by mistake\n _virt_install_extra = []\n for _a in args.virt:\n if _a not in ('--print-xml', '--version', '-h', '--help'):\n _virt_install_extra.append(_a)\n\n return oci_utils.kvm.virt.create(name=args.domain,\n root_disk=args.disk,\n pool=args.pool,\n disk_size=args.disk_size,\n network=list(_all_net_names),\n virtual_network=args.virtual_network,\n extra_args=_virt_install_extra)",
"def create(vm_):\n try:\n # Check for required profile parameters before sending any API calls.\n if (\n vm_[\"profile\"]\n and config.is_profile_configured(\n __opts__,\n (_get_active_provider_name() or \"profitbricks\"),\n vm_[\"profile\"],\n )\n is False\n ):\n return False\n except AttributeError:\n pass\n\n if \"image_alias\" in vm_ and not version_compatible(\"4.0\"):\n raise SaltCloudNotFound(\n \"The 'image_alias' parameter requires the profitbricks \"\n \"SDK v4.0.0 or greater.\"\n )\n\n if \"image\" not in vm_ and \"image_alias\" not in vm_:\n log.error(\"The image or image_alias parameter is required.\")\n\n signal_event(vm_, \"creating\", \"starting create\")\n\n data = None\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n\n # Assemble list of network interfaces from the cloud profile config.\n nics = _get_nics(vm_)\n\n # Assemble list of volumes from the cloud profile config.\n volumes = [_get_system_volume(vm_)]\n if \"volumes\" in vm_:\n volumes.extend(_get_data_volumes(vm_))\n\n # Assembla the composite server object.\n server = _get_server(vm_, volumes, nics)\n\n signal_event(vm_, \"requesting\", \"requesting instance\")\n\n try:\n data = conn.create_server(datacenter_id=datacenter_id, server=server)\n log.info(\n \"Create server request ID: %s\",\n data[\"requestId\"],\n exc_info_on_loglevel=logging.DEBUG,\n )\n\n _wait_for_completion(conn, data, get_wait_timeout(vm_), \"create_server\")\n except PBError as exc:\n log.error(\n \"Error creating %s on ProfitBricks\\n\\n\"\n \"The following exception was thrown by the profitbricks library \"\n \"when trying to run the initial deployment: \\n%s\",\n vm_[\"name\"],\n exc,\n exc_info_on_loglevel=logging.DEBUG,\n )\n return False\n except Exception as exc: # pylint: disable=W0703\n log.error(\n \"Error creating %s \\n\\nError: \\n%s\",\n vm_[\"name\"],\n exc,\n exc_info_on_loglevel=logging.DEBUG,\n )\n return False\n\n vm_[\"server_id\"] = data[\"id\"]\n\n def __query_node_data(vm_, data):\n \"\"\"\n Query node data until node becomes available.\n \"\"\"\n running = False\n try:\n data = show_instance(vm_[\"name\"], \"action\")\n if not data:\n return False\n log.debug(\n \"Loaded node data for %s:\\nname: %s\\nstate: %s\",\n vm_[\"name\"],\n pprint.pformat(data[\"name\"]),\n data[\"state\"],\n )\n except Exception as err: # pylint: disable=broad-except\n log.error(\n \"Failed to get nodes list: %s\",\n err,\n # Show the trackback if the debug logging level is enabled\n exc_info_on_loglevel=logging.DEBUG,\n )\n # Trigger a failure in the wait for IP function\n return False\n\n running = data[\"state\"] == \"RUNNING\"\n if not running:\n # Still not running, trigger another iteration\n return\n\n if ssh_interface(vm_) == \"private_lan\" and data[\"private_ips\"]:\n vm_[\"ssh_host\"] = data[\"private_ips\"][0]\n\n if ssh_interface(vm_) != \"private_lan\" and data[\"public_ips\"]:\n vm_[\"ssh_host\"] = data[\"public_ips\"][0]\n\n return data\n\n try:\n data = salt.utils.cloud.wait_for_ip(\n __query_node_data,\n update_args=(vm_, data),\n timeout=config.get_cloud_config_value(\n \"wait_for_ip_timeout\", vm_, __opts__, default=10 * 60\n ),\n interval=config.get_cloud_config_value(\n \"wait_for_ip_interval\", vm_, __opts__, default=10\n ),\n )\n except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:\n try:\n # It might be already up, let's destroy it!\n destroy(vm_[\"name\"])\n except SaltCloudSystemExit:\n pass\n finally:\n raise SaltCloudSystemExit(str(exc.message))\n\n log.debug(\"VM is now running\")\n 
log.info(\"Created Cloud VM %s\", vm_)\n log.debug(\"%s VM creation details:\\n%s\", vm_, pprint.pformat(data))\n\n signal_event(vm_, \"created\", \"created instance\")\n\n if \"ssh_host\" in vm_:\n vm_[\"key_filename\"] = get_key_filename(vm_)\n ret = __utils__[\"cloud.bootstrap\"](vm_, __opts__)\n ret.update(data)\n return ret\n else:\n raise SaltCloudSystemExit(\"A valid IP address was not found.\")",
"def test_create_vip_with_mandatory_params(self):\r\n resource = 'vip'\r\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\r\n pool_id = 'my-pool-id'\r\n name = 'my-name'\r\n subnet_id = 'subnet-id'\r\n protocol_port = '1000'\r\n protocol = 'TCP'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--name', name,\r\n '--protocol-port', protocol_port,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n pool_id]\r\n position_names = ['pool_id', 'name', 'protocol_port', 'protocol',\r\n 'subnet_id', 'tenant_id']\r\n position_values = [pool_id, name, protocol_port, protocol,\r\n subnet_id, tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True)",
"def test_create_vip_with_mandatory_params(self):\n resource = 'vip'\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\n pool_id = 'my-pool-id'\n name = 'my-name'\n subnet_id = 'subnet-id'\n protocol_port = '1000'\n protocol = 'TCP'\n tenant_id = 'my-tenant'\n my_id = 'my-id'\n args = ['--name', name,\n '--protocol-port', protocol_port,\n '--protocol', protocol,\n '--subnet-id', subnet_id,\n '--tenant-id', tenant_id,\n pool_id]\n position_names = ['pool_id', 'name', 'protocol_port', 'protocol',\n 'subnet_id', 'tenant_id']\n position_values = [pool_id, name, protocol_port, protocol,\n subnet_id, tenant_id]\n self._test_create_resource(resource, cmd, name, my_id, args,\n position_names, position_values,\n admin_state_up=True)",
"def post_service_appliance_create(self, resource_dict):\n pass",
"def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is require if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print \"response: %s\" % result[0].response.status\n print \"response: %s\" % result[0].response.reason\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)",
"def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVirtNet_Create'))",
"def _create_network_vm(args):\n #\n # maximum length of network name is 14 chars, longer names will result in\n # a failure 'numerical result out of range' when creating the bridge.\n if len(args.network_name) > 14:\n _logger.error('Network name %s to long, max is 14 characters.', args.network_name)\n return 1\n # check network name unicity\n conn = libvirt.openReadOnly(None)\n _vnets = []\n if conn:\n _vnets = [n.name() for n in conn.listAllNetworks() if n.name() == args.network_name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n if len(_vnets) != 0:\n print(\"Network with name [%s] already exists\" % args.network_name, file=sys.stderr)\n return 1\n\n return oci_utils.kvm.virt.create_virtual_network(network=args.net,\n network_name=args.network_name,\n ip_bridge=args.ip_bridge,\n ip_prefix=args.ip_prefix,\n ip_start=args.ip_start,\n ip_end=args.ip_end)",
"def pre_virtual_network_create(self, resource_dict):\n pass",
"def create(cls, client_object, backing=None):\n vm = client_object.parent.vm\n pylogger.debug(\"Calling create on %s with network_label %s\" % (vm.name,\n backing.name))\n bridge = backing.get_bridge()\n return vm.create_vif(bridge)",
"def post_virtual_network_create(self, resource_dict):\n pass",
"def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass",
"def create(vmname):\n\n imgpath = os.path.join(base_disk_path, vmname + '.img')\n shutil.copyfile(base_vm_img, imgpath)\n config = _vm_conf_template.format(**locals())\n vm = _conn.defineXML(config)\n xml = ET.fromstring(vm.XMLDesc(0))\n mac = xml.find('devices').find('interface').find('mac').attrib['address']\n infokeeper.add_vm(vmname, mac)\n return 'VM %s created' % vmname",
"def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)",
"def create(profile, name, application, cname=None, version=None,\n tier=\"web\", key_pair=None, instance_type=\"t1.micro\",\n instance_profile=None, service_role=None,\n healthcheck_url=None, security_groups=None,\n max_instances=1, min_instances=1, tags=None,\n vpc_id=None, subnets=None, db_subnets=None,\n elb_subnets=None, elb_scheme=None,\n public_ip=None, root_volume_size=None):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"ApplicationName\"] = application\n params[\"EnvironmentName\"] = name\n if cname:\n params[\"CNAMEPrefix\"] = cname\n if version:\n params[\"VersionLabel\"] = version\n stack = utils.get_multicontainer_docker_solution_stack(profile)\n params[\"SolutionStackName\"] = stack \n if tier == \"web\":\n tier_definition = {\n \"Name\": \"WebServer\",\n \"Type\": \"Standard\",\n \"Version\": \"1.0\",\n }\n elif tier == \"worker\":\n tier_definition = {\n \"Name\": \"Worker\",\n \"Type\": \"SQS/HTTP\",\n \"Version\": \"1.0\",\n }\n else:\n raise Exception(\"tier must be 'web' or 'worker'\")\n params[\"Tier\"] = tier_definition\n if tags:\n params[\"Tags\"] = tags\n options = []\n if key_pair:\n key_pair_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"EC2KeyName\",\n \"Value\": key_pair,\n }\n options.append(key_pair_option)\n if instance_type:\n instance_type_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"InstanceType\",\n \"Value\": instance_type,\n }\n options.append(instance_type_option)\n if instance_profile:\n profile_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"IamInstanceProfile\",\n \"Value\": instance_profile,\n }\n options.append(profile_option)\n if service_role:\n role_option = {\n \"Namespace\": \"aws:elasticbeanstalk:environment\",\n \"OptionName\": \"ServiceRole\",\n \"Value\": service_role,\n }\n options.append(role_option)\n if healthcheck_url:\n healthcheck_url_option = {\n \"Namespace\": \"aws:elasticbeanstalk:application\",\n \"OptionName\": \"Application Healthcheck URL\",\n \"Value\": healthcheck_url,\n }\n options.append(healthcheck_url_option)\n if security_groups:\n security_groups_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"SecurityGroups\",\n \"Value\": \",\".join(security_groups),\n }\n options.append(security_groups_option)\n if min_instances:\n min_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MinSize\",\n \"Value\": str(min_instances),\n }\n options.append(min_instances_option)\n if max_instances:\n max_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MaxSize\",\n \"Value\": str(max_instances),\n }\n options.append(max_instances_option)\n if vpc_id:\n vpc_id_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"VPCId\",\n \"Value\": vpc_id,\n }\n options.append(vpc_id_option)\n if subnets:\n subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"Subnets\",\n \"Value\": \",\".join(subnets),\n }\n options.append(subnets_option)\n if db_subnets:\n db_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"DBSubnets\",\n \"Value\": \",\".join(db_subnets),\n }\n options.append(db_subnets_option)\n if elb_subnets:\n elb_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBSubnets\",\n \"Value\": \",\".join(elb_subnets),\n }\n options.append(elb_subnets_option)\n if elb_scheme:\n elb_scheme_option = {\n 
\"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBScheme\",\n \"Value\": elb_scheme,\n }\n options.append(elb_scheme_option)\n if public_ip:\n public_ip_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"AssociatePublicIpAddress\",\n \"Value\": str(public_ip),\n }\n options.append(public_ip_option)\n if root_volume_size:\n root_volume_size_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"RootVolumeSize\",\n \"Value\": str(root_volume_size),\n }\n options.append(root_volume_size_option)\n if options:\n params[\"OptionSettings\"] = options\n return client.create_environment(**params)",
"def add_resource(self, name, controller, ipaddress, ram, cpus, storage, owner=None, flavor='', compute=None, huge_pages=False):\n if compute is None: compute = controller\n args = { 'vm': name,\n 'controller': controller,\n 'ipaddress': ipaddress,\n 'ram': ram,\n 'cpus': cpus,\n 'storage': storage,\n 'flavor': flavor,\n 'compute': compute,\n 'hugepages': huge_pages,\n }\n if owner is not None:\n args['owner'] = owner\n self._NDL_API('addresource', args, None)",
"def pre_service_appliance_create(self, resource_dict):\n pass",
"def vlan_create(handle, name, vlan_id, sharing=\"none\",\r\n mcast_policy_name=\"\", compression_type=\"included\",\r\n default_net=\"no\", pub_nw_name=\"\", parent_dn=\"fabric/lan\"):\r\n from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan\r\n\r\n obj = handle.query_dn(parent_dn)\r\n if obj:\r\n vlan = FabricVlan(parent_mo_or_dn=obj,\r\n sharing=sharing,\r\n name=name,\r\n id=vlan_id,\r\n mcast_policy_name=mcast_policy_name,\r\n policy_owner=\"local\",\r\n default_net=default_net,\r\n pub_nw_name=pub_nw_name,\r\n compression_type=compression_type)\r\n\r\n handle.add_mo(vlan, modify_present=True)\r\n handle.commit()\r\n else:\r\n log.info(parent_dn + \" MO is not available\")",
"def __init__(__self__,\n resource_name: str,\n args: VirtualNetworkPeeringArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def create_or_update(\n self, resource_group_name, appliance_name, parameters, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Solutions/appliances/{applianceName}'\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'),\n 'applianceName': self._serialize.url(\"appliance_name\", appliance_name, 'str', max_length=64, min_length=3),\n 'subscriptionId': self._serialize.url(\"self.config.subscription_id\", self.config.subscription_id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct body\n body_content = self._serialize.body(parameters, 'Appliance')\n\n # Construct and send request\n def long_running_send():\n\n request = self._client.put(url, query_parameters)\n return self._client.send(\n request, header_parameters, body_content, **operation_config)\n\n def get_long_running_status(status_link, headers=None):\n\n request = self._client.get(status_link)\n if headers:\n request.headers.update(headers)\n return self._client.send(\n request, header_parameters, **operation_config)\n\n def get_long_running_output(response):\n\n if response.status_code not in [200, 201]:\n raise models.ErrorResponseException(self._deserialize, response)\n\n deserialized = None\n\n if response.status_code == 200:\n deserialized = self._deserialize('Appliance', response)\n if response.status_code == 201:\n deserialized = self._deserialize('Appliance', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized\n\n if raw:\n response = long_running_send()\n return get_long_running_output(response)\n\n long_running_operation_timeout = operation_config.get(\n 'long_running_operation_timeout',\n self.config.long_running_operation_timeout)\n return AzureOperationPoller(\n long_running_send, get_long_running_output,\n get_long_running_status, long_running_operation_timeout)",
"def create(self, *args, **kwargs):\n\n if not args and not kwargs:\n raise Exception('attributes for Voucher are missing')\n\n initial_attributes = args[0] if args else kwargs\n attributes = dict((k, v) for k, v in initial_attributes.items())\n attributes.update({'service': self.SERVICE})\n _, _, voucher = self.http_client.post(\"/vouchers\", body=attributes)\n return voucher",
"def create_fleet(Name=None, ImageName=None, ImageArn=None, InstanceType=None, FleetType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None, DomainJoinInfo=None):\n pass",
"def create_vip(self, context, vip, netinfo):\n LOG.info(_(\"Agent received create_vip\"))\n self.driver.create_vip(vip, netinfo)",
"def make_tenant_vlan(name, ip, vid, interface):\n\n script = '\\n'.join([\n 'name={}',\n 'ip={}',\n 'vid={}',\n 'interface={}',\n '',\n '#',\n '# Binding br_ext to $interface',\n '#',\n 'sudo brctl addbr br_ext',\n 'sudo ip link set dev br_ext up',\n 'sudo brctl addif br_ext $interface',\n '',\n '#',\n '# Creating a namespace with $name with $ip',\n '# ',\n '',\n 'sudo ip netns add $name',\n 'sudo brctl addbr br_$name',\n 'sudo ip link set dev br_$name up',\n 'sudo ip link add veth0 type veth peer name veth0_$name ',\n 'sudo ip link set veth0 netns $name',\n 'sudo ip netns exec $name ip link set dev veth0 up',\n 'sudo ip netns exec $name ifconfig veth0 $ip netmask 255.255.255.0 up',\n 'sudo ip link set dev veth0_$name up',\n '',\n '#',\n '# Binding VID $vid to br_$name',\n '# Binding veth0_$name to br_$name',\n '#',\n 'sudo ip link add link br_ext br_ext.$vid type vlan id $vid',\n 'sudo ip link set dev br_ext.$vid up',\n 'sudo brctl addif br_$name veth0_$name',\n 'sudo brctl addif br_$name br_ext.$vid',\n ]).format(name, ip, vid, interface)\n return run_script(script)",
"def test_create_vip_with_all_params(self):\r\n resource = 'vip'\r\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\r\n pool_id = 'my-pool-id'\r\n name = 'my-name'\r\n description = 'my-desc'\r\n address = '10.0.0.2'\r\n admin_state = False\r\n connection_limit = '1000'\r\n subnet_id = 'subnet-id'\r\n protocol_port = '80'\r\n protocol = 'TCP'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--name', name,\r\n '--description', description,\r\n '--address', address,\r\n '--admin-state-down',\r\n '--connection-limit', connection_limit,\r\n '--protocol-port', protocol_port,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n pool_id]\r\n position_names = ['pool_id', 'name', 'description', 'address',\r\n 'admin_state_up', 'connection_limit',\r\n 'protocol_port', 'protocol', 'subnet_id',\r\n 'tenant_id']\r\n position_values = [pool_id, name, description, address,\r\n admin_state, connection_limit, protocol_port,\r\n protocol, subnet_id,\r\n tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)"
] |
[
"0.6483709",
"0.637213",
"0.6368308",
"0.60333127",
"0.5933028",
"0.5920531",
"0.589828",
"0.57977635",
"0.5757888",
"0.5748333",
"0.5737162",
"0.5731979",
"0.5704052",
"0.56233805",
"0.56227636",
"0.5603627",
"0.55801874",
"0.555959",
"0.55522823",
"0.55382586",
"0.55228966",
"0.55026746",
"0.54896605",
"0.5485979",
"0.54503196",
"0.54455185",
"0.54217523",
"0.5421192",
"0.53761375",
"0.53492546"
] |
0.71253884
|
0
|
Get an existing VirtualNetworkAppliance resource's state with the given name, id, and optional extra properties used to qualify the lookup.
|
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        name: Optional[pulumi.Input[str]] = None,
        virtual_hub_id: Optional[pulumi.Input[str]] = None) -> 'VirtualNetworkAppliance':
    # Bind the lookup to the concrete cloud resource ID being adopted.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Seed the state object with any caller-supplied qualifiers, then
    # return a resource instance backed by that looked-up state.
    __props__ = _VirtualNetworkApplianceState.__new__(_VirtualNetworkApplianceState)
    __props__.__dict__["name"] = name
    __props__.__dict__["virtual_hub_id"] = virtual_hub_id
    return VirtualNetworkAppliance(resource_name, opts=opts, __props__=__props__)
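As a rough usage sketch (not part of the dataset row above): assuming this `get` method belongs to the `VirtualNetworkAppliance` class shown here and that `pulumi` is available, an existing appliance could be adopted by its cloud ID roughly as follows; the resource ID and names are hypothetical placeholders.

import pulumi

# Hypothetical ID of an appliance that already exists; a real value would
# come from the portal, the CLI, or another stack's outputs.
existing_id = (
    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/"
    "example-rg/providers/Microsoft.Network/networkVirtualAppliances/example-nva"
)

# Look the resource up by name and ID; `name` and `virtual_hub_id` are
# optional qualifiers for the lookup, not changes to the resource.
appliance = VirtualNetworkAppliance.get(
    "imported-appliance",
    id=existing_id,
    name="example-nva",
)

# Export an output of the adopted resource (assuming it exposes `name`).
pulumi.export("appliance_name", appliance.name)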
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
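The `objective` block above is metadata naming which fields of the row form the contrastive triplet. A minimal, illustrative way to unpack such a row (field names taken from this dump; the `row` variable and the function name are hypothetical):

def iter_triplets(row):
    # The metadata's "objective" entry lists (anchor, positive, negatives)
    # field names; for this dataset it is ("query", "document", "negatives").
    for query_field, positive_field, negatives_field in row["metadata"]["objective"]["triplet"]:
        yield row[query_field], row[positive_field], row[negatives_field]

# e.g. anchor, positive, negatives = next(iter_triplets(row))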
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n created_at: Optional[pulumi.Input[int]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n entity_guid: Optional[pulumi.Input[str]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n updated_at: Optional[pulumi.Input[int]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None) -> 'InfraAlertCondition':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InfraAlertConditionState.__new__(_InfraAlertConditionState)\n\n __props__.__dict__[\"comparison\"] = comparison\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"critical\"] = critical\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"entity_guid\"] = entity_guid\n __props__.__dict__[\"event\"] = event\n __props__.__dict__[\"integration_provider\"] = integration_provider\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"policy_id\"] = policy_id\n __props__.__dict__[\"process_where\"] = process_where\n __props__.__dict__[\"runbook_url\"] = runbook_url\n __props__.__dict__[\"select\"] = select\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"updated_at\"] = updated_at\n __props__.__dict__[\"violation_close_timer\"] = violation_close_timer\n __props__.__dict__[\"warning\"] = warning\n __props__.__dict__[\"where\"] = where\n return InfraAlertCondition(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_space_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allow_forwarded_traffic: Optional[pulumi.Input[bool]] = None,\n allow_gateway_transit: Optional[pulumi.Input[bool]] = None,\n allow_virtual_network_access: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n remote_address_space_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n remote_virtual_network_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n use_remote_gateways: Optional[pulumi.Input[bool]] = None,\n virtual_network_id: Optional[pulumi.Input[str]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'VirtualNetworkPeering':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VirtualNetworkPeeringState.__new__(_VirtualNetworkPeeringState)\n\n __props__.__dict__[\"address_space_prefixes\"] = address_space_prefixes\n __props__.__dict__[\"allow_forwarded_traffic\"] = allow_forwarded_traffic\n __props__.__dict__[\"allow_gateway_transit\"] = allow_gateway_transit\n __props__.__dict__[\"allow_virtual_network_access\"] = allow_virtual_network_access\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"remote_address_space_prefixes\"] = remote_address_space_prefixes\n __props__.__dict__[\"remote_virtual_network_id\"] = remote_virtual_network_id\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"use_remote_gateways\"] = use_remote_gateways\n __props__.__dict__[\"virtual_network_id\"] = virtual_network_id\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return VirtualNetworkPeering(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def fusion_api_get_appliance_state(self, appliance):\n return self.appstate.get(appliance)",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_series\"] = instance_series\n __props__.__dict__[\"mysql_version\"] = mysql_version\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"specification\"] = specification\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name=name, check_mode=check_mode, force=force)\n id = ret_obj['data']\n\n if id == {}:\n warnings = [\"STS Chain {0} had no match, skipping retrieval.\".format(name)]\n return isamAppliance.create_return_object(warnings=warnings)\n else:\n return _get(isamAppliance, id)",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n custom_block_response_body: Optional[pulumi.Input[str]] = None,\n custom_block_response_status_code: Optional[pulumi.Input[int]] = None,\n custom_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallPolicyCustomRuleArgs']]]]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n frontend_endpoint_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallPolicyManagedRuleArgs']]]]] = None,\n mode: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n redirect_url: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'FirewallPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _FirewallPolicyState.__new__(_FirewallPolicyState)\n\n __props__.__dict__[\"custom_block_response_body\"] = custom_block_response_body\n __props__.__dict__[\"custom_block_response_status_code\"] = custom_block_response_status_code\n __props__.__dict__[\"custom_rules\"] = custom_rules\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"frontend_endpoint_ids\"] = frontend_endpoint_ids\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"managed_rules\"] = managed_rules\n __props__.__dict__[\"mode\"] = mode\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"redirect_url\"] = redirect_url\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"tags\"] = tags\n return FirewallPolicy(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)",
"def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Machine':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = MachineArgs.__new__(MachineArgs)\n\n __props__.__dict__[\"ad_fqdn\"] = None\n __props__.__dict__[\"agent_configuration\"] = None\n __props__.__dict__[\"agent_upgrade\"] = None\n __props__.__dict__[\"agent_version\"] = None\n __props__.__dict__[\"client_public_key\"] = None\n __props__.__dict__[\"cloud_metadata\"] = None\n __props__.__dict__[\"detected_properties\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"dns_fqdn\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"error_details\"] = None\n __props__.__dict__[\"extensions\"] = None\n __props__.__dict__[\"identity\"] = None\n __props__.__dict__[\"last_status_change\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"location_data\"] = None\n __props__.__dict__[\"machine_fqdn\"] = None\n __props__.__dict__[\"mssql_discovered\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"os_name\"] = None\n __props__.__dict__[\"os_profile\"] = None\n __props__.__dict__[\"os_sku\"] = None\n __props__.__dict__[\"os_type\"] = None\n __props__.__dict__[\"os_version\"] = None\n __props__.__dict__[\"parent_cluster_resource_id\"] = None\n __props__.__dict__[\"private_link_scope_resource_id\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resources\"] = None\n __props__.__dict__[\"service_statuses\"] = None\n __props__.__dict__[\"status\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"vm_id\"] = None\n __props__.__dict__[\"vm_uuid\"] = None\n return Machine(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"additional_info\"] = None\n __props__.__dict__[\"affinity\"] = None\n __props__.__dict__[\"availability_zone\"] = None\n __props__.__dict__[\"block_device_mappings\"] = None\n __props__.__dict__[\"cpu_options\"] = None\n __props__.__dict__[\"credit_specification\"] = None\n __props__.__dict__[\"disable_api_termination\"] = None\n __props__.__dict__[\"ebs_optimized\"] = None\n __props__.__dict__[\"elastic_gpu_specifications\"] = None\n __props__.__dict__[\"elastic_inference_accelerators\"] = None\n __props__.__dict__[\"enclave_options\"] = None\n __props__.__dict__[\"hibernation_options\"] = None\n __props__.__dict__[\"host_id\"] = None\n __props__.__dict__[\"host_resource_group_arn\"] = None\n __props__.__dict__[\"iam_instance_profile\"] = None\n __props__.__dict__[\"image_id\"] = None\n __props__.__dict__[\"instance_initiated_shutdown_behavior\"] = None\n __props__.__dict__[\"instance_type\"] = None\n __props__.__dict__[\"ipv6_address_count\"] = None\n __props__.__dict__[\"ipv6_addresses\"] = None\n __props__.__dict__[\"kernel_id\"] = None\n __props__.__dict__[\"key_name\"] = None\n __props__.__dict__[\"launch_template\"] = None\n __props__.__dict__[\"license_specifications\"] = None\n __props__.__dict__[\"monitoring\"] = None\n __props__.__dict__[\"network_interfaces\"] = None\n __props__.__dict__[\"placement_group_name\"] = None\n __props__.__dict__[\"private_dns_name\"] = None\n __props__.__dict__[\"private_dns_name_options\"] = None\n __props__.__dict__[\"private_ip\"] = None\n __props__.__dict__[\"private_ip_address\"] = None\n __props__.__dict__[\"propagate_tags_to_volume_on_creation\"] = None\n __props__.__dict__[\"public_dns_name\"] = None\n __props__.__dict__[\"public_ip\"] = None\n __props__.__dict__[\"ramdisk_id\"] = None\n __props__.__dict__[\"security_group_ids\"] = None\n __props__.__dict__[\"security_groups\"] = None\n __props__.__dict__[\"source_dest_check\"] = None\n __props__.__dict__[\"ssm_associations\"] = None\n __props__.__dict__[\"subnet_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"tenancy\"] = None\n __props__.__dict__[\"user_data\"] = None\n __props__.__dict__[\"volumes\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = 
period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n connection_string_prefix: Optional[pulumi.Input[str]] = None,\n db_instance_endpoint_description: Optional[pulumi.Input[str]] = None,\n db_instance_endpoint_id: Optional[pulumi.Input[str]] = None,\n db_instance_endpoint_type: Optional[pulumi.Input[str]] = None,\n db_instance_id: Optional[pulumi.Input[str]] = None,\n ip_type: Optional[pulumi.Input[str]] = None,\n node_items: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DbInstanceEndpointNodeItemArgs']]]]] = None,\n port: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None) -> 'DbInstanceEndpoint':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _DbInstanceEndpointState.__new__(_DbInstanceEndpointState)\n\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"connection_string_prefix\"] = connection_string_prefix\n __props__.__dict__[\"db_instance_endpoint_description\"] = db_instance_endpoint_description\n __props__.__dict__[\"db_instance_endpoint_id\"] = db_instance_endpoint_id\n __props__.__dict__[\"db_instance_endpoint_type\"] = db_instance_endpoint_type\n __props__.__dict__[\"db_instance_id\"] = db_instance_id\n __props__.__dict__[\"ip_type\"] = ip_type\n __props__.__dict__[\"node_items\"] = node_items\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n return DbInstanceEndpoint(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n business_status: Optional[pulumi.Input[str]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n expired_time: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n ipv6_gateway_id: Optional[pulumi.Input[str]] = None,\n ipv6_gateway_name: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n spec: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None) -> 'Ipv6Gateway':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _Ipv6GatewayState.__new__(_Ipv6GatewayState)\n\n __props__.__dict__[\"business_status\"] = business_status\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"expired_time\"] = expired_time\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"ipv6_gateway_id\"] = ipv6_gateway_id\n __props__.__dict__[\"ipv6_gateway_name\"] = ipv6_gateway_name\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"spec\"] = spec\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"vpc_id\"] = vpc_id\n return Ipv6Gateway(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_family: Optional[pulumi.Input[str]] = None,\n allocation_default_netmask_length: Optional[pulumi.Input[int]] = None,\n allocation_max_netmask_length: Optional[pulumi.Input[int]] = None,\n allocation_min_netmask_length: Optional[pulumi.Input[int]] = None,\n allocation_resource_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_import: Optional[pulumi.Input[bool]] = None,\n aws_service: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n ipam_scope_id: Optional[pulumi.Input[str]] = None,\n ipam_scope_type: Optional[pulumi.Input[str]] = None,\n locale: Optional[pulumi.Input[str]] = None,\n pool_depth: Optional[pulumi.Input[int]] = None,\n public_ip_source: Optional[pulumi.Input[str]] = None,\n publicly_advertisable: Optional[pulumi.Input[bool]] = None,\n source_ipam_pool_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'VpcIpamPool':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VpcIpamPoolState.__new__(_VpcIpamPoolState)\n\n __props__.__dict__[\"address_family\"] = address_family\n __props__.__dict__[\"allocation_default_netmask_length\"] = allocation_default_netmask_length\n __props__.__dict__[\"allocation_max_netmask_length\"] = allocation_max_netmask_length\n __props__.__dict__[\"allocation_min_netmask_length\"] = allocation_min_netmask_length\n __props__.__dict__[\"allocation_resource_tags\"] = allocation_resource_tags\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_import\"] = auto_import\n __props__.__dict__[\"aws_service\"] = aws_service\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"ipam_scope_id\"] = ipam_scope_id\n __props__.__dict__[\"ipam_scope_type\"] = ipam_scope_type\n __props__.__dict__[\"locale\"] = locale\n __props__.__dict__[\"pool_depth\"] = pool_depth\n __props__.__dict__[\"public_ip_source\"] = public_ip_source\n __props__.__dict__[\"publicly_advertisable\"] = publicly_advertisable\n __props__.__dict__[\"source_ipam_pool_id\"] = source_ipam_pool_id\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return VpcIpamPool(resource_name, opts=opts, __props__=__props__)"
] |
[
"0.6201526",
"0.57982093",
"0.5736015",
"0.5646393",
"0.5609894",
"0.5605176",
"0.5562416",
"0.5540888",
"0.5526297",
"0.5516919",
"0.54881024",
"0.54570943",
"0.54539675",
"0.5443733",
"0.5311846",
"0.53039557",
"0.52989674",
"0.5293997",
"0.5219828",
"0.5211493",
"0.51362145",
"0.51056397",
"0.5079961",
"0.50752574",
"0.50728357",
"0.5051168",
"0.5046867",
"0.5029153",
"0.5018956",
"0.50145906"
] |
0.75466305
|
0
|
Check if the pie chart is plotted correctly (using the default style).
|
def test_plot_pie_chart_default_style(self):
pie_plot = PiePlot(
wedge_sizes=[20, 40, 30, 10],
labels=["light-flavour jets", "c-jets", "b-jets", "tau-jets"],
)
plotname = "test_pie_chart_default_style.png"
pie_plot.savefig(f"{self.actual_plots_dir}/{plotname}")
# Uncomment line below to update expected image
# pie_plot.savefig(f"{self.expected_plots_dir}/{plotname}")
self.assertIsNone(
compare_images(
f"{self.actual_plots_dir}/{plotname}",
f"{self.expected_plots_dir}/{plotname}",
tol=1,
)
)
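For reference on the assertion above: `compare_images` from `matplotlib.testing.compare` returns `None` when the two image files agree within `tol` and an error string otherwise, which is why the test asserts on `None`. A minimal standalone sketch, with hypothetical file paths:

from matplotlib.testing.compare import compare_images

# Returns None on a match within `tol`, otherwise a string describing the
# RMS difference between the two images.
result = compare_images(
    "expected_plots/test_pie_chart_default_style.png",  # hypothetical path
    "actual_plots/test_pie_chart_default_style.png",    # hypothetical path
    tol=1,
)
assert result is None, result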
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_plot_pie_chart_custom_style(self):\n pie_plot = PiePlot(\n wedge_sizes=[20, 40, 30, 10],\n labels=[\"light-flavour jets\", \"c-jets\", \"b-jets\", \"tau-jets\"],\n draw_legend=True,\n colours=get_good_colours()[:4],\n # have a look at the possible kwargs for matplotlib.pyplot.pie here:\n # https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.pie.html\n mpl_pie_kwargs={\n \"explode\": [0, 0, 0, 0.1],\n \"shadow\": False,\n \"startangle\": 90,\n \"textprops\": {\"fontsize\": 10},\n \"radius\": 1,\n \"wedgeprops\": {\"width\": 0.4, \"edgecolor\": \"w\"},\n \"pctdistance\": 0.4,\n },\n # kwargs passed to puma.PlotObject\n atlas_second_tag=(\n \"Unit test plot to test if the custom\\nstyling of the pie plot\"\n ),\n figsize=(5.5, 3.5),\n y_scale=1.3,\n )\n plotname = \"test_pie_chart_custom_style.png\"\n pie_plot.savefig(f\"{self.actual_plots_dir}/{plotname}\")\n # Uncomment line below to update expected image\n pie_plot.savefig(f\"{self.expected_plots_dir}/{plotname}\")\n\n self.assertIsNone(\n compare_images(\n f\"{self.actual_plots_dir}/{plotname}\",\n f\"{self.expected_plots_dir}/{plotname}\",\n tol=1,\n )\n )",
"def show_pie_chart(self):\n\n chart_type_index = self.ui.comboBox_pie_charts.currentIndex()\n if chart_type_index < 1:\n return\n self.get_selected_categories_and_codes()\n if chart_type_index == 1: # Code frequency\n self.piechart_code_frequency()\n if chart_type_index == 2: # Code by characters\n self.piechart_code_volume_by_characters()\n if chart_type_index == 3: # Code by image area\n self.piechart_code_volume_by_area()\n if chart_type_index == 4: # Code by audio/video segments\n self.piechart_code_volume_by_segments()\n self.ui.comboBox_pie_charts.setCurrentIndex(0)",
"def pie_plot(data,ara,rd_f,cla_arr=string.ascii_lowercase):\n \n data=pd.Series(data)\n dataclass=pd.Series(value_to_class_label(ara,data,cla_arr))\n \n parti=data.groupby(dataclass).agg(rd_f)\n \n labels=parti.index\n parts = parti.tolist()\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','peru',\n 'teal','cornflowerblue','crimson','cadetblue','beige']\n\n plt.pie(parts, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140) \n plt.axis('equal')",
"def pie_plot(data,ara,rd_f,cla_arr=string.ascii_lowercase):\n\n data=pd.Series(data)\n dataclass=pd.Series(value_to_class_label(ara,data,cla_arr))\n\n parti=data.groupby(dataclass).agg(rd_f)\n\n labels=parti.index\n parts = parti.tolist()\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','peru',\n 'teal','cornflowerblue','crimson','cadetblue','beige']\n\n plt.pie(parts, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140)\n plt.axis('equal')",
"def test_make_pie_chart(self):\r\n\r\n filename1 = '/tmp/qiimewebfiles/charts/pie_chart.png'\r\n filename2 = '/tmp/qiimewebfiles/charts/pie_chart_legend.pdf'\r\n filename3 = '/tmp/qiimewebfiles/charts/pie_chart.pdf'\r\n\r\n obs1, obs2, obs3, obs4 = make_pie_chart(self.fracs, self.dir_path, 1,\r\n self.prefs, self.color_prefs, \"black\", \"white\",\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, False,\r\n file_prefix=\"pie_chart\",\r\n props=self.props)\r\n\r\n self.assertTrue(exists(filename1), 'The png file was not created in \\\r\nthe appropriate location')\r\n self.assertTrue(exists(filename2), 'The eps file was not created in \\\r\nthe appropriate location')\r\n self.assertTrue(exists(filename3), 'The pdf file was not created in \\\r\nthe appropriate location')\r\n self._paths_to_clean_up = [\"/tmp/qiimewebfiles/charts/\" + f\r\n for f in listdir(\"/tmp/qiimewebfiles/charts\")]",
"def matplotlib_pie_chart() -> Tuple:\n df = process_life_expectancy_dataset(\"classification\")\n num_cols = get_numeric_columns(df)\n bin_cols = get_binary_columns(df)\n text_cols = get_text_categorical_columns(df)\n\n x_arr = np.array([len(num_cols), len(bin_cols), len(text_cols)])\n # The plot only shows numeric columns because process_life_expectancy_dataset returned df only\n # contains numeric columns \n fig, ax = a_libraries.matplotlib_pie_chart(x_arr)\n\n return fig, ax",
"def has_charts(self):\n return self.__charts is not None",
"def generate_personnel_outcome_pie_chart(personnel_search_table_selected_indices, selected_funding, selected_year, rows):\n personnel_outcome_data, personnel_name = personnel_outcomes_helper(personnel_search_table_selected_indices,\n selected_funding, selected_year, rows)\n personnel_outcome_type_value_count = {}\n if personnel_outcome_data is not None:\n personnel_outcome_type_value_count = personnel_outcome_data.type.value_counts()\n personnel_outcome_type_value_count = personnel_outcome_type_value_count.to_dict()\n\n\n print(\"Inside PIE CHART\", personnel_outcome_type_value_count)\n print(\"Inside PIE CHART\", personnel_name)\n\n\n trace = go.Pie(\n labels=list(personnel_outcome_type_value_count.keys()),\n values=list(personnel_outcome_type_value_count.values()),\n name=personnel_name,\n hoverinfo='label+percent',\n textinfo='value',\n textfont=dict(size=20),\n # marker=dict(colors=colors, line=dict(color='#000000', width=2)),\n )\n\n return{\n \"data\": [trace],\n \"layout\": go.Layout(\n # autosize=False,\n title='Professor Publications Breakdown',\n showlegend=True,\n # legend=go.Legend(\n # x=0,\n # y=1.0\n # ),\n # bargap =0.5,\n # width = 100,\n # autosize=F,\n margin=go.layout.Margin(l=40, r=0, t=40, b=30),\n # paper_bgcolor='rgb(233,233,233)', # set paper (outside plot)\n plot_bgcolor='rgb(192,192,192)', # plot color to grey\n )\n }",
"def pie(self):\n return self._pie",
"def pie_chart(self, pct=True, day='today'):\n assert isinstance(pct, bool), 'Error! The pct parameter must be boolean.'\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n def func(pct, allvals):\n \"\"\"Funciton for the lambda function. Returns the formatted value of each ETF\"\"\"\n return str(format(round(pct/100.*np.sum(allvals), 2),\".2f\")) + \"€\"\n vals = {}\n for name, etf in self.etfs.items():\n if etf.get_value(day) != 0:\n if name.split('-')[0].split('.')[0] in vals:\n vals[name.split('-')[0].split('.')[0]] += etf.get_value(day)\n else:\n vals[name.split('-')[0].split('.')[0]] = etf.get_value(day)\n wgt_values = [round(v, 2) for v in vals.values()]\n explode = [0 if x != max(wgt_values) else 0.06 for x in wgt_values]\n fig = plt.figure(figsize=(4,2), dpi=200)\n ax = fig.add_subplot(111)\n fig.patch.set_facecolor('#ececec')\n if pct:\n _, texts, autotexts = ax.pie(wgt_values, explode=explode, labels=vals.keys(), autopct='%1.1f%%')\n plt.setp(autotexts, size=5)\n else:\n _, texts, autotexts = ax.pie(wgt_values, explode=explode, labels=vals.keys(), \n autopct=lambda pct: func(pct, wgt_values), pctdistance=0.7)\n plt.setp(autotexts, size=4)\n plt.setp(texts, size=7, family='monospace')\n ax.set_title('Portfolio\\'s Composition', size='large', color='red', weight='bold')\n return fig, ax",
"def setPieLabelStyle(mode='percent',position='internal',justify='center', digits=1, color=-1):\n mdict = {'none':'NONE','percent':'PERCENT','data':'DATA','both':'BOTH'}\n pdict = {'internal':'INTERNAL','external':'EXTERNAL','aligned':'ALIGNED'}\n jdict = {'center':'CENTER','left':'LEFT','right':'RIGHT',\n 'outwards':'OUTWARDS','inwards':'INWARDS'}\n dislin.labels(mdict[mode],'PIE')\n dislin.labpos(pdict[position],'PIE')\n dislin.labtyp(jdict[justify],'PIE')\n dislin.labdig(digits,'PIE')\n dislin.labclr(color,'PIE')",
"def _construct_pie(self):\n\n # count labels and instances\n label_count = count(self.labels)\n label_instances = [instance for instance in label_count.values()]\n colours = [\n rgb2hex(\n self.clusters[label]/255) for label in label_count.keys()]\n self.pie = plot.figure()\n plot.pie(\n x=label_instances, labels=label_count.keys(\n ), colors=colours, autopct=\"%.2f%%\")\n plot.tight_layout()\n plot.axis('equal')",
"def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score",
"def plot_pies(labels, data, categories):\n # Create the figure and traces\n fig = go.Figure()\n visibility, annotations = [], []\n traces = 0\n for var in labels:\n if var in categories:\n values = list(dict.fromkeys(categories[var].values()))\n else:\n values = list(dict.fromkeys(data[var]))\n values.sort()\n nvals = len(values)\n counts = []\n annotations.append([])\n for i in values: # get the counts for each category\n s = data[(data[var] == i) & (data[\"Score\"] == \"good\")].shape[0]\n counts.append([s, data[data[var] == i].shape[0]-s])\n for i in range(nvals): # plot them in nested pie charts\n fig.add_trace(go.Pie(hole=(0.3+i*0.7/nvals)/(0.3+(0.7*(i+1)/nvals)), sort=False,\n domain={'x': [0.35-(i+1)*0.35/nvals, 0.65+(i+1)*0.35/nvals], 'y': [0.35-(i+1)*0.35/nvals, 0.65+(i+1)*0.35/nvals]}, name=values[i], values=counts[i], textposition='inside',\n textfont_size=20, marker={'colors': ['mediumturquoise', 'darkorange'], 'line': {'color': '#000000', 'width': 2}},\n labels=['Good', 'Bad'], hoverinfo='label+name+percent', textinfo='percent'))\n annotations[-1].append(dict(text=values[i], x=0.577+(i+1)*0.184/nvals, y=0.5,\n arrowwidth=1.5, arrowhead=6, ax=100+20*i, ay=100+20*i))\n traces += 1\n visibility = [l+nvals*[False] for l in visibility]\n visibility.append((traces-nvals)*[False]+nvals*[True])\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility[i]},\n {'title': label, 'annotations': annotations[i]}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons)])\n # Setup layout\n fig['layout']['updatemenus'] = updatemenus\n fig['layout']['title'] = \"Distribution of the scores of categorical variables:\"\n iplot(fig, config={\"displayModeBar\": False})",
"def pieChart2():\n n = 4\n Z = np.ones(n)\n Z[-1] *= 2\n\n labels = 'A', 'B', 'C', 'D'\n plt.axes([0.025,0.025,0.95,0.95])\n\n plt.pie(Z, explode=Z*.05, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)\n plt.gca().set_aspect('equal')\n plt.xticks([]), plt.yticks([])\n\n # savefig('../figures/pie_ex.png',dpi=48)\n plt.show()",
"def _check_styles(self, dataset):\n # list of column ids to exclude from plotting\n exclude_cols = [\"sample_name\", \"patient_id\", \"title\"]\n\n # check column styles\n if dataset['styles']['columns']['color'] == []:\n if dataset['metadata']['columns'] != '':\n # load metadata\n mdat = load_data(dataset['metadata']['columns'])\n\n # exclude known uninformative columns\n cols_to_drop = [x for x in exclude_cols if x in mdat.columns]\n\n if len(cols_to_drop) > 0:\n mdat = mdat.drop(cols_to_drop, axis=1)\n\n # set default columns to use for plotting\n dataset['styles']['columns']['color'] = mdat.columns[mdat.nunique() > 1].tolist()\n\n # check row styles\n if dataset['styles']['rows']['color'] == []:\n if dataset['metadata']['rows'] != '':\n mdat = load_data(dataset['metadata']['rows'])\n dataset['styles']['rows']['color'] = mdat.columns[mdat.nunique() > 1].tolist()",
"def pieChart1():\n n = 20\n Z = np.ones(n)\n Z[-1] *= 2\n\n plt.axes([0.025,0.025,0.95,0.95])\n\n plt.pie(Z, explode=Z*.05, colors = ['%f' % (i/float(n)) for i in range(n)])\n plt.gca().set_aspect('equal')\n plt.xticks([]), plt.yticks([])\n\n # savefig('../figures/pie_ex.png',dpi=48)\n plt.show()",
"def is_plot(session_id, test_name):\n return Plot.is_plot(session_id, test_name)",
"def draw_pie(self, n, r):\n pass",
"def test_plt_status():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_status()\n\n assert bokeh_plot_type == type(result)",
"def cake() -> None:\n\n\tdef real_value(val):\n\t\t\"\"\"Return the real value instead of the %.\"\"\"\n\t\treturn round(val/100*sum(euros), 1)\n\n\tdata = [(name, players[name].cake, players[name].color)\n\t for name in get_people() if players[name].cake > 0]\n\tdata.sort(key=lambda x: x[1], reverse=True)\n\n\tnames = [el[0] for el in data]\n\teuros = [el[1] for el in data]\n\tcolors = [el[2] for el in data]\n\n\texplode = [0.04] * len(names)\n\texplode[0] = 0.07\n\n\t_, ax = plt.subplots(figsize=(6, 6))\n\tpatches, text, autotext = ax.pie(\n\t\t\teuros, labels=names, explode=explode, colors=colors,\n\t\t\tstartangle=120, radius=1.5, autopct=real_value)\n\n\t# Change the style of the plot\n\tfor patch in patches:\n\t\tpatch.set_linewidth(1.5)\n\t\tpatch.set_edgecolor('black')\n\tfor x in range(len(names)):\n\t\tif x == 0:\n\t\t\ttext[x].set_fontsize(30)\n\t\t\tautotext[x].set_fontsize(30)\n\t\telse:\n\t\t\ttext[x].set_fontsize(15)\n\t\t\tautotext[x].set_fontsize(15)\n\n\tinvisible = [(name, -1*players[name].cake) for name in get_people()\n\t\t\t\t if players[name].cake < 0]\n\tinvisible.sort(key=lambda x: x[1], reverse=True)\n\ttxt = 'Players with bonus:'\n\tfor pl, bn in invisible:\n\t\ttxt += f'\\n - {bn: .1f}, {pl}'\n\tplt.text(-.15, 0.99, txt, transform=ax.transAxes, fontsize=10)\n\n\tplt.savefig('cake.png', dpi=120, bbox_inches='tight')\n\tplt.gcf().clear()",
"def check_is_plottable(self, var):\n self.plot_button.disabled = False # important to enable button once disabled\n data = self.data[var[0]]\n self.plot_button.disabled = len(data.dims) <= 1",
"def plot_evaluation(values, info, measures = ['Dice','Jaccard', 'TPR', 'TNR', '1-GCE', 'VS', 'RI', 'ARI', 'MI', '1-VOI', 'ICC','1/(1+PBD)', 'KAP', 'AUC', '1/(1+HD)', '1/(1+AVD)', 'MHD' ], colourmap=None, outfile='polar_results.png'):\n _min = info['minimum']\n _max = info['maximum']\n if colourmap is None:\n colourmap = [[86./255.,180./255.,233./255.] for ii in range(values.shape[0])]\n else:\n # normalize colourmap values between 0 and 1\n colourmap = (colourmap-_min)/(_max-_min)\n # apply cividis, returns the RBG1 values for cividis, for dots\n colourmap = [[cm.cividis(ii)] for ii in colourmap] \n\n # elements of the circle\n N = len(measures)\n # evenly space measures around circle\n x_as = [n / float(N) * 2 * pi for n in range(N)] \n\n # Set color of axes\n plt.rc('axes', linewidth=0.5, edgecolor=\"#888888\")\n\n # Create polar plot\n fig = plt.figure(figsize = (11,9.5))\n gs = gridspec.GridSpec(1, 3, width_ratios=[17,2,1])\n ax = plt.subplot(gs[0], polar=True)\n \n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set yticks\n plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], [\"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"], fontsize=15)\n pos=ax.get_rlabel_position()\n ax.set_rlabel_position(pos+0.4*360./float(len(measures)))\n\n # Plot data\n for ii in np.arange(values.shape[0]):\n xx = np.asarray(x_as) + np.random.randn(len(x_as))*np.diff(x_as)[0]/15.\n data_norm = None\n if info['logplot']:\n data_norm = matplotlib.colors.LogNorm(vmin=_min, vmax=_max)\n sc = ax.scatter(xx, values[ii,:], 23, color=colourmap[ii]*len(xx), norm=data_norm, zorder=3) \n\n # Fill area\n # close the circle\n median = list(np.median(values, axis=0))\n median += median[:1]\n upper = list(np.percentile(values, 75, axis=0))\n upper += upper[:1]\n lower = list(np.percentile(values, 25, axis=0))\n lower += lower[:1]\n x_as += x_as[:1]\n ax.plot(x_as, median, color=[86./255.,180./255.,233./255.], zorder=5)\n ax.fill_between(x_as, upper, lower, zorder=4, color=[86./255.,180./255.,233./255.], alpha=0.3)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set axes limits\n plt.ylim(0, 1)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi-0.05\n text_size = 21\n if i in {3,8}:\n ax.text(angle_rad, 1.15, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {0}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {1,5,7}:\n ax.text(angle_rad, 1.29, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {4}:\n ax.text(angle_rad, 1.32, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"top\")\n elif i in {10}:\n ax.text(angle_rad, 1.26, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {6}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {9}:\n ax.text(angle_rad, 1.18, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', 
verticalalignment=\"center\")\n else:\n ax.text(angle_rad, 1.22, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n\n # colorbar location on figure\n cbaxes = plt.subplot(gs[2])\n\n # log scaling option\n norm = None\n if info['logplot']:\n norm = matplotlib.colors.LogNorm(vmin=_min,vmax=_max)\n\n img = plt.imshow(np.array([[_min,_max]]), aspect='auto', cmap=\"cividis\", norm=norm)\n img.set_visible(False)\n\n # initialize colorbar\n cbar = plt.colorbar(cax = cbaxes)\n\n # ticks and label\n c_values = cbar.get_ticks().tolist()\n \n ticklabels = [\"\" for ii in c_values]\n if _min < np.min(c_values):\n c_values = [_min] + c_values\n ticklabels = [\"%0.1f %s\" %(np.min(c_values), info['unit'])] + ticklabels\n else:\n ticklabels[0] = \"%0.1f %s\" %(np.min(c_values), info['unit'])\n\n if _max > np.max(c_values):\n c_values = c_values + [_max]\n ticklabels = ticklabels + [\"%0.1f %s\" %(np.max(c_values), info['unit'])]\n else:\n ticklabels[-1] = \"%0.1f %s\" %(np.max(c_values), info['unit'])\n \n cbar.set_ticks(c_values)\n cbar.set_ticklabels(ticklabels)\n cbaxes.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n cbar.ax.set_ylabel(info[\"label\"], labelpad=-20)\n \n # font sizes for colorbar\n cbar.ax.yaxis.label.set_size(19)\n cbar.ax.tick_params(labelsize=14)\n\n # Save and show polar plot \n plt.savefig(outfile)\n if info['display']:\n plt.show()\n plt.clf()\n plt.close('all')",
"def plot_graph_pie(filename,titulo,serie):\n validSerie = []\n labels =[]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral','cyan','green','pink']\n explode = [0.1,0.1,0.1,0.1,0.1,0.1,0.1]\n for cop in serie.keys():\n if cop != 'TODOS' :\n validSerie.append(cop)\n labels.append(cop)\n sizes = []\n for cop in validSerie:\n sizes.append(len(serie[cop]))\n\n plt.close('all')\n title(titulo)\n plt.pie(sizes,labels=labels,colors=colors,explode=explode,autopct='%1.1f%%')\n plt.axis('equal')\n plt.savefig(filename,dpi=96)",
"def pie(\n df,\n value=\"value\",\n category=\"variable\",\n legend=False,\n title=None,\n ax=None,\n cmap=None,\n **kwargs,\n):\n\n # cast to DataFrame if necessary\n # TODO: select only relevant meta columns\n if not isinstance(df, pd.DataFrame):\n df = df.as_pandas()\n\n for col in set(SORT_IDX) - set([category]):\n if len(df[col].unique()) > 1:\n msg = (\n \"Can not plot multiple {}s in a pie plot with value={} and category={}\"\n )\n raise ValueError(msg.format(col, value, category))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # get data, set negative values to explode\n _df = df.groupby(category)[value].sum()\n where = _df > 0\n explode = tuple(0 if _ else 0.2 for _ in where)\n _df = _df.abs()\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.index), colormap=cmap)[\n \"color\"\n ]\n rc = run_control()\n\n if \"colors\" in kwargs:\n colors = kwargs.pop(\"colors\")\n else:\n colors = []\n for key, c in zip(_df.index, defaults):\n if category in rc[\"color\"] and key in rc[\"color\"][category]:\n c = rc[\"color\"][category][key]\n colors.append(c)\n\n # plot data\n _df.plot(kind=\"pie\", colors=colors, ax=ax, explode=explode, **kwargs)\n\n # add legend and title\n ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5), labels=_df.index)\n if not legend:\n ax.legend_.remove()\n if title:\n ax.set_title(title)\n\n # remove label\n ax.set_ylabel(\"\")\n\n return ax",
"def goodPlots( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isHills() or pCurrent.isFlatlands():\n\t\t\tif not pCurrent.isImpassable():\n\t\t\t\tif not pCurrent.isUnit():\n\t\t\t\t\tif pCurrent.getTerrainType() not in [con.iDesert, con.iSemidesert, con.iTundra, con.iWetland]:\n\t\t\t\t\t\tif pCurrent.calculateCulturalOwner() == -1: # edead: bugfix\n\t\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)",
"def plottable(self):\n if self.model_or_sim.type == \"Simulation\":\n return False\n else:\n return True",
"def test_overplotting(self):\n arr = self.arr\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n out2 = ternary(arr, ax=out)\n self.assertTrue(out.tax is out2.tax) # hasn't added a new ternary axis",
"def etio_by_csa_no_pie(df):\n\n # TODO: see if there's a way to combine this information with Outcome by Etio\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6))\n f, axes = plt.subplots(4, 1, figsize=(6, 9))#, sharex=True)\n sns.despine(top=True, bottom=True)\n #f.suptitle(\"Etiology of Central Events, Grouped by %Central Events\")\n\n OSA_pure_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Mainly OSA\"], return_df=True).sort_values(\"Dx\")\n OSA_predom_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Combined OSA/CSA\"], return_df=True).sort_values(\"Dx\")\n CSA_predom_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Predominantly CSA\"], return_df=True).sort_values(\"Dx\")\n CSA_pure_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Pure CSA\"], return_df=True).sort_values(\"Dx\")\n\n # Create count plot for each #CSA on the left, then a Pie Chart with proportion on the right\n\n hatches = ['///', '|||', 'xxx', '\\\\\\\\\\\\', '', '+++']\n face_color = [ 'dimgray', 'silver', 'whitesmoke', 'grey', 'gainsboro', 'darkgrey']\n\n # Pure OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_pure_hist, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n #axes[3].set(xlabel=\"Patients With Each Etiology Contributing to Central Events\", ylabel=\"<10% Central Events\", yticklabels = [])\n axes[3].set(xlabel=\"Patients With Each Etiology Contributing to Central Events\", ylabel=\"\")\n\n # Predom OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_predom_hist, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"\")\n # axes[2].set(xlabel=\"\", ylabel=\"10-50% Central Events\", yticklabels=[])\n\n # Predom CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_predom_hist, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"\")\n # axes[1].set(xlabel=\"\", ylabel=\"50-90% Central Events\", yticklabels=[])\n\n # Pure CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_pure_hist, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\"\")\n # axes[0].set(xlabel=\"\", ylabel=\">90% Central Events\", yticklabels=[])\n\n # Combined X axis for L side\n axes[3].get_shared_x_axes().join(axes[3], axes[2], axes[1], axes[0])\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n # Leave bottom aka [3,0] labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1]) # .95 to leave space for title\n f.savefig('Etio by percentage CSA no pie.png', dpi=100)\n # plt.show()",
"def test_one(self):\n arr = self.arr[0, :]\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n self.assertEqual(\n type(out.tax), pyternary.ternary_axes_subplot.TernaryAxesSubplot\n )"
] |
[
"0.6810645",
"0.6215535",
"0.5943787",
"0.59015816",
"0.57677025",
"0.5628101",
"0.56067884",
"0.55829704",
"0.5552299",
"0.5546897",
"0.55407673",
"0.5468097",
"0.5450928",
"0.54284465",
"0.53893375",
"0.53854704",
"0.53848565",
"0.53733534",
"0.5364938",
"0.53586507",
"0.53188056",
"0.5316465",
"0.52810925",
"0.52312654",
"0.5217034",
"0.5204099",
"0.51521814",
"0.5145475",
"0.51338017",
"0.51107705"
] |
0.701249
|
0
|
check if pie chart is plotted correctly (using default style).
|
def test_plot_pie_chart_custom_style(self):
pie_plot = PiePlot(
wedge_sizes=[20, 40, 30, 10],
labels=["light-flavour jets", "c-jets", "b-jets", "tau-jets"],
draw_legend=True,
colours=get_good_colours()[:4],
# have a look at the possible kwargs for matplotlib.pyplot.pie here:
# https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.pie.html
mpl_pie_kwargs={
"explode": [0, 0, 0, 0.1],
"shadow": False,
"startangle": 90,
"textprops": {"fontsize": 10},
"radius": 1,
"wedgeprops": {"width": 0.4, "edgecolor": "w"},
"pctdistance": 0.4,
},
# kwargs passed to puma.PlotObject
atlas_second_tag=(
"Unit test plot to test if the custom\nstyling of the pie plot"
),
figsize=(5.5, 3.5),
y_scale=1.3,
)
plotname = "test_pie_chart_custom_style.png"
pie_plot.savefig(f"{self.actual_plots_dir}/{plotname}")
        # Uncomment line below to update expected image
        # pie_plot.savefig(f"{self.expected_plots_dir}/{plotname}")
self.assertIsNone(
compare_images(
f"{self.actual_plots_dir}/{plotname}",
f"{self.expected_plots_dir}/{plotname}",
tol=1,
)
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_plot_pie_chart_default_style(self):\n pie_plot = PiePlot(\n wedge_sizes=[20, 40, 30, 10],\n labels=[\"light-flavour jets\", \"c-jets\", \"b-jets\", \"tau-jets\"],\n )\n plotname = \"test_pie_chart_default_style.png\"\n pie_plot.savefig(f\"{self.actual_plots_dir}/{plotname}\")\n # Uncomment line below to update expected image\n # pie_plot.savefig(f\"{self.expected_plots_dir}/{plotname}\")\n\n self.assertIsNone(\n compare_images(\n f\"{self.actual_plots_dir}/{plotname}\",\n f\"{self.expected_plots_dir}/{plotname}\",\n tol=1,\n )\n )",
"def show_pie_chart(self):\n\n chart_type_index = self.ui.comboBox_pie_charts.currentIndex()\n if chart_type_index < 1:\n return\n self.get_selected_categories_and_codes()\n if chart_type_index == 1: # Code frequency\n self.piechart_code_frequency()\n if chart_type_index == 2: # Code by characters\n self.piechart_code_volume_by_characters()\n if chart_type_index == 3: # Code by image area\n self.piechart_code_volume_by_area()\n if chart_type_index == 4: # Code by audio/video segments\n self.piechart_code_volume_by_segments()\n self.ui.comboBox_pie_charts.setCurrentIndex(0)",
"def pie_plot(data,ara,rd_f,cla_arr=string.ascii_lowercase):\n \n data=pd.Series(data)\n dataclass=pd.Series(value_to_class_label(ara,data,cla_arr))\n \n parti=data.groupby(dataclass).agg(rd_f)\n \n labels=parti.index\n parts = parti.tolist()\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','peru',\n 'teal','cornflowerblue','crimson','cadetblue','beige']\n\n plt.pie(parts, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140) \n plt.axis('equal')",
"def pie_plot(data,ara,rd_f,cla_arr=string.ascii_lowercase):\n\n data=pd.Series(data)\n dataclass=pd.Series(value_to_class_label(ara,data,cla_arr))\n\n parti=data.groupby(dataclass).agg(rd_f)\n\n labels=parti.index\n parts = parti.tolist()\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','peru',\n 'teal','cornflowerblue','crimson','cadetblue','beige']\n\n plt.pie(parts, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140)\n plt.axis('equal')",
"def test_make_pie_chart(self):\r\n\r\n filename1 = '/tmp/qiimewebfiles/charts/pie_chart.png'\r\n filename2 = '/tmp/qiimewebfiles/charts/pie_chart_legend.pdf'\r\n filename3 = '/tmp/qiimewebfiles/charts/pie_chart.pdf'\r\n\r\n obs1, obs2, obs3, obs4 = make_pie_chart(self.fracs, self.dir_path, 1,\r\n self.prefs, self.color_prefs, \"black\", \"white\",\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, False,\r\n file_prefix=\"pie_chart\",\r\n props=self.props)\r\n\r\n self.assertTrue(exists(filename1), 'The png file was not created in \\\r\nthe appropriate location')\r\n self.assertTrue(exists(filename2), 'The eps file was not created in \\\r\nthe appropriate location')\r\n self.assertTrue(exists(filename3), 'The pdf file was not created in \\\r\nthe appropriate location')\r\n self._paths_to_clean_up = [\"/tmp/qiimewebfiles/charts/\" + f\r\n for f in listdir(\"/tmp/qiimewebfiles/charts\")]",
"def matplotlib_pie_chart() -> Tuple:\n df = process_life_expectancy_dataset(\"classification\")\n num_cols = get_numeric_columns(df)\n bin_cols = get_binary_columns(df)\n text_cols = get_text_categorical_columns(df)\n\n x_arr = np.array([len(num_cols), len(bin_cols), len(text_cols)])\n # The plot only shows numeric columns because process_life_expectancy_dataset returned df only\n # contains numeric columns \n fig, ax = a_libraries.matplotlib_pie_chart(x_arr)\n\n return fig, ax",
"def has_charts(self):\n return self.__charts is not None",
"def generate_personnel_outcome_pie_chart(personnel_search_table_selected_indices, selected_funding, selected_year, rows):\n personnel_outcome_data, personnel_name = personnel_outcomes_helper(personnel_search_table_selected_indices,\n selected_funding, selected_year, rows)\n personnel_outcome_type_value_count = {}\n if personnel_outcome_data is not None:\n personnel_outcome_type_value_count = personnel_outcome_data.type.value_counts()\n personnel_outcome_type_value_count = personnel_outcome_type_value_count.to_dict()\n\n\n print(\"Inside PIE CHART\", personnel_outcome_type_value_count)\n print(\"Inside PIE CHART\", personnel_name)\n\n\n trace = go.Pie(\n labels=list(personnel_outcome_type_value_count.keys()),\n values=list(personnel_outcome_type_value_count.values()),\n name=personnel_name,\n hoverinfo='label+percent',\n textinfo='value',\n textfont=dict(size=20),\n # marker=dict(colors=colors, line=dict(color='#000000', width=2)),\n )\n\n return{\n \"data\": [trace],\n \"layout\": go.Layout(\n # autosize=False,\n title='Professor Publications Breakdown',\n showlegend=True,\n # legend=go.Legend(\n # x=0,\n # y=1.0\n # ),\n # bargap =0.5,\n # width = 100,\n # autosize=F,\n margin=go.layout.Margin(l=40, r=0, t=40, b=30),\n # paper_bgcolor='rgb(233,233,233)', # set paper (outside plot)\n plot_bgcolor='rgb(192,192,192)', # plot color to grey\n )\n }",
"def pie(self):\n return self._pie",
"def pie_chart(self, pct=True, day='today'):\n assert isinstance(pct, bool), 'Error! The pct parameter must be boolean.'\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n def func(pct, allvals):\n \"\"\"Funciton for the lambda function. Returns the formatted value of each ETF\"\"\"\n return str(format(round(pct/100.*np.sum(allvals), 2),\".2f\")) + \"€\"\n vals = {}\n for name, etf in self.etfs.items():\n if etf.get_value(day) != 0:\n if name.split('-')[0].split('.')[0] in vals:\n vals[name.split('-')[0].split('.')[0]] += etf.get_value(day)\n else:\n vals[name.split('-')[0].split('.')[0]] = etf.get_value(day)\n wgt_values = [round(v, 2) for v in vals.values()]\n explode = [0 if x != max(wgt_values) else 0.06 for x in wgt_values]\n fig = plt.figure(figsize=(4,2), dpi=200)\n ax = fig.add_subplot(111)\n fig.patch.set_facecolor('#ececec')\n if pct:\n _, texts, autotexts = ax.pie(wgt_values, explode=explode, labels=vals.keys(), autopct='%1.1f%%')\n plt.setp(autotexts, size=5)\n else:\n _, texts, autotexts = ax.pie(wgt_values, explode=explode, labels=vals.keys(), \n autopct=lambda pct: func(pct, wgt_values), pctdistance=0.7)\n plt.setp(autotexts, size=4)\n plt.setp(texts, size=7, family='monospace')\n ax.set_title('Portfolio\\'s Composition', size='large', color='red', weight='bold')\n return fig, ax",
"def setPieLabelStyle(mode='percent',position='internal',justify='center', digits=1, color=-1):\n mdict = {'none':'NONE','percent':'PERCENT','data':'DATA','both':'BOTH'}\n pdict = {'internal':'INTERNAL','external':'EXTERNAL','aligned':'ALIGNED'}\n jdict = {'center':'CENTER','left':'LEFT','right':'RIGHT',\n 'outwards':'OUTWARDS','inwards':'INWARDS'}\n dislin.labels(mdict[mode],'PIE')\n dislin.labpos(pdict[position],'PIE')\n dislin.labtyp(jdict[justify],'PIE')\n dislin.labdig(digits,'PIE')\n dislin.labclr(color,'PIE')",
"def _construct_pie(self):\n\n # count labels and instances\n label_count = count(self.labels)\n label_instances = [instance for instance in label_count.values()]\n colours = [\n rgb2hex(\n self.clusters[label]/255) for label in label_count.keys()]\n self.pie = plot.figure()\n plot.pie(\n x=label_instances, labels=label_count.keys(\n ), colors=colours, autopct=\"%.2f%%\")\n plot.tight_layout()\n plot.axis('equal')",
"def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score",
"def plot_pies(labels, data, categories):\n # Create the figure and traces\n fig = go.Figure()\n visibility, annotations = [], []\n traces = 0\n for var in labels:\n if var in categories:\n values = list(dict.fromkeys(categories[var].values()))\n else:\n values = list(dict.fromkeys(data[var]))\n values.sort()\n nvals = len(values)\n counts = []\n annotations.append([])\n for i in values: # get the counts for each category\n s = data[(data[var] == i) & (data[\"Score\"] == \"good\")].shape[0]\n counts.append([s, data[data[var] == i].shape[0]-s])\n for i in range(nvals): # plot them in nested pie charts\n fig.add_trace(go.Pie(hole=(0.3+i*0.7/nvals)/(0.3+(0.7*(i+1)/nvals)), sort=False,\n domain={'x': [0.35-(i+1)*0.35/nvals, 0.65+(i+1)*0.35/nvals], 'y': [0.35-(i+1)*0.35/nvals, 0.65+(i+1)*0.35/nvals]}, name=values[i], values=counts[i], textposition='inside',\n textfont_size=20, marker={'colors': ['mediumturquoise', 'darkorange'], 'line': {'color': '#000000', 'width': 2}},\n labels=['Good', 'Bad'], hoverinfo='label+name+percent', textinfo='percent'))\n annotations[-1].append(dict(text=values[i], x=0.577+(i+1)*0.184/nvals, y=0.5,\n arrowwidth=1.5, arrowhead=6, ax=100+20*i, ay=100+20*i))\n traces += 1\n visibility = [l+nvals*[False] for l in visibility]\n visibility.append((traces-nvals)*[False]+nvals*[True])\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility[i]},\n {'title': label, 'annotations': annotations[i]}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons)])\n # Setup layout\n fig['layout']['updatemenus'] = updatemenus\n fig['layout']['title'] = \"Distribution of the scores of categorical variables:\"\n iplot(fig, config={\"displayModeBar\": False})",
"def pieChart2():\n n = 4\n Z = np.ones(n)\n Z[-1] *= 2\n\n labels = 'A', 'B', 'C', 'D'\n plt.axes([0.025,0.025,0.95,0.95])\n\n plt.pie(Z, explode=Z*.05, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)\n plt.gca().set_aspect('equal')\n plt.xticks([]), plt.yticks([])\n\n # savefig('../figures/pie_ex.png',dpi=48)\n plt.show()",
"def _check_styles(self, dataset):\n # list of column ids to exclude from plotting\n exclude_cols = [\"sample_name\", \"patient_id\", \"title\"]\n\n # check column styles\n if dataset['styles']['columns']['color'] == []:\n if dataset['metadata']['columns'] != '':\n # load metadata\n mdat = load_data(dataset['metadata']['columns'])\n\n # exclude known uninformative columns\n cols_to_drop = [x for x in exclude_cols if x in mdat.columns]\n\n if len(cols_to_drop) > 0:\n mdat = mdat.drop(cols_to_drop, axis=1)\n\n # set default columns to use for plotting\n dataset['styles']['columns']['color'] = mdat.columns[mdat.nunique() > 1].tolist()\n\n # check row styles\n if dataset['styles']['rows']['color'] == []:\n if dataset['metadata']['rows'] != '':\n mdat = load_data(dataset['metadata']['rows'])\n dataset['styles']['rows']['color'] = mdat.columns[mdat.nunique() > 1].tolist()",
"def pieChart1():\n n = 20\n Z = np.ones(n)\n Z[-1] *= 2\n\n plt.axes([0.025,0.025,0.95,0.95])\n\n plt.pie(Z, explode=Z*.05, colors = ['%f' % (i/float(n)) for i in range(n)])\n plt.gca().set_aspect('equal')\n plt.xticks([]), plt.yticks([])\n\n # savefig('../figures/pie_ex.png',dpi=48)\n plt.show()",
"def is_plot(session_id, test_name):\n return Plot.is_plot(session_id, test_name)",
"def draw_pie(self, n, r):\n pass",
"def test_plt_status():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_status()\n\n assert bokeh_plot_type == type(result)",
"def cake() -> None:\n\n\tdef real_value(val):\n\t\t\"\"\"Return the real value instead of the %.\"\"\"\n\t\treturn round(val/100*sum(euros), 1)\n\n\tdata = [(name, players[name].cake, players[name].color)\n\t for name in get_people() if players[name].cake > 0]\n\tdata.sort(key=lambda x: x[1], reverse=True)\n\n\tnames = [el[0] for el in data]\n\teuros = [el[1] for el in data]\n\tcolors = [el[2] for el in data]\n\n\texplode = [0.04] * len(names)\n\texplode[0] = 0.07\n\n\t_, ax = plt.subplots(figsize=(6, 6))\n\tpatches, text, autotext = ax.pie(\n\t\t\teuros, labels=names, explode=explode, colors=colors,\n\t\t\tstartangle=120, radius=1.5, autopct=real_value)\n\n\t# Change the style of the plot\n\tfor patch in patches:\n\t\tpatch.set_linewidth(1.5)\n\t\tpatch.set_edgecolor('black')\n\tfor x in range(len(names)):\n\t\tif x == 0:\n\t\t\ttext[x].set_fontsize(30)\n\t\t\tautotext[x].set_fontsize(30)\n\t\telse:\n\t\t\ttext[x].set_fontsize(15)\n\t\t\tautotext[x].set_fontsize(15)\n\n\tinvisible = [(name, -1*players[name].cake) for name in get_people()\n\t\t\t\t if players[name].cake < 0]\n\tinvisible.sort(key=lambda x: x[1], reverse=True)\n\ttxt = 'Players with bonus:'\n\tfor pl, bn in invisible:\n\t\ttxt += f'\\n - {bn: .1f}, {pl}'\n\tplt.text(-.15, 0.99, txt, transform=ax.transAxes, fontsize=10)\n\n\tplt.savefig('cake.png', dpi=120, bbox_inches='tight')\n\tplt.gcf().clear()",
"def check_is_plottable(self, var):\n self.plot_button.disabled = False # important to enable button once disabled\n data = self.data[var[0]]\n self.plot_button.disabled = len(data.dims) <= 1",
"def plot_evaluation(values, info, measures = ['Dice','Jaccard', 'TPR', 'TNR', '1-GCE', 'VS', 'RI', 'ARI', 'MI', '1-VOI', 'ICC','1/(1+PBD)', 'KAP', 'AUC', '1/(1+HD)', '1/(1+AVD)', 'MHD' ], colourmap=None, outfile='polar_results.png'):\n _min = info['minimum']\n _max = info['maximum']\n if colourmap is None:\n colourmap = [[86./255.,180./255.,233./255.] for ii in range(values.shape[0])]\n else:\n # normalize colourmap values between 0 and 1\n colourmap = (colourmap-_min)/(_max-_min)\n # apply cividis, returns the RBG1 values for cividis, for dots\n colourmap = [[cm.cividis(ii)] for ii in colourmap] \n\n # elements of the circle\n N = len(measures)\n # evenly space measures around circle\n x_as = [n / float(N) * 2 * pi for n in range(N)] \n\n # Set color of axes\n plt.rc('axes', linewidth=0.5, edgecolor=\"#888888\")\n\n # Create polar plot\n fig = plt.figure(figsize = (11,9.5))\n gs = gridspec.GridSpec(1, 3, width_ratios=[17,2,1])\n ax = plt.subplot(gs[0], polar=True)\n \n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set yticks\n plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], [\"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"], fontsize=15)\n pos=ax.get_rlabel_position()\n ax.set_rlabel_position(pos+0.4*360./float(len(measures)))\n\n # Plot data\n for ii in np.arange(values.shape[0]):\n xx = np.asarray(x_as) + np.random.randn(len(x_as))*np.diff(x_as)[0]/15.\n data_norm = None\n if info['logplot']:\n data_norm = matplotlib.colors.LogNorm(vmin=_min, vmax=_max)\n sc = ax.scatter(xx, values[ii,:], 23, color=colourmap[ii]*len(xx), norm=data_norm, zorder=3) \n\n # Fill area\n # close the circle\n median = list(np.median(values, axis=0))\n median += median[:1]\n upper = list(np.percentile(values, 75, axis=0))\n upper += upper[:1]\n lower = list(np.percentile(values, 25, axis=0))\n lower += lower[:1]\n x_as += x_as[:1]\n ax.plot(x_as, median, color=[86./255.,180./255.,233./255.], zorder=5)\n ax.fill_between(x_as, upper, lower, zorder=4, color=[86./255.,180./255.,233./255.], alpha=0.3)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set axes limits\n plt.ylim(0, 1)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi-0.05\n text_size = 21\n if i in {3,8}:\n ax.text(angle_rad, 1.15, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {0}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {1,5,7}:\n ax.text(angle_rad, 1.29, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {4}:\n ax.text(angle_rad, 1.32, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"top\")\n elif i in {10}:\n ax.text(angle_rad, 1.26, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {6}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {9}:\n ax.text(angle_rad, 1.18, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', 
verticalalignment=\"center\")\n else:\n ax.text(angle_rad, 1.22, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n\n # colorbar location on figure\n cbaxes = plt.subplot(gs[2])\n\n # log scaling option\n norm = None\n if info['logplot']:\n norm = matplotlib.colors.LogNorm(vmin=_min,vmax=_max)\n\n img = plt.imshow(np.array([[_min,_max]]), aspect='auto', cmap=\"cividis\", norm=norm)\n img.set_visible(False)\n\n # initialize colorbar\n cbar = plt.colorbar(cax = cbaxes)\n\n # ticks and label\n c_values = cbar.get_ticks().tolist()\n \n ticklabels = [\"\" for ii in c_values]\n if _min < np.min(c_values):\n c_values = [_min] + c_values\n ticklabels = [\"%0.1f %s\" %(np.min(c_values), info['unit'])] + ticklabels\n else:\n ticklabels[0] = \"%0.1f %s\" %(np.min(c_values), info['unit'])\n\n if _max > np.max(c_values):\n c_values = c_values + [_max]\n ticklabels = ticklabels + [\"%0.1f %s\" %(np.max(c_values), info['unit'])]\n else:\n ticklabels[-1] = \"%0.1f %s\" %(np.max(c_values), info['unit'])\n \n cbar.set_ticks(c_values)\n cbar.set_ticklabels(ticklabels)\n cbaxes.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n cbar.ax.set_ylabel(info[\"label\"], labelpad=-20)\n \n # font sizes for colorbar\n cbar.ax.yaxis.label.set_size(19)\n cbar.ax.tick_params(labelsize=14)\n\n # Save and show polar plot \n plt.savefig(outfile)\n if info['display']:\n plt.show()\n plt.clf()\n plt.close('all')",
"def plot_graph_pie(filename,titulo,serie):\n validSerie = []\n labels =[]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral','cyan','green','pink']\n explode = [0.1,0.1,0.1,0.1,0.1,0.1,0.1]\n for cop in serie.keys():\n if cop != 'TODOS' :\n validSerie.append(cop)\n labels.append(cop)\n sizes = []\n for cop in validSerie:\n sizes.append(len(serie[cop]))\n\n plt.close('all')\n title(titulo)\n plt.pie(sizes,labels=labels,colors=colors,explode=explode,autopct='%1.1f%%')\n plt.axis('equal')\n plt.savefig(filename,dpi=96)",
"def pie(\n df,\n value=\"value\",\n category=\"variable\",\n legend=False,\n title=None,\n ax=None,\n cmap=None,\n **kwargs,\n):\n\n # cast to DataFrame if necessary\n # TODO: select only relevant meta columns\n if not isinstance(df, pd.DataFrame):\n df = df.as_pandas()\n\n for col in set(SORT_IDX) - set([category]):\n if len(df[col].unique()) > 1:\n msg = (\n \"Can not plot multiple {}s in a pie plot with value={} and category={}\"\n )\n raise ValueError(msg.format(col, value, category))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # get data, set negative values to explode\n _df = df.groupby(category)[value].sum()\n where = _df > 0\n explode = tuple(0 if _ else 0.2 for _ in where)\n _df = _df.abs()\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.index), colormap=cmap)[\n \"color\"\n ]\n rc = run_control()\n\n if \"colors\" in kwargs:\n colors = kwargs.pop(\"colors\")\n else:\n colors = []\n for key, c in zip(_df.index, defaults):\n if category in rc[\"color\"] and key in rc[\"color\"][category]:\n c = rc[\"color\"][category][key]\n colors.append(c)\n\n # plot data\n _df.plot(kind=\"pie\", colors=colors, ax=ax, explode=explode, **kwargs)\n\n # add legend and title\n ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5), labels=_df.index)\n if not legend:\n ax.legend_.remove()\n if title:\n ax.set_title(title)\n\n # remove label\n ax.set_ylabel(\"\")\n\n return ax",
"def goodPlots( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isHills() or pCurrent.isFlatlands():\n\t\t\tif not pCurrent.isImpassable():\n\t\t\t\tif not pCurrent.isUnit():\n\t\t\t\t\tif pCurrent.getTerrainType() not in [con.iDesert, con.iSemidesert, con.iTundra, con.iWetland]:\n\t\t\t\t\t\tif pCurrent.calculateCulturalOwner() == -1: # edead: bugfix\n\t\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)",
"def plottable(self):\n if self.model_or_sim.type == \"Simulation\":\n return False\n else:\n return True",
"def test_overplotting(self):\n arr = self.arr\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n out2 = ternary(arr, ax=out)\n self.assertTrue(out.tax is out2.tax) # hasn't added a new ternary axis",
"def etio_by_csa_no_pie(df):\n\n # TODO: see if there's a way to combine this information with Outcome by Etio\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6))\n f, axes = plt.subplots(4, 1, figsize=(6, 9))#, sharex=True)\n sns.despine(top=True, bottom=True)\n #f.suptitle(\"Etiology of Central Events, Grouped by %Central Events\")\n\n OSA_pure_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Mainly OSA\"], return_df=True).sort_values(\"Dx\")\n OSA_predom_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Combined OSA/CSA\"], return_df=True).sort_values(\"Dx\")\n CSA_predom_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Predominantly CSA\"], return_df=True).sort_values(\"Dx\")\n CSA_pure_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Pure CSA\"], return_df=True).sort_values(\"Dx\")\n\n # Create count plot for each #CSA on the left, then a Pie Chart with proportion on the right\n\n hatches = ['///', '|||', 'xxx', '\\\\\\\\\\\\', '', '+++']\n face_color = [ 'dimgray', 'silver', 'whitesmoke', 'grey', 'gainsboro', 'darkgrey']\n\n # Pure OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_pure_hist, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n #axes[3].set(xlabel=\"Patients With Each Etiology Contributing to Central Events\", ylabel=\"<10% Central Events\", yticklabels = [])\n axes[3].set(xlabel=\"Patients With Each Etiology Contributing to Central Events\", ylabel=\"\")\n\n # Predom OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_predom_hist, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"\")\n # axes[2].set(xlabel=\"\", ylabel=\"10-50% Central Events\", yticklabels=[])\n\n # Predom CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_predom_hist, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"\")\n # axes[1].set(xlabel=\"\", ylabel=\"50-90% Central Events\", yticklabels=[])\n\n # Pure CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_pure_hist, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\"\")\n # axes[0].set(xlabel=\"\", ylabel=\">90% Central Events\", yticklabels=[])\n\n # Combined X axis for L side\n axes[3].get_shared_x_axes().join(axes[3], axes[2], axes[1], axes[0])\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n # Leave bottom aka [3,0] labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1]) # .95 to leave space for title\n f.savefig('Etio by percentage CSA no pie.png', dpi=100)\n # plt.show()",
"def test_one(self):\n arr = self.arr[0, :]\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n self.assertEqual(\n type(out.tax), pyternary.ternary_axes_subplot.TernaryAxesSubplot\n )"
] |
[
"0.7013918",
"0.62174886",
"0.5943961",
"0.59015816",
"0.57674253",
"0.56307137",
"0.5607042",
"0.55853486",
"0.5553504",
"0.5548517",
"0.55420166",
"0.5469935",
"0.54514027",
"0.5428705",
"0.53906536",
"0.5386055",
"0.53860503",
"0.537129",
"0.5366187",
"0.5360201",
"0.5319689",
"0.531661",
"0.5281951",
"0.5231338",
"0.5217802",
"0.52038693",
"0.5153088",
"0.514706",
"0.5135155",
"0.5111618"
] |
0.6812573
|
1
|
Run OCR for all the boxes.
|
def run_ocr_in_chart(chart, pad=0, psm=PSM.SINGLE_LINE):
img = chart.image
# add a padding to the initial figure
fpad = 1
img = cv2.copyMakeBorder(img.copy(), fpad, fpad, fpad, fpad, cv2.BORDER_CONSTANT, value=(255, 255, 255))
fh, fw, _ = img.shape
api = PyTessBaseAPI(psm=psm, lang='eng')
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
for tbox in chart.texts:
        # add a pad to the original image: in some cases in the Quartz corpus the text touches the border.
x, y, w, h = ru.wrap_rect(u.ttoi(tbox.rect), (fh, fw), padx=pad, pady=pad)
x, y = x + fpad, y + fpad
if w * h == 0:
tbox.text = ''
continue
# crop region of interest
roi = img[y:y + h, x:x + w]
# to gray scale
roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        # upscale the ROI 3x (cubic interpolation) before binarization
roi_gray = cv2.resize(roi_gray, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
# binarization
_, roi_bw = cv2.threshold(roi_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# removing noise from borders
roi_bw = 255 - clear_border(255-roi_bw)
# roi_gray = cv2.copyMakeBorder(roi_gray, 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=255)
# when testing boxes from csv files
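        # (estimate the component count from a contrast-enhanced (CLAHE) binarization
        # and register that many regions on the box)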
if tbox.num_comp == 0:
# Apply Contrast Limited Adaptive Histogram Equalization
roi_gray2 = clahe.apply(roi_gray)
_, roi_bw2 = cv2.threshold(roi_gray2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
_, num_comp = morphology.label(roi_bw2, return_num=True, background=255)
tbox.regions.extend(range(num_comp))
pil_img = smp.toimage(roi_bw)
if SHOW:
pil_img.show()
max_conf = -np.inf
min_dist = np.inf
correct_text = ''
correct_angle = 0
u.log('---------------')
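        # try OCR at 0 and +/-90 degrees; keep the orientation whose result has the
        # highest mean confidence without increasing the gap between the recognized
        # character count and the expected number of components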
for angle in [0, -90, 90]:
rot_img = pil_img.rotate(angle, expand=1)
api.SetImage(rot_img)
conf = api.MeanTextConf()
text = api.GetUTF8Text().strip()
dist = abs(len(text.replace(' ', '')) - tbox.num_comp)
u.log('text: %s conf: %f dist: %d' % (text, conf, dist))
if conf > max_conf and dist <= min_dist:
max_conf = conf
correct_text = text
correct_angle = angle
min_dist = dist
tbox.text = post_process_text(lossy_unicode_to_ascii(correct_text))
tbox.text_conf = max_conf
tbox.text_dist = min_dist
tbox.text_angle = correct_angle
u.log('num comp %d' % tbox.num_comp)
u.log(u'** text: {} conf: {} angle: {}'.format(correct_text, max_conf, correct_angle))
api.End()
# return boxes
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def evaluate_textboxes(gt_boxes, boxes):\n assert len(gt_boxes) == len(boxes)\n \n iou = 0\n # compute IOU per image\n for i in range(len(boxes)):\n if len(boxes[i]) == 0 or len(gt_boxes[i]) == 0:\n continue\n \n max_dim = np.max(np.max(boxes[i]))\n shape = (max_dim, max_dim)\n # We compute the IOU by generating both masks with all given textboxes highlighted.\n gt_mask, mask = generate_text_mask(shape, gt_boxes[i]), generate_text_mask(shape, boxes[i])\n iou += compute_iou(gt_mask, mask)\n return iou / len(boxes)",
"def pre_process_and_run_ocr(image_path):\n\n img = cv2.imread(image_path)\n\n # Rescale the image, if needed.\n # img = cv2.resize(img, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)\n # img = cv2.resize(img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)\n\n # Convert to gray\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Apply dilation and erosion to remove some noise\n kernel = np.ones((1, 1), np.uint8)\n img = cv2.dilate(img, kernel, iterations=1)\n img = cv2.erode(img, kernel, iterations=1)\n\n # Apply blur to smooth out the edges\n img = cv2.bilateralFilter(img, 9, 75, 75)\n\n # Thresholding types\n # img=cv2.threshold(img,127,255,cv2.THRESH_BINARY)\n # img=cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)\n\n tesser_output = pytesseract.image_to_data(img, lang='Greek+eng', config='--psm 12', output_type=Output.DICT)\n\n return tesser_output",
"def __update_tesseract__(self):\n if self.row_bitmaps != []:\n self.__write_out_row__()\n cv2.imwrite(\"active_weather.basic.exp\" + str(self.box_count) + \".tiff\", self.training_page)\n # call([\"convert\", \"-density 300\", \"-depth 4\", \"active_weather.basic.exp0.tiff\",\"active_weather.basic.exp0.tiff\"])\n call([\"/usr/bin/tesseract\", \"active_weather.basic.exp0.tiff\", \"active_weather.basic.exp0\", \"nobatch\", \"box.train\"])\n\n with open(\"font_properties\",\"w\") as f:\n f.write(\"basic 0 0 0 0 0\\n\")\n\n call([\"unicharset_extractor\", \"active_weather.basic.exp0.box\"])\n os.system(\"/home/ggdhines/github/tesseract/training/set_unicharset_properties -F font_properties -U unicharset -O unicharset --script_dir=/home/ggdhines/langdata\")\n # os.system(\"shapeclustering -F font_properties -U unicharset active_weather.basic.exp0.tr\")\n # os.system(\"shapeclustering -F font_properties active_weather.basic.exp0.tr\")\n os.system(\"mftraining -F font_properties -U unicharset -O active_weather.unicharset active_weather.basic.exp0.tr\")\n os.system(\"cntraining active_weather.basic.exp0.tr\")\n\n os.system(\"mv inttemp active_weather.inttemp\")\n os.system(\"mv normproto active_weather.normproto\")\n os.system(\"mv pffmtable active_weather.pffmtable\")\n os.system(\"mv shapetable active_weather.shapetable\")\n os.system(\"combine_tessdata active_weather.\")\n\n os.system(\"mv active_weather.basic.* /tmp/tessdata/\")\n os.system(\"mv active_weather.inttemp /tmp/tessdata/\")\n os.system(\"mv active_weather.normproto /tmp/tessdata/\")\n os.system(\"mv active_weather.pffmtable /tmp/tessdata/\")\n os.system(\"mv active_weather.shapetable /tmp/tessdata/\")\n os.system(\"mv active_weather.traineddata /tmp/tessdata/\")\n os.system(\"mv active_weather.unicharset /tmp/tessdata/\")\n os.system(\"mv font_properties /tmp/tessdata/\")",
"def process_image( img_path_filename, output_dir_name, output_path_filename):\n\t########################### Google OCR #############################\n\tclient = vision.ImageAnnotatorClient()\n\n\tlines_boxes_img = []\n\tlines_texts_img = []\n\tlines_probs_img = []\n\n\t# Path + Base name for the block files\n\tfilename = img_path_filename.split('/')[-1]\n\tbasename = filename.split('.')[0]\n\n\tcontent = None\n\twith io.open( img_path_filename, 'rb' ) as image_file:\n\t\tcontent = image_file.read()\n\n\ttry:\n\t\t# Process image and recognize its parts and text\n\t\timage = types.Image( content=content )\n\t\tresponse = client.document_text_detection(image=image)\n\t\tdocument = response.full_text_annotation\n\n\t\tfulltext_path_filename = output_dir_name + \"/\" + basename + \".txt\"\t\n\t\t# Save all the extracted text in a text file\n\t\twith open( fulltext_path_filename,'w') as f:\n\t\t\tf.write( response.full_text_annotation.text )\n\n\t\t# Collect the lines, their probabilities, and their bounding boxes\n\t\tfor page in document.pages:\n\t\t\tfor block in page.blocks:\n\t\t\t\tfor paragraph in block.paragraphs:\n\t\t\t\t\t# Divide the paragraph in lines and get its lines, bounding boxes, and symbols' probabilities\n\t\t\t\t\tlines_boxes_par, lines_texts_par, lines_probs_par = process_paragraph( paragraph )\n\t\t\t\t\t# Extend the line lists\n\t\t\t\t\tlines_boxes_img.extend( lines_boxes_par )\n\t\t\t\t\tlines_texts_img.extend( lines_texts_par )\n\t\t\t\t\tlines_probs_img.extend( lines_probs_par )\n\texcept Exception as e:\n\t\tprint(\"Error: \" + img_path_filename + \", \" + str(e))\n\t\treturn\n\n\t# Crop and save the image for each paragraph, its text files, and its probabilities files. It also returns the bbox statistics.\n\ttext_local, text_global = \"\", \"\"\n\ttext_local, text_global = crop_save( img_path_filename, lines_boxes_img, lines_texts_img, lines_probs_img, filename, basename, output_dir_name )\n\n\t# Save the bounding box information in the local and in the global file\n\tif text_global != \"\":\n\t\t# Save the data of the lines in the local text file\n\t\twith open(output_dir_name + \"/\" + basename + \"_lines.csv\", \"w+\") as f:\n\t\t\tf.write( text_local )\n\n\t\t# Save the data of the lines in the global text file\n\t\twith open(output_path_filename, \"a+\") as f:\n\t\t\tf.write( text_global )",
"def text_detect(img, original):\n # Resize image\n small = resize(img, 2000)\n image = resize(original, 2000)\n \n # Finding contours\n mask = np.zeros(small.shape, np.uint8)\n im2, cnt, hierarchy = cv2.findContours(np.copy(small), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n \n implt(img, 'gray')\n \n # Variables for contour index and words' bounding boxes\n index = 0 \n boxes = []\n # CCOMP hierarchy: [Next, Previous, First Child, Parent]\n # cv2.RETR_CCOMP - contours into 2 levels\n # Go through all contours in first level\n while (index >= 0):\n x,y,w,h = cv2.boundingRect(cnt[index])\n # Get only the contour\n cv2.drawContours(mask, cnt, index, (255, 255, 255), cv2.FILLED)\n maskROI = mask[y:y+h, x:x+w]\n # Ratio of white pixels to area of bounding rectangle\n r = cv2.countNonZero(maskROI) / (w * h)\n \n # Limits for text (white pixel ratio, width, height)\n # TODO Test h/w and w/h ratios\n if r > 0.1 and 2000 > w > 10 and 1600 > h > 10 and h/w < 3 and w/h < 10:\n boxes += [[x, y, w, h]]\n \n # Index of next contour\n index = hierarchy[0][index][0]\n \n # Group intersecting rectangles\n boxes = group_rectangles(boxes)\n bounding_boxes = np.array([0,0,0,0])\n for (x, y, w, h) in boxes:\n cv2.rectangle(image, (x, y),(x+w,y+h), (0, 255, 0), 8)\n bounding_boxes = np.vstack((bounding_boxes, np.array([x, y, x+w, y+h])))\n\n implt(image, t='Bounding rectangles')\n\n # Recalculate coordinates to original scale\n boxes = bounding_boxes.dot(ratio(image, small.shape[0])).astype(np.int64)\n return boxes[1:]",
"def show_bounding_boxes(dir_path: str) -> None:\r\n \r\n for image_file in glob.glob(dir_path + '/*.png'):\r\n image = cv2.imread(image_file)\r\n height, width, _ = image.shape\r\n\r\n with open(image_file.split(\".\")[0] +'.txt', 'r') as reader:\r\n annotations = reader.readlines()\r\n for annot in annotations:\r\n annot = annot.split()\r\n \r\n # Calculation of top left point and bottom right point of the bounding box \r\n x1, y1 = int((float(annot[1]) - float(annot[3])/2)*width), int((float(annot[2]) - float(annot[4])/2)*height)\r\n x2, y2 = int((float(annot[1]) + float(annot[3])/2)*width), int((float(annot[2]) + float(annot[4])/2)*height)\r\n \r\n # BGR color format\r\n if annot[0] == '0':\r\n color = (0,255,0) # Mask is worn correctly (Green color)\r\n label = 'Good'\r\n else:\r\n color = (0,0,255) # Mask is either not worn correctly or not worn at all (Red color)\r\n label = 'Bad'\r\n \r\n cv2.putText(image,\r\n label, \r\n (x1, y1 - 10),\r\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\r\n fontScale=0.5, \r\n color=color,\r\n thickness=1) \r\n \r\n cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness=1)\r\n \r\n k = cv2.waitKey(0) & 0xFF\r\n cv2.imshow(image_file.split(\"sss\")[-1], image)\r\n if k == 27:\r\n cv2.destroyAllWindows()\r\n break",
"def display_precomputed_boxes(self, sample_index, all_boxes):\n image_rois = [class_detections[sample_index]\n for class_detections in all_boxes]\n\n image_rois_list = []\n image_classes = []\n for class_index, class_rois in enumerate(image_rois):\n if len(class_rois) > 0:\n classes = np.ones((class_rois.shape[0])) * class_index\n image_rois_list.extend(class_rois)\n image_classes.extend(classes)\n image_rois_list = np.array(image_rois_list)\n image_classes = np.array(image_classes)\n\n show_gt_boxes = False\n self.display_detections(image_rois_list, image_classes, \n self.data_loader.dataset.samples[sample_index])",
"def run(self):\n n_previous_bounding_boxes = 0\n # Initialize the names and confidences with empty lists\n emotion_names, emotion_confidences, age_names, age_confidences, gender_names, gender_confidences = 6 * [[]]\n\n # Main loop, continues until CTRL-C is pressed\n for count in itertools.count():\n # Read camera\n frame = self.read_camera()\n out = frame.copy()\n try:\n if count % 2 == 0:\n bounding_boxes = get_bounding_boxes(frame, fac=1.2, yoffset=0.04)\n\n # Bounding boxes not always in same order, bb == (left, right, top, bottom)\n bounding_boxes = sorted(bounding_boxes, key=lambda bb: bb[0])\n n_bounding_boxes = len(bounding_boxes)\n\n # Perform predictions whenever there are any AND\n # (we have a different number of bounding boxes than before OR we have reached a count of modulo 5)\n if n_bounding_boxes > 0 and (n_bounding_boxes != n_previous_bounding_boxes or count % 5 == 0):\n emotion_names, emotion_confidences = self.emotion_predictor.predict(\n bounding_boxes, frame, oversample=True\n )\n age_names, age_confidences = self.age_predictor.predict(bounding_boxes, frame, oversample=True)\n gender_names, gender_confidences = self.gender_predictor.predict(\n bounding_boxes, frame, oversample=True\n )\n\n # Update the number of bounding boxes\n n_previous_bounding_boxes = n_bounding_boxes\n\n # Display the name of the classes\n self.display_labels(out, emotion_names, emotion_confidences, bounding_boxes, TOP_LEFT)\n self.display_labels(out, age_names, age_confidences, bounding_boxes, TOP_RIGHT, prefix='Age: ')\n self.display_labels(out, gender_names, gender_confidences, bounding_boxes, BOTTOM_RIGHT)\n\n # Draw the bounding boxes\n self.draw_bounding_boxes(bounding_boxes, out)\n\n # Show on smaller window\n if self.dual_display:\n cv2.imshow(self.wname + ' Small View', cv2.resize(out, (960, 540)))\n\n # Show on main window\n cv2.imshow(self.wname, out)\n except cv2.error as e:\n print(e)\n\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break",
"def apply_tesseract(image: Image.Image, lang: Optional[str], tesseract_config: Optional[str]):\n # apply OCR\n data = pytesseract.image_to_data(image, lang=lang, output_type=\"dict\", config=tesseract_config)\n words, left, top, width, height = data[\"text\"], data[\"left\"], data[\"top\"], data[\"width\"], data[\"height\"]\n\n # filter empty words and corresponding coordinates\n irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]\n words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]\n left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]\n top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]\n width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]\n height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]\n\n # turn coordinates into (left, top, left+width, top+height) format\n actual_boxes = []\n for x, y, w, h in zip(left, top, width, height):\n actual_box = [x, y, x + w, y + h]\n actual_boxes.append(actual_box)\n\n image_width, image_height = image.size\n\n # finally, normalize the bounding boxes\n normalized_boxes = []\n for box in actual_boxes:\n normalized_boxes.append(normalize_box(box, image_width, image_height))\n\n assert len(words) == len(normalized_boxes), \"Not as many words as there are bounding boxes\"\n\n return words, normalized_boxes",
"def plot_n_box(img_prepro):\n print(img_prepro)\n h, w= img_prepro.shape\n boxes = pytesseract.image_to_boxes(img_prepro)\n for b in boxes.splitlines():\n b = b.split(' ')\n img_prepro = cv2.rectangle(img_prepro, (int(b[1]), h - int(b[2])), (int(b[3]), h - int(b[4])), (0, 255, 0), 2)\n cv2.imshow('img', img_prepro)\n cv2.waitKey(0)\n\n return",
"def main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"path to image file\")\n args = vars(ap.parse_args())\n filename = args['image']\n\n with open(filename, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.text_detection(image=image)\n\n texts = response.text_annotations\n print('Texts:')\n\n for text in texts:\n print('\\n\"{}\"'.format(text.description))\n vertices = (['({},{})'.format(vertex.x, vertex.y)\n for vertex in text.bounding_poly.vertices])\n print('bound: {}'.format(','.join(vertices)))\n\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))",
"def evaluate_detections(self, all_boxes, output_dir=None):\n raise NotImplementedError",
"def evaluate_detections(self, all_boxes, output_dir=None):\n raise NotImplementedError",
"def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)",
"def vis_all_boxes(im_array, boxes):\r\n import matplotlib.pyplot as plt\r\n from ..fio.load_ct_img import windowing_rev, windowing\r\n\r\n im = windowing_rev(im_array+config.PIXEL_MEANS, config.WINDOWING)\r\n im = windowing(im, [-175,275]).astype(np.uint8) # soft tissue window\r\n plt.imshow(im)\r\n color = (0.,1.,0.)\r\n for bbox in boxes:\r\n rect = plt.Rectangle((bbox[0], bbox[1]),\r\n bbox[2] - bbox[0],\r\n bbox[3] - bbox[1], fill=False,\r\n edgecolor=color, linewidth=2)\r\n plt.gca().add_patch(rect)\r\n if boxes.shape[1] == 5:\r\n score = bbox[-1]\r\n plt.gca().text(bbox[0], bbox[1] - 2,\r\n '{:s} {:.3f}'.format(name, score),\r\n bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')\r\n plt.show()",
"def yolo_show_img(image, class_ids, boxes, labels, confidences, colors):\n for i, box in enumerate(boxes):\n # extract the bounding box coordinates\n (x, y) = (box[0], box[1])\n (w, h) = (box[2], box[3])\n\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in colors[class_ids[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 3)\n text = '{}: {:.4f}'.format(labels[i], confidences[i])\n print(text)\n\n font_scale = 1.3\n # set the rectangle background to white\n rectangle_bgr = color\n # set some text\n # get the width and height of the text box\n (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=1)[0]\n # set the text start position\n text_offset_x = x\n text_offset_y = y - 3 \n # make the coords of the box with a small padding of two pixels\n box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 10, text_offset_y - text_height - 10 ))\n cv2.rectangle(image, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n cv2.putText(image, text, (text_offset_x, text_offset_y), cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=font_scale, color=(255, 255, 255), thickness=2)\n\n cv2.imshow('yolo prediction', image)\n cv2.waitKey(0)",
"def draw_detections_final(image, plate_detection_list, plate_classes, plates):\n i = 0\n for detection in plate_detection_list:\n x = detection[2]\n y = detection[3]\n w = detection[4]\n h = detection[5]\n espessura = int(w * 0.02)\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), espessura)\n cv2.rectangle(image, (x, y), (x + w, y - h), (0, 0, 0), -1)\n cv2.putText(image, plates, (x, y - int(h * 0.3)), cv2.FONT_HERSHEY_SIMPLEX, w * 0.006,\n (0, 255, 0), thickness=int(w*0.02))\n i += 1\n # cv2.imshow('final', image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()",
"def locate_all_text(pipeline: Pipeline, text: str, img: ImageBGR) -> List[ndarray]:\n\n predictions = recognize(pipeline, img)\n\n return [box for (txt, box) in predictions if txt == text]",
"def get_word_boxes(self):\n word_boxes = self.lang.tool.image_to_string(\n self.image,\n lang=\"eng\",\n builder=pyocr.builders.WordBoxBuilder()\n )\n return word_boxes",
"def draw(font_string,font_size,lang,alphabets,outdir=\".\"): # language, font file name, font full path, font size, characters\n \n \n image_dir=lang+\".\"+\"images\"\n if(os.path.exists(image_dir)):\n pass\n else:\n os.mkdir(image_dir)\n \n #Using a font\n #font= ImageFont.truetype(font,fsz)\n boxfile=image_dir+\"/\"+\"bigimage.box\"\n f=open(boxfile,\"w\")\n wt = 4000\n ht = 4000 #modified later using a separate script\n\t\n bigimage=Image.new(\"L\",(wt,ht),255)\t#change here for inverting\n bigdraw=ImageDraw.Draw(bigimage)\n x=y=10\n count=0\n for akshar in alphabets:\n akshar.strip() #remove nasty characters\n \n #I shall now create an image with black bgc and white font color. One\n #getbbox() determines the bounding box values I shall invert the image.\n #This has to be done since getbbox() only finds bounding box values for\n #non-zero pixels (read as white), but tesseract-ocr runs on the exact\n #opposite bgc fgc combination. Contact [email protected].\n \n \n #The lines below are pango/cairo code \n surface = cairo.ImageSurface(cairo.FORMAT_A8, font_size*4, font_size*3)\n context = cairo.Context(surface)\n\n pc = pangocairo.CairoContext(context)\n\n layout = pc.create_layout()\n layout.set_font_description(pango.FontDescription(font_string))\n layout.set_text(akshar)\n print akshar\n\n # lines take care of centering the text.\n width, height = surface.get_width(), surface.get_height()\n w, h = layout.get_pixel_size()\n position = (10,10) #most likely this part messes up when you try to change the size within this script. It is suggested to use the separate script.\n context.move_to(*position)\n pc.show_layout(layout)\n surface.write_to_png(\"pango.png\")\n\t\n #Here we open the generated image using PIL functions\n temp_image=Image.open(\"pango.png\") #black background, white text\n draw = ImageDraw.Draw(temp_image)\n bbox = temp_image.getbbox()\n deltax=bbox[2]-bbox[0]\n deltay=bbox[3]-bbox[1]\n\n \n print bbox\n new_image=temp_image.crop(bbox)\n temp_image=temp_image.load()\n inverted_image = ImageChops.invert(new_image) #White background, black text\n\t\n\tinverted_image.save(image_dir+\"/\"+str(count)+\".png\")\n\tbigimage.paste(inverted_image,(x,y))\n\tos.unlink(image_dir+\"/\"+str(count)+\".png\")\n\tcount = count+1\n\t#bigimage.load()\n bigbox=(x,y,x+deltax,y+deltay)\n print bigbox\n draw=ImageDraw.Draw(bigimage)\n\t#draw.rectangle(bigbox,None,100)\n x=bigbox[2]+5\n if x>(wt-10):\n x=10; y=y+40\n\n os.unlink(\"pango.png\") #delete the pango generated png\n\n line=akshar+\" \"+str(bigbox[0]-1)+\" \"+str(ht-(bigbox[1]+deltay)-1)+\" \"+str(bigbox[2]+1)+\" \"+str(ht-(bigbox[3]-deltay)+1) # this is the line to be added to the box file\n\tf.write(line+'\\n')\n\n\t#degrade code starts\n\tstrip=[deltax*.2,deltax*.4,deltax*.7]\n\tfor values in range(0,2):\n\t\tdistort2=inverted_image\n\t\tfor wai in range(0,deltay):\n\t\t\tfor ex in range(strip[values],strip[values]+1):\n\t\t\t\tdistort2.putpixel((ex,wai),255)\n\t\tbigbox=(x,y,x+deltax,y+deltay)\n\t\t#draw.rectangle(bigbox,None,10)\n\t\tline=akshar+\" \"+str(bigbox[0]-1)+\" \"+str(ht-(bigbox[1]+deltay)-1)+\" \"+str(bigbox[2]+1)+\" \"+str(ht-(bigbox[3]-deltay)+1) # this is the line to be added to the box file\n \tf.write(line+'\\n')\n\t\tbigimage.paste(distort2,(x,y))\n\t\tx=bigbox[2]+5\n \tif x>(wt-10):\n \t\tx=10; y=y+40\n\t\t\n\t\t\t\n\t#degrade code ends\n \n #distort.distort(filename2,bbox,fsz,akshar)\n \n \n \n #bigimage.save(image_dir+\"/\"+\"bigimage.tif\",\"TIFF\") #useful to generate merged file for all images when using 
default sizes.\n f.close()\n train.train(lang,outdir)",
"def draw_bounding_box(objects,color):\n\n for i in range(len(objects)):\n x, y, w, h, d = objects[i].get_attributes()\n print(x, y, w, h, d)\n corr = get_correction(d, a, hfov, x)\n cv2.rectangle(color, (x-corr, y), (x+w-corr, y+h), (0, 255, 0), 4)\n\n try:\n real_x, real_y = get_dimensions(d, w, h, hfov, vfov, 640, 480)\n real_x = round(real_x, 3)\n real_y = round(real_y, 3)\n\n except:\n real_x, real_y = 'ERROR'\n\n cv2.putText(color, 'depth = ' + str(d) + 'm', (30, i*60 + 30) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'width = ' + str(real_x)+ 'm', (30, i*60 + 45) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'height = ' + str(real_y)+ 'm', (30, i*60 + 60) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n\n if(i < len(objects)-1):\n ## distance between left and right object\n distance = round(distance_between_objects(objects[i], objects[i+1], hfov, 640), 3)\n if distance > l:\n textcolor = (0, 255, 0)\n else:\n textcolor = (0, 0, 255)\n\n cv2.putText(color, 'distance between objects = ' + str(distance) + 'm',\n (320, i*60 + 70) , cv2.FONT_HERSHEY_SIMPLEX, 0.5, textcolor, 1)",
"def demo(net, data_dir, imgfile, out_dir):\n\n # Load the demo image\n im_file = os.path.join(data_dir, imgfile)\n im = cv2.imread(im_file)\n\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n scores = np.squeeze(scores)\n timer.toc()\n print ('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.12\n NMS_THRESH = 0.3\n color_white = (0, 0, 0)\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 \n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n bbox = map(int, bbox)\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=4)\n cv2.putText(im, '%s %.3f' % (cls, score), (bbox[0], bbox[1] + 15),\n color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)\n return im",
"def draw_boxes_texts(img,\n boxes,\n texts=None,\n colors=None,\n line_width=1,\n draw_start=False,\n box_format='x1y1x2y2'):\n assert box_format in ('x1y1x2y2', 'x1y1wh', 'xywh', 'xywha',\n 'polygon'), 'not supported box format!'\n img = imread(img)\n if len(boxes) == 0:\n return img\n boxes = copy.deepcopy(boxes)\n # convert bbox type to int\n if not isinstance(boxes, np.ndarray):\n if box_format != 'polygon':\n boxes = np.array(boxes)\n if box_format != 'xywha':\n boxes = boxes.astype(np.int)\n if len(boxes.shape) == 1:\n boxes = [boxes]\n else:\n boxes = [list(map(int, box)) for box in boxes]\n else:\n boxes = boxes.astype(np.int)\n if texts is not None and not isinstance(texts, (list, np.ndarray)):\n texts = [texts]\n if isinstance(img, Image.Image):\n img = cv.cvtColor(np.asarray(img), cv.COLOR_RGB2BGR)\n if not isinstance(img, np.ndarray):\n return\n if colors == 'random':\n colors = np.random.randint(0, 255, size=(len(boxes), 3))\n colors = [tuple(map(int, color)) for color in colors]\n text_color = (0, 255, 255)\n thickness = line_width\n font = cv.FONT_HERSHEY_SIMPLEX\n for idx, box in enumerate(boxes):\n # default color: red, BGR order\n box_color = (0, 0, 255) if colors is None else colors[idx]\n if box_format == 'x1y1x2y2':\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'x1y1wh':\n box[0:4] = cvtools.x1y1wh_to_x1y1x2y2(list(box[0:4]))\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'xywh':\n box[0:4] = cvtools.xywh_to_x1y1x2y2(list(box[0:4]))\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'xywha':\n rrect = tuple(box[:2]), tuple(box[2:4]), box[4]\n box = cv.boxPoints(rrect).astype(np.int)\n # box = np.int0(box)\n cv.drawContours(img, [box], 0, box_color, thickness)\n box = box.reshape((-1,))\n elif box_format == 'polygon':\n # for i in np.arange(2, len(box), 2):\n # cv.line(img, tuple(box[i-2:i]),\n # tuple(box[i:i+2]), box_color, thickness)\n # cv.line(img, tuple(box[-2:]),\n # tuple(box[:2]), box_color, thickness)\n # 如果img内存非连续,cv的所有绘制都会失效\n cv.polylines(img, np.int32([np.array(box).reshape(-1, 2)]), 1, box_color, thickness)\n # cv.line(img, tuple(box[:2]), tuple(box[2:4]), box_color, thickness)\n # cv.line(img, tuple(box[2:4]), tuple(box[4:6]), box_color, thickness)\n # cv.line(img, tuple(box[4:6]), tuple(box[6:8]), box_color, thickness)\n # cv.line(img, tuple(box[6:]), tuple(box[:2]), box_color, thickness)\n if draw_start:\n cv.circle(img, tuple(box[:2]),\n radius=5, color=text_color, thickness=-1)\n if texts is not None:\n cv.putText(img, texts[idx],\n (box[0]+2, box[1]-2), font, 0.5, text_color, 1)\n return img",
"def plate_recognition(plate):\r\n cv2.destroyAllWindows()\r\n print(\"Without preprocessing: \")\r\n cv2.imshow('Plate', plate)\r\n print(\"Pytesseract: {}\".format(pytesseract.image_to_string(plate)))\r\n img = Image.fromarray(plate)\r\n print(\"OCR: {}\".format(tesserocr.image_to_text(img)))\r\n\r\n print(\"With preprocessing: \")\r\n image = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)\r\n image = cv2.bilateralFilter(image, 11, 17, 17)\r\n image = cv2.threshold(image, 177, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n cv2.imshow('Processed Plate', image)\r\n print(\"Pytesseract: {}\".format(pytesseract.image_to_string(image)))\r\n img = Image.fromarray(image)\r\n print(\"OCR: {}\".format(tesserocr.image_to_text(img)))\r\n cv2.waitKey(0)",
"def draw_boxes_and_labels(img, localized_objs, obj_classes, box_color=(0, 255, 255)):\n img_h, img_w = img.shape[:2]\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.5\n font_color = (0, 0, 0)\n\n for (i, bbox_cv2) in localized_objs:\n # Draw the object boxes\n left, right, top, bottom = handle_bad_corners(bbox_cv2[0], bbox_cv2[1], bbox_cv2[2], bbox_cv2[3], img_w, img_h)\n cv2.rectangle(img, (left, top), (right, bottom), box_color, 4)\n # Draw a filled boxes on top of the bounding box (as the background for the labels)\n left1, top1, right1, _ = handle_bad_corners(left-2, top-40, right+2, bottom, img_w, img_h)\n cv2.rectangle(img, (left1, top1), (right1, top), box_color, -1, 1)\n # Output the labels that show the x and y coordinates of the bounding box center.\n text_label= obj_classes[i]\n top2 = 0 if top<25 else top-25\n cv2.putText(img, text_label, (left, top2), font, font_size, font_color, 1, cv2.LINE_AA)\n text_xy= 'x='+str((left+right)/2)+' y='+str((top+bottom)/2)\n cv2.putText(img, text_xy, (left,top2+20), font, 0.4, font_color, 1, cv2.LINE_AA)\n\n return img",
"def perform_OCR(self, OCR, greyscale_image, lower_treshold, upper_threshold):\n # If the user has chosen to perform OCR\n if OCR == \"True\":\n # Since pytesseract expects black text on a white background, I perform binarization thresholding on the image\n (T, edged_image) = cv2.threshold(greyscale_image,\n 110, # threshold value used to classify the pixel values\n 255, # maximum pixel value assigned to values above 110\n cv2.THRESH_BINARY) # binary thresholding - everything above 110 in pixel intensity will be set to white (255)\n \n # Below I am tweaking tesseract parameters. I use a combination of the original tesseract model and the neural network approach, which is indicated by --oem 2, and I tell it to \"assume a single column of text of variable sizes\", which is indicated by --psm 4\n custom_oem_psm_config = r'--oem 2 --psm 4'\n \n # Perform the OCR on the edged image\n text_string = pytesseract.image_to_string(edged_image)\n \n # Perform some manual cleanup of the text \n processed_text_string = ocr.replace(text_string)\n \n # Save text as txt-file to output directory\n with open(os.path.join(\"..\", self.output_dir, f\"{self.input_image}_OCR_text.txt\"), \"w\") as f:\n f.write(f\"Below you can see the result of the OCR run on {self.input_image}:\\n \\n {processed_text_string}\")\n \n # User message\n print(f\"\\n[INFO] OCR is done! {self.input_image}_OCR_text.txt has been saved in {self.output_dir}.\") \n \n # If the user has not specified that they want to perform OCR on the input image\n if OCR == \"False\":\n return None",
"def crop_all_bounding_boxes(boxes, image_path, crop_path):\n index = 0\n for box in boxes:\n object_class = box[0]\n cropped_image = crop_bounding_box_from_image(\n box, image_path, crop_path)\n filename = object_class + \"_\" + os.path.basename(image_path)\n while os.path.isfile(os.path.join(crop_path, filename)):\n print('File %s already exists!' % (filename))\n index += 1\n filename = str(index) + \"_\" + filename\n cropped_image.save(filename)",
"def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes",
"def _images_and_boxes_preprocessing(self, imgs, boxes, gt_boxes=None):\n # Image [0, 255] -> [0, 1].\n imgs = imgs.float()\n imgs = imgs / 255.0\n\n height, width = imgs.shape[2], imgs.shape[3]\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\n # range of [0, 1].\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = transform.clip_boxes_to_image(boxes, height, width)\n\n if self._split == \"train\":\n # Train split\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._jitter_min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = transform.random_crop(\n imgs, self._crop_size, boxes=boxes\n )\n\n # Random flip.\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\n elif self._split == \"val\":\n # Val split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n # Apply center crop for val split\n imgs, boxes = transform.uniform_crop(\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n elif self._split == \"test\":\n # Test split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n else:\n raise NotImplementedError(\n \"{} split not supported yet!\".format(self._split)\n )\n\n # Do color augmentation (after divided by 255.0).\n if self._split == \"train\" and self._use_color_augmentation:\n if not self._pca_jitter_only:\n imgs = transform.color_jitter(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = transform.lighting_jitter(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = transform.color_normalization(\n imgs,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n # Note that Kinetics pre-training uses RGB!\n imgs = imgs[:, [2, 1, 0], ...]\n\n boxes = transform.clip_boxes_to_image(\n boxes, self._crop_size, self._crop_size\n )\n\n return imgs, boxes",
"def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)"
] |
[
"0.65326566",
"0.6261916",
"0.6236352",
"0.61314714",
"0.6113341",
"0.6108616",
"0.5936302",
"0.59256",
"0.5911103",
"0.5908094",
"0.5882378",
"0.5880716",
"0.5813167",
"0.58092713",
"0.5807783",
"0.57885075",
"0.5769818",
"0.5737877",
"0.5705715",
"0.5673986",
"0.5651309",
"0.5650616",
"0.56408745",
"0.5619902",
"0.5606331",
"0.5588441",
"0.55828655",
"0.557174",
"0.5558358",
"0.55519503"
] |
0.63410026
|
1
|
Reads the contents of a pfile. Returns a tuple (features, labels), where both elements are lists of 2D numpy arrays. Each element of a list corresponds to a sentence; each row of a 2D array corresponds to a frame. In the case where the pfile doesn't contain labels, "labels" will be None.
|
def readPfile(filename):
with smart_open(filename, "rb") as f:
# Read header
# Assuming all data are consistent
for line in f:
tokens = line.decode().split()
if tokens[0] == "-pfile_header":
headerSize = int(tokens[4])
elif tokens[0] == "-num_sentences":
nSentences = int(tokens[1])
elif tokens[0] == "-num_frames":
nFrames = int(tokens[1])
elif tokens[0] == "-first_feature_column":
cFeature = int(tokens[1])
elif tokens[0] == "-num_features":
nFeatures = int(tokens[1])
elif tokens[0] == "-first_label_column":
cLabel = int(tokens[1])
elif tokens[0] == "-num_labels":
nLabels = int(tokens[1])
elif tokens[0] == "-format":
format = tokens[1].replace("d", "i")
elif tokens[0] == "-end":
break
nCols = len(format)
dataSize = nFrames * nCols
# Read sentence index
f.seek(headerSize + dataSize * 4)
index = struct.unpack(">%di" % (nSentences + 1), f.read(4 * (nSentences + 1)))
# Read data
f.seek(headerSize)
features = []
labels = []
sen = 0
for i in range(nFrames):
if i == index[sen]:
features.append([])
labels.append([])
sen += 1
data = struct.unpack(">" + format, f.read(4 * nCols))
features[-1].append(data[cFeature : cFeature + nFeatures])
labels[-1].append(data[cLabel : cLabel + nLabels])
features = [numpy.array(x) for x in features]
labels = [numpy.array(x) for x in labels] if nLabels > 0 else None
return (features, labels)
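A minimal usage sketch (not part of the original source): it assumes readPfile above is importable, that smart_open, struct and numpy are available at module level as in the original module, and that "train.pfile" is a purely illustrative file name.

features, labels = readPfile("train.pfile")        # hypothetical path
print("sentences:", len(features))                 # one 2D array per sentence
print("frames x features:", features[0].shape)     # rows are frames
if labels is not None:                              # labels is None for label-free pfiles
    print("frames x labels:", labels[0].shape)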
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_features_from_file(filename):\n\tf = np.loadtxt(filename)\n\treturn f[:,:4],f[:,4:] # feature locations, descriptors",
"def read_data(feature_file, label_file):",
"def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels",
"def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()",
"def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row",
"def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels",
"def read_labels(labels_file):\n if not labels_file:\n print 'WARNING: No labels file provided. Results will be difficult to interpret.'\n return None\n\n labels = []\n with open(labels_file) as infile:\n for line in infile:\n label = line.strip()\n if label:\n labels.append(label)\n assert len(labels), 'No labels found'\n return labels",
"def load_data(filename, use_labels=True):\n # load column 1 to 8 (ignore last one)\n data = np.loadtxt(open( filename), delimiter=',',\n usecols=range(1, 9), skiprows=1)\n if use_labels:\n labels = np.loadtxt(open( filename), delimiter=',',\n usecols=[0], skiprows=1)\n else:\n labels = np.zeros(data.shape[0])\n return labels, data",
"def readPFM(file):\n file = open(file, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n if header == b'PF':\n color = True\n elif header == b'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n dims = file.readline()\n try:\n width, height = list(map(int, dims.split()))\n except:\n raise Exception('Malformed PFM header.')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width, 1)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n return data, scale",
"def read_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']",
"def read_features_from_file(filename):\n f = loadtxt(filename)\n return f[:, :4], f[:, 4:] # feature locations, descriptors",
"def read_feature_labels(output):\n path = os.path.join(output, 'features.list')\n if not os.path.exists(path):\n logging.warning(\"Cannot read feature labels. Path/File does not exist.\")\n return None\n else:\n with open(path, 'r') as in_file:\n feature_labels = in_file.readlines()\n feature_labels = [feature_label.strip() for feature_label in feature_labels]\n\n return np.asarray(feature_labels)",
"def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)",
"def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels",
"def read_label(filepath, read_scalars=False):\n label_array = np.loadtxt(filepath, dtype=np.int, skiprows=2, usecols=[0])\n if read_scalars:\n scalar_array = np.loadtxt(filepath, skiprows=2, usecols=[-1])\n return label_array, scalar_array\n return label_array",
"def _read_libffm_file(self, filename):\n\n X_true = np.zeros((self.num_rows, self.num_features))\n y_true = np.zeros((self.num_rows, 1))\n field_true = np.zeros((self.num_features, 1))\n with open(filename, 'r') as f:\n i = 0\n for line in f:\n tmp_row = line.replace('\\n', '').split(' ')\n\n # extract label\n y_true[i] = int(tmp_row[0])\n\n # extract data and fields\n for k in range(1, len(tmp_row)):\n if len(tmp_row[k]) > 0:\n tmp_str = tmp_row[k].split(':')\n j = int(tmp_str[1])\n field_true[j] = int(tmp_str[0])\n tmp_data = float(tmp_str[2])\n X_true[i, j] = tmp_data\n i = i + 1\n\n return X_true, y_true, field_true",
"def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)",
"def read_pfeatures(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n nif = db['nif']\n year = db['year']\n pfeatures = db['pfeatures']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, nif, year, pfeatures, methodvalues",
"def read_data(feats_file, labels_file, size=None):\n feats = np.loadtxt(feats_file)\n labels = np.loadtxt(labels_file, ndmin=2)\n if size:\n feats = feats[:size, :]\n labels = labels[:size, :]\n return np.concatenate((feats, labels), axis=1)",
"def load_pfm(filename):\n filename = process(filename)\n with open(filename, \"r\", encoding=\"ISO-8859-1\") as file:\n nc = 3 if file.readline().rstrip() == \"PF\" else 1\n width, height = [int(x) for x in file.readline().rstrip().split()]\n shape = (height, width, nc)\n img = np.fromfile(file, '{0}{1}'.format(\"<\" if float(file.readline().rstrip()) < 0 else \">\",'f') )\n img = np.reshape(img, shape)\n return np.flip(np.flip(img, 2), 0).copy()",
"def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data",
"def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def load_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']",
"def load_data_and_labels():\n # Load data from files\n positive_examples = list(\n open(\"./data/rt-polarity.pos\", \"r\", encoding='latin-1').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(\n open(\"./data/rt-polarity.neg\", \"r\", encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]",
"def read_input_file(file_name):\n matrix = np.asmatrix(np.loadtxt(file_name))\n matrix = matrix[:, :-1]\n\n (rows, attribute_count) = np.shape(matrix)\n\n # convert data into an list of Examples\n examples = [\n Example(matrix[i, :])\n for i in range(0, rows)\n ]\n\n return (examples, attribute_count)",
"def load_data_and_labels(positive_data_file, negative_data_file):\n # Load data from files\n positive_examples = list(open(positive_data_file, \"r\",encoding='utf-8').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n print (\"len of pos\"+positive_data_file, len(positive_examples))\n negative_examples = list(open(negative_data_file, \"r\",encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n print (\"len of neg\"+negative_data_file,len(negative_examples))\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]",
"def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels",
"def read_ptbtagged(ptbtagged_path: str) -> Iterator[Tuple[TokenSeq, PosSeq]]:\n #do this immediately (first)\n #start generating feature matrices\n \n #read file into an array \n with open(ptbtagged_path) as f:\n file_array = f.readlines()\n file_array.append(\"\\n\")\n array_of_tuples = create_tuples(file_array)\n\n return generator(array_of_tuples)",
"def read_data(data_path, filename,feature_number):\n\n with open(data_path + \"/\" + filename, 'r', encoding='utf-8-sig') as f: \n X = np.genfromtxt(f, delimiter=',')[:,0:feature_number]\n\n\n # Last column of datafile contains output labels\n Y = np.genfromtxt(data_path + \"/\" + filename,delimiter=\",\")[:,feature_number]\n Y = Y.reshape(X.shape[0])\n\n return X,Y"
] |
[
"0.6479107",
"0.64477426",
"0.63880754",
"0.6314104",
"0.63055766",
"0.62895685",
"0.6252734",
"0.6156262",
"0.6133945",
"0.61212313",
"0.61121446",
"0.6105066",
"0.61001927",
"0.60388434",
"0.5959132",
"0.59425837",
"0.59364706",
"0.5934543",
"0.59034926",
"0.5866229",
"0.58512974",
"0.5846918",
"0.5840235",
"0.5835824",
"0.5835324",
"0.5832751",
"0.58086336",
"0.5795598",
"0.57756585",
"0.5774928"
] |
0.7912509
|
0
|
Writes "features" and "labels" to a pfile. Both "features" and "labels" should be lists of 2D numpy arrays. Each element of a list corresponds to a sentence; each row of a 2D array corresponds to a frame. In the case where there is only one label per frame, the elements of the "labels" list can be 1D arrays.
|
def writePfile(filename, features, labels = None):
nSentences = len(features)
nFrames = sum(len(x) for x in features)
nFeatures = len(numpy.array(features[0][0]).ravel())
nLabels = len(numpy.array(labels[0][0]).ravel()) if labels is not None else 0
nCols = 2 + nFeatures + nLabels
headerSize = 32768
dataSize = nFrames * nCols
with smart_open(filename, "wb") as f:
# Write header
writeBytes(f, "-pfile_header version 0 size %d\n" % headerSize)
writeBytes(f, "-num_sentences %d\n" % nSentences)
writeBytes(f, "-num_frames %d\n" % nFrames)
writeBytes(f, "-first_feature_column 2\n")
writeBytes(f, "-num_features %d\n" % nFeatures)
writeBytes(f, "-first_label_column %d\n" % (2 + nFeatures))
writeBytes(f, "-num_labels %d\n" % nLabels)
writeBytes(f, "-format dd" + "f" * nFeatures + "d" * nLabels + "\n")
writeBytes(f, "-data size %d offset 0 ndim 2 nrow %d ncol %d\n" % (dataSize, nFrames, nCols))
writeBytes(f, "-sent_table_data size %d offset %d ndim 1\n" % (nSentences + 1, dataSize))
writeBytes(f, "-end\n")
# Write data
f.seek(headerSize)
for i in range(nSentences):
for j in range(len(features[i])):
f.write(struct.pack(">2i", i, j))
f.write(struct.pack(">%df" % nFeatures, *numpy.array(features[i][j]).ravel()))
if labels is not None:
f.write(struct.pack(">%di" % nLabels, *numpy.array(labels[i][j].astype(int)).ravel()))
# Write sentence index
index = numpy.cumsum([0] + [len(x) for x in features])
f.write(struct.pack(">%di" % (nSentences + 1), *index))
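A hedged round-trip sketch, assuming writePfile above and readPfile earlier in this document are importable, and that the surrounding module's helpers (smart_open, writeBytes) accept a plain local path; "roundtrip.pfile" is an illustrative name. Features are stored as 32-bit floats, so the check below compares shapes and integer labels rather than exact float values.

import numpy

feats = [numpy.random.rand(5, 13), numpy.random.rand(3, 13)]   # 2 sentences of 13-dim frames
labs = [numpy.zeros(5, dtype=int), numpy.ones(3, dtype=int)]    # one label per frame (1D arrays allowed)
writePfile("roundtrip.pfile", feats, labs)
feats2, labs2 = readPfile("roundtrip.pfile")
assert len(feats2) == 2 and feats2[0].shape == (5, 13)
assert (labs2[0].ravel() == 0).all() and (labs2[1].ravel() == 1).all()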
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_feature_labels(output, feature_labels):\n with open(os.path.join(output, 'features.list'), 'w') as out_file:\n out_file.write('\\n'.join(feature_labels))",
"def writeFeatures(features, labels, output_filename):\n\twith open(output_filename, 'w') as csvfile:\n\t fieldnames = features[0].keys()\n\t fieldnames.append('label')\n\t writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t writer.writeheader()\n\t for i in range(len(features)):\n\t \tfeatures[i]['label'] = labels[i]\n\t \twriter.writerow(features[i])\n\n\treturn",
"def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))",
"def save_data(features, labels, mask, file_name):\n label = labels[mask]\n label = label.reshape((len(label), 1))\n data = np.concatenate((features[mask, :], label), axis = 1)\n np.save(file_name, data)",
"def save_features_to_file(path: str, features: Data_dict_type, labels: Labels_dict_type_numpy):\n for key, item in features.items():\n filename = key\n values, sample_rate = item\n window_labels = labels[filename].reshape((-1, 1))\n concatenated_data = np.concatenate(\n [np.array([i for i in range(values.shape[0])])[..., np.newaxis], # window_idx\n values, # features\n window_labels], axis=-1) # labels\n df_to_save = pd.DataFrame(data=concatenated_data)\n columns = ['window_idx'] + ['feature_%i' % i for i in range(values.shape[-1])] + ['label']\n df_to_save.columns = columns\n df_to_save.to_csv(os.path.join(path, filename.split('.')[0] + '.csv'), index=False)",
"def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))",
"def write_label_file(labels_to_class_names, labels_filename):\n with tf.gfile.Open(labels_filename, \"w\") as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n'%(label, class_name))",
"def dump_slice_dataset(X: csr_matrix,\n y: csr_matrix,\n feat_file: Union[str, TextIOWrapper],\n label_file: Union[str, TextIOWrapper]) -> None:\n if isinstance(feat_file, str):\n feat_file = open(feat_file, 'w')\n elif isinstance(feat_file, TextIOWrapper):\n pass\n else:\n raise TypeError(f'feature_file is type {type(feat_file)} but should be either str or TextIOWrapper')\n\n if isinstance(label_file, str):\n label_file = open(label_file, 'w')\n elif isinstance(label_file, TextIOWrapper):\n pass\n else:\n raise TypeError(f'label_file is type {type(label_file)} but should be either str or TextIOWrapper')\n\n if X.shape[0] != y.shape[0]:\n raise Exception('X and y must have same shape')\n\n # 1. create sparse label file\n # format:\n # The first line of both the files contains the number of rows\n # the label file contains indices of active labels\n # and the corresponding value (always 1 in this case) starting from 0\n\n # write header\n label_header = f'{y.shape[0]} {y.shape[1]}\\n'\n label_file.write(label_header)\n # write data\n for label_vector in y:\n label_idx = label_vector.nonzero()[1]\n line = f'{\" \".join([f\"{label_id}:1\" for label_id in map(str, label_idx)])}\\n'\n label_file.write(line)\n\n label_file.close()\n\n # 2. create dense feature file\n # format:\n # The first line of both the files contains the number of rows\n # For features, each line contains D (the dimensionality of the feature vectors), space separated, float values\n\n # write header\n feature_header = f'{X.shape[0]} {X.shape[1]}\\n'\n feat_file.write(feature_header)\n # write data\n for feature_vector in X:\n line = f'{\" \".join(map(str, [i if i > 0.0 else int(0) for i in feature_vector[0].toarray().ravel()]))}\\n'\n feat_file.write(line)\n\n feat_file.close()\n\n return",
"def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')",
"def write_label(filename, label, verbose=None):\n\n with open(filename, 'wb') as fid:\n n_vertices = len(label.vertices)\n data = np.zeros((n_vertices, 5), dtype=np.float)\n data[:, 0] = label.vertices\n data[:, 1:4] = label.coords # self.pos #1e3 *\n data[:, 4] = label.values\n fid.write(b(\"#%s\\n\" % label.comment))\n fid.write(b(\"%d\\n\" % n_vertices))\n for d in data:\n fid.write(b(\"%d %f %f %f %f\\n\" % tuple(d)))\n return label",
"def write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):\n labels_filename = os.path.join(dataset_dir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))",
"def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(feat_file_name, label_file_name), color.CYAN)\n\n #Save feature vector to disk\n with open(feat_file_name, 'w') as f:\n pickle.dump(feat_vec, f)\n #Save label file\n with open(label_file_name, 'w') as f:\n pickle.dump(labels, f)",
"def write_labels_file(labels_to_class_names, dataset_dir,\n filename='labels.txt'):\n labels_path = os.path.join(dataset_dir, filename)\n with open(labels_path, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))",
"def write_metadata(filename, labels):\n with open(filename, 'w') as f:\n f.write(\"Index\\tLabel\\n\")\n for index, label in enumerate(labels):\n f.write(\"{}\\t{}\\n\".format(index, label))\n\n print('Metadata file saved in {}'.format(filename))\n return True\n\n\n \"\"\"\n Helper functions for sampled attack\n and ellipse attack\n \"\"\"\n\n def calc_X_featurized_star(sess, model, y_train, x_train, num_samples_perturb, num_samples_ellipse, display_step = 1):\n A_list = []\n b_list = []\n x_featurized_star = []\n for (idx, x_i) in enumerate(x_train):\n if idx % display_step == 0:\n print(\"Training point number %d\" % idx)\n perturbed_x_i = random_perturbation(x_i, eps = eps_train, num_samples = num_samples_perturb)\n featurized_perturbed_x = model.get_activation(sess, perturbed_x_i)[-2]\n A_i, b_i = learn_constraint_setV2(featurized_perturbed_x)\n A_list.append(A_i)\n b_list.append(b_i)\n x_i_star = solve_inner_opt_problem(sess, model, y_train[idx], num_samples_ellipse, A_i, b_i)\n x_featurized_star.append(x_i_star)\n return np.array(x_featurized_star)",
"def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))",
"def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))",
"def write_file(self, file_path, ids, X_texts, y_probs, y_labels, verbose=False):\n frame_list = []\n for id_, X_text, y_label, y_probs in zip(ids, X_texts, y_labels, y_probs):\n if verbose:\n row = [id_, \" \".join(X_text), y_label] + list(y_probs)\n columns = [u\"id\", u\"text\", u\"label\"] + list(self.label_encoder.classes_)\n else:\n row = [id_] + list(y_probs)\n columns = [\"id\"] + list(self.label_encoder.classes_)\n frame_list.append(row)\n\n data_frame = pd.DataFrame(frame_list, columns=columns)\n\n logger.info(\"Writing predictions to file '%s'.\" % file_path)\n data_frame.to_csv(file_path, encoding=\"utf-8\", index=False, quoting=csv.QUOTE_NONNUMERIC)",
"def SaveLabels(filepath, labels):\n # 1) Create a string with all the text to be stored\n text = '\\n'.join(labels)\n\n # 2) Open the datafile and save the text\n with open(filepath, 'w') as outfile:\n outfile.write(text)",
"def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! ******\\n \")",
"def write_labels_txt(labels: pd.DataFrame, path: str):\n\n # If the file containing the labels already exist, delete it\n if os.path.isfile(path):\n print('\\nA labels file already exists at {}, deleting it...'.format(path))\n os.remove(path)\n\n # Write the names of the labels on a txt\n labels.to_csv(path, header=None, index=None, sep=' ', mode='a')\n\n print('\\nThe labels file has been written at', path)",
"def create_output_csv(labels, filename):\n\n keyframe_ind = [labels[i] != labels[i-1] for i, val in enumerate(labels)]\n keyframe_idxs = [i for i, val in enumerate(keyframe_ind) if val==True]\n keyframe_filenames = [\"%06d\" % (i+1) + \".jpg\" for i, val in enumerate(keyframe_ind) if val==True]\n keyframe_scenes = labels[keyframe_idxs]\n keyframe_scenes_ascii = [string.ascii_lowercase[i] for i in keyframe_scenes]\n result = pd.DataFrame([keyframe_filenames, keyframe_scenes_ascii]).transpose()\n result.columns = ['keyframe', 'scene id']\n filepath = os.getcwd()\n result.to_csv(filepath + '/' + filename)",
"def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')",
"def save_feature(ndarray, feature_name, out_path, x, y, new_labels, filename=None):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name)\n np.save(out_path / filename, ndarray)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename",
"def _write_input(\n self,\n X: List[str],\n labels: Optional[Union[List[str], List[List[str]]]],\n input_path: Path,\n ):\n df = pd.DataFrame({\"Text\": X})\n\n if labels is not None:\n df[\"Label\"] = labels\n\n df.to_csv(input_path, sep=\"\\t\", index=False)",
"def write_processed_data_to_file(labeled_texts: List[Tuple[list, str]], file):\n\n try:\n for text, label in labeled_texts:\n output_tagged_sents(text, out=file)\n print(f'#{label}#', file=file)\n return True\n except IOError:\n print('Could not write to stream', file)\n return False",
"def _write_input(\n self, X: List[str], labels: Optional[List[List[str]]], input_path: Path\n ):\n df = pd.DataFrame({\"Text\": X})\n\n if labels is not None:\n df[\"Label\"] = labels\n\n df.to_csv(input_path, sep=\"\\t\", index=False)",
"def write(self, filename_inputs_out, filename_label=None, verbose=True):\n # Set filename\n if(filename_label):\n filename_inputs_out = \"inputs.dat.\" + filename_label\n else:\n filename_inputs_out = \"inputs.dat\"\n\n if(verbose):\n lal_cuda.log.open(\"Writing inputs to '%s'...\" % (filename_inputs_out), end='')\n with open(filename_inputs_out, \"wb\") as inputs_file:\n self.np_floats().tofile(inputs_file)\n self.np_ints().tofile(inputs_file)\n self.freqs.tofile(inputs_file)\n if(verbose):\n lal_cuda.log.close(\"Done.\")",
"def save_labels_to_disk(labels: list, label_path: str):\n\n with open(label_path, \"w\") as result_file:\n wr = csv.writer(result_file, dialect=\"excel\")\n wr.writerows(labels)",
"def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))",
"def write_hdf5(data, labels, output_filename):\n\n x = data.astype(numpy.float32)\n y = labels.astype(numpy.float32)\n\n with h5py.File(output_filename, 'w') as h:\n h.create_dataset('data', data=x, shape=x.shape)\n h.create_dataset('label', data=y, shape=y.shape)\n # h.create_dataset()"
] |
[
"0.7423558",
"0.7189319",
"0.6823785",
"0.67402846",
"0.661547",
"0.6509026",
"0.6493378",
"0.6491411",
"0.6418999",
"0.6394735",
"0.6366539",
"0.63443375",
"0.63089496",
"0.6292406",
"0.62670076",
"0.62670076",
"0.6166569",
"0.61093026",
"0.59354526",
"0.5918788",
"0.58827",
"0.5872113",
"0.5861292",
"0.58248085",
"0.57957953",
"0.57948613",
"0.5792119",
"0.5785511",
"0.57258904",
"0.5710434"
] |
0.7796732
|
0
|
Builds a phi (longitudinal) spectrogram from a sanitized particle data structure.
|
def mms_pgs_make_phi_spec(data_in, resolution=32):
data = data_in.copy()
n_phi = resolution
    # zero the data in inactive bins (bins == 0) so they do not contribute to the bin averages
zero_bins = np.argwhere(data['bins'] == 0)
if zero_bins.size != 0:
for item in zero_bins:
data['data'][item[0], item[1]] = 0.0
ave = np.zeros(n_phi)
bin_size = 360.0/n_phi
outbins = np.arange(0, 361, bin_size)
phi_flat = data['phi'].flatten()
data_flat = data['data'].flatten()
bins_flat = data['bins'].flatten()
for bin_idx in range(0, len(outbins)-1):
this_bin = np.argwhere((phi_flat >= outbins[bin_idx]) & (phi_flat < outbins[bin_idx+1]))
if len(this_bin) > 0:
bins = nansum(bins_flat[this_bin])
if bins != 0.0:
ave[bin_idx] += nansum(data_flat[this_bin])/bins
y = outbins[0:n_phi]+0.5*(outbins[1::]-outbins[0:n_phi])
return y, ave
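A minimal synthetic call, included only as a sketch: it assumes the module defining mms_pgs_make_phi_spec imports numpy as np and nansum (e.g. from numpy), and the toy arrays below merely mimic the shape of the sanitized particle structure (2D 'phi', 'data' and 'bins' arrays of matching shape, angles in degrees).

import numpy as np

data_in = {
    'phi':  np.array([[10.0, 100.0], [190.0, 280.0]]),   # azimuth angles in degrees
    'data': np.array([[1.0,  2.0],  [3.0,  4.0]]),
    'bins': np.array([[1, 1], [1, 0]]),                   # last look direction flagged inactive
}
y, ave = mms_pgs_make_phi_spec(data_in, resolution=4)
print(y)    # bin centers: 45, 135, 225, 315 degrees
print(ave)  # per-bin averages: 1, 2, 3, 0 (the inactive bin contributes nothing)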
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_phi(self):\n self.phi = np.empty((100, self.K))\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n self.phi[i][j] = np.exp(-self.gamma * distance.euclidean(point, center) ** 2)\n self.phi = np.concatenate((self.phi, np.ones((100, 1))), axis=1)",
"def phi(nbins, p, t, x):\n\n phase = calc_phase(p, t)\n x_means, phase_bs, Ns, sj2s, xb, pb = phase_bins(nbins, phase, x)\n total_binned_variance_s2 = s2(Ns, sj2s, nbins)\n total_variance = sj2(x, np.mean(x), len(x))\n\n return total_binned_variance_s2/total_variance",
"def make_phase(mag, omega, phi, samples, end_time):\n\n array_time = np.linspace(0, end_time, samples)\n\n x = omega * array_time + phi\n\n return to_complex(mag, x), array_time",
"def create_flat_phase(dim, phi_0):\n return np.ones((dim, dim)) * phi_0",
"def phi(N60):\n return 27.1 + 0.3*N60 - 0.00054* N60**2",
"def prepare_phi(phi):\n phi[phi == 0.0] = 1.0e-10 * phi[phi > 0.0].min().min()\n return phi",
"def __init__(self):\n self.pidDict = { # particle_name, pid\n \"total\" : 0,\n \"charged\" : 1,\n \"charged_eta\" : 2,\n \"pion\" : 6, # sum(7, 8, -7)\n \"pion_p\" : 7,\n \"pion_0\" : 8,\n \"pion_m\" : -7,\n \"kaon\" : 11, # sum(12, 13)\n \"kaon_p\" : 12,\n \"kaon_0\" : 13,\n \"anti_kaon\" : -11, # sum(-12, -13)\n \"kaon_m\" : -12,\n \"anti_kaon_0\" : -13,\n \"nucleon\" : 16, # sum(17, 18)\n \"proton\" : 17,\n \"neutron\" : 18,\n \"anti_nucleon\" : -16, # sum(-17, -18)\n \"anti_proton\" : -17,\n \"anti_neutron\" : -18,\n \"sigma\" : 21, # sum(22, 23, 24)\n \"sigma_p\" : 22,\n \"sigma_0\" : 23,\n \"sigma_m\" : 24,\n \"anti_sigma\" : -21,\n \"anti_sigma_p\" : -22,\n \"anti_sigma_0\" : -23,\n \"anti_sigma_m\" : -24,\n \"xi\" : 26, # sum(27, 28)\n \"xi_0\" : 27,\n \"xi_m\" : 28,\n \"anti_xi\" : -26,\n \"anti_xi_0\" : -27,\n \"anti_xi_m\" : -28,\n \"lambda\" : 31,\n \"anti_lambda\" : -31,\n \"omega\" : 36,\n \"anti_omega\" : -36,\n \"phi\" : 41,\n \"rho\" : 46, #sum(47, 48, -47)\n \"rho_p\" : 47,\n \"rho_0\" : 48,\n \"rho_m\" : -47,\n \"eta\" : 51,\n \"eta_prime\" : 52,\n \"gamma\" : 61,\n \"omega782\" : 65,\n \"eta\" : 71,\n \"etap\" : 72,\n }\n\n for aParticle in self.pidDict.keys():\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]+1000\n else:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]-1000\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]+2000\n else:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]-2000\n\n self.pidDict.update({\n \"photon_total\" : 9000,\n \"photon_total_eq\" : 9001,\n \"photon_QGP_tot\" : 9002,\n \"photon_QGP_eq\" : 9003,\n \"photon_HG_tot\" : 9004,\n \"photon_HG_eq\" : 9005,\n \"direct_gamma_shortdecay_hydro\" : 9006,\n \"decay_gamma_pi0_hydro\" : 9007,\n \"decay_gamma_eta_hydro\" : 9008,\n \"decay_gamma_omega_hydro\" : 9009,\n \"decay_gamma_phi_hydro\" : 9010,\n \"decay_gamma_etap_hydro\" : 9011,\n \"decay_gamma_Sigma0_hydro\" : 9012,\n })\n\n #UrQMD pid Dictionary, name conversion defined as in binUtility\n self.UrQMDpidDict = { #particle name, UrQMD id# : isospin*2000 + pid\n 2101 : \"pion_p\",\n -1899 : \"pion_m\",\n 101 : \"pion_0\",\n 1106 : \"kaon_p\",\n -894 : \"kaon_0\",\n -1106 : \"kaon_m\",\n 894 : \"anti_kaon_0\",\n 1001 : \"proton\",\n -999 : \"neutron\",\n -1001 : \"anti_proton\",\n 999 : \"anti_neutron\",\n 2040 : \"sigma_p\",\n -1960 : \"sigma_m\",\n 40 : \"sigma_0\",\n -2040 : \"anti_sigma_p\",\n 1960 : \"anti_sigma_m\",\n -40 : \"anti_sigma_0\",\n 1049 : \"xi_0\",\n -951 : \"xi_m\",\n -1049 : \"anti_xi_0\",\n 951 : \"anti_xi_m\",\n 27 : \"lambda\",\n -27 : \"anti_lambda\",\n 55 : \"omega\",\n -55 : \"anti_omega\",\n 109 : \"phi\",\n 102 : \"eta\",\n 107 : \"eta_prime\",\n 100 : \"gamma\",\n }\n\n #pdg pid Dictionary\n self.PDGpidDict = { #pdg id#, particle name\n 211 : \"pion_p\",\n -211 : \"pion_m\",\n 111 : \"pion_0\",\n 321 : \"kaon_p\",\n 311 : \"kaon_0\",\n -321 : \"kaon_m\",\n -311 : \"anti_kaon_0\",\n 2212 : \"proton\",\n 2112 : \"neutron\",\n -2212 : \"anti_proton\",\n -2112 : \"anti_neutron\",\n 3222 : \"sigma_p\",\n 3112 : \"sigma_m\",\n 3212 : \"sigma_0\",\n -3222 : \"anti_sigma_p\",\n -3112 : \"anti_sigma_m\",\n -3212 : \"anti_sigma_0\",\n 3322 : \"xi_0\",\n 3312 : \"xi_m\",\n -3322 : \"anti_xi_0\",\n -3312 : \"anti_xi_m\",\n 3122 : \"lambda\",\n -3122 : \"anti_lambda\",\n 3334 : \"omega\",\n -3334 : \"anti_omega\",\n 333 : \"phi\",\n 221 : \"eta\",\n 331 : \"eta_prime\",\n 22 : \"gamma\",\n }\n\n #particle 
mass Dictionary (unit in GeV)\n self.masspidDict = {\n \"pion\" : 0.13957,\n \"pion_p\" : 0.13957,\n \"pion_0\" : 0.13498,\n \"pion_m\" : 0.13957,\n \"kaon\" : 0.49368,\n \"kaon_p\" : 0.49368,\n \"kaon_0\" : 0.49765,\n \"anti_kaon\" : 0.49368,\n \"kaon_m\" : 0.49368,\n \"anti_kaon_0\" : 0.49765,\n \"nucleon\" : 0.93827,\n \"proton\" : 0.93827,\n \"neutron\" : 0.93957,\n \"anti_nucleon\" : 0.93827,\n \"anti_proton\" : 0.93827,\n \"anit_neutron\" : 0.93957,\n \"sigma\" : 1.18937,\n \"sigma_p\" : 1.18937,\n \"sigma_0\" : 1.19264,\n \"sigma_m\" : 1.19745,\n \"anti_sigma\" : 1.18937,\n \"anti_sigma_p\" : 1.18937,\n \"anti_sigma_0\" : 1.19264,\n \"anti_sigma_m\" : 1.19745,\n \"xi\" : 1.31483,\n \"xi_0\" : 1.31483,\n \"xi_m\" : 1.32131,\n \"anti_xi\" : 1.31483,\n \"anti_xi_0\" : 1.31483,\n \"anti_xi_m\" : 1.32131,\n \"lambda\" : 1.11568,\n \"anti_lambda\" : 1.11568,\n \"omega\" : 1.67243,\n \"anti_omega\" : 1.67243,\n \"rho\" : 0.77580,\n \"rho_p\" : 0.77580,\n \"rho_0\" : 0.77580,\n \"rho_m\" : 0.77580,\n \"phi\" : 1.01946,\n \"eta\" : 0.54775,\n \"eta_prime\" : 0.95778,\n \"gamma\" : 0.0,\n }\n for aParticle in self.masspidDict.keys():\n self.masspidDict[aParticle+\"_hydro\"] = self.masspidDict[aParticle]\n self.masspidDict[aParticle+\"_thermal\"] = self.masspidDict[aParticle]\n\n # charged hadrons list\n self.charged_hadron_list = [\n \"pion_p\", \"pion_m\", \"kaon_p\", \"kaon_m\", \"proton\", \"anti_proton\",\n \"sigma_p\", \"sigma_m\", \"anti_sigma_p\", \"anti_sigma_m\",\n \"xi_m\", \"anti_xi_m\"]",
"def estimatePhiQuad(self,powerTT=None,callback=\"camb_dimensionless\",noise_keys=None,lmax=3500,filtering=None):\n\n\t\t#Compute Phi FFT, invert the FFT\n\t\tphifft = self.estimatePhiFFTQuad(powerTT,callback,noise_keys,lmax,filtering)\n\t\tphi = fftengine.ifft2(phifft)\n\n\t\t#Return\n\t\treturn PhiMap(phi.real,angle=self.side_angle,unit=u.rad**2)",
"def mel_spectrogram(self, y):\n # assert(torch.min(y.data) >= -1)\n # assert(torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n # magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return mel_output",
"def _make_phi(self, F):\n scaled_bins_left = tf.concat(\n [self.bin_edges / self.sigma,\n np.array([np.inf])], 0)\n scaled_bins_right = tf.concat(\n [np.array([-np.inf]), self.bin_edges / self.sigma], 0)\n return inv_probit(scaled_bins_left - tf.reshape(F, (-1, 1)) / self.sigma) \\\n - inv_probit(scaled_bins_right - tf.reshape(F, (-1, 1)) / self.sigma)",
"def phi_t(self):\n\t\tdim = self.dim\n\t\ttim_all = self.tim_all \n\t\tphi_all = np.zeros((tim_all+1,dim,1),dtype = complex)\n\t\tphi_all[0,:,:] = self.phi_i[:]\n\t\tu_all = self.u_t()\n\n\t\tfor tim in xrange(tim_all):\n\t\t\tphi_all[tim+1,:,:] = np.dot(u_all[tim+1,:,:], phi_all[0,:,:])\n\t\t\n\t\treturn phi_all",
"def create_spectrogram(audio_file, sampling_rate = 44100):\n\n #print(audio_file)\n S, freqs, times = mlab.specgram(audio_file, NFFT=4096, Fs=sampling_rate,window=mlab.window_hanning,noverlap=2048)\n \n #print(S.shape)\n return S",
"def phi(self):\n if self._phi is None:\n b = self.stem + self.counter + self.radius + (\n self.stem - 2 * self.radius) - self.overlap\n c = sqrt(self.a ** 2 + b ** 2)\n _phi = atan2(self.a, b)\n b = sqrt(c ** 2 - self.radius ** 2)\n _phi += atan2(self.radius, b)\n self._phi = _phi\n return self._phi",
"def estimatePhiFFTQuad(self,powerTT=None,callback=\"camb_dimensionless\",noise_keys=None,lmax=3500,filtering=None):\n\n\t\t#CMB lensing routines \n\t\tqlens = Lens()\n\t\t\n\t\t#Perform potential estimation with the quadratic estimator (pass the temperature values in uK)\n\t\tphifft = qlens.phiTT(fftengine.fft2(self.data)*self.unit.to(u.uK),self.side_angle,powerTT,callback,noise_keys,lmax,filtering)\n\n\t\t#Return\n\t\treturn phifft",
"def _build_parsed_values(self):\n\n # \n # Generate a velocity data particle.\n # Note that raw_data already contains the individual fields\n # extracted and unpacked from the velocity data record.\n #\n global flags\n particle = []\n field = 0\n for flag in range(0, FLAG_RECORD_SIZE):\n #\n # If the flags indicated that this field is to be expected,\n # store the next unpacked value into the data particle.\n #\n key = VEL3D_PARAMETERS[flag][INDEX_KEY]\n if flags[flag]:\n if flag == INDEX_FLAG_Time:\n #\n # This returns a tuple, but particle wants a list.\n #\n time_array = self.raw_data[field:field + OUTPUT_TIME_SIZE]\n\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: list(time_array)})\n field += OUTPUT_TIME_SIZE\n else:\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: self.raw_data[field]})\n field += 1\n\n #\n # If flags indicate that this field is not present,\n # output a value of None.\n #\n else:\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: None})\n\n return particle",
"def phiprime_phi(phi):\n f = 0.0022927\n phiprime = np.arctan2(np.tan(phi*np.pi/180.),(1.-f)**2.)*180./np.pi\n return phiprime",
"def mel_spectrogram(self, y):\n if isinstance(y, np.ndarray):\n y = torch.from_numpy(y).float()\n y = y.unsqueeze(0)\n y = torch.autograd.Variable(y, requires_grad=False)\n\n assert (torch.min(y.data) >= -1)\n assert (torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return torch.squeeze(mel_output, 0).detach().cpu().numpy().T",
"def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)",
"def get_phase_law(N, d, wavelength, phi):\r\n phase_law = []\r\n for n in range(N):\r\n phase_law.append(-2 * np.pi * n * d / wavelength * np.sin(phi))\r\n return phase_law",
"def phi(U, n):\n phi_params = n['phi']\n return phi_params['r_max'] / (1 + np.exp(-phi_params['beta'] * (U - phi_params['alpha'])))",
"def compute_melgram(audio_path):\n\n mg_path = audio_path + '-mg.npy'\n\n if os.path.exists(mg_path):\n return np.load(mg_path)\n\n print('computing mel-spetrogram for audio: ', audio_path)\n\n # mel-spectrogram parameters\n sampling_rate = 12000\n n_fft = 512\n n_mels = 96\n hop_length = 256\n duration_in_seconds = 29.12 # to make it 1366 frame (1366 = 12000 * 29.12 / 256)\n\n src, sr = librosa.load(audio_path, sr=sampling_rate) # whole signal\n n_sample = src.shape[0]\n n_sample_fit = int(duration_in_seconds * sampling_rate)\n\n if n_sample < n_sample_fit: # if too short\n src = np.hstack((src, np.zeros((int(duration_in_seconds * sampling_rate) - n_sample,))))\n elif n_sample > n_sample_fit: # if too long\n src = src[(n_sample - n_sample_fit) // 2:(n_sample + n_sample_fit) // 2]\n logam = librosa.core.amplitude_to_db\n melgram = librosa.feature.melspectrogram\n ret = logam(melgram(y=src, sr=sampling_rate, hop_length=hop_length,\n n_fft=n_fft, n_mels=n_mels) ** 2,\n ref=1.0)\n ret = np.expand_dims(ret, axis=2)\n\n np.save(mg_path, ret)\n\n return ret",
"def prosody_static(self, audio, plots):\n fs, data_audio = read(audio)\n\n if len(data_audio.shape)>1:\n data_audio = data_audio.mean(1)\n data_audio = data_audio-np.mean(data_audio)\n data_audio = data_audio/float(np.max(np.abs(data_audio)))\n size_frameS = self.size_frame*float(fs)\n size_stepS = self.step*float(fs)\n thr_len_pause = self.thr_len*float(fs)\n\n if self.pitch_method == 'praat':\n name_audio = audio.split('/')\n temp_uuid = 'prosody'+name_audio[-1][0:-4]\n if not os.path.exists(PATH+'/../tempfiles/'):\n os.makedirs(PATH+'/../tempfiles/')\n temp_filename_f0 = PATH+'/../tempfiles/tempF0'+temp_uuid+'.txt'\n temp_filename_vuv = PATH+'/../tempfiles/tempVUV'+temp_uuid+'.txt'\n praat_functions.praat_vuv(audio, temp_filename_f0, temp_filename_vuv,\n time_stepF0=self.step, minf0=self.minf0, maxf0=self.maxf0)\n\n F0, _ = praat_functions.decodeF0(\n temp_filename_f0, len(data_audio)/float(fs), self.step)\n os.remove(temp_filename_f0)\n os.remove(temp_filename_vuv)\n elif self.pitch_method == 'rapt':\n data_audiof = np.asarray(data_audio*(2**15), dtype=np.float32)\n F0 = pysptk.sptk.rapt(data_audiof, fs, int(\n size_stepS), min=self.minf0, max=self.maxf0, voice_bias=self.voice_bias, otype='f0')\n\n segmentsV = V_UV(F0, data_audio, type_seg=\"Voiced\",\n size_stepS=size_stepS)\n segmentsUP = V_UV(F0, data_audio, type_seg=\"Unvoiced\",\n size_stepS=size_stepS)\n\n segmentsP = []\n segmentsU = []\n for k in range(len(segmentsUP)):\n if (len(segmentsUP[k]) > thr_len_pause):\n segmentsP.append(segmentsUP[k])\n else:\n segmentsU.append(segmentsUP[k])\n\n F0_features = F0feat(F0)\n energy_featuresV = energy_feat(segmentsV, fs, size_frameS, size_stepS)\n energy_featuresU = energy_feat(segmentsU, fs, size_frameS, size_stepS)\n duration_features = duration_feat(\n segmentsV, segmentsU, segmentsP, data_audio, fs)\n\n if plots:\n self.plot_pros(data_audio, fs, F0, segmentsV,\n segmentsU, F0_features)\n\n features = np.hstack(\n (F0_features, energy_featuresV, energy_featuresU, duration_features))\n\n return features",
"def phi(self):\n return (np.sum(self.diameters**self.ndim)*np.pi / (2*self.ndim))",
"def create_spec(data, fs, n_mels=32, n_fft=2048, hop_len=1024):\r\n # Calculate spectrogram\r\n S = librosa.feature.melspectrogram(\r\n data, sr=fs, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels)\r\n S = S.astype(np.float32)\r\n\r\n # Convert power to dB\r\n S = librosa.power_to_db(S)\r\n\r\n return S",
"def _build_parsed_values(self):\n\n # \n # Generate a time data particle.\n # Note that raw_data already contains the individual fields\n # extracted and unpacked from the time data record.\n #\n particle = [\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.TIME_ON, \n DataParticleKey.VALUE: self.raw_data[INDEX_TIME_ON]\n },\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.TIME_OFF,\n DataParticleKey.VALUE: self.raw_data[INDEX_TIME_OFF]\n },\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.NUMBER_OF_RECORDS, \n DataParticleKey.VALUE: self.raw_data[INDEX_RECORDS]\n }\n ]\n\n return particle",
"def mel_spectrogram(self, y):\n assert torch.min(y.data) >= -1\n assert torch.max(y.data) <= 1\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n energy = torch.norm(magnitudes, dim=1)\n return mel_output, energy",
"def create_phi(self, T, pos, m, alpha=1):\n phi = tr.zeros((len(T), self.tag_size()), dtype=tr.float64)\n for i, _, _ in T:\n m_i = self.get_tag_index(m[i - 1])\n phi[i - 1, m_i] = alpha\n return phi",
"def spot1d_phi(infile, sequence):\n return np.loadtxt(infile, usecols=10, skiprows=1).reshape((1, -1, 1))",
"def _create_phi_data(training_data, test_data):\n _METRICS = ['vmsram', 'tasks', 't_rscthnetno', 't_rscthhfsrb', 'c_ucpupct']\n phi_training_data = {}\n phi_test_data = {}\n\n # Iterate and compute arccos of each time series in training and test data\n for key in training_data.keys():\n if key in _METRICS:\n phi_training_data[key] = np.arccos(training_data[key])\n phi_test_data[key] = np.arccos(test_data[key])\n else:\n phi_training_data[key] = training_data[key]\n phi_test_data[key] = test_data[key]\n\n return phi_training_data, phi_test_data",
"def _phi(self, x, d, p):\n ks = np.arange(self.p + 1)\n ks = ks[np.where(2 * (self.p - ks) - d >= 0)][:, np.newaxis]\n return np.sum(\n binom(self.p, ks)\n * (-1) ** ks\n * x[np.newaxis, :] ** (2 * (self.p - ks) - d)\n * perm(2 * (self.p - ks), d),\n axis=0,\n )"
] |
[
"0.5458877",
"0.53676385",
"0.5313945",
"0.5233425",
"0.5209792",
"0.5116669",
"0.50947446",
"0.5071428",
"0.50414526",
"0.50410014",
"0.50201",
"0.501828",
"0.5016945",
"0.49950805",
"0.49817097",
"0.49776",
"0.49595466",
"0.4944712",
"0.49397746",
"0.49216998",
"0.4919174",
"0.49166346",
"0.4899267",
"0.4887638",
"0.48864046",
"0.48794705",
"0.48731688",
"0.4857207",
"0.4853102",
"0.48447075"
] |
0.5455222
|
1
|
Add an attribute with an ``ajax.Update`` value
|
def add_ajax_attribute(self, name, value):
    # Generate a XHR request
    xml.add_attribute(self, name, value.generate_action(self._actions[0], self.renderer))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_js_attribute(self, name, value):\n self.set(name, value.generate_action(None, self.renderer))",
"def add_method_attribute(self, name, value):\n # Transcode the method to javascript\n xml.add_attribute(self, name, ajax.JS(value))",
"def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)",
"def add_function_attribute(self, name, value):\n # Transcode the function to javascript\n xml.add_attribute(self, name, ajax.JS(value))",
"def attribute_updated(self, attrid: int, value: Any, _: Any) -> None:\n attr_name = self._get_attribute_name(attrid)\n self.debug(\n \"Attribute report '%s'[%s] = %s\", self.cluster.name, attr_name, value\n )\n if attr_name == \"fan_mode\":\n self.async_send_signal(\n f\"{self.unique_id}_{SIGNAL_ATTR_UPDATED}\", attrid, attr_name, value\n )",
"def add_attribute(self, attr):\n self.add(attr)",
"def add_attribute(self, attr):\n self.attrs.add_attribute(attr)",
"def OnAttributesUpdated():\n pass",
"def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})",
"def add_attribute(obj, name, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, name, value)",
"def add_attribute(self, attr):\n self.attrs.add(attr)",
"def update_attr(attr, new_attr):\n if attr is None:\n attr = {}\n elif attr is str:\n attr = json.loads(attr)\n if isinstance(attr, dict):\n attr.update(new_attr)\n return json.dumps(attr)\n return attr",
"def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)",
"def add_attribute(obj, name, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)",
"def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass",
"def attribute_updated(self, attrid: int, value: Any, _: Any) -> None:\n attr_name = self._get_attribute_name(attrid)\n self.debug(\n \"Attribute report '%s'[%s] = %s\", self.cluster.name, attr_name, value\n )\n self.async_send_signal(\n f\"{self.unique_id}_{SIGNAL_ATTR_UPDATED}\",\n AttributeUpdateRecord(attrid, attr_name, value),\n )",
"def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def set_attribute(self, attribute, value) -> None:\n logging.info(f\"setting element attribute. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.setAttribute(\"{attribute}\", \"{value}\");\n \"\"\"\n self._execute_javascript(js)",
"def add_update_function(self, update_function):\n self._update_function = update_function\n if self._update_function is not None:\n self.index_wid.add_update_function(self._update_function)",
"def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)",
"def add_request_attribute(self, attr, name=None, decorator=None,\n reify=False):\n if not name:\n if hasattr(attr, '__name__'):\n name = attr.__name__\n elif isinstance(attr, property):\n name = attr.fget.__name__\n if not name:\n raise ValueError(\n 'attribute of type {} requires a name'.format(attr.__class__))\n if callable(attr):\n if decorator:\n attr = decorator(attr)\n if reify:\n attr = tangled.decorators.cached_property(attr)\n elif decorator or reify:\n raise ValueError(\"can't decorate a non-callable attribute\")\n self.register('dynamic_request_attr', attr, name)",
"def do_update(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n args = arg.split(\" \")\n if len(args) < 3:\n print(\"** attribute name missing **\")\n return\n if len(args) < 4:\n print(\"** value missing **\")\n return\n setattr(obj, args[2], args[3])\n obj.save()",
"def addattribute(self, uid, field, value):\n\n raise NotImplementedError",
"def update(self, attribute: str, result: ProcessorResult) -> None:\n pass"
] |
[
"0.68151414",
"0.6515366",
"0.6233622",
"0.6075469",
"0.5787591",
"0.57643515",
"0.5748664",
"0.5742068",
"0.5690859",
"0.5606841",
"0.56038415",
"0.5575926",
"0.55740505",
"0.5551931",
"0.554361",
"0.5536292",
"0.55248076",
"0.55197036",
"0.5480152",
"0.5480152",
"0.5480152",
"0.5480152",
"0.5480152",
"0.54576755",
"0.5415222",
"0.53981423",
"0.53889257",
"0.5382734",
"0.53625876",
"0.5349076"
] |
0.7841153
|
0
|
Add an attribute with a function value
|
def add_function_attribute(self, name, value):
    # Transcode the function to javascript
    xml.add_attribute(self, name, ajax.JS(value))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_attrib(self, key, func, func_args):\n if key in self.aux_attrib:\n raise KeyError(\"Attribute '{0}' already exists, please use 'set_attrib'.\".format(key))\n else:\n self.set_attrib(key, func, func_args)",
"def add_function(self, func_name, *args, **kwargs):\n if len(args) > 0:\n attr = args[0]\n else:\n attr = func_name.func_name\n self._user_funcs[attr] = func_name",
"def replaces_attribute(func: Callable[..., Tuple[str]], classname: str, attr_name: str):\n Replacements._attr_rep[(classname, attr_name)] = func\n return func",
"def set_attrib(self, key, func, func_args):\n self.aux_attrib[key] = func\n self.aux_attrib_args[key] = func_args",
"def __call__(self, function: FuncStrArg):\n self._add_attr(function)\n return function",
"def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, attr):\n self.add(attr)",
"def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass",
"def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)",
"def __call__(self, function: FuncSpeechArg):\n self._add_attr(function)\n return function",
"def addattribute(self, uid, field, value):\n\n raise NotImplementedError",
"def auto_attr(func):\r\n return OneTimeProperty(func)",
"def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])",
"def addAttr(self, *args):\n return _libsbml.XMLToken_addAttr(self, *args)",
"def add_attribute(self, attr):\n self.attrs.add_attribute(attr)",
"def add_attribute(obj, name, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, name, value)",
"def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)",
"def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_method_attribute(self, name, value):\n # Transcode the method to javascript\n xml.add_attribute(self, name, ajax.JS(value))",
"def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)",
"def add_request_attribute(self, attr, name=None, decorator=None,\n reify=False):\n if not name:\n if hasattr(attr, '__name__'):\n name = attr.__name__\n elif isinstance(attr, property):\n name = attr.fget.__name__\n if not name:\n raise ValueError(\n 'attribute of type {} requires a name'.format(attr.__class__))\n if callable(attr):\n if decorator:\n attr = decorator(attr)\n if reify:\n attr = tangled.decorators.cached_property(attr)\n elif decorator or reify:\n raise ValueError(\"can't decorate a non-callable attribute\")\n self.register('dynamic_request_attr', attr, name)",
"def add_attribute(obj, name, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value",
"def append_attribute(myobj, attrib_k, val):\n vals = getattr(myobj, attrib_k, [])\n if val not in vals:\n vals.append(val)\n setattr(myobj, attrib_k, vals)",
"def add_attribute(self, key, value):\n self.attributes[key] = value",
"def add_attribute(self, attr):\n self.attrs.add(attr)",
"def add_attr(self, key: str, value):\n if key in self._attr_names():\n raise ValueError(\"Already have an attribute called '{}'\".format(key))\n self._attributes.append((key, value))",
"def _Attribute(self,t):\n # Only a limited set of globals supported\n func_dict = None\n \n # pyflamegpu singleton\n if isinstance(t.value, ast.Name):\n if t.value.id == \"pyflamegpu\":\n if t.attr in self.fgpu_attrs:\n # proceed\n self.write(\"flamegpu::\")\n self.write(t.attr)\n else:\n self.RaiseError(t, f\"Attribute '{t.attr}' does not exist in pyflamegpu object\")\n # math functions (try them in raw function call format) or constants\n elif t.value.id == \"math\":\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n # numpy types\n elif t.value.id == \"numpy\" or t.value.id == \"np\":\n # not sure how a numpy attribute would be used without function call or type hint but translate anyway \n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else: \n self.RaiseError(t, f\"Unsupported numpy type {t.attr}\")\n else:\n self.RaiseError(t, f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, \"Unsupported attribute\")",
"def add_js_attribute(self, name, value):\n self.set(name, value.generate_action(None, self.renderer))",
"def add_attribute(source, target, attribute_name):\n\n # check if attribute already exists on target\n if cmds.objExists(\"{}.{}\".format(target, attribute_name)):\n return\n\n logger.info(\"Adding {} attribute on {}\".format(attribute_name, target))\n\n # gets the given attribute_name plug attribute\n m_depend_node = get_dependency_node(source)\n m_attribute = m_depend_node.findPlug(attribute_name).attribute()\n\n # gets the addAttr command from the MFnAttribute function\n fn_attr = OpenMaya.MFnAttribute(m_attribute)\n add_attr_cmd = fn_attr.getAddAttrCmd()[1:-1]\n\n # creates the attribute on the target\n mel.eval(\"{} {}\".format(add_attr_cmd, target))"
] |
[
"0.73063284",
"0.72746456",
"0.7231678",
"0.7195455",
"0.707141",
"0.7026056",
"0.69747573",
"0.6970949",
"0.696804",
"0.69499767",
"0.6926322",
"0.6922663",
"0.691625",
"0.6868396",
"0.68668944",
"0.6850461",
"0.6821584",
"0.68176943",
"0.680651",
"0.6790069",
"0.6734101",
"0.67141426",
"0.67129886",
"0.66945964",
"0.66823965",
"0.6661638",
"0.66588885",
"0.659637",
"0.65711874",
"0.65679264"
] |
0.7730336
|
0
|
Add an attribute with a method value
|
def add_method_attribute(self, name, value):
    # Transcode the method to javascript
    xml.add_attribute(self, name, ajax.JS(value))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_attribute(self, attr):\n self.add(attr)",
"def add_attribute(self, attr):\n self.attrs.add_attribute(attr)",
"def addattribute(self, uid, field, value):\n\n raise NotImplementedError",
"def add_attrib(self, key, func, func_args):\n if key in self.aux_attrib:\n raise KeyError(\"Attribute '{0}' already exists, please use 'set_attrib'.\".format(key))\n else:\n self.set_attrib(key, func, func_args)",
"def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)",
"def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass",
"def add_attribute(self, attr):\n self.attrs.add(attr)",
"def add_function_attribute(self, name, value):\n # Transcode the function to javascript\n xml.add_attribute(self, name, ajax.JS(value))",
"def add_attr(self, key: str, value):\n if key in self._attr_names():\n raise ValueError(\"Already have an attribute called '{}'\".format(key))\n self._attributes.append((key, value))",
"def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)",
"def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def __setattr__(self, attr, value):\n self[attr] = value",
"def add_attribute(self, key, value):\n self.attributes[key] = value",
"def addAttr(self, *args):\n return _libsbml.XMLToken_addAttr(self, *args)",
"def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value",
"def add_function(self, func_name, *args, **kwargs):\n if len(args) > 0:\n attr = args[0]\n else:\n attr = func_name.func_name\n self._user_funcs[attr] = func_name",
"def add_request_attribute(self, attr, name=None, decorator=None,\n reify=False):\n if not name:\n if hasattr(attr, '__name__'):\n name = attr.__name__\n elif isinstance(attr, property):\n name = attr.fget.__name__\n if not name:\n raise ValueError(\n 'attribute of type {} requires a name'.format(attr.__class__))\n if callable(attr):\n if decorator:\n attr = decorator(attr)\n if reify:\n attr = tangled.decorators.cached_property(attr)\n elif decorator or reify:\n raise ValueError(\"can't decorate a non-callable attribute\")\n self.register('dynamic_request_attr', attr, name)",
"def add_attribute(obj, name, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, name, value)",
"def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)",
"def set_attrib(self, key, func, func_args):\n self.aux_attrib[key] = func\n self.aux_attrib_args[key] = func_args",
"def __setattr__(self, attribute, value):\n\t\tassert ltrace_func(TRACE_BASE)\n\n\t\tif attribute[0] == '_' or callable(value) \\\n\t\t\t\t\t\tor attribute in self.__class__._licorn_protected_attrs:\n\t\t\tdict.__setattr__(self, attribute, value)\n\n\t\telse:\n\t\t\tdict.__setitem__(self, attribute, value)",
"def is_attribute(method, name=None):\n if name is None:\n name = method.__name__\n method.is_attribute = True\n method.name = name\n return method",
"def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])",
"def replaces_attribute(func: Callable[..., Tuple[str]], classname: str, attr_name: str):\n Replacements._attr_rep[(classname, attr_name)] = func\n return func",
"def append_attribute(myobj, attrib_k, val):\n vals = getattr(myobj, attrib_k, [])\n if val not in vals:\n vals.append(val)\n setattr(myobj, attrib_k, vals)",
"def add_attribute(obj, name, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, name, value):\n\n self._attributes[name] = value",
"def add_attribute(self, name, value):\n\n self._attributes[name] = value",
"def add_attribute(self, name, value):\n\n self._attributes[name] = value"
] |
[
"0.72672397",
"0.70987725",
"0.6989709",
"0.69685096",
"0.69390506",
"0.69300306",
"0.6916354",
"0.68777883",
"0.6869262",
"0.6845307",
"0.68409324",
"0.6807101",
"0.67792225",
"0.6776735",
"0.6774255",
"0.6766906",
"0.6755039",
"0.6735037",
"0.6719012",
"0.67090887",
"0.67022413",
"0.6687399",
"0.6638615",
"0.66355175",
"0.6610778",
"0.6607938",
"0.66036254",
"0.6603561",
"0.6603561",
"0.6603561"
] |
0.761008
|
0
|
Add an attribute with an ``ajax.JS`` value
|
def add_js_attribute(self, name, value):
    self.set(name, value.generate_action(None, self.renderer))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_ajax_attribute(self, name, value):\n # Generate a XHR request\n xml.add_attribute(self, name, value.generate_action(self._actions[0], self.renderer))",
"def add_function_attribute(self, name, value):\n # Transcode the function to javascript\n xml.add_attribute(self, name, ajax.JS(value))",
"def add_method_attribute(self, name, value):\n # Transcode the method to javascript\n xml.add_attribute(self, name, ajax.JS(value))",
"def javascript(self, name, script, **kw):\n if callable(script):\n # Transcode the function or the method to javascript code\n script = ajax.javascript(script)\n\n if isinstance(script, ajax.JS):\n # Transcoded javascript needs a helper\n self.javascript_url('/static/nagare/pyjslib.js')\n script = script.javascript\n\n self._named_javascript.setdefault(name, (self._order, script, kw))\n self._order += 1\n return ()",
"def set_attribute(self, attribute, value) -> None:\n logging.info(f\"setting element attribute. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.setAttribute(\"{attribute}\", \"{value}\");\n \"\"\"\n self._execute_javascript(js)",
"def _javascript(self, script):\n self._anonymous_javascript.append((self._order, script))\n self._order += 1",
"def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)",
"def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, attr):\n self.attrs.add_attribute(attr)",
"def add_attribute(obj, name, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, name, value)",
"def add_attribute(self, attr):\n self.add(attr)",
"def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})",
"def add_attribute(obj, name, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_request_attribute(self, attr, name=None, decorator=None,\n reify=False):\n if not name:\n if hasattr(attr, '__name__'):\n name = attr.__name__\n elif isinstance(attr, property):\n name = attr.fget.__name__\n if not name:\n raise ValueError(\n 'attribute of type {} requires a name'.format(attr.__class__))\n if callable(attr):\n if decorator:\n attr = decorator(attr)\n if reify:\n attr = tangled.decorators.cached_property(attr)\n elif decorator or reify:\n raise ValueError(\"can't decorate a non-callable attribute\")\n self.register('dynamic_request_attr', attr, name)",
"def add_attribute(self, attr):\n self.attrs.add(attr)",
"def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)",
"def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def add_attribute(self, subject_id, id=None, value=None):",
"def addattribute(self, uid, field, value):\n\n raise NotImplementedError",
"def wrap_javascript_in_anonfct(js_data):\n return \"(function(){\"+js_data+\"})()\"",
"def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]",
"def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])",
"def addAttr(self, *args):\n return _libsbml.XMLToken_addAttr(self, *args)",
"def append_attribute(myobj, attrib_k, val):\n vals = getattr(myobj, attrib_k, [])\n if val not in vals:\n vals.append(val)\n setattr(myobj, attrib_k, vals)",
"def javascript_url(self, url, **kw):\n self._javascript_url.setdefault(absolute_url(url, self.static_url), (self._order, kw))\n self._order += 1\n return ()",
"def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")"
] |
[
"0.78889525",
"0.7223823",
"0.7151742",
"0.59485906",
"0.5572903",
"0.54335433",
"0.52503747",
"0.5216377",
"0.5129981",
"0.51049256",
"0.509444",
"0.50548637",
"0.5052543",
"0.5051976",
"0.5036709",
"0.49863476",
"0.49621892",
"0.49498203",
"0.49498203",
"0.49498203",
"0.49498203",
"0.49498203",
"0.49133003",
"0.4906131",
"0.48766762",
"0.483616",
"0.4819747",
"0.47547668",
"0.47352356",
"0.4716087"
] |
0.7985821
|
0
|
Convert a relative URL of a static content to an absolute one
|
def absolute_url(url, static):
    if url.startswith('#'):
        return url
    i = url.find(':')
    if ((i == -1) or not url[:i].isalpha()) and url and (url[0] != '/'):
        # If this is a relative URL, it's relative to the statics directory
        if not static.endswith('/') and not url.startswith('/'):
            url = static + '/' + url
        else:
            url = static + url
    return url
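A quick usage sketch of the ``absolute_url`` helper above (the calls and expected results are illustrative additions, not part of the dataset, and assume the definition just shown): fragments, already-absolute paths and URLs carrying a scheme pass through unchanged, while everything else is resolved against the statics directory.

# Illustrative only: exercising the absolute_url() helper defined above.
print(absolute_url('css/app.css', '/static/myapp'))       # -> '/static/myapp/css/app.css'
print(absolute_url('css/app.css', '/static/myapp/'))      # -> '/static/myapp/css/app.css'
print(absolute_url('/images/logo.png', '/static/myapp'))  # unchanged: already absolute
print(absolute_url('https://cdn.example.com/x.js', '/static/myapp'))  # unchanged: has a scheme
print(absolute_url('#top', '/static/myapp'))               # unchanged: fragment only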
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remap_static_url(original_url, course):\r\n # Ick: this should be possible without having to quote and unquote the URL...\r\n input_url = \"'\" + original_url + \"'\"\r\n output_url = replace_static_urls(\r\n input_url,\r\n getattr(course, 'data_dir', None),\r\n course_id=course.id,\r\n static_asset_path=course.static_asset_path\r\n )\r\n # strip off the quotes again...\r\n return output_url[1:-1]",
"def staticfile(path):\n normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path and getattr(settings, 'STATIC_ROOT', None):\n absolute_path = os.path.join(settings.STATIC_ROOT, path)\n if absolute_path:\n return '%s%s?v=%s' % (settings.STATIC_URL, path, os.stat(absolute_path)[stat.ST_MTIME])\n return path",
"def static(filename):\n return href.static(file=filename)",
"def _get_url(self, absolute):",
"def _update_object_content(name, input):\n content = input._content\n\n hrefs = re.compile(r'<\\s*[^\\>]*href\\s*=\\s*([\"\\'])(.*?)\\1')\n srcs = re.compile(r'<\\s*[^\\>]*src\\s*=\\s*([\"\\'])(.*?)\\1')\n\n matches = hrefs.findall(content)\n matches.extend(srcs.findall(content))\n relative_paths = []\n for found in matches:\n found = found[1]\n if found not in relative_paths:\n relative_paths.append(found)\n\n for relative_path in relative_paths:\n if not \"://\" in relative_path: # we don't want to rewrite protocols\n dest_path = os.sep.join((get_relative_path(name), \"static\",\n relative_path))\n content = content.replace(relative_path, dest_path)\n\n return content",
"def absolute_asset_url(module, path):\n return absolute_uri(get_asset_url(module, path))",
"def href(self, request) -> str:\n return request.static_path(self.url_spec)",
"def _get_file_url(path):\n return urlparse.urljoin(BASE_URL, path)",
"def static_url(self, path):\n\t\tif not hasattr(self, \"_static_hashes\"):\n\t\t\tself._static_hashes = {}\n\t\thashes = self._static_hashes\n\t\tif path not in hashes:\n\t\t\timport hashlib\n\t\t\ttry:\n\t\t\t\tf = open(os.path.join(\n\t\t\t\t\tself.application.settings[\"static_path\"], path))\n\t\t\t\thashes[path] = hashlib.md5(f.read()).hexdigest()\n\t\t\t\tf.close()\n\t\t\texcept:\n\t\t\t\tprint \"Could not open static file %r\"%path\n\t\t\t\thashes[path] = None\n\t\tbase = \"http://static.\"+_config.get(\"varnish\", \"ovzcphost\") + \"/\"\n\t\tif hashes.get(path):\n\t\t\treturn base + path + \"?v=\" + hashes[path][:5]\n\t\telse:\n\t\t\treturn base + path",
"def static_url(self, path, include_host=None, **kwargs):\n raise NotImplementedError()",
"def core_cdn_file(request, source):\n\n file_path = settings.CENTIPAIR_TEMPLATE_DIR + \"/cdn/\" + source\n source_file_url = settings.TEMPLATE_STATIC_URL + \"/\" + file_path\n return source_file_url",
"def cdn_file(request, source):\n site = request.site\n file_path = site.template_dir + \"/cdn/\" + source\n source_file_url = settings.TEMPLATE_STATIC_URL + \"/\" + file_path\n return source_file_url",
"def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)",
"def get_external_link_for_static_file(fpath=''):\r\n url_path = static(fpath)\r\n url_scheme = settings.EXTERNAL_URL_SCHEME\r\n url_host = settings.EXTERNAL_URL_HOST\r\n return f'{url_scheme}://{url_host}{url_path}'",
"def test_static_url_map_static_asset_path(self):\r\n self.make_course(pdf_textbooks=[PORTABLE_PDF_BOOK], static_asset_path='awesomesauce')\r\n url = self.make_url('pdf_book', book_index=0, chapter=1)\r\n response = self.client.get(url)\r\n self.assertNotContains(response, 'file={}'.format(PORTABLE_PDF_BOOK['chapters'][0]['url']))\r\n self.assertNotContains(response, 'file=/c4x/{0.org}/{0.course}/asset/{1}'.format(\r\n self.course.location,\r\n PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', '')))\r\n self.assertContains(response, 'file=/static/awesomesauce/{}'.format(\r\n PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', '')))",
"def get_absolute_url(path):\n if is_absolute_url(path):\n return path\n site = settings.SITES['front']\n return build_url(path, scheme=site['scheme'], domain=site['domain'])",
"def get_dirurl(self, dirpath):\n paths = dirpath.split(\"/\")\n\n return \"/\".join(paths[paths.index(\"static\"):])",
"def asset_url(filename=\"\", version=True):\n if filename.startswith(\"http\") or filename.startswith(\"/\"):\n return filename\n else:\n if config.static_url:\n return_url = \"http://\" + config.static_url\n else:\n return_url = \"/static\" # web.ctx.home + \"/static\"\n if filename:\n return_url += \"/\" + filename\n if version:\n return_url += \"?\" + config.asset_version\n return return_url",
"def _remap_static(self, stream, prefix='/static/'):\n def map_static(name, event):\n attrs = event[1][1]\n name = attrs.get(name)[len(prefix):]\n if self.static_map:\n name = self.static_map.get(name, name)\n return static(name)\n return stream | Transformer('//*[matches(@src, \"^%s\")]' % prefix).attr('src', map_static) | \\\n Transformer('//*[matches(@href, \"^%s\")]' % prefix).attr('href', map_static)",
"def _convert_file_to_url(filename, no_file_check = False):\n if no_file_check: # We already a priori know that the path is\n # correct and in its final form.\n return filename\n relpath = os.path.relpath(filename, settings.SENDFILE_ROOT)\n\n url = [settings.SENDFILE_URL]\n\n while relpath:\n relpath, head = os.path.split(relpath)\n url.insert(1, head)\n\n return u'/'.join(url) # Note: xlates from os.path.sep to '/'",
"def _fix_css_urls(self, page_instruction, css_source):\n if 'static' in page_instruction:\n template_name = page_instruction['static']\n elif 'inline' in page_instruction:\n template_name = page_instruction['inline']\n\n return cssimgreplace.relative_replace(\n css_source,\n os.path.dirname(template_name),\n self.cache_url)",
"def link_callback(self, uri, rel):\n result = finders.find(uri)\n if result:\n if not isinstance(result, (list, tuple)):\n result = [result]\n result = list(os.path.realpath(path) for path in result)\n path=result[0]\n else:\n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n return uri\n\n # make sure that file exists\n if not os.path.isfile(path):\n raise Exception(\n 'media URI must start with %s or %s' % (sUrl, mUrl)\n )\n return path",
"def get_absolute_url(base_url: str, relative_url: str) -> str:\n\n absolute_url = relative_url\n\n if absolute_url.startswith('//'):\n absolute_url = absolute_url[2:]\n\n if absolute_url.startswith('/'):\n if base_url.endswith('/'):\n base_url = base_url[:-1]\n\n absolute_url = base_url + absolute_url\n\n return absolute_url",
"def get_file_url(path, config):\n file_url_regex = re.compile(config['file_url_regex'])\n new_path = re.sub(file_url_regex, config['file_url_base'], path)\n return new_path",
"def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'",
"def get_url(f):\n return f.replace(Enums.Paths.MEDIA_BASE, Enums.Paths.WEB_BASE)",
"def replace_static_urls(data_dir, block, view, frag, context, course_id=None, static_asset_path=''): # pylint: disable=unused-argument\n return wrap_fragment(frag, static_replace.replace_static_urls(\n frag.content,\n data_dir,\n course_id,\n static_asset_path=static_asset_path\n ))",
"def url(self, name):\n if self.base_url is None:\n raise ValueError(\"This file is not accessible via a URL.\")\n url = filepath_to_uri(name)\n if url is not None:\n url = url.lstrip('/')\n return urljoin(self.base_url, url)",
"def create_absolute_url(path: str) -> str:\n domain = settings.ALLOWED_HOSTS[0]\n return \"https://{domain}{path}\".format(domain=domain, path=path)",
"def convert_legacy_static_url_with_course_id(path, course_id):\r\n # Generate url of urlparse.path component\r\n scheme, netloc, orig_path, params, query, fragment = urlparse(path)\r\n loc = StaticContent.compute_location(course_id, orig_path)\r\n loc_url = loc.to_deprecated_string()\r\n\r\n # parse the query params for \"^/static/\" and replace with the location url\r\n orig_query = parse_qsl(query)\r\n new_query_list = []\r\n for query_name, query_value in orig_query:\r\n if query_value.startswith(\"/static/\"):\r\n new_query = StaticContent.compute_location(\r\n course_id,\r\n query_value[len('/static/'):],\r\n )\r\n new_query_url = new_query.to_deprecated_string()\r\n new_query_list.append((query_name, new_query_url))\r\n else:\r\n new_query_list.append((query_name, query_value))\r\n\r\n # Reconstruct with new path\r\n return urlunparse((scheme, netloc, loc_url, params, urlencode(new_query_list), fragment))"
] |
[
"0.6940429",
"0.68051386",
"0.6632963",
"0.64693373",
"0.6446491",
"0.64338994",
"0.63079447",
"0.6306596",
"0.6290737",
"0.6266544",
"0.6254872",
"0.62398434",
"0.62171376",
"0.62120706",
"0.62011",
"0.61750597",
"0.6109502",
"0.60486513",
"0.60411066",
"0.6038771",
"0.6008183",
"0.6008163",
"0.5989191",
"0.5931797",
"0.59262425",
"0.5899287",
"0.58982176",
"0.5891174",
"0.58854383",
"0.58823866"
] |
0.7290656
|
0
|
Renderer initialisation. The ``HeadRenderer`` keeps track of the javascript and css used by every view, to be able to concatenate them into the ``<head>`` section.
|
def __init__(self, static_url):
    super(HeadRenderer, self).__init__()
    # Directory where are located the static contents of the application
    self.static_url = static_url
    self._named_css = {}         # CSS code
    self._css_url = {}           # CSS URLs
    self._named_javascript = {}  # Javascript code
    self._javascript_url = {}    # Javascript URLs
    self._order = 0              # Memorize the order of the javascript and css
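For context, a hedged sketch of how this bookkeeping is typically consumed (the function name below is hypothetical; the pattern follows the ``javascript_url`` snippet that appears among the negatives further down, which calls ``absolute_url(url, self.static_url)``): a URL-registering helper resolves the path against ``static_url`` and records it together with the current ``_order`` counter, so the head can later be rendered in registration order.

# Hypothetical helper mirroring the javascript_url pattern visible in the
# negatives below: resolve the URL against the statics directory, then store
# it with the current ordering counter.
def register_css_url(renderer, url, **kw):
    renderer._css_url.setdefault(absolute_url(url, renderer.static_url),
                                 (renderer._order, kw))
    renderer._order += 1
    return ()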
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def render(self, h, *args):\n\n # Create the tags to include the CSS styles and the javascript codes\n\n head = self.root\n\n if isinstance(head, ET.ElementBase) and (head.tag == 'head'):\n # If a ``<head>`` tag already exist, take its content\n head = self.head(head[:], dict(head.attrib))\n else:\n head = self.head(head)\n\n head.extend([self.link(rel='stylesheet', type='text/css', href=url, **attributes) for (url, attributes) in self._get_css_url()])\n head.extend([self.script(type='text/javascript', src=url, **attributes) for (url, attributes) in self._get_javascript_url()])\n\n head.extend([self.style(css, type='text/css', **attributes) for (name, css, attributes) in self._get_named_css()])\n head.extend([self.script(js, type='text/javascript', **attributes) for (name, js, attributes) in self._get_named_javascript()])\n\n return head",
"def __init__(self, static_url):\n super(AsyncHeadRenderer, self).__init__(static_url=static_url)\n\n self._anonymous_css = [] # CSS\n self._anonymous_javascript = [] # Javascript code",
"def createBasicRenderSetup():\n\n pass",
"def render_async_head(self, h, *args):\n return \"nagare_loadAll(%s, %s, %s, %s, %s, %s)\" % (\n ajax.py2js(self._get_named_css(), h),\n ajax.py2js(r'\\n'.join(self._get_anonymous_css()), h),\n ajax.py2js(self._get_css_url(), h),\n ajax.py2js(self._get_named_javascript(), h),\n ajax.py2js(';'.join(self._get_anonymous_javascript()), h),\n ajax.py2js(self._get_javascript_url(), h)\n )",
"def render(self, template_name, **kwargs):\n currentUser = self.current_user\n from_workspace_str = self.get_argument(\"from_workspace\", default=\"0\", strip=False)\n from_workspace = from_workspace_str == \"1\"\n html = self.render_string(template_name, currentUser=currentUser, from_workspace = from_workspace, **kwargs)\n if from_workspace :\n scriptName = self.__class__.__name__\n\n if scriptName.endswith('Handler') :\n scriptName = scriptName[:-7] \n\n path = self.static_url('scripts/' + scriptName + '.js')\n\n js = '<script src=\"' + escape.xhtml_escape(path) + '\" type=\"text/javascript\"></script>'\n html = html + utf8(js)\n self.finish(html)\n return\n\n # Insert the additional JS and CSS added by the modules on the page\n js_embed = []\n js_files = []\n css_embed = []\n css_files = []\n html_heads = []\n html_bodies = []\n for module in getattr(self, \"_active_modules\", {}).values():\n embed_part = module.embedded_javascript()\n if embed_part:\n js_embed.append(utf8(embed_part))\n file_part = module.javascript_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n js_files.append(file_part)\n else:\n js_files.extend(file_part)\n embed_part = module.embedded_css()\n if embed_part:\n css_embed.append(utf8(embed_part))\n file_part = module.css_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n css_files.append(file_part)\n else:\n css_files.extend(file_part)\n head_part = module.html_head()\n if head_part:\n html_heads.append(utf8(head_part))\n body_part = module.html_body()\n if body_part:\n html_bodies.append(utf8(body_part))\n\n def is_absolute(path):\n return any(path.startswith(x) for x in [\"/\", \"http:\", \"https:\"])\n if js_files:\n # Maintain order of JavaScript files given by modules\n paths = []\n unique_paths = set()\n for path in js_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n js = ''.join('<script src=\"' + escape.xhtml_escape(p) +\n '\" type=\"text/javascript\"></script>'\n for p in paths)\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + utf8(js) + b'\\n' + html[sloc:]\n if js_embed:\n js = b'<script type=\"text/javascript\">\\n//<![CDATA[\\n' + \\\n b'\\n'.join(js_embed) + b'\\n//]]>\\n</script>'\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + js + b'\\n' + html[sloc:]\n if css_files:\n paths = []\n unique_paths = set()\n for path in css_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n css = ''.join('<link href=\"' + escape.xhtml_escape(p) + '\" '\n 'type=\"text/css\" rel=\"stylesheet\"/>'\n for p in paths)\n hloc = html.index(b'</head>')\n html = html[:hloc] + utf8(css) + b'\\n' + html[hloc:]\n if css_embed:\n css = b'<style type=\"text/css\">\\n' + b'\\n'.join(css_embed) + \\\n b'\\n</style>'\n hloc = html.index(b'</head>')\n html = html[:hloc] + css + b'\\n' + html[hloc:]\n if html_heads:\n hloc = html.index(b'</head>')\n html = html[:hloc] + b''.join(html_heads) + b'\\n' + html[hloc:]\n if html_bodies:\n hloc = html.index(b'</body>')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n self.finish(html)",
"def renderHeader(self, name):\n html = u\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\"\n html += u'<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 '\n html += u'Transitional//EN\" '\n html += u'\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n'\n html += u\"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\"\n html += u\"<head>\\n\"\n html += u\"<style type=\\\"text/css\\\">\\n\"\n html += u\"@import url(base.css);\\n\"\n html += u\"@import url(content.css);\\n\"\n html += u\"</style>\"\n html += u\"<title>\"\n html += name\n html += \"</title>\\n\"\n html += u\"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; \"\n html += u\" charset=utf-8\\\" />\\n\";\n html += u'<script type=\"text/javascript\" src=\"common.js\"></script>\\n'\n html += u\"</head>\\n\"\n return html",
"def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'AscensionESports_Baseline/index.html',\n {\n 'background': getSiteBackground(),\n 'color': getSiteColor(),\n 'title':'Home Page',\n 'year':datetime.now().year,\n }\n )",
"def __call__(self, req, res):\n if not hasattr(res, 'render'):\n res.render = Renderer(res)\n res.locals = {}\n res.render.add_engine(self)",
"def open_head(self) -> str:\n self.html_doc = self.html_doc + \"\"\"<head>\"\"\"\n return self.html_doc",
"def setup_render(\n self, options: Dict[str, Any], env: MutableMapping[str, Any]\n ) -> None:\n self.md_env = env\n self.config: Dict[str, Any] = options\n self.document: nodes.document = self.config.get(\"document\", make_document())\n self.current_node: nodes.Element = self.config.get(\n \"current_node\", self.document\n )\n self.reporter: Reporter = self.document.reporter\n # note there are actually two possible language modules:\n # one from docutils.languages, and one from docutils.parsers.rst.languages\n self.language_module_rst: ModuleType = get_language_rst(\n self.document.settings.language_code\n )\n self._level_to_elem: Dict[int, nodes.Element] = {0: self.document}",
"def load_render(views):\n render = render_jinja(\n views, encoding='utf-8',\n extensions=['jinja2.ext.do', AssetsExtension])\n render._lookup.assets_environment = env\n render._lookup.globals.update(dict(DEV=config.DEV,\n VERSION=get_version()))\n def inner():\n web.ctx.render = render;\n return inner",
"def render_home():\r\n\treturn render_template(\"index.html\")",
"def add_head(self):\n self.scenes[self.current_scene].add_object(Head())\n self.redraw()",
"def viewHeadOn(*args, **kwargs)->None:\n pass",
"def header(style=u'default'):\n return (docType() + \n u'<html xmlns=\"http://www.w3.org/1999/xhtml\">\\n'\n u'<head>\\n'\n u'<style type=\"text/css\">\\n'\n u' @import url(/css/exe.css);\\n'\n u' @import url(/style/base.css);\\n'\n u' @import url(/style/%s/content.css);</style>\\n'\n u'<script type=\"text/javascript\" src=\"/scripts/common.js\">'\n u'</script>\\n'\n u'<script type=\"text/javascript\" src=\"/scripts/libot_drag.js\">'\n u'</script>\\n'\n u'<title>%s</title>\\n'\n u'<meta http-equiv=\"content-type\" '\n u' content=\"text/html; charset=UTF-8\"></meta>\\n'\n u'</head>\\n'\n % (style, _('eXe : elearning XHTML editor')))",
"def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html",
"def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()",
"def start(self):\r\n\r\n # Sets start values\r\n self.tableHeader = None\r\n self.formatBeginTag = None\r\n\r\n # Initializes the output text with HTML header\r\n self.text = \"\"\"\r\n <html>\r\n <head>\r\n <style type='text/css'>\r\n table, td, th {border:1px solid black;border-collapse:collapse;padding:3px;margin:5px;}\r\n br {mso-data-placement:same-cell}\r\n th {background-color:lightgrey}\r\n </style>\r\n </head>\r\n <body style='font-family:arial;'>\r\n \"\"\"",
"def create_head(css, title):\r\n doc = \"<!DOCTYPE html>\\n<html>\\n\"\r\n head = \"<head>\\n<title>\" + title + \"\\n</title>\" + css + \"</head>\\n\"\r\n header = doc + head + \"<body>\\n<h1>\" + title + \"\\n</h1>\\n<hr/>\\n\"\r\n return header",
"def initialPage():\n\treturn header() + footer()",
"def make_renderer(app_conf):\n\n # Uncomment for an example of Mako templating support.\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(\n directories=[\n pkg_resources.resource_filename('example', 'templates'),\n pkg_resources.resource_filename('formish', 'templates/mako'),\n pkg_resources.resource_filename('adminish', 'templates'),\n ],\n module_directory=os.path.join(app_conf['cache_dir'], 'templates'),\n input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h']\n )",
"def head(*args, **kwargs):\r\n\r\n return HttpResponse()",
"def get_html_parts(self):\n script_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'data')\n with open(os.path.join(script_path, 'head.html'), 'r') as hfile:\n self.header = hfile.read()\n with open(os.path.join(script_path, 'template.html'), 'r') as hfile:\n self.template = hfile.read()\n with open(os.path.join(script_path, 'footer.html'), 'r') as hfile:\n self.footer = hfile.read()\n self.module_icon = os.path.join(script_path, 'icon.png')\n return True",
"def main():\n\n return render_template(\"index.html\", title=\"Home\", heading=\"Dublin Bus\")",
"def index(request):\n return render_to_response(\n # note: this is slightly different than the labs app with \"app/app.html\" rather than the labs/labs.html\n # and we don't pass submodule name. fixme, by changing to new style with name = app_name\n settings.JS_HOME+'app.html',\n {'INDIVO_UI_APP_CSS': settings.INDIVO_UI_SERVER_BASE+'/jmvc/ui/resources/css/ui.css'}\n )",
"def call(self, **kwargs):\n # Get additional resources links\n css = []\n for path in (\"creative/vendor/bootstrap/css/bootstrap.min.css\",\n \"creative/vendor/font-awesome/css/font-awesome.min.css\",\n \"creative/vendor/magnific-popup/magnific-popup.css\",\n \"creative/css/creative.css\"):\n css.append(self._cw.data_url(path))\n js = []\n for path in (\"creative/vendor/jquery/jquery.min.js\",\n \"creative/vendor/bootstrap/js/bootstrap.min.js\",\n \"creative/vendor/scrollreveal/scrollreveal.min.js\",\n \"creative/vendor/magnific-popup/jquery.magnific-popup.min.js\",\n \"creative/js/creative.js\"):\n js.append(self._cw.data_url(path))\n\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n login_url=self._cw.build_url(\n \"login\", __message=u\"Please login with your account.\"),\n contact_email=self._cw.vreg.config.get(\n \"administrator-emails\", \"[email protected]\"),\n css_url=css,\n js_url=js)\n self.w(html)",
"def paintHead(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"head\", self.avatarConfiguration[\"skin\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"head\")",
"def get_index(self, css_path=None):\n\n return \"\"\"\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>RAMOSE</title>\n <meta name=\"description\" content=\"Documentation of RAMOSE API Manager\">\n <style>%s</style>\n %s\n </head>\n <body>\n %s\n <footer>%s</footer>\n </body>\n </html>\n \"\"\" % (\n self.__css(),\n self.__css_path(css_path),\n self.__parse_logger_ramose(),\n self.__footer(),\n )",
"def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'research/index.html',\n {\n 'title':'Health Infinity',\n 'info' :'Medical BigData Platform',\n 'year' : datetime.now().year,\n 'temp': models.load_data(),\n 'test': models.hchart_str(),\n }\n )",
"def author_view(self, context=None):\n # creating xblock fragment\n fragment = Fragment(u\"<!-- This is the studio -->\")\n fragment.add_javascript(load(self.js_path))\n fragment.initialize_js('WhoWhereWhyXBlock')\n\n return fragment"
] |
[
"0.6792161",
"0.6449125",
"0.58466434",
"0.5689005",
"0.55307853",
"0.55128866",
"0.5467106",
"0.54037964",
"0.53657",
"0.53409976",
"0.5316373",
"0.5263636",
"0.5252489",
"0.52406925",
"0.52315617",
"0.5227174",
"0.52092224",
"0.5201493",
"0.5186905",
"0.5182038",
"0.5177873",
"0.5165008",
"0.51634103",
"0.51160395",
"0.51127183",
"0.5111452",
"0.51014733",
"0.50999033",
"0.509707",
"0.5067874"
] |
0.72004354
|
0
|
Memorize an inline named css style
|
def css(self, name, style, **kw):
self._named_css.setdefault(name, (self._order, style, kw))
self._order += 1
return ()
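A minimal, self-contained sketch of the memorization behaviour shown above (the HeadStub class and every name in it other than _named_css/_order are illustrative, not part of Nagare): because of setdefault, the first style registered under a given name wins and later registrations under that name are silently ignored.

class HeadStub:
    # Illustrative stand-in: only the pieces needed to show the
    # named-CSS memorization are reproduced here.
    def __init__(self):
        self._named_css = {}
        self._order = 0

    def css(self, name, style, **kw):
        # First registration under a name wins; later ones are no-ops.
        self._named_css.setdefault(name, (self._order, style, kw))
        self._order += 1
        return ()

h = HeadStub()
h.css('reset', '* { margin: 0 }')
h.css('reset', '* { margin: 1em }')             # ignored: name already taken
h.css('buttons', '.btn { color: red }', media='screen')

print(h._named_css['reset'][1])                 # '* { margin: 0 }'
print(sorted(h._named_css))                     # ['buttons', 'reset']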
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generateInlineCSS():",
"def _css(self, style):\n self._anonymous_css.append((self._order, style))\n self._order += 1",
"def cache_style_content(self, content, inline=False):\n\t\tif inline:\n\t\t\tsheet = cssutils.parseStyle(content)\n\t\telse:\n\t\t\tsheet = cssutils.parseString(content, href=self.url)\n\t\tif not inline:\n\t\t\tfor rule in sheet.cssRules:\n\t\t\t\tif rule.type == rule.IMPORT_RULE:\n\t\t\t\t\tf = self._recursive_cache_resource(rule.styleSheet.href)\n\t\t\t\t\trule.href = f\n\t\tdef replacer(url):\n\t\t\tif url.startswith('data'):\n\t\t\t\treturn url\n\t\t\t# TODOs:\n\t\t\t# Check for absolute url before joining\n\t\t\treturn self._recursive_cache_resource(urljoin(self.url, url))\n\t\tcssutils.replaceUrls(sheet, replacer, ignoreImportRules=True)\n\t\treturn sheet.cssText",
"def generateInlineCSS(self, *args, **kwargs): \n return \"\"\"\n .googleLoading.googleMapView #CustomGoogleMap { \n height:520px;\n background: url(\"../image/map/stripeHorz.gif\") repeat-x scroll 0 0 transparent;\n padding: 20px;\n position: relative;\n } \n .googleLoading.googleMapView {\n background: url(\"/edit/maploader.gif\") no-repeat scroll 215px 115px transparent;\n }\n \"\"\".replace('\\n', '').replace(' ','')",
"def style(self, style):\n\n self.container['style'] = style",
"def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'",
"def showsrcstyle(self, line):\n \n name = line.strip()\n if not name:\n name = \"default\"\n self.style_name = name\n self.formatter = HtmlFormatter(style=name)\n display(HTML(\"\"\"<style type='text/css'>\n span.inspector-header {\n font-family: monospace;\n border-bottom: 1px solid #555;\n }\n table.highlighttable, .highlighttable td, .highlighttable tr {\n border: 0px;\n }\n .highlighttable td.linenos {\n border-right: 1px solid #555;\n }\n \n span.inspector-filename {\n text-decoration: italic;\n }\n span.inspector-lineno {\n font-weight: bold;\n }\n %s\n </style>\n \"\"\" % self.formatter.get_style_defs()\n ))",
"def set_style(self):",
"def render_servicestyle(self, ctx, data):\n\t\tif self.service and self.service.getProperty(\"customCSS\", False):\n\t\t\treturn ctx.tag[self.service.getProperty(\"customCSS\")]\n\t\treturn \"\"",
"def _inline_css(self):\n \n # Stores all inlined elements.\n elms = {}\n \n # Get all the CSS rules in a dictionary that we can operate on.\n style_rules = cssutils.parseString(self.aggregated_css)\n \n for rule in style_rules:\n \n # Look through all elements that match this CSS selector.\n if hasattr(rule, 'selectorText'):\n \n try:\n for element in self.document.cssselect(rule.selectorText):\n \n # \n if element not in elms:\n elms[element] = cssutils.css.CSSStyleDeclaration()\n \n # Add existing inline style if present\n inline_styles = element.get('style')\n if inline_styles:\n inline_styles= cssutils.css.CSSStyleDeclaration(\n cssText=inline_styles\n )\n else:\n inline_styles = None\n if inline_styles:\n for p in inline_styles:\n # Set inline style specificity\n elms[element].setProperty(p)\n \n # Add the styles to the element.\n for p in rule.style:\n if p not in elms[element]:\n elms[element].setProperty(p.name, p.value, p.priority)\n else:\n # sameprio = (p.priority == view[element].getPropertyPriority(p.name))\n # if not sameprio and bool(p.priority) or (sameprio and selector.specificity >= specificities[element][p.name]):\n # # later, more specific or higher prio \n elms[element].setProperty(p.name, p.value, p.priority)\n except:\n # TODO: Need to catch errors like ExpressionError here...\n pass\n \n # Set inline style attributes unless the element is not worth styling.\n ignore_list = [\n 'html',\n 'head',\n 'title',\n 'meta',\n 'link',\n 'script'\n ]\n for element, style in elms.items():\n if element.tag not in ignore_list:\n css = style.getCssText(separator=u'')\n element.set('style', css)\n \n # Convert tree back to plain a HTML string.\n html = etree.tostring(self.document, method=\"xml\", \n pretty_print=True, encoding='UTF-8')\n \n return html",
"def style(self, style):\n self.style += [ style ]\n return self",
"def __createStyleFromString(self, string):\n\n matches = re.findall(r\"([^=]+)=([^;]+)(;|$)\", str(string).lower());\n if not matches :\n return False;\n\n\n style = OutputFormatterStyle();\n for match in matches:\n if ('fg' == match[0]) :\n style.setForeground(match[1]);\n elif ('bg' == match[0]) :\n style.setBackground(match[1]);\n else :\n style.setOption(match[1]);\n\n\n\n return style;",
"def getstyle(self, tag):\n try:\n styledict = tag.style.__dict__\n except AttributeError:\n return []\n else:\n stylelist = [x + \": \" + y for x, y in styledict.items()]\n return [u(' style=\"%s\"') % u(\"; \").join(stylelist)]",
"def style(self):\n return self.container['style']",
"def css(self) -> str:\n return self._css",
"def style(self):\n return self['style']",
"def get_style ( self, object ):\n return self.style",
"def add_style(self, strstyle, content=\"\"):\n if content: # str is name of css file to use\n src = self.add_style_str(content, strstyle)\n else: # str is filename of actual css file\n src = self.add_style_file(strstyle)\n\n self.opf.add_manifest(sluggify(src), src, \"text/css\")",
"def update_style(self):\n pass",
"def css(self, **kwargs):\n\n with open(\"style.css\", 'a+') as s:\n for key, value in kwargs:\n s.write(f\"\\n\\t{key.replace('_', '-')}: {value};\")\n s.write(\"\\n}\")",
"def css(self, **kwargs):\n\n with open(\"style.css\", 'a+') as s:\n for key, value in kwargs:\n s.write(f\"\\n\\t{key.replace('_', '-')}: {value};\")\n s.write(\"\\n}\")",
"def _read_stylesheet(self, style):\n tree = ET.parse(style)\n for marker in tree.findall('style'):\n if marker.get('publishable') == 'true':\n self.publishable.add(marker.get('id'))",
"def CSSClasses(self):",
"def CreateDimStyle(self,name):\n\t\treturn self.acad.ActiveDocument.DimStyles.Add(name)",
"def test_overwrite(self):\n html = '<html><head><style>h1 {color: #000;}</style></head><body><h1 style=\"color: #fff\">Foo</h1></body></html>'\n desired_output = '<html><head></head><body><h1 style=\"color: #000; color: #fff\">Foo</h1></body></html>'\n output = Pynliner().from_string(html).run()\n self.assertEqual(output, desired_output)",
"def condense_style(html): # May look silly but Emmet does this and is wrong.\n log.debug(\"Condensing HTML Style CSS tags.\")\n return html.replace('<style type=\"text/css\">', '<style>').replace(\n \"<style type='text/css'>\", '<style>').replace(\n \"<style type=text/css>\", '<style>')",
"def css_file(self):\n pass",
"def _set_style(style):\n if isinstance(style, (str, dict)):\n return Style(style)\n elif isinstance(style, Style):\n return style\n else:\n return Style()",
"def _get_named_css(self):\n\n return [(name, style, attributes) for (name, (order, style, attributes)) in sorted(self._named_css.items(), key=operator.itemgetter(1))]",
"def style_resize(self) -> str:\n resize = \"\"\".resize{\n width: 1000px;\n height: auto;}\\n\"\"\"\n self.html_doc = self.html_doc + resize\n return self.html_doc"
] |
[
"0.72046155",
"0.67504823",
"0.62656724",
"0.6125977",
"0.59719205",
"0.59340906",
"0.5891716",
"0.5886623",
"0.58833194",
"0.5823726",
"0.5817398",
"0.57964844",
"0.57822603",
"0.57586986",
"0.5738716",
"0.56741893",
"0.5671205",
"0.5650821",
"0.5650079",
"0.5635341",
"0.5635341",
"0.5632006",
"0.56029695",
"0.5544288",
"0.55393445",
"0.54945856",
"0.54920053",
"0.54843056",
"0.54772985",
"0.5463405"
] |
0.67921805
|
1
|
Memorize an inline named javascript code
|
def javascript(self, name, script, **kw):
if callable(script):
# Transcode the function or the method to javascript code
script = ajax.javascript(script)
if isinstance(script, ajax.JS):
# Transcoded javascript needs a helper
self.javascript_url('/static/nagare/pyjslib.js')
script = script.javascript
self._named_javascript.setdefault(name, (self._order, script, kw))
self._order += 1
return ()
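A hedged sketch of the callable branch above: in Nagare the real transcoding is done by ajax.javascript and the pyjslib.js helper URL is registered alongside it, neither of which is reproduced here. The stand-in below only mimics the dispatch and the setdefault/order pattern, and all names in it are made up.

def register_javascript(named_js, order, name, script):
    # Simplified stand-in: a real renderer would transcode a Python
    # callable to JavaScript; here a placeholder call is substituted.
    if callable(script):
        script = '%s();  /* transcoded from Python */' % script.__name__
    named_js.setdefault(name, (order, script))
    return order + 1

def on_click():
    pass

named_js = {}
order = 0
order = register_javascript(named_js, order, 'raw', 'alert("hi")')
order = register_javascript(named_js, order, 'handler', on_click)

print(named_js['handler'][1])   # on_click();  /* transcoded from Python */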
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wrap_javascript_in_anonfct(js_data):\n return \"(function(){\"+js_data+\"})()\"",
"def js_minify2(js):\n log.debug(\"Obfuscating Javascript variables names inside functions.\")\n # if eval() or with{} is used on JS is not too Safe to Obfuscate stuff.\n is_ok = \"eval(\" not in js and \"with{\" not in js and \"with {\" not in js\n return slim_func_names(slim_params(js)) if is_ok else js.strip()",
"def code(self):\n return '{}\\n<script>{}</script>'.format(self.html, self.js)",
"def _javascript(self, script):\n self._anonymous_javascript.append((self._order, script))\n self._order += 1",
"def code():",
"def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline",
"def un_src(self):\n if self.src is None:\n return\n self.inline = '''\n var script = document.createElement('script');\n script.src = \"%s\";\n document.body.appendChild(script);\n''' % self.src\n self.src = None",
"def create_js(self):\n for x in self.__js:\n self.__content.append(\"<script src=\\\"%s\\\"></script>\\n\"% (x))",
"def separate_asm_js(final, asm_target):\n logging.debug('separating asm')\n subprocess.check_call([shared.PYTHON, shared.path_from_root('tools', 'separate_asm.py'), final, asm_target, final])\n\n # extra only-my-code logic\n if shared.Settings.ONLY_MY_CODE:\n temp = asm_target + '.only.js'\n print jsrun.run_js(shared.path_from_root('tools', 'js-optimizer.js'), shared.NODE_JS, args=[asm_target, 'eliminateDeadGlobals', 'last', 'asm'], stdout=open(temp, 'w'))\n shutil.move(temp, asm_target)",
"def slim_func_names(js):\n renamed_func, functions = [], re.compile('(function (\\w+)\\()').findall(js)\n new_names_generator = NamesGenerator()\n for whole_func, func_name in functions:\n count = js.count(func_name) # more than 1 mention of the function\n if len(func_name) > 3 and count > 1: # function name larger than 3\n new_name = new_names_generator.next()\n if re.findall(r'\\b%s\\b' % re.escape(new_name), js):\n continue\n js = re.sub(r'\\b%s\\b' % re.escape(func_name), new_name, js)\n renamed_func.append((func_name, new_name))\n list_of_replacements = ['{}={}'.format(x, y) for (x, y) in renamed_func]\n js_function_name_replacements = ';var {};'.format( # ;var a=b,c=d; or ''\n ','.join(list_of_replacements)) if len(list_of_replacements) else \"\"\n return js + js_function_name_replacements",
"def inline_javascript(html_src, path=None):\n javascript_re = re.compile(\"\\<script src\\=\\\"([0-9a-zA-Z./]+)\\\"\\>\\</script>\")\n\n def fetch_jssource(in_match):\n rel_path = in_match.group(1)\n jspath = os.path.join(path, rel_path)\n return \"<script>\\n{0}\\n</script>\".format(open(jspath, 'r').read())\n\n return javascript_re.sub(fetch_jssource, html_src)",
"def generateJavascriptContent(notification):",
"def condense_script(html): # May look silly but Emmet does this and is wrong.\n log.debug(\"Condensing HTML Script JS tags.\")\n return html.replace('<script type=\"text/javascript\">', '<script>').replace(\n \"<style type='text/javascript'>\", '<script>').replace(\n \"<style type=text/javascript>\", '<script>')",
"def _as_inline_code(text):\n escaped = text.replace(\"`\", r\"\\`\")\n return f\"`{escaped}`\"",
"def make_js(scheme, netloc, host, port, cname, type_):\n js = get_cache(host, port, type_)\n if not js:\n js = TEMPLATE\n js = __replace(js, '$SCHEMA', str(scheme))\n js = __replace(js, '$NETLOC', str(netloc))\n js = __replace(js, '$HOST', str(host))\n js = __replace(js, '$PORT', str(port))\n js = __replace(js, '$CNAME', str(cname))\n js = __replace(js, '$TYPE', str(type_))\n js = slimit.minify(js, mangle=True, mangle_toplevel=True)\n make_cache(host, port, type_, js)\n return js",
"def exec_js(expr, args=None):\n\n if args is None:\n str_args = ''\n else:\n str_args = '(' + args2js(args) + ')'\n\n display(Javascript(expr + str_args))",
"def _get_anonymous_javascript(self):\n return [js for (order, js) in sorted(self._anonymous_javascript)]",
"def simple_replacer_js(js):\n log.debug(\"Force strip simple replacements from Javascript.\")\n return condense_semicolons(js.replace(\"debugger;\", \";\").replace(\n \";}\", \"}\").replace(\"; \", \";\").replace(\" ;\", \";\").rstrip(\"\\n;\"))",
"def md_inline_code(raw_text):\n return '`%s`' % md_escape(raw_text, characters='`')",
"def do_js(js_input_dir, js_output_dir):\n\n remove_extention('.js', js_output_dir)\n js_str = get_js(js_input_dir)\n js_name = get_cachebusting_name(js_str) + '.js'\n fu.str_to_file(js_output_dir + js_name, js_str)\n return js_name",
"def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)",
"def compress_javascript_data_closure(data):\n tmp_fname = tempfile.mktemp(\"urfastr-player-min.js\")\n open(tmp_fname, \"w+\").write(data)\n cmdline = [\"closure-compiler\", \"--js\", tmp_fname]\n compressed_data = subprocess.Popen(cmdline, stdout=subprocess.PIPE).communicate()[0]\n os.remove(tmp_fname) \n return compressed_data",
"def view_source_js(fn): #py:view_source_js\n RUR._view_source_js_(fn)",
"def get_massage():\n # Javascript code in ths page generates HTML markup\n # that isn't parsed correctly by BeautifulSoup.\n # To avoid this problem, all document.write fragments are removed\n my_massage = copy(BeautifulSoup.MARKUP_MASSAGE)\n my_massage.append((re.compile(u\"document.write(.+);\"), lambda match: \"\"))\n my_massage.append((re.compile(u'alt=\".+\">'), lambda match: \">\"))\n return my_massage",
"def process_lessjs(source, filepath):\n # This is a simple pass through, we don't need to do anything for less.js\n # to work\n return source",
"def propeller_javascript(jquery=None):\n javascript = ''\n # See if we have to include jQuery\n if jquery is None:\n jquery = get_propeller_setting('include_jquery', False)\n # NOTE: No async on scripts, not mature enough. See issue #52 and #56\n if jquery:\n url = propeller_jquery_url()\n if url:\n javascript += render_tag('script', attrs={'src': url})\n url = propeller_javascript_url()\n if url:\n attrs = {'src': url}\n javascript += render_tag('script', attrs=attrs)\n return mark_safe(javascript)",
"def add_js_attribute(self, name, value):\n self.set(name, value.generate_action(None, self.renderer))",
"def js(self, file):\n\t\tfor f in file:\n\t\t\tself.to_head('<script type=\"text/javascript\" src=\"' + f + '\"></script>\\n')",
"def source_to_code(self, data, path):\n\t\treturn _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True)",
"def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]"
] |
[
"0.6899933",
"0.64296746",
"0.6203096",
"0.6180724",
"0.6160276",
"0.61385953",
"0.58633286",
"0.585141",
"0.58496076",
"0.5697568",
"0.56682783",
"0.5664831",
"0.5648165",
"0.56456304",
"0.5525566",
"0.54634964",
"0.54531133",
"0.5352807",
"0.53292936",
"0.53059787",
"0.5297572",
"0.5291822",
"0.5291419",
"0.5275291",
"0.5270875",
"0.5252608",
"0.5240007",
"0.523862",
"0.5217406",
"0.52081215"
] |
0.6545581
|
1
|
Memorize a javascript URL
|
def javascript_url(self, url, **kw):
self._javascript_url.setdefault(absolute_url(url, self.static_url), (self._order, kw))
self._order += 1
return ()
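A sketch of the URL memorization, using urllib.parse.urljoin as a stand-in for Nagare's absolute_url (that substitution, the static root and the variable names are assumptions): a URL registered twice keeps its first position and attributes, and relative paths are resolved against the static root.

from urllib.parse import urljoin

static_url = 'http://example.com/static/'       # hypothetical static root
js_urls = {}
order = 0

for url, kw in [('app.js', {'defer': True}),
                ('http://cdn.example.com/lib.js', {}),
                ('app.js', {'defer': False})]:   # duplicate: ignored
    js_urls.setdefault(urljoin(static_url, url), (order, kw))
    order += 1

for full_url, (pos, attrs) in sorted(js_urls.items(), key=lambda item: item[1]):
    print(pos, full_url, attrs)
# 0 http://example.com/static/app.js {'defer': True}
# 1 http://cdn.example.com/lib.js {}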
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def propeller_javascript_url():\n return javascript_url()",
"def ajax_url(url):\n\n hashbang_index = url.find('#!')\n if hashbang_index != -1:\n base = url[:hashbang_index]\n joiner = '?' if '?' not in base else '&'\n url = ''.join((base, joiner, '_escaped_fragment_=',\n urllib.parse.quote(url[hashbang_index+2:],\n '!\"$\\'()*,/:;<=>?@[\\\\]^`{|}~')))\n return url",
"def url():\n ...",
"def get_url(self,urldata):\n return \"%s?%s\" % (self.script_url, urllib.urlencode(urldata,1))",
"def make_js(scheme, netloc, host, port, cname, type_):\n js = get_cache(host, port, type_)\n if not js:\n js = TEMPLATE\n js = __replace(js, '$SCHEMA', str(scheme))\n js = __replace(js, '$NETLOC', str(netloc))\n js = __replace(js, '$HOST', str(host))\n js = __replace(js, '$PORT', str(port))\n js = __replace(js, '$CNAME', str(cname))\n js = __replace(js, '$TYPE', str(type_))\n js = slimit.minify(js, mangle=True, mangle_toplevel=True)\n make_cache(host, port, type_, js)\n return js",
"def url(self, url):\n prefix = self.request_local.environ['toscawidgets.prefix']\n script_name = self.request_local.environ['SCRIPT_NAME']\n if hasattr(url, 'url_mapping'):\n url = url.url_mapping['normal']\n return ''.join([script_name, prefix, url])",
"def _make_url(self):\n ...",
"def propeller_jquery_url():\n return jquery_url()",
"def get_short_url_base():",
"def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')",
"def get_url():\n key = _get_key()\n return key.generate_url(300)",
"def bootstrap_javascript_url():\n return javascript_url()",
"def set_short_url_base(url):",
"def encode(self, longUrl):\n sh_URL = ''.join(\n random.choice(string.ascii_letters) for _ in range(len(str(hash(longUrl)))))\n self.storage[sh_URL] = longUrl\n return self.baseUrl + self.prefix + sh_URL",
"def _get_javascript_url(self):\n return [(url, attributes) for (url, (order, attributes)) in sorted(self._javascript_url.items(), key=operator.itemgetter(1))]",
"def _get_url(self, absolute):",
"def encode(self, longUrl):\n char_list = string.ascii_letters + string.digits\n TINY_URL = 'http://tinyurl.com/'\n while True:\n url_key = random.sample(char_list,(random.randint(0,10)))\n if self.url_dict.get(''.join(url_key),None) == None:\n self.url_dict[''.join(url_key)] = longUrl\n break\n return (TINY_URL + ''.join(url_key))",
"def obfuscate_url(url: str) -> str:\n return re.sub(r\"\\/\\/.*:.*@\", \"//***:***@\", url)",
"def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))",
"def __get_url(cls, url):\n url = url + AdvertCoordinationAdaptor.BASE_URL_QUERY_STRING\n return url",
"def static_url(self, path):\n\t\tif not hasattr(self, \"_static_hashes\"):\n\t\t\tself._static_hashes = {}\n\t\thashes = self._static_hashes\n\t\tif path not in hashes:\n\t\t\timport hashlib\n\t\t\ttry:\n\t\t\t\tf = open(os.path.join(\n\t\t\t\t\tself.application.settings[\"static_path\"], path))\n\t\t\t\thashes[path] = hashlib.md5(f.read()).hexdigest()\n\t\t\t\tf.close()\n\t\t\texcept:\n\t\t\t\tprint \"Could not open static file %r\"%path\n\t\t\t\thashes[path] = None\n\t\tbase = \"http://static.\"+_config.get(\"varnish\", \"ovzcphost\") + \"/\"\n\t\tif hashes.get(path):\n\t\t\treturn base + path + \"?v=\" + hashes[path][:5]\n\t\telse:\n\t\t\treturn base + path",
"def url_src_build(web_url, url_part):\r\n url_full = urljoin(web_url,url_part)\r\n return url_full",
"def getUrl(self): #$NON-NLS-1$\r",
"def getUrl(self): #$NON-NLS-1$\r",
"def create_url(self):\n self.base_url = self.base + self.strs[jpn.path_latest]",
"def Url(self) -> str:",
"def encode(self, longUrl):\n self.hash = {}\n if longUrl not in self.hash:\n idx = hash(longUrl)\n self.hash[idx] = longUrl\n final_string = 'https://tinyurl.com/' + str(idx)\n return (final_string)",
"def shorten_url():\n return rh.shorten_url(request)",
"def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi",
"def url_shortner(self):"
] |
[
"0.6716826",
"0.6287909",
"0.59694946",
"0.5952668",
"0.5952474",
"0.58800566",
"0.5849158",
"0.583355",
"0.57910466",
"0.57712585",
"0.57588375",
"0.57173413",
"0.56773674",
"0.5629055",
"0.5622679",
"0.557159",
"0.5567627",
"0.55660737",
"0.55287856",
"0.5492426",
"0.5479819",
"0.54521704",
"0.5451089",
"0.5451089",
"0.54152334",
"0.54132456",
"0.5404176",
"0.53973496",
"0.5394938",
"0.5361367"
] |
0.6487602
|
1
|
Return the list of the inline named css styles, sorted by order of insertion
|
def _get_named_css(self):
return [(name, style, attributes) for (name, (order, style, attributes)) in sorted(self._named_css.items(), key=operator.itemgetter(1))]
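A quick standalone illustration (with made-up entries) of why key=operator.itemgetter(1) yields insertion order: each stored value starts with the order counter, so sorting the items by value sorts by that counter.

import operator

# name -> (order, style, attributes), as stored by the css() method above
named_css = {
    'buttons': (2, '.btn { color: red }',  {}),
    'reset':   (0, '* { margin: 0 }',      {}),
    'layout':  (1, 'body { width: 60em }', {'media': 'screen'}),
}

ordered = [(name, style, attrs)
           for (name, (order, style, attrs))
           in sorted(named_css.items(), key=operator.itemgetter(1))]

print([name for name, _, _ in ordered])   # ['reset', 'layout', 'buttons']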
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_anonymous_css(self):\n return [css for (order, css) in sorted(self._anonymous_css)]",
"def proc_style(self, tokens):\n # FIXME: Implement style attributes.\n return []",
"def getstyle(self, tag):\n try:\n styledict = tag.style.__dict__\n except AttributeError:\n return []\n else:\n stylelist = [x + \": \" + y for x, y in styledict.items()]\n return [u(' style=\"%s\"') % u(\"; \").join(stylelist)]",
"def std_styles(self):\n return [DXFEngine.style(name, font=f) for name, f in std.styles() ]",
"def _get_styles(formatter: HtmlFormatter, *, prefix: str) -> Iterator[str]:\n for line in formatter.get_linenos_style_defs():\n yield f\"{prefix} {line}\"\n yield from formatter.get_background_style_defs(prefix)\n yield from formatter.get_token_style_defs(prefix)",
"def _get_aggregated_css(self):\n css = self.aggregated_css\n \n # Retrieve CSS rel links from HTML pasted and aggregate into one string.\n rel_links = 'link[rel=stylesheet],link[rel=StyleSheet],link[rel=STYLESHEET]'\n for element in self.document.cssselect(rel_links):\n try:\n css_path = element.get('href')\n \n # If a base URL was passed, we attempt to find the resources \n # based off of that URL.\n if base_url:\n if element.get('href').lower().find('http://', 0) < 0:\n parsed_url = urlparse.urlparse(base_url)\n css_path = urlparse.urljoin('%s://%s' % (\n parsed_url.scheme,\n parsed_url.hostname,\n ), css_path)\n \n # Grab the CSS from the URL.\n f = urllib.urlopen(css_path)\n css = css + ''.join(f.read())\n \n # Remove the <link> element from the HTML.\n element.getparent().remove(element)\n \n except:\n raise IOError('The stylesheet %s could not be found' % \n element.get(\"href\"))\n \n # Include inline style elements from <style> tags. Go through each \n # element, grab the CSS and then remove it after.\n style_blocks = 'style,Style,STYLE'\n for element in self.document.cssselect(style_blocks):\n css = css + element.text\n element.getparent().remove(element)\n \n return css",
"def css(self, name, style, **kw):\n self._named_css.setdefault(name, (self._order, style, kw))\n self._order += 1\n return ()",
"def getStyles(self):\r\n return self.styles",
"def _get_css_url(self):\n return [(url, attributes) for (url, (order, attributes)) in sorted(self._css_url.items(), key=operator.itemgetter(1))]",
"def getStyles(self):\n return self.styles",
"def _css(self, style):\n self._anonymous_css.append((self._order, style))\n self._order += 1",
"def parse_styles(text: str) -> List[dict]:\n styles = []\n regex = r'(\\d{3})=(\".*?\"),(\\d+\\.?\\d+),(\\(.*?\\))'\n\n for line in text.split(\"\\r\\n\"):\n if line == \"\":\n continue\n\n n, font, font_size, color = re.match(regex, line).groups()\n styles.append(\n {\n \"id\": int(n),\n \"f\": font.replace('\"', \"\"),\n \"fs\": float(font_size),\n \"rgb\": [\n int(i)\n for i in color.replace(\"(\", \"\")\n .replace(\")\", \"\").split(\",\")]\n }\n )\n\n return styles",
"def _inline_css(self):\n \n # Stores all inlined elements.\n elms = {}\n \n # Get all the CSS rules in a dictionary that we can operate on.\n style_rules = cssutils.parseString(self.aggregated_css)\n \n for rule in style_rules:\n \n # Look through all elements that match this CSS selector.\n if hasattr(rule, 'selectorText'):\n \n try:\n for element in self.document.cssselect(rule.selectorText):\n \n # \n if element not in elms:\n elms[element] = cssutils.css.CSSStyleDeclaration()\n \n # Add existing inline style if present\n inline_styles = element.get('style')\n if inline_styles:\n inline_styles= cssutils.css.CSSStyleDeclaration(\n cssText=inline_styles\n )\n else:\n inline_styles = None\n if inline_styles:\n for p in inline_styles:\n # Set inline style specificity\n elms[element].setProperty(p)\n \n # Add the styles to the element.\n for p in rule.style:\n if p not in elms[element]:\n elms[element].setProperty(p.name, p.value, p.priority)\n else:\n # sameprio = (p.priority == view[element].getPropertyPriority(p.name))\n # if not sameprio and bool(p.priority) or (sameprio and selector.specificity >= specificities[element][p.name]):\n # # later, more specific or higher prio \n elms[element].setProperty(p.name, p.value, p.priority)\n except:\n # TODO: Need to catch errors like ExpressionError here...\n pass\n \n # Set inline style attributes unless the element is not worth styling.\n ignore_list = [\n 'html',\n 'head',\n 'title',\n 'meta',\n 'link',\n 'script'\n ]\n for element, style in elms.items():\n if element.tag not in ignore_list:\n css = style.getCssText(separator=u'')\n element.set('style', css)\n \n # Convert tree back to plain a HTML string.\n html = etree.tostring(self.document, method=\"xml\", \n pretty_print=True, encoding='UTF-8')\n \n return html",
"def get_styledefs(self):\n \n return self.formatter.get_style_defs()",
"def sort_properties(css_unsorted_string):\n log.debug(\"Alphabetically Sorting all CSS / SCSS Properties.\")\n css_pgs = _compile_props(CSS_PROPS_TEXT, grouped=False) # Do Not Group.\n pattern = re.compile(r'(.*?{\\r?\\n?)(.*?)(}.*?)|(.*)',\n re.DOTALL + re.MULTILINE)\n matched_patterns = pattern.findall(css_unsorted_string)\n sorted_patterns, sorted_buffer = [], css_unsorted_string\n RE_prop = re.compile(r'((?:.*?)(?:;)(?:.*?\\n)|(?:.*))',\n re.DOTALL + re.MULTILINE)\n if len(matched_patterns) != 0:\n for matched_groups in matched_patterns:\n sorted_patterns += matched_groups[0].splitlines(True)\n props = map(lambda line: line.lstrip('\\n'),\n RE_prop.findall(matched_groups[1]))\n props = list(filter(lambda line: line.strip('\\n '), props))\n props = _props_grouper(props, css_pgs)\n sorted_patterns += props\n sorted_patterns += matched_groups[2].splitlines(True)\n sorted_patterns += matched_groups[3].splitlines(True)\n sorted_buffer = ''.join(sorted_patterns)\n return sorted_buffer",
"def GetStyleSheet():\n styles = []\n for locale in translation.LOCALES:\n styles.append(\"\"\"\n .goofy-label-{locale} {{\n display: none;\n }}\n .goofy-locale-{locale} .goofy-label-{locale} {{\n display: inline;\n }}\"\"\".format(locale=locale))\n return '\\n'.join(styles)",
"def style_lines(self):\n self.parent.finalize()\n for name, svg in self.iter_svgs(): # recurse here\n for line in svg._meta.style_lines():\n yield line\n if isinstance(self.parent.style, str):\n yield self.parent.style\n else:\n for cls in self.parent.style:\n yield \"%s {\" % str(cls)\n for key, value in self.parent.style[cls].items():\n yield \" %s: %s;\" % (key, value)\n yield \"}\"",
"def generateInlineCSS():",
"def select_stylestrs(cfgstr):\n stylestrs = []\n for s in cfgstr.split():\n if s in vars(fmt):\n stylestrs.append(s)\n return stylestrs",
"def embed_styles(self):\n for style in self.book.xpath(\"//link[@rel='stylesheet']\"):\n style_raw = self.get_remote_content(style.attrib[\"href\"])\n if style_raw != None:\n style_content = style_raw.decode(\"utf-8\")\n new_style = html.Element(\"style\")\n new_style.attrib[\"type\"] = \"text/css\"\n new_style.text = style_content \n style.xpath(\"//head\")[0].insert(0, new_style)\n style.getparent().remove(style)",
"def apply_styles(source, styles):\n soup = BeautifulSoup(source)\n\n for style in styles:\n for markup in soup.findAll(style.markup):\n markup['style'] = style.style.strip()\n\n return soup.prettify()",
"def readBeerStyles():\n styles = []\n with open('../data/styles.csv') as stylesFile:\n reader = csv.DictReader(stylesFile)\n for row in reader:\n if row['style'] not in styles:\n styles.append(row['style'])\n return styles",
"def get_css(self, selection='all'):\n self._collect()\n parts = []\n if selection == 'all':\n for key in self._css:\n parts.extend(self._css[key])\n else:\n parts.extend(self._css[selection])\n return '\\n\\n'.join(parts)",
"def get_styles(u):\n stat, ds_request = u.request(method = 'GET',\n path = 'rest/styles.json',\n payload = None,\n mime = 'application/json')\n json_data = json.loads(ds_request)\n if json_data.get('styles') == '':\n return None\n styles = json_data.get('styles').get('style')\n\n out = {}\n for style in styles:\n out[style.get('name')] = {'href': style.get('href')}\n return out",
"def GetKeywords(self):\n kwlist = [CSS1_KEYWORDS , CSS_PSUEDO_CLASS]\n # 2.9 supports CSS3 so for 2.8 just add CSS3 keywords to the css2 list \n if wx.VERSION < (2, 9, 0, 0, ''):\n css2_kw = (CSS2_KEYWORDS[0], \" \".join((CSS2_KEYWORDS[1], CSS3_KEYWORDS[1])))\n kwlist.append(css2_kw)\n else:\n kwlist.append(CSS2_KEYWORDS)\n kwlist.append(CSS3_KEYWORDS)\n kwlist.append(PSEUDO_ELEMENTS)\n return kwlist",
"def get_style_lst(db_name=DB_NAME):\n\n style_lst = []\n db_dest = 'database/' + db_name\n conn = sqlite3.connect(db_dest)\n cur = conn.cursor()\n\n statement = '''\n SELECT DISTINCT NumOfBd\n FROM Houses\n ORDER BY NumOfBd\n ;'''\n result = cur.execute(statement)\n result_lst = result.fetchall()\n for (num_of_bd,) in result_lst:\n style_lst.append(num_of_bd)\n\n conn.close()\n return style_lst",
"def get_styles():\n base_styles = {\n \"text-align\": \"center\",\n \"border\": \"1px solid #ddd\",\n \"padding\": \"7px\",\n \"border-radius\": \"2px\",\n }\n text_styles = {\n \"background-color\": \"#eee\",\n \"margin\": \"auto\",\n \"width\": \"50%\"\n }\n text_styles.update(base_styles)\n\n button_styles = {\n \"text-decoration\": \"none\",\n }\n button_styles.update(base_styles)\n\n fig_style = {\n \"padding\": \"10px\",\n \"width\": \"80%\",\n \"margin\": \"auto\",\n \"margin-top\": \"5px\"\n }\n fig_style.update(base_styles)\n return {\n \"text_styles\" : text_styles,\n \"base_styles\" : base_styles,\n \"button_styles\" : button_styles,\n \"fig_style\": fig_style,\n }",
"def stylecrunch(stystr):\n return dict(pair.split(\":\") for pair in semicolons.findall(stystr))",
"def line_styles (self):\n return self._line_styles",
"def get_css(soup, fileDict):\r\n for link in soup.findAll('link'):\r\n if link['type'] == 'text/css':\r\n css = DOMAIN + link['href']\r\n if css not in fileDict['cssUrls']:\r\n fileDict['cssUrls'].append(css)\r\n fileName = css[css.rfind(\"/\") + 1:]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[:fileName.find(\"?\")]\r\n fileDict['cssFileNames'].append(fileName)\r\n fileName = css.rfind('/')\r\n return fileDict"
] |
[
"0.7197739",
"0.67697215",
"0.6636097",
"0.6589225",
"0.64021933",
"0.631277",
"0.63127476",
"0.6253289",
"0.6182361",
"0.6136956",
"0.6096252",
"0.60787004",
"0.6058815",
"0.60491645",
"0.60416996",
"0.5828495",
"0.5809348",
"0.579352",
"0.5786266",
"0.5776939",
"0.5715244",
"0.5682517",
"0.56520545",
"0.5630407",
"0.5525872",
"0.5476607",
"0.54506767",
"0.5408924",
"0.5407261",
"0.5397267"
] |
0.77601963
|
0
|
Return the list of css URLs, sorted by order of insertion
|
def _get_css_url(self):
return [(url, attributes) for (url, (order, attributes)) in sorted(self._css_url.items(), key=operator.itemgetter(1))]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sort_urls(urls):\n order = {\"css\": 0, \"js\": 1}\n urls.sort(key=lambda x: order.get(x.rsplit(\".\")[-1].lower(), 2))\n return urls",
"def get_css(soup, fileDict):\r\n for link in soup.findAll('link'):\r\n if link['type'] == 'text/css':\r\n css = DOMAIN + link['href']\r\n if css not in fileDict['cssUrls']:\r\n fileDict['cssUrls'].append(css)\r\n fileName = css[css.rfind(\"/\") + 1:]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[:fileName.find(\"?\")]\r\n fileDict['cssFileNames'].append(fileName)\r\n fileName = css.rfind('/')\r\n return fileDict",
"def _get_aggregated_css(self):\n css = self.aggregated_css\n \n # Retrieve CSS rel links from HTML pasted and aggregate into one string.\n rel_links = 'link[rel=stylesheet],link[rel=StyleSheet],link[rel=STYLESHEET]'\n for element in self.document.cssselect(rel_links):\n try:\n css_path = element.get('href')\n \n # If a base URL was passed, we attempt to find the resources \n # based off of that URL.\n if base_url:\n if element.get('href').lower().find('http://', 0) < 0:\n parsed_url = urlparse.urlparse(base_url)\n css_path = urlparse.urljoin('%s://%s' % (\n parsed_url.scheme,\n parsed_url.hostname,\n ), css_path)\n \n # Grab the CSS from the URL.\n f = urllib.urlopen(css_path)\n css = css + ''.join(f.read())\n \n # Remove the <link> element from the HTML.\n element.getparent().remove(element)\n \n except:\n raise IOError('The stylesheet %s could not be found' % \n element.get(\"href\"))\n \n # Include inline style elements from <style> tags. Go through each \n # element, grab the CSS and then remove it after.\n style_blocks = 'style,Style,STYLE'\n for element in self.document.cssselect(style_blocks):\n css = css + element.text\n element.getparent().remove(element)\n \n return css",
"def _get_anonymous_css(self):\n return [css for (order, css) in sorted(self._anonymous_css)]",
"def urls(self) -> list[str]:\r\n ...",
"def get_urls(inputfiles):\n urls = []\n scheme_rgx = re.compile(r'^https?://')\n for ifile in inputfiles:\n urls.append(ifile.read().splitlines())\n urls = set([n for l in urls for n in l])\n urls = list(filter(None, urls))\n for i in range(len(urls)):\n if not scheme_rgx.match(urls[i]):\n urls[i] = 'http://' + urls[i]\n return urls",
"def read_urls(filename):\n \n urls = []\n with open(filename, 'r') as f:\n for line in f:\n if 'puzzle' in line:\n match = re.search(r'GET\\s(.*)HTTP', line)\n url = match.group(1)\n urls.append(url.strip())\n sorted_urls = sorted(set(urls))\n for url in sorted_urls:\n print (url[-8:-4])\n return sorted_urls",
"def collect_css(self, location):\n css_files = glob(join(location, \"*.css\"))\n\n return css_files",
"def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)",
"def update_css_urls(soup, fileDict, index=False):\r\n for a in soup.find_all('link', {'type': 'text/css'}):\r\n for (cssUrl, cssFileName) in zip(fileDict['cssUrls'],\r\n fileDict['cssFileNames']):\r\n if cssUrl.find(a['href']) > 0:\r\n #if a['href'] == urllib.parse.urlparse(cssUrl).path:\r\n if index == True:\r\n a['href'] = './formatting/' + cssFileName\r\n else:\r\n a['href'] = '../formatting/' + cssFileName",
"def get_urls():\r\n return []",
"def extract_images(files):\n images = []\n\n for css_path in files:\n if not css_path.endswith('.css'):\n continue\n with open(css_path, 'r') as css_in:\n css_content = css_in.read()\n # Look for all url() declarations within the CSS rules.\n imgs = re.findall(r'''url\\(['\"]?([^\\)'\"]+)['\"]?\\)''', css_content)\n css_dir = os.path.dirname(css_path)\n if imgs:\n for img in imgs:\n if img.lower().startswith('http'):\n # External image to cache.\n images.append(img)\n continue\n parsed = urlparse(img)\n img = parsed.path\n path = os.path.normpath(os.path.join(css_dir, img))\n if not os.path.exists(path):\n raise ValueError('image %r in CSS file %r does not '\n 'exist at %r' % (img, css_path, path))\n url = media_path_to_url(path)\n url = recreate_rel_url(url, parsed)\n images.append(url)\n\n return set(images)",
"def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list",
"def getURLs():",
"def read_urls(filename):\n # Searches the file for any urls containing \"puzzle\", removing duplicates\n # and then sorting them by the word before .jpg\n with open(filename) as f:\n urls = set(re.split(r'(\\S+)', f.read()))\n urls = filter(lambda url: \"puzzle\" in url, urls)\n server = re.split('_', filename)[1]\n for i, url in enumerate(urls):\n urls[i] = 'https://' + server + '/' + url\n return sorted(urls, key=lambda x: re.findall(r'(\\w+).jpg', x))",
"def codeup_blog_urls():\n \n url1 = 'https://codeup.com/codeup-news/codeup-launches-first-podcast-hire-tech/' \n\n url2 ='https://codeup.com/tips-for-prospective-students/why-should-i-become-a-system-administrator/'\n \n url3 ='https://codeup.com/codeup-news/codeup-candidate-for-accreditation/'\n \n url4 ='https://codeup.com/codeup-news/codeup-takes-over-more-of-the-historic-vogue-building/'\n \n url5 ='https://codeup.com/codeup-news/inclusion-at-codeup-during-pride-month-and-always/'\n \n return [url1, url2, url3, url4, url5]",
"def read_urls(filename):\n # +++your code here+++\n result = []\n if not path_exists(filename):\n print 'Path ' + filename + ' doesn\\'t exist!'\n sys.exit(1)\n \n # get base url from the filename\n match = re.search(r'\\S*_(\\S*)', filename)\n host = 'http://' + match.group(1)\n \n # read file for urls\n file = open(filename, 'rU')\n for line in file:\n match = re.search(r'\\S*puzzle\\S*.jpg', line)\n if match:\n result.append(host + match.group())\n file.close()\n # sort the list and remove duplicates (-> set)\n return sorted(set(result), key=sortedFn)\n #return sorted(set(result))",
"def css_url(self, url, **kw):\n self._css_url.setdefault(absolute_url(url, self.static_url), (self._order, kw))\n self._order += 1\n return ()",
"def all_headlines_from(url):\n pass",
"def get_urls(url_list='urls.blur'):\n with open(os.path.join('..', 'data', url_list), 'r') as f:\n urls = [tuple(line.split('\\t')) for line in f.read().split('\\n') \n if line and line[0] != '#']\n return urls",
"def parse(self):\n if not os.path.exists(self.file):\n raise Exception(\"File not found\")\n f = open(self.file)\n\n content = f.read()\n soup = BeautifulSoup(content, \"html.parser\")\n if self.prefix:\n css_selector = \"a[href^='\"+self.prefix+\"']\"\n else:\n css_selector = 'a'\n links = soup.select(css_selector)\n return [item['href'] for item in links]",
"def get_website_URLs():\n\tfilepath = os.path.dirname(os.path.realpath(__file__)) +\"/web_sources\"\n\tf = open(filepath, 'r')\n\twebsites = []\n\tfor line in f:\n\t\tif line != \"\\n\":\n\t\t\tendl_index = line.index('\\n')\n\t\t\tclean_line = line[:endl_index]\n\t\t\tnew_list = clean_line.split(' ', 1)\n\t\t\twebsites.append(new_list)\n\tf.close()\n\treturn websites",
"def get_urls(r):\n url_list = find_urls(r)\n url_list += find_tag_urls(r)\n return set(url_list)",
"def get_links_from_url(url):\n return [get_base(url)]",
"def _get_named_css(self):\n\n return [(name, style, attributes) for (name, (order, style, attributes)) in sorted(self._named_css.items(), key=operator.itemgetter(1))]",
"def read_urls(filename, server_name='http://code.google.com/'):\n # Construct unique URLs from file as - http://code.google.com/<url from file>\n animal_list = []\n ordered_list = []\n src_file = open(filename, 'rU')\n for line in src_file :\n animal_path = re.search( 'GET\\s+/(.+jpg)', line )\n if animal_path is not None :\n if animal_path.group(1) not in animal_list :\n animal_list.append( animal_path.group(1) )\n ordered_list = sorted(animal_list,key=sort_img_name)\n # Used in in range loop to operate on ordered_list rather than shallow copy, e.g. for path in ordered_list\n for i in range(0, len(ordered_list), 1) :\n ordered_list[i] = server_name + ordered_list[i]\n return ordered_list",
"def sort_properties(css_unsorted_string):\n log.debug(\"Alphabetically Sorting all CSS / SCSS Properties.\")\n css_pgs = _compile_props(CSS_PROPS_TEXT, grouped=False) # Do Not Group.\n pattern = re.compile(r'(.*?{\\r?\\n?)(.*?)(}.*?)|(.*)',\n re.DOTALL + re.MULTILINE)\n matched_patterns = pattern.findall(css_unsorted_string)\n sorted_patterns, sorted_buffer = [], css_unsorted_string\n RE_prop = re.compile(r'((?:.*?)(?:;)(?:.*?\\n)|(?:.*))',\n re.DOTALL + re.MULTILINE)\n if len(matched_patterns) != 0:\n for matched_groups in matched_patterns:\n sorted_patterns += matched_groups[0].splitlines(True)\n props = map(lambda line: line.lstrip('\\n'),\n RE_prop.findall(matched_groups[1]))\n props = list(filter(lambda line: line.strip('\\n '), props))\n props = _props_grouper(props, css_pgs)\n sorted_patterns += props\n sorted_patterns += matched_groups[2].splitlines(True)\n sorted_patterns += matched_groups[3].splitlines(True)\n sorted_buffer = ''.join(sorted_patterns)\n return sorted_buffer",
"def parse_links(self, source):\n all_links = set()\n for tag in soup(source).findAll(\"a\", {\"href\": True}):\n val = tag.attrMap[\"href\"]\n urls = re.findall(\"\"\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\"\"\", val)\n if len(urls) == 1:\n all_links.add(urls[0])\n return sorted(list(all_links))",
"def _get_javascript_url(self):\n return [(url, attributes) for (url, (order, attributes)) in sorted(self._javascript_url.items(), key=operator.itemgetter(1))]",
"def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)"
] |
[
"0.6759898",
"0.65670496",
"0.6496946",
"0.63735026",
"0.62205386",
"0.62195784",
"0.6213182",
"0.6068431",
"0.6044544",
"0.60424286",
"0.59850913",
"0.59765625",
"0.59358805",
"0.59200627",
"0.58554524",
"0.5838407",
"0.58196706",
"0.5811292",
"0.57994336",
"0.57936436",
"0.5791683",
"0.5754939",
"0.5736835",
"0.5736332",
"0.5732065",
"0.57289875",
"0.5644728",
"0.5644431",
"0.5624185",
"0.55950135"
] |
0.7716838
|
0
|
Return the list of named javascript codes, sorted by order of insertion
|
def _get_named_javascript(self):
return [(name, js, attributes) for (name, (order, js, attributes)) in sorted(self._named_javascript.items(), key=operator.itemgetter(1))]
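A small sketch (plain dicts, made-up data) of one consequence of the shared _order counter: CSS and JavaScript registrations draw from the same counter, so each list returned by these getters comes back in the order its entries were registered, even when registrations of different kinds were interleaved.

import operator

named_css, named_js = {}, {}
order = 0

def remember(store, name, payload):
    # Same setdefault / shared-counter pattern as the renderer methods.
    global order
    store.setdefault(name, (order, payload))
    order += 1

remember(named_js,  'analytics', 'track()')
remember(named_css, 'reset',     '* { margin: 0 }')
remember(named_js,  'widgets',   'initWidgets()')

js_in_order = [n for n, _ in sorted(named_js.items(), key=operator.itemgetter(1))]
print(js_in_order)   # ['analytics', 'widgets']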
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_anonymous_javascript(self):\n return [js for (order, js) in sorted(self._anonymous_javascript)]",
"def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames",
"def get_code():\n return inspect.getsource(sort)",
"def list():\n\n return cache.codeTableList()",
"def codelists():\n return CodelistSet()",
"def parseNames(self, compiledCode):\n res = []\n if not compiledCode is None:\n res = compiledCode.co_names\n for co in compiledCode.co_consts:\n if not co is None and isinstance(co, CodeType):\n res += co.co_names\n return res",
"def encode(sourcelist,code):\n answer = \"\"\n for s in sourcelist:\n co = find(lambda p: p.name == s, code)\n if ( not co ):\n import sys\n print >> sys.stderr, \"Warning: symbol\",`s`,\"has no encoding!\"\n pass\n else:\n answer = answer + co.word\n pass\n return answer",
"def js_minify2(js):\n log.debug(\"Obfuscating Javascript variables names inside functions.\")\n # if eval() or with{} is used on JS is not too Safe to Obfuscate stuff.\n is_ok = \"eval(\" not in js and \"with{\" not in js and \"with {\" not in js\n return slim_func_names(slim_params(js)) if is_ok else js.strip()",
"def linecodes_all_names(self) -> str:\n return Bridge.var_array_function(self.dss_obj.LineCodesV, 6, None, '')",
"def get_code(self) -> List[str]:\n if self.__prefix__ == \"\":\n out = []\n else:\n out = [self.__prefix__]\n\n if self.__spacing__ == \"\":\n return out + self.__code_block__\n\n for line in self.__code_block__:\n out.append(self.__spacing__ + line)\n return out",
"def get_pcode_list(self) -> List[str]:\n return self.pcodes",
"def slim_func_names(js):\n renamed_func, functions = [], re.compile('(function (\\w+)\\()').findall(js)\n new_names_generator = NamesGenerator()\n for whole_func, func_name in functions:\n count = js.count(func_name) # more than 1 mention of the function\n if len(func_name) > 3 and count > 1: # function name larger than 3\n new_name = new_names_generator.next()\n if re.findall(r'\\b%s\\b' % re.escape(new_name), js):\n continue\n js = re.sub(r'\\b%s\\b' % re.escape(func_name), new_name, js)\n renamed_func.append((func_name, new_name))\n list_of_replacements = ['{}={}'.format(x, y) for (x, y) in renamed_func]\n js_function_name_replacements = ';var {};'.format( # ;var a=b,c=d; or ''\n ','.join(list_of_replacements)) if len(list_of_replacements) else \"\"\n return js + js_function_name_replacements",
"def get_ordered(statistic, source_text):\n ordered = []\n list_statictic = list(statistic.keys())\n for symbol in source_text:\n if symbol in list_statictic:\n ordered.append(chr(symbol))\n return ordered",
"def _get_bulma_js() -> List[str]:\n return list(get_js_files())",
"def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])",
"def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)",
"def allFunctions(self):\n\t\tmodulos=sublime.decode_value(open(RutasPython.funciones()).read())\n\t\tlista=[]\n\t\tfor modulo in modulos:\n\t\t\tlista+=[ (funcion+\"\\t•\"+modulo, self.ponerCursor(modulo+\".\"+funcion)) for funcion in modulos[modulo]]\n\t\treturn sorted(lista)",
"def _get_javascript_url(self):\n return [(url, attributes) for (url, (order, attributes)) in sorted(self._javascript_url.items(), key=operator.itemgetter(1))]",
"def extract_messages_from_javascript_code(code: str) -> list[tuple[int, str, str | None]]:\n\n\tmessages = []\n\n\tfor message in extract_javascript(\n\t\tcode,\n\t\tkeywords=[\"__\"],\n\t\toptions={},\n\t):\n\t\tlineno, _func, args = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = None\n\n\t\tif isinstance(args, tuple) and len(args) == 3 and isinstance(args[2], str):\n\t\t\tcontext = args[2]\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages",
"def _javascript(self, script):\n self._anonymous_javascript.append((self._order, script))\n self._order += 1",
"def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)",
"def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')",
"def get_code(self, obj):\n return [], []",
"def list_code(self, ofile=sys.stdout):\r\n for i, line in enumerate(self.code().split('\\n')):\r\n print >> ofile, ('%4i' % (i + 1)), line\r\n ofile.flush()",
"def sort_by_version(compiled_re, names):\n annotated_names = [([int(n) for n in compiled_re.match(name).groups()], name) for name in names]\n annotated_names.sort()\n return [annotated_name[1] for annotated_name in reversed(annotated_names)]",
"def decode_cpp_function_names(self) -> None:\n with Popen(['c++filt'], stdin=PIPE, stdout=PIPE, universal_newlines=True) as proc:\n for func in self.source_functions:\n proc.stdin.write(func.name + '\\n')\n proc.stdin.flush()\n func.pretty_name = proc.stdout.readline().rstrip('\\n\\r')",
"def names(self) -> list[str]:",
"def js_data(self):\n js_providers = getAdapters((self.context, self.request, self.view), IJSObjectDataProvider)\n results = []\n for name, provider in js_providers:\n if not name:\n raise ComponentLookupError('IJSObjectDataProvider must be a named adapter')\n names = name.split('.')\n var_defs = ''\n parts = []\n for n in names[:-1]:\n parts.append(n)\n var_defs += VAR_DEF.replace('{{name}}', '.'.join(parts))\n parts.append(names[-1])\n code = JS_SCRIPT.replace('{{placeholder}}', var_defs)\n code = code.replace('{{var_assignment}}',\n VAR_ASSIGN.replace('{{name}}',\n '.'.join(parts)).replace('{{object}}',\n json.dumps(provider())))\n results.append(code)\n return results",
"def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal",
"def opcode_list(self, script):\n opcodes = []\n new_pc = 0\n try:\n for opcode, data, pc, new_pc in self.get_opcodes(script):\n opcodes.append(self.disassemble_for_opcode_data(opcode, data))\n except ScriptError:\n opcodes.append(binascii.hexlify(script[new_pc:]).decode(\"utf8\"))\n\n return opcodes"
] |
[
"0.715811",
"0.6300202",
"0.60737324",
"0.60523796",
"0.59363806",
"0.5925758",
"0.58533365",
"0.578013",
"0.57658386",
"0.57317615",
"0.56563723",
"0.5631284",
"0.56270975",
"0.5598216",
"0.5595417",
"0.55879956",
"0.55612814",
"0.55516833",
"0.55338776",
"0.54731494",
"0.5463972",
"0.54608405",
"0.54208916",
"0.5401348",
"0.5386134",
"0.53722286",
"0.5370417",
"0.5350386",
"0.53418",
"0.53241456"
] |
0.7621781
|
0
|
Return the list of javascript URLs, sorted by order of insertion
|
def _get_javascript_url(self):
return [(url, attributes) for (url, (order, attributes)) in sorted(self._javascript_url.items(), key=operator.itemgetter(1))]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sort_urls(urls):\n order = {\"css\": 0, \"js\": 1}\n urls.sort(key=lambda x: order.get(x.rsplit(\".\")[-1].lower(), 2))\n return urls",
"def _get_anonymous_javascript(self):\n return [js for (order, js) in sorted(self._anonymous_javascript)]",
"def get_urls():\r\n return []",
"def getURLs():",
"def urls(self) -> list[str]:\r\n ...",
"def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list",
"def _get_bulma_js() -> List[str]:\n return list(get_js_files())",
"def load_url_list(url_list_file):\n url_list = []\n with open(url_list_file, 'r') as f:\n for eachline in f:\n eachline = eachline.rstrip('\\n')\n parts = eachline.split('\\t')\n domain, script_url = parts\n url_list.append((domain, script_url))\n\n return url_list",
"def javascript_url(self, url, **kw):\n self._javascript_url.setdefault(absolute_url(url, self.static_url), (self._order, kw))\n self._order += 1\n return ()",
"def get_links_from_url(url):\n return [get_base(url)]",
"def read_urls(filename):\n \n urls = []\n with open(filename, 'r') as f:\n for line in f:\n if 'puzzle' in line:\n match = re.search(r'GET\\s(.*)HTTP', line)\n url = match.group(1)\n urls.append(url.strip())\n sorted_urls = sorted(set(urls))\n for url in sorted_urls:\n print (url[-8:-4])\n return sorted_urls",
"def read_urls(filename, server_name='http://code.google.com/'):\n # Construct unique URLs from file as - http://code.google.com/<url from file>\n animal_list = []\n ordered_list = []\n src_file = open(filename, 'rU')\n for line in src_file :\n animal_path = re.search( 'GET\\s+/(.+jpg)', line )\n if animal_path is not None :\n if animal_path.group(1) not in animal_list :\n animal_list.append( animal_path.group(1) )\n ordered_list = sorted(animal_list,key=sort_img_name)\n # Used in in range loop to operate on ordered_list rather than shallow copy, e.g. for path in ordered_list\n for i in range(0, len(ordered_list), 1) :\n ordered_list[i] = server_name + ordered_list[i]\n return ordered_list",
"def get_urls(url_list='urls.blur'):\n with open(os.path.join('..', 'data', url_list), 'r') as f:\n urls = [tuple(line.split('\\t')) for line in f.read().split('\\n') \n if line and line[0] != '#']\n return urls",
"def get_contents_of_urls(urls):\n contents = []\n\n for url in urls:\n content = read_url(url)\n parsed_content = json.loads(content)\n contents.extend(parsed_content)\n return contents",
"def from_lists_to_list(lists):\n new_lists = copy.deepcopy(lists)\n list_final = []\n for url_str in lists[-1]:\n if \"index.php\" in url_str or \"javascript\" in url_str:\n new_lists.remove(lists[-1])\n for j in range(len(lists) - 1):\n if lists[j] in new_lists:\n for i in range(j + 1, len(lists)):\n for k in range(len(lists[j])):\n if \"index.php\" in lists[j][k] or \"javascript\" in str(lists[j][k]):\n try:\n new_lists.remove((lists[j]))\n except ValueError:\n pass\n elif lists[j][k] in lists[i]:\n if len(lists[i]) > len(lists[j]):\n try:\n new_lists.remove(lists[j])\n except ValueError:\n pass\n elif len(lists[i]) < len(lists[j]):\n try:\n new_lists.remove(lists[i])\n except ValueError:\n pass\n for mini_list in new_lists:\n list_final.extend(mini_list)\n return list_final",
"def get_xmodule_urls():\r\n if settings.DEBUG:\r\n paths = [path.replace(\".coffee\", \".js\") for path in\r\n settings.PIPELINE_JS['module-js']['source_filenames']]\r\n else:\r\n paths = [settings.PIPELINE_JS['module-js']['output_filename']]\r\n return [staticfiles_storage.url(path) for path in paths]",
"def get_urls(r):\n url_list = find_urls(r)\n url_list += find_tag_urls(r)\n return set(url_list)",
"def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)",
"def inshorts_urls():\n\n url1 = 'https://inshorts.com/en/read/business'\n url2 = 'https://inshorts.com/en/read/sports'\n url3 = 'https://inshorts.com/en/read/technology'\n url4 = 'https://inshorts.com/en/read/entertainment'\n return [url1, url2, url3, url4]",
"def load_chrome_urls():\n\t#path to user's history database (Chrome)\n\tdata_path = os.path.expanduser('~')+\"/Library/Application Support/Google/Chrome/Default\"\n\tfiles = os.listdir(data_path)\n\thistory_db = os.path.join(data_path, 'history')\n\t#querying the db\n\tc = sqlite3.connect(history_db)\n\tcursor = c.cursor()\n\tselect_statement = \"SELECT urls.url FROM urls, visits WHERE urls.id = visits.url;\"\n\tcursor.execute(select_statement)\n\n\tresults = cursor.fetchall() #tuple\n\turls = [result[0] for result in results]\n\treturn urls",
"def codeup_blog_urls():\n \n url1 = 'https://codeup.com/codeup-news/codeup-launches-first-podcast-hire-tech/' \n\n url2 ='https://codeup.com/tips-for-prospective-students/why-should-i-become-a-system-administrator/'\n \n url3 ='https://codeup.com/codeup-news/codeup-candidate-for-accreditation/'\n \n url4 ='https://codeup.com/codeup-news/codeup-takes-over-more-of-the-historic-vogue-building/'\n \n url5 ='https://codeup.com/codeup-news/inclusion-at-codeup-during-pride-month-and-always/'\n \n return [url1, url2, url3, url4, url5]",
"def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links",
"def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls",
"def get_urls(inputfiles):\n urls = []\n scheme_rgx = re.compile(r'^https?://')\n for ifile in inputfiles:\n urls.append(ifile.read().splitlines())\n urls = set([n for l in urls for n in l])\n urls = list(filter(None, urls))\n for i in range(len(urls)):\n if not scheme_rgx.match(urls[i]):\n urls[i] = 'http://' + urls[i]\n return urls",
"def ListUrlEntries(self):\n return [WprUrlEntry(request, self._http_archive[request])\n for request in self._http_archive.get_requests()]",
"def read_urls(filename):\n # Searches the file for any urls containing \"puzzle\", removing duplicates\n # and then sorting them by the word before .jpg\n with open(filename) as f:\n urls = set(re.split(r'(\\S+)', f.read()))\n urls = filter(lambda url: \"puzzle\" in url, urls)\n server = re.split('_', filename)[1]\n for i, url in enumerate(urls):\n urls[i] = 'https://' + server + '/' + url\n return sorted(urls, key=lambda x: re.findall(r'(\\w+).jpg', x))",
"def get_website_URLs():\n\tfilepath = os.path.dirname(os.path.realpath(__file__)) +\"/web_sources\"\n\tf = open(filepath, 'r')\n\twebsites = []\n\tfor line in f:\n\t\tif line != \"\\n\":\n\t\t\tendl_index = line.index('\\n')\n\t\t\tclean_line = line[:endl_index]\n\t\t\tnew_list = clean_line.split(' ', 1)\n\t\t\twebsites.append(new_list)\n\tf.close()\n\treturn websites",
"def get_resource_urls():\n base_url = 'http://developer.pardot.com/'\n pattern = re.compile(\n r'(?ims)\\<a [^>]*?href=\"(kb/api-version-3/[^>]*?/)\"[^>]*?\\>'\n r'[^<]*?\\</a\\>')\n response = requests.get(base_url)\n return [\n '%s/%s' % (base_url, url) for url in pattern.findall(response.text)]",
"def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def _get_named_javascript(self):\n return [(name, js, attributes) for (name, (order, js, attributes)) in sorted(self._named_javascript.items(), key=operator.itemgetter(1))]"
] |
[
"0.7061752",
"0.67775434",
"0.6522144",
"0.63693315",
"0.6277138",
"0.6155464",
"0.61204743",
"0.6007929",
"0.5985964",
"0.5912752",
"0.58802736",
"0.5813567",
"0.57517964",
"0.5739859",
"0.57275695",
"0.5709757",
"0.5709638",
"0.57080024",
"0.56940836",
"0.56722206",
"0.56491506",
"0.564676",
"0.5646173",
"0.56378835",
"0.56174326",
"0.5614239",
"0.56060314",
"0.56021804",
"0.5597056",
"0.5595725"
] |
0.7669407
|
0
|
Register a synchronous action
|
def sync_action(self, renderer, action, with_request):
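# Register the action as a callback on the renderer and store the result in the tag attribute reserved for synchronous actions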
self.set(self._actions[1], renderer.register_callback(self._actions[0], action, with_request))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def perform_action(self) -> None:",
"def register_action(self, name, command):\n action_name = command['action_name']\n if action_name not in self.al_clients:\n action_type = self.get_interface_type(command['interface_type'], '.action')\n self.al_clients[action_name] = ActionClient(self, action_type, action_name)\n\n if self.al_clients[action_name].server_is_ready():\n if action_name in self.offline_actions:\n self.offline_actions.remove(action_name)\n else:\n if action_name not in self.offline_actions:\n self.get_logger().warn(\n 'action {} is not read yet'.format(action_name))\n self.offline_actions.append(action_name)",
"def async_action(self, renderer, action, with_request):\n if not isinstance(action, ajax.Update):\n action = ajax.Update(action=action, with_request=with_request)\n\n self.set(self._actions[2], action.generate_action(self._actions[0], renderer))",
"def action(self, tag, action, with_request):\n tag.async_action(self, action, with_request)",
"def register_action(self, traced_action, action_type):\n command, response = traced_action\n command_placeholder_ids = self._store_placeholders(command)\n return_placeholder_ids = None\n\n if response is not None:\n return_placeholder_ids = self._store_placeholders(response)\n if not isinstance(return_placeholder_ids, (list, tuple)):\n return_placeholder_ids = (return_placeholder_ids,)\n\n action = action_type(*command_placeholder_ids, return_ids=return_placeholder_ids)\n self.actions.append(action)",
"def test_success(self):\n\n @sync_performer\n def succeed(dispatcher, intent):\n return intent\n\n dispatcher = lambda _: succeed\n result = sync_perform(dispatcher, Effect(\"foo\"))\n self.assertEqual(result, \"foo\")",
"def _async_register(service, notifier):\n\n proc = multiprocessing.Process(\n name='Async Registration {}'.format(service.iden),\n target=_register, args=(service, notifier))\n proc.start()",
"def register_periodic_action(self, action, use_bytecode_counter):\n assert isinstance(action, PeriodicAsyncAction)\n # hack to put the release-the-GIL one at the end of the list,\n # and the report-the-signals one at the start of the list.\n if use_bytecode_counter:\n self._periodic_actions.append(action)\n self.has_bytecode_counter = True\n else:\n self._periodic_actions.insert(0, action)\n self._rebuild_action_dispatcher()",
"def registerAction(self, actionId, action): #$NON-NLS-1$\r",
"def action(self, tag, action, with_request):\n tag.sync_action(self, action, with_request)",
"async def before_action(self, action, *args, **kwargs):\n return True",
"def _SetPendingAction(self, cb):\r\n \r\n self.__fPendingAction = cb",
"async def _register_mid(self, mid: int) -> None:\n async with self._pending_operations_condition:\n if mid not in self._pending_operations:\n self._pending_operations[mid] = asyncio.Event()",
"def register_execution(in_progress, future, node):\n in_progress[future] = node",
"def test_sync_perform_effect_function_dispatch(self):\n intent = lambda box: box.succeed(\"foo\")\n self.assertEqual(sync_perform(func_dispatcher, Effect(intent)), \"foo\")",
"async def before_action(self, action: str, *args, **kwargs) -> bool:\n return True",
"def step_async(self, actions):",
"def sync_action(self, renderer, action, with_request):\n f = partial.Partial(self._set_content_type, _action=action, with_request=with_request)\n self.set('src', renderer.add_sessionid_in_url(sep=';') + ';' + renderer.register_callback(2, f, with_request=True))",
"def sync_action(self, renderer, action, with_request):\n href = self.get('href', '').partition('#')\n self.set('href', renderer.add_sessionid_in_url(href[0], (renderer.register_callback(4, action, with_request),)) + href[1] + href[2])",
"def _register(self, comm, handler):",
"async def async_apply_action(self, cmd_name, *args):\n await self.hass.async_add_executor_job(self.apply_action, cmd_name, *args)",
"def take_action(self, action):\n\t\traise NotImplementedError",
"async def pre_action_init(self) -> None:",
"def test_sync_perform_async_effect(self):\n intent = lambda box: None\n self.assertRaises(\n NotSynchronousError, lambda: sync_perform(func_dispatcher, Effect(intent))\n )",
"def add_action(self, action):\n with self._mutex:\n _id = max(self._used_ids) if self._used_ids else 0\n while _id in self._used_ids:\n _id += 1\n self._actions[_id] = action\n self._used_ids.add(_id)\n self._workers[_id] = Thread(target=action.run, args=(self.api,))\n\n return _id",
"def register(self, callback):\n self.callback = callback",
"async def cb_at_start(hass):\n calls.append(1)",
"async def cb_at_start(hass):\n calls.append(1)",
"async def cb_at_start(hass):\n calls.append(1)",
"def sync_start(self):"
] |
[
"0.6163529",
"0.61518764",
"0.60082024",
"0.5948825",
"0.5912427",
"0.58522606",
"0.5807863",
"0.5774992",
"0.5769407",
"0.5764641",
"0.5739238",
"0.570953",
"0.57032704",
"0.56630397",
"0.5657883",
"0.55930287",
"0.558469",
"0.55544263",
"0.55245966",
"0.5490227",
"0.5487672",
"0.54629284",
"0.5457035",
"0.53998464",
"0.5385196",
"0.53555477",
"0.53190887",
"0.53190887",
"0.53190887",
"0.53140736"
] |
0.6893943
|
0
|
Register an asynchronous action
|
def async_action(self, renderer, action, with_request):
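# A plain callable is wrapped into an ajax.Update so client-side update code can be generated for it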
if not isinstance(action, ajax.Update):
action = ajax.Update(action=action, with_request=with_request)
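# Generate the client-side action code and store it in the tag attribute reserved for asynchronous actions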
self.set(self._actions[2], action.generate_action(self._actions[0], renderer))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sync_action(self, renderer, action, with_request):\n self.set(self._actions[1], renderer.register_callback(self._actions[0], action, with_request))",
"async def perform_action(self) -> None:",
"def action(self, tag, action, with_request):\n tag.async_action(self, action, with_request)",
"def _async_register(service, notifier):\n\n proc = multiprocessing.Process(\n name='Async Registration {}'.format(service.iden),\n target=_register, args=(service, notifier))\n proc.start()",
"def register_async_callback(self, async_callback):\n self._async_callbacks.append(async_callback)",
"async def async_apply_action(self, cmd_name, *args):\n await self.hass.async_add_executor_job(self.apply_action, cmd_name, *args)",
"def step_async(self, actions):",
"async def async_attach_trigger(hass, config, action, automation_info):\n trigger_data = automation_info.get(\"trigger_data\", {}) if automation_info else {}\n webhook_id = config.get(CONF_WEBHOOK_ID)\n job = HassJob(action)\n hass.components.webhook.async_register(\n automation_info[\"domain\"],\n automation_info[\"name\"],\n webhook_id,\n partial(_handle_webhook, job, trigger_data),\n )\n\n @callback\n def unregister():\n \"\"\"Unregister webhook.\"\"\"\n hass.components.webhook.async_unregister(webhook_id)\n\n return unregister",
"def register_action(self, name, command):\n action_name = command['action_name']\n if action_name not in self.al_clients:\n action_type = self.get_interface_type(command['interface_type'], '.action')\n self.al_clients[action_name] = ActionClient(self, action_type, action_name)\n\n if self.al_clients[action_name].server_is_ready():\n if action_name in self.offline_actions:\n self.offline_actions.remove(action_name)\n else:\n if action_name not in self.offline_actions:\n self.get_logger().warn(\n 'action {} is not read yet'.format(action_name))\n self.offline_actions.append(action_name)",
"def register_action(self, traced_action, action_type):\n command, response = traced_action\n command_placeholder_ids = self._store_placeholders(command)\n return_placeholder_ids = None\n\n if response is not None:\n return_placeholder_ids = self._store_placeholders(response)\n if not isinstance(return_placeholder_ids, (list, tuple)):\n return_placeholder_ids = (return_placeholder_ids,)\n\n action = action_type(*command_placeholder_ids, return_ids=return_placeholder_ids)\n self.actions.append(action)",
"def call_async(self, name, *args, **kwargs):",
"def registerAction(self, actionId, action): #$NON-NLS-1$\r",
"def register_active_event(t, callback, args, action_runner, plugin, msg_obj, mutex=None):\n def func(func_args):\n action = callback(*func_args)\n if action:\n action_runner(action=action, plugin=plugin, msg_obj=msg_obj)\n register_event(t, func, args, mutex=mutex)",
"def register_periodic_action(self, action, use_bytecode_counter):\n assert isinstance(action, PeriodicAsyncAction)\n # hack to put the release-the-GIL one at the end of the list,\n # and the report-the-signals one at the start of the list.\n if use_bytecode_counter:\n self._periodic_actions.append(action)\n self.has_bytecode_counter = True\n else:\n self._periodic_actions.insert(0, action)\n self._rebuild_action_dispatcher()",
"def register_execution(in_progress, future, node):\n in_progress[future] = node",
"async def execute(self):",
"def register(self, callback):\n self.callback = callback",
"def _SetPendingAction(self, cb):\r\n \r\n self.__fPendingAction = cb",
"def auto_discover():\n auto_registration(\"actions\")",
"async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction",
"async def _register(self, name, source):\n self._last[name] = {}\n\n self._srcTaskList[name] = asyncio.create_task(\n self._monitor(name, source)\n )",
"def step_async(self, actions: np.ndarray) -> None:\n raise NotImplementedError()",
"def add_action(self, action):\n with self._mutex:\n _id = max(self._used_ids) if self._used_ids else 0\n while _id in self._used_ids:\n _id += 1\n self._actions[_id] = action\n self._used_ids.add(_id)\n self._workers[_id] = Thread(target=action.run, args=(self.api,))\n\n return _id",
"async def async_setup(self):\n pass",
"async def async_event(self, event: str, *args, **kwargs):\n for cb in self.event_handlers[event]:\n asyncio.ensure_future(cb(*args, **kwargs), loop=self.loop)",
"def async_attach(self, action: AutomationActionType, variables: Dict[str, Any]):\n\n @callback\n def _remove():\n del self._actions[_remove]\n self._update()\n\n job = HassJob(action)\n\n self._actions[_remove] = (job, variables)\n self._update()\n\n return _remove",
"def async_response(\n func: const.AsyncWebSocketCommandHandler,\n) -> const.WebSocketCommandHandler:\n task_name = f\"websocket_api.async:{func.__name__}\"\n\n @callback\n @wraps(func)\n def schedule_handler(\n hass: HomeAssistant, connection: ActiveConnection, msg: dict[str, Any]\n ) -> None:\n \"\"\"Schedule the handler.\"\"\"\n # As the webserver is now started before the start\n # event we do not want to block for websocket responders\n hass.async_create_background_task(\n _handle_async_response(func, hass, connection, msg),\n task_name,\n )\n\n return schedule_handler",
"async def _execute(self):",
"def async_request(self, callback, *args):\r\n seq = self.send_request(*args)\r\n self.async_replies[seq] = callback",
"async def _register_mid(self, mid: int) -> None:\n async with self._pending_operations_condition:\n if mid not in self._pending_operations:\n self._pending_operations[mid] = asyncio.Event()"
] |
[
"0.65998065",
"0.6508633",
"0.64818287",
"0.62131906",
"0.6178128",
"0.6048961",
"0.5997042",
"0.59651434",
"0.59346753",
"0.59256625",
"0.581508",
"0.57448465",
"0.5705868",
"0.5680487",
"0.5638908",
"0.56294847",
"0.56239974",
"0.56195617",
"0.561077",
"0.55405813",
"0.55399597",
"0.5537668",
"0.5526346",
"0.55095196",
"0.550848",
"0.54754686",
"0.544913",
"0.5447192",
"0.5439205",
"0.5399793"
] |
0.6518126
|
1
|
Register a synchronous action. The action will have to return the image data
|
def sync_action(self, renderer, action, with_request):
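# Bind the action so that the image data it returns is sent back with the proper Content-Type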
f = partial.Partial(self._set_content_type, _action=action, with_request=with_request)
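# Expose the registered callback (with the session id) through the image 'src' attribute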
self.set('src', renderer.add_sessionid_in_url(sep=';') + ';' + renderer.register_callback(2, f, with_request=True))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sync_action(self, renderer, action, with_request):\n self.set(self._actions[1], renderer.register_callback(self._actions[0], action, with_request))",
"def do_start(self, action):\n StaticFile = Pool().get('nereid.static.file')\n\n static_file = StaticFile(Transaction().context.get('active_id'))\n static_file.is_large_file = True\n static_file.save()\n\n post_args = static_file.get_post_form_args()\n\n action['url'] = self.base_url + '?data=' + \\\n base64.b64encode(json.dumps(post_args))\n\n return action, {}",
"async def async_image(self) -> bytes | None:\n image_path = Path(__file__).parent / self._image_filename\n return await self.hass.async_add_executor_job(image_path.read_bytes)",
"async def perform_action(self) -> None:",
"def action(self, tag, action, with_request):\n tag.async_action(self, action, with_request)",
"def async_action(self, renderer, action, with_request):\n if not isinstance(action, ajax.Update):\n action = ajax.Update(action=action, with_request=with_request)\n\n self.set(self._actions[2], action.generate_action(self._actions[0], renderer))",
"def register_action(self, traced_action, action_type):\n command, response = traced_action\n command_placeholder_ids = self._store_placeholders(command)\n return_placeholder_ids = None\n\n if response is not None:\n return_placeholder_ids = self._store_placeholders(response)\n if not isinstance(return_placeholder_ids, (list, tuple)):\n return_placeholder_ids = (return_placeholder_ids,)\n\n action = action_type(*command_placeholder_ids, return_ids=return_placeholder_ids)\n self.actions.append(action)",
"def step_async(self, actions):",
"def take_action(self, action):\n\t\traise NotImplementedError",
"def action(self, tag, action, with_request):\n tag.sync_action(self, action, with_request)",
"def step_async(self, actions: np.ndarray) -> None:\n raise NotImplementedError()",
"def perform_action(self, action_data):\n pass",
"async def before_action(self, action, *args, **kwargs):\n return True",
"def register_action(self, name, command):\n action_name = command['action_name']\n if action_name not in self.al_clients:\n action_type = self.get_interface_type(command['interface_type'], '.action')\n self.al_clients[action_name] = ActionClient(self, action_type, action_name)\n\n if self.al_clients[action_name].server_is_ready():\n if action_name in self.offline_actions:\n self.offline_actions.remove(action_name)\n else:\n if action_name not in self.offline_actions:\n self.get_logger().warn(\n 'action {} is not read yet'.format(action_name))\n self.offline_actions.append(action_name)",
"def test_success(self):\n\n @sync_performer\n def succeed(dispatcher, intent):\n return intent\n\n dispatcher = lambda _: succeed\n result = sync_perform(dispatcher, Effect(\"foo\"))\n self.assertEqual(result, \"foo\")",
"async def execute(self):",
"async def _async_request_image(\n self, request_method: Callable[[], Coroutine[Any, Any, None]]\n ) -> bytes | None:\n if not self.available:\n return None\n image_future = self._loop.create_future()\n self._image_futures.append(image_future)\n await request_method()\n if not await image_future:\n return None\n return self._state.data",
"async def capture_and_upload_screenshot(self) -> None:",
"def sync_action(self, renderer, action, with_request):\n href = self.get('href', '').partition('#')\n self.set('href', renderer.add_sessionid_in_url(href[0], (renderer.register_callback(4, action, with_request),)) + href[1] + href[2])",
"async def _upload(self) -> None:\n\n # filename given?\n filename = str(uuid.uuid4()) if self.filename is None else self.filename\n\n # check\n if self._upload_path is None:\n raise ValueError(\"No upload URL given.\")\n\n # send data and return image ID\n async with aiohttp.ClientSession() as session:\n data = aiohttp.FormData()\n data.add_field(\"file\", self._buffer, filename=self.filename)\n async with session.post(self._upload_path, auth=self._auth, data=data, timeout=self._timeout) as response:\n if response.status == 401:\n log.error(\"Wrong credentials for uploading file.\")\n raise FileNotFoundError\n elif response.status != 200:\n log.error(f\"Could not upload file to filecache: {response.status} {response.reason}\")\n raise FileNotFoundError",
"def load_action(action, controller='', ajax=True):\n return LOAD(controller, action, ajax=ajax)",
"async def async_apply_action(self, cmd_name, *args):\n await self.hass.async_add_executor_job(self.apply_action, cmd_name, *args)",
"async def generic_action(self, request):\n pass",
"def result(self, state, action):\n\t\traise NotImplementedError",
"def GET_upload_sr_img(self, *a, **kw):\r\n return \"nothing to see here.\"",
"def process_action(*args, **kwargs):\n raise NotImplementedError()",
"async def async_camera_image(self) -> bytes:\n websession = async_get_clientsession(self.hass)\n\n with async_timeout.timeout(10):\n response = await websession.get(self._latest_url)\n\n image = await response.read()\n return image",
"async def _execute(self):",
"async def pre_action_init(self) -> None:",
"async def add(self, ctx, url, name):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n data = await resp.read()\n with open(os.path.join(os.getcwd(), \"data\",\n \"image\", name), \"wb\") as img:\n img.write(data)\n await self._image_reload()\n await ctx.message.add_reaction(\"👍\")"
] |
[
"0.61154354",
"0.604753",
"0.5784594",
"0.5754564",
"0.5661058",
"0.56537765",
"0.52781683",
"0.5231107",
"0.52116144",
"0.5155357",
"0.51319826",
"0.5126661",
"0.5122823",
"0.51012886",
"0.50637245",
"0.50510484",
"0.503998",
"0.5019895",
"0.50023127",
"0.49943146",
"0.4991569",
"0.49911964",
"0.4947828",
"0.49476478",
"0.49300092",
"0.49251306",
"0.49207747",
"0.49168545",
"0.4909232",
"0.48948538"
] |
0.6150861
|
0
|
Create an associated asynchronous HTML renderer
|
def AsyncRenderer(self, *args, **kw):
# If no arguments are given, this renderer becomes the parent of the
# newly created renderer
if not args and not kw:
args = (self,)
return AsyncRenderer(*args, **kw)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def AsyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return self.__class__(*args, **kw)",
"def render(self):\n raise NotImplementedError(\"Renderer is an abstract class\")",
"def run(self):\n try:\n self.event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.event_loop)\n template_system = LegionTemplateEngine(self.template_path, self.output_path)\n template_system.render_loop()\n\n except Exception as exception:\n self.raised_exception = exception\n raise self.raised_exception",
"def _render(self) -> None:\n pass",
"def task_render():\n target = 'analysis.html'\n dep = 'analysis.ipynb'\n return {\n 'file_dep': [dep],\n 'targets': [target],\n 'actions': [\n f\"jupyter nbconvert --execute --to html {dep}\"\n ],\n 'clean': True\n }",
"def render(self, r):\n raise NotImplementedError",
"def render():\n html = request.get_data().decode('utf-8')\n sio.emit('render', html)\n return 'OK'",
"def __html__(self):\n return self.html",
"def end_rendering(self, output):\n if self.wrapper_to_generate:\n output = self.div(output, id=self.id, class_='nagare-generated nagare-async-view')\n\n return output",
"def render(self):\n raise NotImplementedError",
"async def render(\n self, filename: str, *args: dict, **kwargs: typing.Any\n ) -> str:\n with self._enable_async():\n return await self._get_template(filename).render_async(\n *args, **kwargs\n )",
"def run(self) -> None:\n self._render()\n print(self.sio.getvalue())",
"def render(self):\n raise NotImplementedError()",
"def _render_callback(self, _sim, _viewer):\n pass",
"def main():\r\n return render_template(\"UI.html\")",
"def render(self):\n return self",
"def get_html(self):\r\n pass",
"def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return self.__class__(*args, **kw)",
"def render(self):\n self.run()\n return [{'dest' : self.dest,\n 'text' : self.tmpl.render(**self.data)}]",
"def __init__(self, static_url):\n super(AsyncHeadRenderer, self).__init__(static_url=static_url)\n\n self._anonymous_css = [] # CSS\n self._anonymous_javascript = [] # Javascript code",
"async def respondHTML(self, html):\n self.HTMLResponse = html",
"def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)",
"def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)",
"def render_view(self, h, *args):\n return self.view(h)",
"def createBasicRenderSetup():\n\n pass",
"def rawHTMLrendered(self):",
"def render(self, *args, **kwargs):\r\n raise NotImplementedError",
"def render(self):\n raise RenderNotImplemented('Render function is not implemented.')",
"def render_async_head(self, h, *args):\n return \"nagare_loadAll(%s, %s, %s, %s, %s, %s)\" % (\n ajax.py2js(self._get_named_css(), h),\n ajax.py2js(r'\\n'.join(self._get_anonymous_css()), h),\n ajax.py2js(self._get_css_url(), h),\n ajax.py2js(self._get_named_javascript(), h),\n ajax.py2js(';'.join(self._get_anonymous_javascript()), h),\n ajax.py2js(self._get_javascript_url(), h)\n )",
"def main():\n return render_template('doc.html', docid=queue.pop(0))"
] |
[
"0.710635",
"0.5953376",
"0.59113735",
"0.58568114",
"0.5802428",
"0.5719081",
"0.5716875",
"0.5715162",
"0.5706176",
"0.56993824",
"0.56985855",
"0.5662752",
"0.5657352",
"0.56467086",
"0.5646058",
"0.5620385",
"0.5608474",
"0.5581224",
"0.5558615",
"0.5558548",
"0.55566925",
"0.55506337",
"0.55506337",
"0.55418485",
"0.55054563",
"0.549432",
"0.549277",
"0.54868203",
"0.5480112",
"0.54774266"
] |
0.7090812
|
1
|
Generate the DOCTYPE of the document. If a doctype was set on the response object, use it; else, use the HTML or XHTML doctype of this renderer
|
def doctype(self):
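# Prefer a doctype explicitly set on the response object, otherwise fall back to this renderer's XML or HTML doctype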
response = self.response
if response.doctype is not None:
return response.doctype
return self.XML_DOCTYPE if response.xml_output else self.HTML_DOCTYPE
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def xmldoc(self, doctype):\n # we hack in the DOCTYPE using the parser\n docstr = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <!DOCTYPE %s SYSTEM \"file:///usr/share/emane/dtd/%s.dtd\">\n <%s/>\"\"\" % (doctype, doctype, doctype)\n # normally this would be: doc = Document()\n return parseString(docstr)",
"def __init__(self, doctype: str = \"\", spaces: int = 4) -> None:\n self.auto_spacing = True\n self.depth = 0\n self.spaces = spaces\n if doctype in PyHTML.DOCTYPES:\n self.elems = [PyHTML.DOCTYPES[doctype]]\n elif not doctype:\n self.elems = []\n else:\n raise ValueError(f\"Unknown doctype declaration: '{doctype}'\")",
"def _test_doctype(self, doctype_fragment):\n doctype_str = '<!DOCTYPE %s>' % doctype_fragment\n markup = doctype_str + '<p>foo</p>'\n soup = self.soup(markup)\n doctype = soup.contents[0]\n self.assertEqual(doctype.__class__, Doctype)\n self.assertEqual(doctype, doctype_fragment)\n self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)\n\n # Make sure that the doctype was correctly associated with the\n # parse tree and that the rest of the document parsed.\n self.assertEqual(soup.p.contents[0], 'foo')",
"def content_type(self):\n response = self.response\n\n if response.content_type:\n return response.content_type\n\n return 'application/xhtml+xml' if response.xml_output else 'text/html'",
"def new(doctype: str = \"\", spaces: int = 4) -> \"PyHTML\":\n return PyHTML(doctype, spaces)",
"def docType():\n return (u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n u'<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 '\n u'Transitional//EN\" '\n u'\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n')",
"def get_doctype(data):\n if not hasattr(data, 'readlines'):\n data = StringIO(data)\n for line in data.readlines():\n line = line.strip()\n if not line:\n continue\n if line.startswith('<?xml') or line.startswith('<!-- '):\n continue\n m = DT_RGX.match(line)\n if m is not None:\n return m.group(1)\n else:\n raise DTException('Unable to match doctype in \"%s\"' % line)",
"def test_search_category_starts_with_doctype(self):\n self.assertTrue(self.response.content.decode().startswith('<!DOCTYPE html>'), f\"{FAILURE_HEADER}Your search_category.html template does not start with <!DOCTYPE html> -- this is requirement of the HTML specification.{FAILURE_FOOTER}\")",
"def handle_decl(self, decl):\n if verbose(): print(\"TIParser.handle_decl(self, '%s')\" % (decl))\n if 'doctype' in decl.lower():\n self.doctype = \"present\"",
"def test_search_starts_with_doctype(self):\n self.assertTrue(self.response.content.decode().startswith('<!DOCTYPE html>'), f\"{FAILURE_HEADER}Your search.html template does not start with <!DOCTYPE html> -- this is requirement of the HTML specification.{FAILURE_FOOTER}\")",
"def setXhtmlDocument(self, zxhtmlDocument, bodyOnly = True): #$NON-NLS-1$\r",
"def init(content_type='text/html'):\n header('Content-type', content_type)",
"def render(self, context):\n engine = Renderer()\n engine.environment.filters['format_date'] = format_date\n engine.environment.filters['format_datetime'] = format_datetime\n result = engine.render(self.template, **context)\n response = HttpResponse(\n content_type='application/vnd.oasis.opendocument.text; charset=UTF-8'\n )\n response['Content-Disposition'] = 'inline; filename=' + self.filename\n with tempfile.NamedTemporaryFile() as output:\n output.write(result)\n output.flush()\n output = open(output.name, 'rb')\n response.write(output.read())\n return response",
"def render_to_response(self, context, **response_kwargs):\n\n response = HttpResponse(mimetype=self.mimetype)\n response['Content-Disposition'] = ('attachment; filename=%s.%s' %\n (context['filename'],\n self.extension))\n f = render_to_pdf(self.template_name, context)\n response.write(f)\n return response",
"def render(self, out_file=None, cur_ind=\"\"):\n header_string = \"<!DOCTYPE html>\\n\"\n output_string = Element.render(self, out_file=None, cur_ind=cur_ind)\n if out_file:\n out_file.write(header_string + output_string)\n return header_string + output_string",
"def create_html(self):\n # Add html content to the self.doc\n self.doc.asis('<!DOCTYPE html>')\n with self.tag('html'):\n self.design_header()\n self.design_body()\n # Write html content from self.doc\n with codecs.open(self.filestream.name, 'w', 'utf-8') as f:\n html_content = indent(\n self.doc.getvalue(),\n indentation=' ',\n newline='\\r\\n'\n )\n f.write(html_content)",
"def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. \n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response",
"def visit_document(self, node):\n self.printer.start_document()\n if node.xml_system_id:\n for child in node.xml_children:\n if child.xml_type == tree.element.xml_type:\n self.printer.doctype(child.xml_qname, node.xml_public_id, node.xml_system_id)\n break\n\n for child in node.xml_children:\n self.visit(child)\n self.printer.end_document()\n return",
"def _render(cls, request, code, ctype, msg):\r\n request.setResponseCode(code)\r\n request.setHeader('content-type', ctype)\r\n request.write(msg)\r\n request.finish()",
"def xml():\n response = make_response(render_template(\"sample.xml\"))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response",
"def gen_header():\n return (\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE html '\n + 'PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" '\n + '\"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">'\n + '<html xmlns=\"http://www.w3.org/1999/xhtml\"> '\n + '<head><meta '\n + 'http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"/> '\n + '</head> <body>')",
"def add_soup(response, soup_config):\r\n if \"text/html\" in response.headers.get(\"Content-Type\", \"\"):\r\n response.soup = BeautifulSoup(\r\n response.content, \"html.parser\", **soup_config)",
"def is_html(self):\r\n return self.__content_type == html_ctype",
"def render(self, data, accepted_media_type=None, renderer_context=None):\n if data is None:\n return ''\n\n stream = StringIO()\n\n xml = SimplerXMLGenerator(stream, self.charset)\n xml.startDocument()\n #xml.startElement(\"root\", {})\n\n self._to_xml(xml, data)\n\n #xml.endElement(\"root\")\n xml.endDocument()\n return stream.getvalue()",
"def set_response(self):\r\n import cherrypy\r\n \r\n response = cherrypy.response\r\n \r\n clean_headers(self.status)\r\n \r\n # In all cases, finalize will be called after this method,\r\n # so don't bother cleaning up response values here.\r\n response.status = self.status\r\n tb = None\r\n if cherrypy.request.show_tracebacks:\r\n tb = format_exc()\r\n response.headers['Content-Type'] = \"text/html\"\r\n \r\n content = self.get_error_page(self.status, traceback=tb,\r\n message=self._message)\r\n response.body = content\r\n response.headers['Content-Length'] = len(content)\r\n \r\n _be_ie_unfriendly(self.status)",
"def xml2html(self):\n handler = open(self.xml_doc).read()\n soup = BeautifulSoup(handler, 'xml')\n\n fw = open(self.filename_out, 'w')\n\n fw.write(\"<!DOCTYPE html>\" + os.linesep)\n fw.write(\"<html>\" + os.linesep)\n fw.write(\"<head>\" + os.linesep)\n fw.write('<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">' + os.linesep)\n fw.write(\"<link rel=\\\"stylesheet\\\" href=\\\"%s\\\" type=\\\"text/css\\\" />\" % self.stylesheet_name + os.linesep)\n fw.write(\"<title></title>\" + os.linesep)\n fw.write(\"</head>\" + os.linesep)\n fw.write(\"<body>\" + os.linesep)\n\n # Load styles in dictionaries\n for style in soup.find_all(\"style\"):\n style_name = style.get(\"style:name\")\n #print \"style: %s children: %s descendants: %s\" % (str(style_name), str(len(list(style.children))), len(list(style.descendants)))\n for style_child in style.children:\n fs = style_child.get(\"fo:font-style\")\n if fs:\n self.style_fontstyle[style_name] = fs\n fontw = style_child.get(\"fo:font-weight\")\n if fontw:\n self.style_fontweight[style_name] = fontw\n # read alignment\n txta = style_child.get(\"fo:text-align\")\n if txta:\n self.style_textalignment[style_name] = txta\n # !!!\n tu = style_child.get(\"style:text-underline-type\")\n if tu:\n self.style_textunderline[style_name] = \"underlined\"\n # page break\n break_before = style_child.get(\"fo:break-before\")\n if break_before:\n self.style_break_before[style_name] = break_before\n\n\n # Navigate down the document through h and p tags\n #\n for text in soup.find_all(re.compile(\"^h|^p\")):\n\n # From bs4 docs: If a tag has only one child, and that child is a NavigableString, the child is made available as .string:\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"P9\">- Any text here!</text:p>\n #\n # To do:\n #\n # Beware of this case:\n # - <text:p text:style-name=\"P8\">\n # <text:span text:style-name=\"T4\">\n #\n\n # Get the attributes so the styles and the outlines\n text_attrs = dict(text.attrs)\n\n # Get the styles, if any\n try:\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n if text.string:\n t = unicode(text.string)\n if t:\n fw.write(self.outliner(self.stylizer(t, t_style), t_outline_level, t_style).encode('utf-8'))\n\n # e.g. 
page breaks come as a node with no children whose style contains fo:break-before:\"page\"\n elif len(list(text.children)) == 0:\n fw.write(self.outliner(unicode(\"\"), t_outline_level, t_style).encode('utf-8'))\n\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"Textbody\">\n # jkjksk skjkjkjs dhh\n # <text:s />\n # <text:span text:style-name=\"T3\">Bold</text:span>\n # <text:s />\n # </text:p>\n #\n # else drill down one level\n else:\n buffer = unicode(\"\")\n t = buffer\n u = buffer\n t_outline_level = \"paragraph\"\n t_style = \"\"\n for i in text.children:\n # Get the attributes so the styles\n try:\n text_attrs = dict(i.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n # whenever the element has no style\n # take the parent's one\n try:\n text_attrs = dict(i.parent.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n # if the current tag has only one child, and that child is a NavigableString\n if i.string:\n t = unicode(i.string)\n\n # space\n elif i.name == \"s\":\n t = unicode(\" \")\n\n # else drill down another level\n else:\n t = unicode(\"\")\n for j in i.children:\n if j.string:\n u = unicode(j.string)\n elif j.name == \"s\":\n u = unicode(\" \")\n else:\n u = unicode(\"\")\n if u:\n t = t + self.stylizer(u, t_style)\n\n # build up a unicode string containing the whole paragraph\n if t:\n buffer = buffer + self.stylizer(t, t_style)\n\n # outline the buffered unicode string and write it to the output file\n fw.write(self.outliner(buffer, t_outline_level, t_style).encode('utf-8'))\n\n fw.write(\"</body>\" + os.linesep)\n fw.write(\"</html>\" + os.linesep)\n fw.close()",
"def debugDumpDTD(self, output):\n libxml2mod.xmlDebugDumpDTD(output, self._o)",
"def process_response(response: Response) -> Response:\n if request.endpoint in ('static', 'baseframe.static'):\n if 'Access-Control-Allow-Origin' not in response.headers:\n # This is required for webfont resources\n # Note: We do not serve static assets in production, nginx does.\n # That means this piece of code will never be called in production.\n response.headers['Access-Control-Allow-Origin'] = '*'\n\n # If Babel was accessed in this request, the response's contents will vary with\n # the accepted language\n if ctx_has_locale():\n response.vary.add('Accept-Language')\n # If current_auth was accessed during this request, it is sensitive to the lastuser\n # cookie\n if request_has_auth():\n response.vary.add('Cookie')\n\n # If request_is_xhr() was called, add a Vary header for that\n if request_checked_xhr():\n response.vary.add('X-Requested-With')\n\n # Prevent pages from being placed in an iframe. If the response already\n # set has a value for this option, let it pass through\n if 'X-Frame-Options' in response.headers:\n frameoptions = response.headers.get('X-Frame-Options')\n if not frameoptions or frameoptions == 'ALLOW':\n # 'ALLOW' is an unofficial signal from the app to Baseframe.\n # It signals us to remove the header and not set a default\n response.headers.pop('X-Frame-Options')\n else:\n if request_has_auth() and getattr(current_auth, 'login_required', False):\n # Protect only login_required pages from appearing in frames\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n\n # In memoriam. http://www.gnuterrypratchett.com/\n response.headers['X-Clacks-Overhead'] = 'GNU Terry Pratchett'\n\n return response",
"def is_html(self):\n return self.__content_type == html_ctype",
"def render_response(self, context):\n\n # if object is a string just return as is\n if isinstance(context, basestring):\n self.response.write(context)\n # else attempt to serialise and return\n else:\n context = json.dumps(context)\n self.response.write(context)\n # set the right content-type header\n self.response.headers['Content-Type'] = 'application/json'"
] |
[
"0.6120321",
"0.54680014",
"0.5340721",
"0.5258562",
"0.51871645",
"0.5171257",
"0.5152302",
"0.5116068",
"0.5028383",
"0.50265384",
"0.50095177",
"0.50023174",
"0.49935582",
"0.49037078",
"0.48833546",
"0.48567227",
"0.48562366",
"0.4814372",
"0.47897103",
"0.470156",
"0.46326154",
"0.46206832",
"0.46085614",
"0.45537645",
"0.45468852",
"0.45412",
"0.45330524",
"0.4525846",
"0.45250493",
"0.45025828"
] |
0.76996183
|
0
|
Generate the content type of the document. If a content type was set on the response object, use it; else, use the HTML or XHTML content type of this renderer
|
def content_type(self):
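# Prefer a content type explicitly set on the response object, otherwise derive it from the XML output flag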
response = self.response
if response.content_type:
return response.content_type
return 'application/xhtml+xml' if response.xml_output else 'text/html'
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def CONTENT_TYPE(self):",
"def content_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"content_type\")",
"def content_type(self) -> str:\n return pulumi.get(self, \"content_type\")",
"def content_type(self):\r\n return self.__content_type",
"def content_type(self):\n return self.__content_type",
"def content_type(self):\n return self.environ.get('CONTENT_TYPE') or 'application/octet-stream'",
"def init(content_type='text/html'):\n header('Content-type', content_type)",
"def content_type(self):\n return self._content_type",
"def _get_content_type(self):\n return '%s; charset=%s' % (self.content_type, self.charset)",
"def GetOutputType(self, response_type):\n if response_type == \"KML\":\n return \"xml\"\n return \"json\"",
"def _set_content_type(cls, request, response, _action, with_request):\n e = webob.exc.HTTPOk(headerlist=[('Content-Type', '')])\n img = _action(request, e) if with_request else _action()\n e.body = img\n\n content_type = e.content_type\n if not content_type:\n # If no ``Content-Type`` is already set, use the ``imghdr`` module\n # to guess the format of the image\n content_type = 'image/' + (imghdr.what(None, img[:32]) or '*')\n e.content_type = content_type\n\n raise e",
"def render_response(self, context):\n\n # if object is a string just return as is\n if isinstance(context, basestring):\n self.response.write(context)\n # else attempt to serialise and return\n else:\n context = json.dumps(context)\n self.response.write(context)\n # set the right content-type header\n self.response.headers['Content-Type'] = 'application/json'",
"def content_type(self, _format=None):\r\n _format = _format or self.format\r\n return \"application/%s\" % (_format)",
"def CONTENT_TYPE(self):\n return self.content_type",
"def content_type(self) -> str:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_type # type: ignore[return-value]",
"def __set_content_type(self):\n if self.headers is None:\n return\n\n content_type = self.headers.get(\"content-type\", None)\n\n if content_type is None:\n return\n if \";\" in content_type:\n content_type_parts = content_type.split(\";\")\n\n if len(content_type_parts) == 2:\n self.__content_type = content_type_parts[0]\n else:\n self.__content_type = content_type",
"def __set_content_type(self):\r\n if self.headers is None:\r\n return\r\n\r\n content_type = self.headers.get(\"content-type\", None)\r\n\r\n if content_type is None:\r\n return\r\n if \";\" in content_type:\r\n content_type_parts = content_type.split(\";\")\r\n\r\n if len(content_type_parts) == 2:\r\n self.__content_type = content_type_parts[0]\r\n else:\r\n self.__content_type = content_type",
"def content_type(self):\n return self._headers.get(\"content-type\")",
"def _render(cls, request, code, ctype, msg):\r\n request.setResponseCode(code)\r\n request.setHeader('content-type', ctype)\r\n request.write(msg)\r\n request.finish()",
"def doctype(self):\n response = self.response\n\n if response.doctype is not None:\n return response.doctype\n\n return self.XML_DOCTYPE if response.xml_output else self.HTML_DOCTYPE",
"def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type",
"def _render_as(self, extra_context={}, RESPONSE=None):\n import urlparse, re\n \n request = getattr(self, 'REQUEST', None)\n \n method = None\n if (extra_context.has_key('options') and\n extra_context['options'].has_key('method')):\n method = extra_context['options']['method']\n elif request.has_key('method'):\n method = request['method']\n \n if method not in self.render_methods:\n method = self.default_render_method\n \n transform_id = getattr(self, 'transform_%s' % method, '')\n transform_id = re.sub('\\s\\([^\\(]*?\\)$', '', transform_id)\n \n content_type = self.content_type_map.get(method, 'text/plain')\n\n # note we make sure we don't have a unicode object at the later steps,\n # because that causes all sorts of headaches with the XML parser later\n xml_rendered = self.pt_render(extra_context=extra_context).encode(self.char_encoding)\n \n if not transform_id or transform_id == self.unselected_transform:\n rendered = xml_rendered\n else:\n transform = getattr(self, transform_id, None)\n if not transform or transform.meta_type != 'XSLT Template':\n for obj in self._get_path_objs(self.transform_paths):\n transform = getattr(obj, transform_id, None)\n if transform and transform.meta_type == 'XSLT Template':\n break\n if not transform:\n raise TransformError, ('Transform %s did not exist' % \n transform_id)\n self.prune_cache()\n cached = self.retrieve_cache(transform, xml_rendered)\n if cached:\n rendered = cached\n else:\n rendered = transform.render_xml(xml_rendered, content_type)\n self.update_cache(transform, xml_rendered, rendered, 0)\n \n \n # set the base properly\n pathparts = list(urlparse.urlparse(self.absolute_url()))\n base = os.path.split(pathparts[2])[0]\n pathparts[2] = base\n base = urlparse.urlunparse(pathparts)\n \n RESPONSE.setBase(base)\n RESPONSE.setHeader('Content-Type', '%s; charset=%s' % (content_type,\n self.char_encoding))\n \n return rendered",
"def serialize_response(response_data, content_type=None):\n content_type = content_type or get_best_mimetype()\n\n if not content_type:\n abort(406)\n\n rv = current_app.blueprints[request.blueprint]\\\n .response_mimetypes[content_type](response_data)\n\n response = make_response(rv)\n\n if isinstance(response_data, HTTPException):\n response.status_code = response_data.code\n\n response.headers['Content-type'] = content_type\n return response",
"def content_type(self):\n return self.guess_content_type(self.store_key)",
"def content_type(self):\n return self.content_types[0]",
"def get_content_type(self):\n if \"Content-Type\" not in self.headers:\n return None\n\n content_type = self.content_type\n\n # NOTE(markmc): text/plain is the default for eventlet and\n # other webservers which use mimetools.Message.gettype()\n # whereas twisted defaults to ''.\n if not content_type or content_type == 'text/plain':\n return None\n\n if content_type not in SUPPORTED_CONTENT_TYPES:\n raise exception.InvalidContentType(content_type=content_type)\n\n return content_type",
"def get_content_type(self):\n if hasattr(self, '_content_type'):\n return self._content_type\n mimetype = None\n querystring_mimetype = self.request.get('mimetype')\n acceptheader = self.request.getHeader('Accept')\n\n if querystring_mimetype and querystring_mimetype in self.content_types:\n mimetype = querystring_mimetype\n else:\n querystring_error = 'No acceptable mimetype in QUERY_STRING: {0}'.format(querystring_mimetype)\n if acceptheader:\n mimetype = self.content_types.negotiate_accept_header(acceptheader)\n if not mimetype:\n acceptheader_error = 'No acceptable mimetype in ACCEPT header: {0}'.format(acceptheader)\n raise CouldNotDetermineContentType(querystring_error=querystring_error,\n acceptheader_error=acceptheader_error,\n acceptable_mimetypes=self.content_types.get_mimetypelist())\n content_type = self.content_types[mimetype]\n self._content_type = content_type\n return content_type",
"def getResponseContentType(self):\n return self.JSON_APPLICATION",
"def _content_type_strategy(self, host, port, environ):\n app = None\n params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]\n if 'version' in params:\n app, app_url = self._match(host, port, '/v' + params['version'])\n if app:\n app = self._set_script_name(app, app_url)\n\n return app",
"def render(self, context):\n engine = Renderer()\n engine.environment.filters['format_date'] = format_date\n engine.environment.filters['format_datetime'] = format_datetime\n result = engine.render(self.template, **context)\n response = HttpResponse(\n content_type='application/vnd.oasis.opendocument.text; charset=UTF-8'\n )\n response['Content-Disposition'] = 'inline; filename=' + self.filename\n with tempfile.NamedTemporaryFile() as output:\n output.write(result)\n output.flush()\n output = open(output.name, 'rb')\n response.write(output.read())\n return response"
] |
[
"0.6765369",
"0.6763161",
"0.6675109",
"0.6672418",
"0.65277874",
"0.645715",
"0.6426409",
"0.6417607",
"0.63847506",
"0.63470113",
"0.63402563",
"0.6328932",
"0.6328278",
"0.62982404",
"0.6285868",
"0.6284358",
"0.6283006",
"0.6272976",
"0.6272383",
"0.6260134",
"0.6251464",
"0.624621",
"0.61774606",
"0.61476356",
"0.6143509",
"0.6104893",
"0.6084738",
"0.6076409",
"0.60747105",
"0.606366"
] |
0.8085524
|
0
|
Register a synchronous action on a tag
|
def action(self, tag, action, with_request):
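# Delegate to the tag, which knows how to register a synchronous action on itself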
tag.sync_action(self, action, with_request)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def action(self, tag, action, with_request):\n tag.async_action(self, action, with_request)",
"def push_tag(self, tag):\n _tag_entity('task', self.task_id, tag)",
"def sync_action(self, renderer, action, with_request):\n self.set(self._actions[1], renderer.register_callback(self._actions[0], action, with_request))",
"def _register(self, comm, handler):",
"async def _register_mid(self, mid: int) -> None:\n async with self._pending_operations_condition:\n if mid not in self._pending_operations:\n self._pending_operations[mid] = asyncio.Event()",
"def async_action(self, renderer, action, with_request):\n if not isinstance(action, ajax.Update):\n action = ajax.Update(action=action, with_request=with_request)\n\n self.set(self._actions[2], action.generate_action(self._actions[0], renderer))",
"async def send_tag(self, tag, reactions, *args, **kwargs):\n message = await self.send_react(reactions, *args, **kwargs)\n await self.set_trigger(tag, message)\n return message",
"def tag(self, sent):\n # WORK HERE!!",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"def add_tag(self, session, tag):\n self._tag(session.put, key=tag, session=session)",
"def tag(self, tag):\n self.tag = tag",
"async def perform_action(self) -> None:",
"def send_sync(self, tag, uid):\n self._messaged.emit((\"sync\",tag,0,uid))",
"async def generic_action(self, request):\n pass",
"def tag(request, tag_name):\n raise NotImplementedError",
"def add_tag(*, tag='tag!'):\n def _apply_on(f):\n setattr(f, 'my_tag', tag)\n return f\n return _apply_on",
"def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)",
"def add_tagging(self, task_instance):",
"def _async_register(service, notifier):\n\n proc = multiprocessing.Process(\n name='Async Registration {}'.format(service.iden),\n target=_register, args=(service, notifier))\n proc.start()",
"def tag(self, tag):\n\n self._tag = tag",
"def tag(self, tag):\n\n self._tag = tag",
"def tag(self, tag):\n\n self._tag = tag",
"def tag(self, tag):\n\n self._tag = tag",
"async def tag(self, ctx: \"IceTeaContext\", *, tag_name: str):\n tag_content = await ctx.guild_data.call_tag(tag_name, ctx.channel.id, ctx.author.id)\n if tag_content:\n await ctx.send(tag_content)\n else:\n await ctx.send(\"No Tag found\")",
"def updateTag(self, authenticationToken, tag):\r\n pass",
"def sync_action(self, renderer, action, with_request):\n href = self.get('href', '').partition('#')\n self.set('href', renderer.add_sessionid_in_url(href[0], (renderer.register_callback(4, action, with_request),)) + href[1] + href[2])",
"def register(self, hook_url):\n raise NotImplementedError()",
"def registerAction(self, actionId, action): #$NON-NLS-1$\r",
"def add_tag(self, tag):\n self.tags.append(tag)",
"def register_action(self, name, command):\n action_name = command['action_name']\n if action_name not in self.al_clients:\n action_type = self.get_interface_type(command['interface_type'], '.action')\n self.al_clients[action_name] = ActionClient(self, action_type, action_name)\n\n if self.al_clients[action_name].server_is_ready():\n if action_name in self.offline_actions:\n self.offline_actions.remove(action_name)\n else:\n if action_name not in self.offline_actions:\n self.get_logger().warn(\n 'action {} is not read yet'.format(action_name))\n self.offline_actions.append(action_name)"
] |
[
"0.72214663",
"0.60455585",
"0.5851493",
"0.57009196",
"0.5500962",
"0.5495582",
"0.5483852",
"0.54292995",
"0.54260516",
"0.5349953",
"0.534746",
"0.5329074",
"0.5291505",
"0.52782446",
"0.52682793",
"0.52675897",
"0.5250884",
"0.52481234",
"0.5213831",
"0.5185042",
"0.5185042",
"0.5185042",
"0.5185042",
"0.5172777",
"0.5144615",
"0.5137115",
"0.50956815",
"0.50860286",
"0.5077492",
"0.5072014"
] |
0.71138674
|
1
|
During rendering, highlight an element that has an error
|
def decorate_error(self, element, error):
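# Without an error, the element is returned unchanged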
if error is None:
return element
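# Wrap the element and its error message in styled containers to highlight the error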
div = self.div(class_='nagare-error-input')
div.append(element)
return self.div(
div,
self.div(error, class_='nagare-error-message'),
class_='nagare-error-field'
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _try_highlight(code: str) -> str:\n pretty = pygments.highlight(code, lexer, signature_formatter).strip()\n if '<span class=\"err\">' not in pretty:\n return pretty\n else:\n return html.escape(code)",
"def _highlight_error(doc: str, err_lineno: int) -> str:\n doc_list: list = []\n lineno = 1\n lines_total = doc.count(\"\\n\")\n indent = len(str(lines_total))\n for line in doc.splitlines():\n if lineno == err_lineno:\n doc_list.append(\n \"{lineno:>{indent}}: {line}<---- Error line:{err_lineno}\".format(\n lineno=lineno, indent=indent, line=line, err_lineno=err_lineno\n )\n )\n marks = [\"^\" for _ in line]\n doc_list.append(\n \"{_:{indent}} {marks}------- Erroneous line above\".format(\n _=\"\", indent=indent, marks=\"\".join(marks)\n )\n )\n else:\n doc_list.append(\n \"{lineno:>{indent}}: {line}\".format(\n lineno=lineno, indent=indent, line=line\n )\n )\n lineno += 1\n return \"\\n\".join(doc_list)",
"def error(self, msg):\n with self._lock:\n self.wraptext(msg, fg=\"red\", bold=True)\n return self",
"def error(text):\n return color_str(text, 'RED')",
"def __showError(self, out):\n self.errorGroup.show()\n self.errors.insertPlainText(Utilities.filterAnsiSequences(out))\n self.errors.ensureCursorVisible()\n \n QCoreApplication.processEvents()",
"def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))",
"def indicate_error(self):\n pass",
"def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))",
"def err_message(self, message):\n self.errors.append(1)\n message = \"<b>\" + message + \"</b>\"\n self.timer_id = GLib.timeout_add_seconds(5, self.error_false)\n # Show if is was hidden\n if self.hidden:\n self.toggle()\n self.was_hidden = True\n self.left_label.set_markup(message)",
"def mark_error(self):\r\n self.status = ERROR",
"def add_error(self, content):\n self._add_content(html_error(content))",
"def mark_text_with_error_tag(law_text, law_mat):\n law_lines = law_text.split(\"\\n\")\n marked_lines = []\n for i, line in enumerate(law_lines):\n curr_damaged_vec = law_mat[i]\n curr_words = line.split()\n marked_line = get_marked_line(curr_words, curr_damaged_vec)\n marked_line = \" \".join(marked_line)\n marked_lines.append(marked_line)\n return \"\\n\".join(marked_lines)",
"def show_error(self, error):\n if (error == \"\"):\n self.ui.errorLabel.setText(\"\")\n else:\n self.ui.errorLabel.setText(\"<span style=\\\"font-weight:600; color:#ff0000;\\\">{0}</span>\".format(error))",
"def _highlight_error(doc: str, err_lineno: int, err_colno: int) -> str:\n doc_list: list = []\n lineno = 1\n lines_total = doc.count(\"\\n\")\n indent = len(str(lines_total))\n for line in doc.splitlines():\n if lineno == err_lineno:\n err_indent = indent + 1 + err_colno\n doc_list.append(\n \"{lineno:>{indent}}: {line}<---- Error line:{err_lineno}, position {err_colno}\".format(\n lineno=lineno,\n indent=indent,\n line=line,\n err_lineno=err_lineno,\n err_colno=err_colno,\n )\n )\n doc_list.append(\n \"{_:{err_indent}}^---- Exact Error position\".format(\n _=\"\", err_indent=err_indent\n )\n )\n else:\n doc_list.append(\n \"{lineno:>{indent}}: {line}\".format(\n lineno=lineno, indent=indent, line=line\n )\n )\n lineno += 1\n return \"\\n\".join(doc_list)",
"def setError(self, color=QtCore.Qt.red):\n self.format.setUnderlineStyle(\n QtGui.QTextCharFormat.SpellCheckUnderline)\n self.format.setUnderlineColor(color)",
"def showErrorSource(self, source, showIt):\r\n\r\n if source:\r\n if showIt:\r\n source.palette().setColor(QPalette.Active, QColorGroup.Base, Qt.red)\r\n else:\r\n source.palette().setColor(QPalette.Active, QColorGroup.Base, self.backGroundColor)",
"def __gotoSyntaxError(self):\n self.activeWindow().gotoSyntaxError()",
"def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))",
"def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n self.print_lines(self.colored(('red', 'bold'), lines))",
"def html_error(string):\n return html_div(string, \"error\")",
"def _show_error_if_checking(\n self, node, msg=None, error_code=None, replacement=None\n ):\n if self._is_checking():\n self.show_error(node, msg, error_code=error_code, replacement=replacement)",
"def formatError(self,error):\n return '<font color=\"#f00\"><b><i>%s</i></b></font><br />\\n' % error",
"def renderInlineException(request, reason):",
"def style_error(msg='{}'):\n red_code = '\\033[0;31m'\n return text_color(msg, red_code)",
"def click_error_icon(self):\n self.click_element(self.error_icon_locator)",
"def highlight_diagnostics(diagnostics, annotation_set):\n for (line, diags) in diagnostics.iteritems():\n most_severe_class = None\n for (diag_class, msg) in diags:\n if most_severe_class is None:\n most_severe_class = diag_class\n elif diag_class == 'error':\n most_severe_class = diag_class\n break\n\n messages = '<br />'.join([diag[0] + ': ' + diag[1] for diag in diags])\n annotation_set.add_tag('span',\n [\n ('class', diag_class),\n ('title', messages),\n ],\n EntireLineSourceLocation(line))",
"def conditionally_raise(self, error: ImageNotFound) -> None:",
"def _insertErrorMsg(self, ErrorMessage, outputFileObject):\n outputFileObject.write('<font color=\"' + AutoGrader.Const.ERROR_COLOR + '\">')\n outputFileObject.write (ErrorMessage)\n outputFileObject.write('</font>')",
"def failure(self, message=''):\n print(colored(message, 'red'))",
"def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)"
] |
[
"0.6315305",
"0.6040992",
"0.60155904",
"0.60121435",
"0.6011253",
"0.5966383",
"0.59566885",
"0.59525627",
"0.59378624",
"0.5895664",
"0.5884718",
"0.5851046",
"0.58482516",
"0.5827483",
"0.5770929",
"0.57320607",
"0.57220846",
"0.5695249",
"0.56752497",
"0.56749165",
"0.5666272",
"0.5641061",
"0.56232864",
"0.5616782",
"0.5603395",
"0.5588621",
"0.558179",
"0.5571134",
"0.55521625",
"0.5504243"
] |
0.6175157
|
1
|
Add the session and continuation ids into a URL. Forward this call to the sessions manager
|
def add_sessionid_in_url(self, u='', params=None, sep='&'):
i = u.find(':')
if ((i == -1) or not u[:i].isalpha()) and (not u or (u[0] != '/')):
u = self.url + '/' + u
if params is None:
params = ()
if self.session:
u += '?' + sep.join(self.session.sessionid_in_url(self.request, self.response) + params)
return u
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __call__(self, action, uids):\n uids = \",\".join(uids)\n url = \"{}/courier_shipment?uids={}\".format(self.back_url, uids)\n return self.redirect(redirect_url=url)",
"def generate_auth_sub_url(next, scopes, secure=False, session=True,\n request_url=atom.http_core.parse_uri(\n 'https://www.google.com/accounts/AuthSubRequest'),\n domain='default', scopes_param_prefix='auth_sub_scopes'):\n if isinstance(next, (str, unicode)):\n next = atom.http_core.Uri.parse_uri(next)\n scopes_string = ' '.join([str(scope) for scope in scopes])\n next.query[scopes_param_prefix] = scopes_string\n\n if isinstance(request_url, (str, unicode)):\n request_url = atom.http_core.Uri.parse_uri(request_url)\n request_url.query['next'] = str(next)\n request_url.query['scope'] = scopes_string\n if session:\n request_url.query['session'] = '1'\n else:\n request_url.query['session'] = '0'\n if secure:\n request_url.query['secure'] = '1'\n else:\n request_url.query['secure'] = '0'\n request_url.query['hd'] = domain\n return request_url",
"def sync_action(self, renderer, action, with_request):\n href = self.get('href', '').partition('#')\n self.set('href', renderer.add_sessionid_in_url(href[0], (renderer.register_callback(4, action, with_request),)) + href[1] + href[2])",
"def setSessionParameters(self,\n url=None,\n protocols=None,\n server=None,\n headers=None,\n externalPort=None):",
"def setSessionParameters(self,\n url=None,\n origin=None,\n protocols=None,\n useragent=None,\n headers=None,\n proxy=None):",
"def __call__(self, url, session):\n result = session.post(url + self.endpoint, json=self.credentials)\n print(result.status_code)",
"async def update_session_history(request, call_next):\n response = await call_next(request)\n history = request.session.setdefault(\n 'history', []).append(request.url.path)\n return response",
"async def add_page_views(self, url_sub: str, session: Session) -> None:\n data = {\n \"uuid\": \"5e43e1c6-ca97-4787-a033asddasd\",\n \"referrer\": \"https://www.jianshu.com\"\n }\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\",\n \"X-CSRF-Token\": \"+LOfEEblhHerUy77qZvhhWkwiUIYrirND0ofIfYesBLc0WUIhsHwFTTzmE+jprN8Yto50LE79knh3Ao8oq30vg==\",\n \"Cookie\": \"sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22164f0ee834e42a-06c76e6f7944b38-4c312b7b-1327104-164f0ee834f2ab%22%2C%22%24device_id%22%3A%22164f0ee834e42a-06c76e6f7944b38-4c312b7b-1327104-164f0ee834f2ab%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%2C%22first_id%22%3A%22%22%7D; read_mode=day; default_font=font2; locale=zh-CN; _m7e_session=920bfd266a5d73adf6bff486082f84e4; signin_redirect=https%3A%2F%2Fwww.jianshu.com%2Fp%2F4a526bc6276b\",\n \"Host\": \"www.jianshu.com\",\n \"Referer\": f\"https://www.jianshu.com/p/{url_sub}\",\n }\n url = f\"https://www.jianshu.com/notes/{url_sub}/mark_viewed.json\"\n try:\n await session.post(url, headers=headers, data=data)\n except Exception:\n # Todo\n pass",
"def open_new_sessions(self, number_of_sessions=1):\n session_ids = []\n\n for x in range(0, number_of_sessions):\n init_request = self.make_request()\n session_ids.append(init_request['ident'])\n print 'Acquired SessionID #%s: %s' % (\n x, init_request['ident']\n )\n\n return session_ids",
"def session(self):",
"def start_session(base_url, group_id, token, payload):\n url = base_url + route_sessions\n response = requests.post(url, headers=headers(group_id, token), json=payload)\n return response",
"def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request",
"def test_multiple_requests(self):\n s = self.api.session()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/bar\").end()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/blah\").end()\n s.end()\n data = self.connector.transcription()\n assert len(data) == 2\n assert data[0].get('action') == \"session_start\"\n assert data[1].get('action') == \"session_end\"",
"def __init__(self, url, session):\n self._url = url\n self._session = session",
"def sessions(connection, verbose=False):\n response = requests.put(url=connection.base_url + '/sessions',\n headers={'X-MSTR-AuthToken': connection.auth_token},\n cookies=connection.cookies,\n verify=connection.ssl_verify)\n if verbose:\n print(response.url)\n return response",
"def addsession_url_link(update, context):\n\t\n\turl_link = update.message.text\n\n\turl_link_valid = check_new_url(url_link)\n\tif (not url_link_valid):\n\t\tupdate.message.reply_text('Oops, your link is not a valid URL-link.\\n'\n\t\t'Please, retype the URL-link in a good format.')\n\t\treturn URL_LINK\n\n\tcontext.user_data['url-str'] = url_link\n\tupdate.message.reply_text('Thank you.\\n')\n\n\t# query to finalize adding new session\n\tkeyboard = [\n\t\t[ \n\t\t\tInlineKeyboardButton(\"Yes\", callback_data='Yes'), \n\t\t\tInlineKeyboardButton(\"No\", callback_data='No'),\n\t\t]\n\t,]\n\treply_markup = InlineKeyboardMarkup(keyboard)\n\n\tupdate.message.reply_text('So, you want to add a session with the following parameters, right?\\n\\n'\n\t\t'Date and time: {}\\nURL-link: {}\\n'.format(context.user_data['datetime-str'], context.user_data['url-str']),\n\t\treply_markup = reply_markup)\n\t\n\treturn PROCESS_NEW_SESSION",
"def on_session_started(session_started_request, session):",
"def start_session():\n home_timeline = oauth_req(construct_url(screen_name))\n return home_timeline",
"def get_sp_auth_url(self, session, sp_id, **kwargs):\n return None",
"def client(client, app):\n app.add_route(\"/view_session\", view_session, methods=[\"GET\"])\n return client",
"def __call__(self, action, uids):\n url = \"{}/partition_magic?uids={}\".format(self.back_url, \",\".join(uids))\n return self.redirect(redirect_url=url)",
"def on_session_started(session_started_request, session):\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])",
"def start_requests(self):\n # First request to Youtube to get user live ID and related cookies\n yield scrapy.Request('https://accounts.google.com/ServiceLogin/identifier?passive=true&uilel=3&hl=en&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop&service=youtube&flowName=GlifWebSignIn&flowEntry=AddSession'\n , callback=self.login)",
"def next_request(self):\n item = self.server.lpop(self.redis_key)\n if item:\n # print type(eval(item))\n item = eval(item)\n cookie = self.qunar_hotel_login.get_user_cookie(item['user_name']+'_'+item['channel_number'])\n if not cookie:\n cookie = self.qunar_hotel_login.login(item['user_name'],item['password'],item['channel_number'])#[item['user_name']]\n # print cookie\n if cookie:\n url = 'http://hota.qunar.com/stats/ohtml/announcement/queryAnnouncements'\n item['cookie'] = eval(cookie)\n return Request(url,meta = {'cookiejar' : 1,'item':item},callback = self.parse_after_login,dont_filter = True,cookies=eval(cookie),headers=self.qunar_login_header_2)",
"def redirect(url):",
"def session_path(cls, project, session):\n return google.api_core.path_template.expand(\n 'projects/{project}/agent/sessions/{session}',\n project=project,\n session=session, )",
"async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)",
"def put_request_session(self, key, inst):\n with self.GLOB_LOCK:\n inst.touch()\n self._request_sessions[key] = inst",
"def get_session_id(session):\n return {'src_ip': session['src_ip'], 'src_port': session['src_port'], 'dest_ip': session['dest_ip'],\n 'dest_port': session['dest_port'], 'protocol': session['protocol'], 'start_time': session['start_time']}",
"def output_server(self, session_id=DEFAULT_SESSION_ID, url=\"default\",\n app_path='/', autopush=False):\n self._session_coords = _SessionCoordinates(dict(session_id=session_id,\n url=url,\n app_path=app_path))\n\n self._autopush = autopush\n self._server_enabled = True"
] |
[
"0.52751213",
"0.5235955",
"0.5218152",
"0.520356",
"0.51765317",
"0.5126691",
"0.51162815",
"0.5115808",
"0.5087683",
"0.50780606",
"0.49630216",
"0.4914782",
"0.49137837",
"0.49079183",
"0.4866877",
"0.47644067",
"0.47576442",
"0.47518677",
"0.47369367",
"0.47294745",
"0.4723444",
"0.47106996",
"0.47029343",
"0.47019616",
"0.46612346",
"0.4650193",
"0.464821",
"0.4607285",
"0.45833883",
"0.45819822"
] |
0.56874806
|
0
|
Memorize an inline anonymous css style
|
def _css(self, style):
self._anonymous_css.append((self._order, style))
self._order += 1
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generateInlineCSS():",
"def generateInlineCSS(self, *args, **kwargs): \n return \"\"\"\n .googleLoading.googleMapView #CustomGoogleMap { \n height:520px;\n background: url(\"../image/map/stripeHorz.gif\") repeat-x scroll 0 0 transparent;\n padding: 20px;\n position: relative;\n } \n .googleLoading.googleMapView {\n background: url(\"/edit/maploader.gif\") no-repeat scroll 215px 115px transparent;\n }\n \"\"\".replace('\\n', '').replace(' ','')",
"def cache_style_content(self, content, inline=False):\n\t\tif inline:\n\t\t\tsheet = cssutils.parseStyle(content)\n\t\telse:\n\t\t\tsheet = cssutils.parseString(content, href=self.url)\n\t\tif not inline:\n\t\t\tfor rule in sheet.cssRules:\n\t\t\t\tif rule.type == rule.IMPORT_RULE:\n\t\t\t\t\tf = self._recursive_cache_resource(rule.styleSheet.href)\n\t\t\t\t\trule.href = f\n\t\tdef replacer(url):\n\t\t\tif url.startswith('data'):\n\t\t\t\treturn url\n\t\t\t# TODOs:\n\t\t\t# Check for absolute url before joining\n\t\t\treturn self._recursive_cache_resource(urljoin(self.url, url))\n\t\tcssutils.replaceUrls(sheet, replacer, ignoreImportRules=True)\n\t\treturn sheet.cssText",
"def css(self, name, style, **kw):\n self._named_css.setdefault(name, (self._order, style, kw))\n self._order += 1\n return ()",
"def _inline_css(self):\n \n # Stores all inlined elements.\n elms = {}\n \n # Get all the CSS rules in a dictionary that we can operate on.\n style_rules = cssutils.parseString(self.aggregated_css)\n \n for rule in style_rules:\n \n # Look through all elements that match this CSS selector.\n if hasattr(rule, 'selectorText'):\n \n try:\n for element in self.document.cssselect(rule.selectorText):\n \n # \n if element not in elms:\n elms[element] = cssutils.css.CSSStyleDeclaration()\n \n # Add existing inline style if present\n inline_styles = element.get('style')\n if inline_styles:\n inline_styles= cssutils.css.CSSStyleDeclaration(\n cssText=inline_styles\n )\n else:\n inline_styles = None\n if inline_styles:\n for p in inline_styles:\n # Set inline style specificity\n elms[element].setProperty(p)\n \n # Add the styles to the element.\n for p in rule.style:\n if p not in elms[element]:\n elms[element].setProperty(p.name, p.value, p.priority)\n else:\n # sameprio = (p.priority == view[element].getPropertyPriority(p.name))\n # if not sameprio and bool(p.priority) or (sameprio and selector.specificity >= specificities[element][p.name]):\n # # later, more specific or higher prio \n elms[element].setProperty(p.name, p.value, p.priority)\n except:\n # TODO: Need to catch errors like ExpressionError here...\n pass\n \n # Set inline style attributes unless the element is not worth styling.\n ignore_list = [\n 'html',\n 'head',\n 'title',\n 'meta',\n 'link',\n 'script'\n ]\n for element, style in elms.items():\n if element.tag not in ignore_list:\n css = style.getCssText(separator=u'')\n element.set('style', css)\n \n # Convert tree back to plain a HTML string.\n html = etree.tostring(self.document, method=\"xml\", \n pretty_print=True, encoding='UTF-8')\n \n return html",
"def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'",
"def test_overwrite(self):\n html = '<html><head><style>h1 {color: #000;}</style></head><body><h1 style=\"color: #fff\">Foo</h1></body></html>'\n desired_output = '<html><head></head><body><h1 style=\"color: #000; color: #fff\">Foo</h1></body></html>'\n output = Pynliner().from_string(html).run()\n self.assertEqual(output, desired_output)",
"def set_style(self):",
"def css(self) -> str:\n return self._css",
"def fancy( _inStr):\r\n return '<head><style type=\"text/css\">td.special{ background-color:aqua;font-size: 100%;margin-left: 20px;font-family: times, sans-serif, arial}</style></head><table><tr><td class=\"special\">' + _inStr + '</td></tr></table>'",
"def style(self, style):\n self.style += [ style ]\n return self",
"def condense_style(html): # May look silly but Emmet does this and is wrong.\n log.debug(\"Condensing HTML Style CSS tags.\")\n return html.replace('<style type=\"text/css\">', '<style>').replace(\n \"<style type='text/css'>\", '<style>').replace(\n \"<style type=text/css>\", '<style>')",
"def CSSClasses(self):",
"def inline_css(html_src, path=None):\n css_re = re.compile(\"\\<link rel\\=\\\"stylesheet\\\" media\\=\\\"(screen|print)\\\" href\\=\\\"([0-9a-zA-Z.\\-_/]+)\\\"\\>\")\n\n def fetch_jssource(in_match):\n #media_type = in_match.group(1)\n rel_path = in_match.group(2)\n csspath = os.path.join(path, rel_path)\n return \"<style>\\n{0}\\n</style>\".format(open(csspath, 'r').read())\n #return \"<style media=\\\"{0}\\\">\\n{1}\\n</style>\".format(media_type, open(csspath, 'r').read())\n\n return css_re.sub(fetch_jssource, html_src)",
"def showsrcstyle(self, line):\n \n name = line.strip()\n if not name:\n name = \"default\"\n self.style_name = name\n self.formatter = HtmlFormatter(style=name)\n display(HTML(\"\"\"<style type='text/css'>\n span.inspector-header {\n font-family: monospace;\n border-bottom: 1px solid #555;\n }\n table.highlighttable, .highlighttable td, .highlighttable tr {\n border: 0px;\n }\n .highlighttable td.linenos {\n border-right: 1px solid #555;\n }\n \n span.inspector-filename {\n text-decoration: italic;\n }\n span.inspector-lineno {\n font-weight: bold;\n }\n %s\n </style>\n \"\"\" % self.formatter.get_style_defs()\n ))",
"def style_resize(self) -> str:\n resize = \"\"\".resize{\n width: 1000px;\n height: auto;}\\n\"\"\"\n self.html_doc = self.html_doc + resize\n return self.html_doc",
"def style(self, style):\n\n self.container['style'] = style",
"def restore_needed_space(css):\n return css.replace(\"!important\", \" !important\").replace( # !important\n \"@media(\", \"@media (\").replace( # media queries # jpeg > jpg\n \"data:image/jpeg;base64,\", \"data:image/jpg;base64,\").rstrip(\"\\n;\")",
"def render_servicestyle(self, ctx, data):\n\t\tif self.service and self.service.getProperty(\"customCSS\", False):\n\t\t\treturn ctx.tag[self.service.getProperty(\"customCSS\")]\n\t\treturn \"\"",
"def open_style(self) -> str:\n self.html_doc = self.html_doc + \"\"\"<style>\\n\"\"\"\n return self.html_doc",
"def css(self, **kwargs):\n\n with open(\"style.css\", 'a+') as s:\n for key, value in kwargs:\n s.write(f\"\\n\\t{key.replace('_', '-')}: {value};\")\n s.write(\"\\n}\")",
"def css(self, **kwargs):\n\n with open(\"style.css\", 'a+') as s:\n for key, value in kwargs:\n s.write(f\"\\n\\t{key.replace('_', '-')}: {value};\")\n s.write(\"\\n}\")",
"def _get_anonymous_css(self):\n return [css for (order, css) in sorted(self._anonymous_css)]",
"def no_style():\n class Dummy:\n \"\"\"A style object that has no colors.\"\"\"\n def __getattr__(self, attr):\n return lambda x: x\n return Dummy()",
"def load_style():\n display(HTML(Path('bhsa.css').read_text()))",
"def convert(self):\n self.aggregated_css = self._get_aggregated_css()\n self.converted_html = self._inline_css()\n \n return self.converted_html",
"def test_inline_styles(self):\n html = '<span><div class=\"pink\" style=\"font-size: 1em;\">test</div><div>t2</div></span>'\n css = 'div { font-size: 3em; } .pink { font-size: 2em; }'\n expected = '<span><div class=\"pink\" style=\"font-size: 1em;\">test</div><div style=\"font-size: 3em;\">t2</div></span>'\n result = inline_css(html, css, pretty_print=False)\n self.assertEqual(expected, result)",
"def minify_css(css: str) -> str:\n css = re.sub(r\"[ ]{4}|\\n|(?<=[:{}]) | (?=[{}])\", \"\", css)\n css = re.sub(\n r\"/\\*.+?\\*/\", lambda m: m.group(0) if m.group(0).startswith(\"/*!\") else \"\", css\n )\n return Markup(css.replace(\"<style\", \"\\n<style\"))",
"def update_style(self):\n pass",
"def apply_styles(source, styles):\n soup = BeautifulSoup(source)\n\n for style in styles:\n for markup in soup.findAll(style.markup):\n markup['style'] = style.style.strip()\n\n return soup.prettify()"
] |
[
"0.7789631",
"0.64866835",
"0.60587925",
"0.6024121",
"0.59967977",
"0.5881994",
"0.57379484",
"0.57242656",
"0.5719525",
"0.56941307",
"0.56649125",
"0.56587523",
"0.5635615",
"0.5622646",
"0.5617564",
"0.56134486",
"0.56113535",
"0.5573434",
"0.55668753",
"0.5535555",
"0.5492425",
"0.5492425",
"0.54656965",
"0.5456578",
"0.54546416",
"0.5447693",
"0.5408131",
"0.54058945",
"0.5401477",
"0.5399296"
] |
0.68824816
|
1
|
Memorize an inline anonymous javascript code snippet
|
def _javascript(self, script):
self._anonymous_javascript.append((self._order, script))
self._order += 1
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wrap_javascript_in_anonfct(js_data):\n return \"(function(){\"+js_data+\"})()\"",
"def code():",
"def js_minify2(js):\n log.debug(\"Obfuscating Javascript variables names inside functions.\")\n # if eval() or with{} is used on JS is not too Safe to Obfuscate stuff.\n is_ok = \"eval(\" not in js and \"with{\" not in js and \"with {\" not in js\n return slim_func_names(slim_params(js)) if is_ok else js.strip()",
"def javascript(self, name, script, **kw):\n if callable(script):\n # Transcode the function or the method to javascript code\n script = ajax.javascript(script)\n\n if isinstance(script, ajax.JS):\n # Transcoded javascript needs a helper\n self.javascript_url('/static/nagare/pyjslib.js')\n script = script.javascript\n\n self._named_javascript.setdefault(name, (self._order, script, kw))\n self._order += 1\n return ()",
"def exec_js(expr, args=None):\n\n if args is None:\n str_args = ''\n else:\n str_args = '(' + args2js(args) + ')'\n\n display(Javascript(expr + str_args))",
"def code(self):\n return '{}\\n<script>{}</script>'.format(self.html, self.js)",
"def separate_asm_js(final, asm_target):\n logging.debug('separating asm')\n subprocess.check_call([shared.PYTHON, shared.path_from_root('tools', 'separate_asm.py'), final, asm_target, final])\n\n # extra only-my-code logic\n if shared.Settings.ONLY_MY_CODE:\n temp = asm_target + '.only.js'\n print jsrun.run_js(shared.path_from_root('tools', 'js-optimizer.js'), shared.NODE_JS, args=[asm_target, 'eliminateDeadGlobals', 'last', 'asm'], stdout=open(temp, 'w'))\n shutil.move(temp, asm_target)",
"def un_src(self):\n if self.src is None:\n return\n self.inline = '''\n var script = document.createElement('script');\n script.src = \"%s\";\n document.body.appendChild(script);\n''' % self.src\n self.src = None",
"def _get_anonymous_javascript(self):\n return [js for (order, js) in sorted(self._anonymous_javascript)]",
"def exec_js(expr, args=None):\n\n if args is None:\n str_args = ''\n else:\n str_args = '(' + args2js(args) + ')'\n\n _exec_js_display.update(Javascript(expr + str_args))",
"def js_minify(js):\n log.info(\"Compressing Javascript...\")\n ins, outs = StringIO(js), StringIO()\n JavascriptMinify(ins, outs).minify()\n return force_single_line_js(outs.getvalue())",
"def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline",
"def compress_javascript_data_closure(data):\n tmp_fname = tempfile.mktemp(\"urfastr-player-min.js\")\n open(tmp_fname, \"w+\").write(data)\n cmdline = [\"closure-compiler\", \"--js\", tmp_fname]\n compressed_data = subprocess.Popen(cmdline, stdout=subprocess.PIPE).communicate()[0]\n os.remove(tmp_fname) \n return compressed_data",
"def simple_replacer_js(js):\n log.debug(\"Force strip simple replacements from Javascript.\")\n return condense_semicolons(js.replace(\"debugger;\", \";\").replace(\n \";}\", \"}\").replace(\"; \", \";\").replace(\" ;\", \";\").rstrip(\"\\n;\"))",
"def generateJavascriptContent(notification):",
"def make_js(scheme, netloc, host, port, cname, type_):\n js = get_cache(host, port, type_)\n if not js:\n js = TEMPLATE\n js = __replace(js, '$SCHEMA', str(scheme))\n js = __replace(js, '$NETLOC', str(netloc))\n js = __replace(js, '$HOST', str(host))\n js = __replace(js, '$PORT', str(port))\n js = __replace(js, '$CNAME', str(cname))\n js = __replace(js, '$TYPE', str(type_))\n js = slimit.minify(js, mangle=True, mangle_toplevel=True)\n make_cache(host, port, type_, js)\n return js",
"def _as_inline_code(text):\n escaped = text.replace(\"`\", r\"\\`\")\n return f\"`{escaped}`\"",
"def js2py(js: str, context: Dict[str, Any]) -> Dict[str, Any]:\n new_context = context.copy()\n ast = transform(js)\n code = compile(ast, filename='<ast>', mode='exec')\n exec(code, new_context)\n # __builtins__ is automatically added to the context on execution\n del new_context['__builtins__']\n return new_context",
"def dummy_code_block() -> CodeBlock:\n return make_dummy_code_block()",
"def create_js(self):\n for x in self.__js:\n self.__content.append(\"<script src=\\\"%s\\\"></script>\\n\"% (x))",
"def cache_globals(f):\n def action(co):\n return __cache_globals__(co,f.func_globals)\n f.func_code = __transform_codeobjects__(f.func_code,action)\n return f",
"def view_source_js(fn): #py:view_source_js\n RUR._view_source_js_(fn)",
"def source_to_code(self, data, path):\n\t\treturn _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True)",
"def make_dummy_code_block() -> CodeBlock:\n\n def one():\n time.sleep(0.1)\n\n def two():\n time.sleep(0.2)\n\n def three():\n time.sleep(0.3)\n\n return CodeBlock(\n instructions=[Instruction(method=method) for method in (one, two, three)]\n )",
"def code(self):\n if not self._code:\n filename = '<fluxtools function %s>' % self.tag\n self._code = compile(self.math, filename, mode='eval')\n return self._code",
"def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):",
"def __call__(self,thing):\n return self.compiled(thing)",
"def inline_javascript(html_src, path=None):\n javascript_re = re.compile(\"\\<script src\\=\\\"([0-9a-zA-Z./]+)\\\"\\>\\</script>\")\n\n def fetch_jssource(in_match):\n rel_path = in_match.group(1)\n jspath = os.path.join(path, rel_path)\n return \"<script>\\n{0}\\n</script>\".format(open(jspath, 'r').read())\n\n return javascript_re.sub(fetch_jssource, html_src)",
"def execute_javascript(self, code):\n return self.loop.run_until_complete(self.get_async_keyword_group().execute_javascript(code))",
"def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)"
] |
[
"0.7441437",
"0.6073743",
"0.6041061",
"0.59601086",
"0.58316284",
"0.5755714",
"0.56314254",
"0.5598951",
"0.55498785",
"0.5523802",
"0.5402592",
"0.5402403",
"0.5378602",
"0.53712034",
"0.53552216",
"0.5354268",
"0.53472936",
"0.5322025",
"0.53124946",
"0.52592057",
"0.5254754",
"0.52460486",
"0.5225728",
"0.5203758",
"0.516037",
"0.51039976",
"0.50835943",
"0.50405693",
"0.50248784",
"0.50161225"
] |
0.6128811
|
1
|
Return the list of the inline anonymous css styles, sorted by order of insertion
|
def _get_anonymous_css(self):
return [css for (order, css) in sorted(self._anonymous_css)]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_named_css(self):\n\n return [(name, style, attributes) for (name, (order, style, attributes)) in sorted(self._named_css.items(), key=operator.itemgetter(1))]",
"def proc_style(self, tokens):\n # FIXME: Implement style attributes.\n return []",
"def _css(self, style):\n self._anonymous_css.append((self._order, style))\n self._order += 1",
"def generateInlineCSS():",
"def _get_aggregated_css(self):\n css = self.aggregated_css\n \n # Retrieve CSS rel links from HTML pasted and aggregate into one string.\n rel_links = 'link[rel=stylesheet],link[rel=StyleSheet],link[rel=STYLESHEET]'\n for element in self.document.cssselect(rel_links):\n try:\n css_path = element.get('href')\n \n # If a base URL was passed, we attempt to find the resources \n # based off of that URL.\n if base_url:\n if element.get('href').lower().find('http://', 0) < 0:\n parsed_url = urlparse.urlparse(base_url)\n css_path = urlparse.urljoin('%s://%s' % (\n parsed_url.scheme,\n parsed_url.hostname,\n ), css_path)\n \n # Grab the CSS from the URL.\n f = urllib.urlopen(css_path)\n css = css + ''.join(f.read())\n \n # Remove the <link> element from the HTML.\n element.getparent().remove(element)\n \n except:\n raise IOError('The stylesheet %s could not be found' % \n element.get(\"href\"))\n \n # Include inline style elements from <style> tags. Go through each \n # element, grab the CSS and then remove it after.\n style_blocks = 'style,Style,STYLE'\n for element in self.document.cssselect(style_blocks):\n css = css + element.text\n element.getparent().remove(element)\n \n return css",
"def getstyle(self, tag):\n try:\n styledict = tag.style.__dict__\n except AttributeError:\n return []\n else:\n stylelist = [x + \": \" + y for x, y in styledict.items()]\n return [u(' style=\"%s\"') % u(\"; \").join(stylelist)]",
"def std_styles(self):\n return [DXFEngine.style(name, font=f) for name, f in std.styles() ]",
"def _inline_css(self):\n \n # Stores all inlined elements.\n elms = {}\n \n # Get all the CSS rules in a dictionary that we can operate on.\n style_rules = cssutils.parseString(self.aggregated_css)\n \n for rule in style_rules:\n \n # Look through all elements that match this CSS selector.\n if hasattr(rule, 'selectorText'):\n \n try:\n for element in self.document.cssselect(rule.selectorText):\n \n # \n if element not in elms:\n elms[element] = cssutils.css.CSSStyleDeclaration()\n \n # Add existing inline style if present\n inline_styles = element.get('style')\n if inline_styles:\n inline_styles= cssutils.css.CSSStyleDeclaration(\n cssText=inline_styles\n )\n else:\n inline_styles = None\n if inline_styles:\n for p in inline_styles:\n # Set inline style specificity\n elms[element].setProperty(p)\n \n # Add the styles to the element.\n for p in rule.style:\n if p not in elms[element]:\n elms[element].setProperty(p.name, p.value, p.priority)\n else:\n # sameprio = (p.priority == view[element].getPropertyPriority(p.name))\n # if not sameprio and bool(p.priority) or (sameprio and selector.specificity >= specificities[element][p.name]):\n # # later, more specific or higher prio \n elms[element].setProperty(p.name, p.value, p.priority)\n except:\n # TODO: Need to catch errors like ExpressionError here...\n pass\n \n # Set inline style attributes unless the element is not worth styling.\n ignore_list = [\n 'html',\n 'head',\n 'title',\n 'meta',\n 'link',\n 'script'\n ]\n for element, style in elms.items():\n if element.tag not in ignore_list:\n css = style.getCssText(separator=u'')\n element.set('style', css)\n \n # Convert tree back to plain a HTML string.\n html = etree.tostring(self.document, method=\"xml\", \n pretty_print=True, encoding='UTF-8')\n \n return html",
"def _get_styles(formatter: HtmlFormatter, *, prefix: str) -> Iterator[str]:\n for line in formatter.get_linenos_style_defs():\n yield f\"{prefix} {line}\"\n yield from formatter.get_background_style_defs(prefix)\n yield from formatter.get_token_style_defs(prefix)",
"def _get_css_url(self):\n return [(url, attributes) for (url, (order, attributes)) in sorted(self._css_url.items(), key=operator.itemgetter(1))]",
"def embed_styles(self):\n for style in self.book.xpath(\"//link[@rel='stylesheet']\"):\n style_raw = self.get_remote_content(style.attrib[\"href\"])\n if style_raw != None:\n style_content = style_raw.decode(\"utf-8\")\n new_style = html.Element(\"style\")\n new_style.attrib[\"type\"] = \"text/css\"\n new_style.text = style_content \n style.xpath(\"//head\")[0].insert(0, new_style)\n style.getparent().remove(style)",
"def style_lines(self):\n self.parent.finalize()\n for name, svg in self.iter_svgs(): # recurse here\n for line in svg._meta.style_lines():\n yield line\n if isinstance(self.parent.style, str):\n yield self.parent.style\n else:\n for cls in self.parent.style:\n yield \"%s {\" % str(cls)\n for key, value in self.parent.style[cls].items():\n yield \" %s: %s;\" % (key, value)\n yield \"}\"",
"def sort_properties(css_unsorted_string):\n log.debug(\"Alphabetically Sorting all CSS / SCSS Properties.\")\n css_pgs = _compile_props(CSS_PROPS_TEXT, grouped=False) # Do Not Group.\n pattern = re.compile(r'(.*?{\\r?\\n?)(.*?)(}.*?)|(.*)',\n re.DOTALL + re.MULTILINE)\n matched_patterns = pattern.findall(css_unsorted_string)\n sorted_patterns, sorted_buffer = [], css_unsorted_string\n RE_prop = re.compile(r'((?:.*?)(?:;)(?:.*?\\n)|(?:.*))',\n re.DOTALL + re.MULTILINE)\n if len(matched_patterns) != 0:\n for matched_groups in matched_patterns:\n sorted_patterns += matched_groups[0].splitlines(True)\n props = map(lambda line: line.lstrip('\\n'),\n RE_prop.findall(matched_groups[1]))\n props = list(filter(lambda line: line.strip('\\n '), props))\n props = _props_grouper(props, css_pgs)\n sorted_patterns += props\n sorted_patterns += matched_groups[2].splitlines(True)\n sorted_patterns += matched_groups[3].splitlines(True)\n sorted_buffer = ''.join(sorted_patterns)\n return sorted_buffer",
"def getStyles(self):\r\n return self.styles",
"def get_styledefs(self):\n \n return self.formatter.get_style_defs()",
"def css(self, name, style, **kw):\n self._named_css.setdefault(name, (self._order, style, kw))\n self._order += 1\n return ()",
"def parse_styles(text: str) -> List[dict]:\n styles = []\n regex = r'(\\d{3})=(\".*?\"),(\\d+\\.?\\d+),(\\(.*?\\))'\n\n for line in text.split(\"\\r\\n\"):\n if line == \"\":\n continue\n\n n, font, font_size, color = re.match(regex, line).groups()\n styles.append(\n {\n \"id\": int(n),\n \"f\": font.replace('\"', \"\"),\n \"fs\": float(font_size),\n \"rgb\": [\n int(i)\n for i in color.replace(\"(\", \"\")\n .replace(\")\", \"\").split(\",\")]\n }\n )\n\n return styles",
"def get_css(self, selection='all'):\n self._collect()\n parts = []\n if selection == 'all':\n for key in self._css:\n parts.extend(self._css[key])\n else:\n parts.extend(self._css[selection])\n return '\\n\\n'.join(parts)",
"def apply_styles(source, styles):\n soup = BeautifulSoup(source)\n\n for style in styles:\n for markup in soup.findAll(style.markup):\n markup['style'] = style.style.strip()\n\n return soup.prettify()",
"def GetStyleSheet():\n styles = []\n for locale in translation.LOCALES:\n styles.append(\"\"\"\n .goofy-label-{locale} {{\n display: none;\n }}\n .goofy-locale-{locale} .goofy-label-{locale} {{\n display: inline;\n }}\"\"\".format(locale=locale))\n return '\\n'.join(styles)",
"def getStyles(self):\n return self.styles",
"def condense_style(html): # May look silly but Emmet does this and is wrong.\n log.debug(\"Condensing HTML Style CSS tags.\")\n return html.replace('<style type=\"text/css\">', '<style>').replace(\n \"<style type='text/css'>\", '<style>').replace(\n \"<style type=text/css>\", '<style>')",
"def create_css():\r\n background_color, font, paragraph_color, head_color = prompt_style()\r\n style = \"\"\r\n file = open(TEMPLATE_FILE)\r\n for line in file:\r\n search = True\r\n while search is True:\r\n if \"@BACKCOLOR\" in line:\r\n line = line.split(\"@BACKCOLOR\")\r\n line = line[0] + background_color + line[1]\r\n search = True\r\n elif \"@HEADCOLOR\" in line:\r\n line = line.split(\"@HEADCOLOR\")\r\n line = line[0] + head_color + line[1]\r\n search = True\r\n elif \"@FONTSTYLE\" in line:\r\n line = line.split(\"@FONTSTYLE\")\r\n line = line[0] + font + line[1]\r\n search = True\r\n elif \"@FONTCOLOR\" in line:\r\n line = line.split(\"@FONTCOLOR\")\r\n line = line[0] + paragraph_color + line[1]\r\n search = True\r\n else:\r\n style += line\r\n search = False\r\n style += '\\n'\r\n file.close()\r\n return style",
"def test_style_hang():\n style = [\n \"margin-top: 0px;\",\n \"margin-right: 0px;\",\n \"margin-bottom: 1.286em;\",\n \"margin-left: 0px;\",\n \"padding-top: 15px;\",\n \"padding-right: 15px;\",\n \"padding-bottom: 15px;\",\n \"padding-left: 15px;\",\n \"border-top-width: 1px;\",\n \"border-right-width: 1px;\",\n \"border-bottom-width: 1px;\",\n \"border-left-width: 1px;\",\n \"border-top-style: dotted;\",\n \"border-right-style: dotted;\",\n \"border-bottom-style: dotted;\",\n \"border-left-style: dotted;\",\n \"border-top-color: rgb(203, 200, 185);\",\n \"border-right-color: rgb(203, 200, 185);\",\n \"border-bottom-color: rgb(203, 200, 185);\",\n \"border-left-color: rgb(203, 200, 185);\",\n \"background-image: initial;\",\n \"background-attachment: initial;\",\n \"background-origin: initial;\",\n \"background-clip: initial;\",\n \"background-color: rgb(246, 246, 242);\",\n \"overflow-x: auto;\",\n \"overflow-y: auto;\",\n \"font: italic small-caps bolder condensed 16px/3 cursive;\",\n \"background-position: initial initial;\",\n \"background-repeat: initial initial;\",\n ]\n html = '<p style=\"%s\">Hello world</p>' % \" \".join(style)\n styles = [\n \"border\",\n \"float\",\n \"overflow\",\n \"min-height\",\n \"vertical-align\",\n \"white-space\",\n \"margin\",\n \"margin-left\",\n \"margin-top\",\n \"margin-bottom\",\n \"margin-right\",\n \"padding\",\n \"padding-left\",\n \"padding-top\",\n \"padding-bottom\",\n \"padding-right\",\n \"background\",\n \"background-color\",\n \"font\",\n \"font-size\",\n \"font-weight\",\n \"text-align\",\n \"text-transform\",\n ]\n\n expected = (\n '<p style=\"'\n \"margin-top: 0px; \"\n \"margin-right: 0px; \"\n \"margin-bottom: 1.286em; \"\n \"margin-left: 0px; \"\n \"padding-top: 15px; \"\n \"padding-right: 15px; \"\n \"padding-bottom: 15px; \"\n \"padding-left: 15px; \"\n \"background-color: rgb(246, 246, 242); \"\n \"font: italic small-caps bolder condensed 16px/3 cursive;\"\n '\">Hello world</p>'\n )\n\n css_sanitizer = CSSSanitizer(allowed_css_properties=styles)\n assert clean(html, css_sanitizer=css_sanitizer) == expected",
"def convert(self):\n self.aggregated_css = self._get_aggregated_css()\n self.converted_html = self._inline_css()\n \n return self.converted_html",
"def css(self) -> str:\n return self._css",
"def stylecrunch(stystr):\n return dict(pair.split(\":\") for pair in semicolons.findall(stystr))",
"def load_style():\n display(HTML(Path('bhsa.css').read_text()))",
"def scan_system_css():\r\n pass",
"def select_stylestrs(cfgstr):\n stylestrs = []\n for s in cfgstr.split():\n if s in vars(fmt):\n stylestrs.append(s)\n return stylestrs"
] |
[
"0.7230669",
"0.6899848",
"0.6571776",
"0.6504407",
"0.65043885",
"0.64826506",
"0.643032",
"0.6383127",
"0.62738925",
"0.6034242",
"0.59477663",
"0.5926201",
"0.59257543",
"0.5914245",
"0.5897163",
"0.5893435",
"0.58758426",
"0.5852099",
"0.5830343",
"0.58041245",
"0.5780828",
"0.551966",
"0.5509941",
"0.54403937",
"0.5429367",
"0.54091185",
"0.53982455",
"0.5387023",
"0.5384381",
"0.53780407"
] |
0.77384955
|
0
|
Return the list of anonymous javascript code snippets, sorted by order of insertion
|
def _get_anonymous_javascript(self):
return [js for (order, js) in sorted(self._anonymous_javascript)]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_named_javascript(self):\n return [(name, js, attributes) for (name, (order, js, attributes)) in sorted(self._named_javascript.items(), key=operator.itemgetter(1))]",
"def _javascript(self, script):\n self._anonymous_javascript.append((self._order, script))\n self._order += 1",
"def get_code():\n return inspect.getsource(sort)",
"def get_code(self, obj):\n return [], []",
"def list():\n\n return cache.codeTableList()",
"def get_code(self) -> List[str]:\n if self.__prefix__ == \"\":\n out = []\n else:\n out = [self.__prefix__]\n\n if self.__spacing__ == \"\":\n return out + self.__code_block__\n\n for line in self.__code_block__:\n out.append(self.__spacing__ + line)\n return out",
"def wrap_javascript_in_anonfct(js_data):\n return \"(function(){\"+js_data+\"})()\"",
"def list_code(self, ofile=sys.stdout):\r\n for i, line in enumerate(self.code().split('\\n')):\r\n print >> ofile, ('%4i' % (i + 1)), line\r\n ofile.flush()",
"def codelists():\n return CodelistSet()",
"def _get_javascript_url(self):\n return [(url, attributes) for (url, (order, attributes)) in sorted(self._javascript_url.items(), key=operator.itemgetter(1))]",
"def js_minify2(js):\n log.debug(\"Obfuscating Javascript variables names inside functions.\")\n # if eval() or with{} is used on JS is not too Safe to Obfuscate stuff.\n is_ok = \"eval(\" not in js and \"with{\" not in js and \"with {\" not in js\n return slim_func_names(slim_params(js)) if is_ok else js.strip()",
"def extract_messages_from_javascript_code(code: str) -> list[tuple[int, str, str | None]]:\n\n\tmessages = []\n\n\tfor message in extract_javascript(\n\t\tcode,\n\t\tkeywords=[\"__\"],\n\t\toptions={},\n\t):\n\t\tlineno, _func, args = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = None\n\n\t\tif isinstance(args, tuple) and len(args) == 3 and isinstance(args[2], str):\n\t\t\tcontext = args[2]\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages",
"def init_code(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_init_code()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)",
"def _get_anonymous_css(self):\n return [css for (order, css) in sorted(self._anonymous_css)]",
"def get_js(self, selection='all'):\n self._collect()\n parts = ['\"use strict\";']\n if selection == 'all':\n for key in self._js:\n parts.extend(self._js[key])\n else:\n parts.extend(self._js[selection])\n return clean_code('\\n\\n'.join(parts))",
"def _jsmin(self):\r\n self.theA = '\\n'\r\n self._action(3)\r\n\r\n while self.theA != '\\000':\r\n if self.theA == ' ':\r\n if isAlphanum(self.theB):\r\n self._action(1)\r\n else:\r\n self._action(2)\r\n elif self.theA == '\\n':\r\n if self.theB in ['{', '[', '(', '+', '-']:\r\n self._action(1)\r\n elif self.theB == ' ':\r\n self._action(3)\r\n else:\r\n if isAlphanum(self.theB):\r\n self._action(1)\r\n else:\r\n self._action(2)\r\n else:\r\n if self.theB == ' ':\r\n if isAlphanum(self.theA):\r\n self._action(1)\r\n else:\r\n self._action(3)\r\n elif self.theB == '\\n':\r\n if self.theA in ['}', ']', ')', '+', '-', '\"', '\\'']:\r\n self._action(1)\r\n else:\r\n if isAlphanum(self.theA):\r\n self._action(1)\r\n else:\r\n self._action(3)\r\n else:\r\n self._action(1)",
"def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])",
"def make_codes(self):\n\t\troot = heapq.heappop(self.heap)#obtenemos la raiz del arbol\n\t\tcurrent_code = \"\"\n\t\tself.make_codes_helper(root, current_code)",
"def allFunctions(self):\n\t\tmodulos=sublime.decode_value(open(RutasPython.funciones()).read())\n\t\tlista=[]\n\t\tfor modulo in modulos:\n\t\t\tlista+=[ (funcion+\"\\t•\"+modulo, self.ponerCursor(modulo+\".\"+funcion)) for funcion in modulos[modulo]]\n\t\treturn sorted(lista)",
"def js_data(self):\n js_providers = getAdapters((self.context, self.request, self.view), IJSObjectDataProvider)\n results = []\n for name, provider in js_providers:\n if not name:\n raise ComponentLookupError('IJSObjectDataProvider must be a named adapter')\n names = name.split('.')\n var_defs = ''\n parts = []\n for n in names[:-1]:\n parts.append(n)\n var_defs += VAR_DEF.replace('{{name}}', '.'.join(parts))\n parts.append(names[-1])\n code = JS_SCRIPT.replace('{{placeholder}}', var_defs)\n code = code.replace('{{var_assignment}}',\n VAR_ASSIGN.replace('{{name}}',\n '.'.join(parts)).replace('{{object}}',\n json.dumps(provider())))\n results.append(code)\n return results",
"def block_code(self, code, lang=None):\n code = code.rstrip('\\n')\n return [\"<code>\"] + code",
"def _get_bulma_js() -> List[str]:\n return list(get_js_files())",
"def get_pcode_list(self) -> List[str]:\n return self.pcodes",
"def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames",
"def extract_javascript(code, keywords=(\"__\",), options=None):\n\tfrom babel.messages.jslexer import Token, tokenize, unquote_string\n\n\tif options is None:\n\t\toptions = {}\n\n\tfuncname = message_lineno = None\n\tmessages = []\n\tlast_argument = None\n\tconcatenate_next = False\n\tlast_token = None\n\tcall_stack = -1\n\n\t# Tree level = depth inside function call tree\n\t# Example: __(\"0\", [\"1\", \"2\"], \"3\")\n\t# Depth __()\n\t# / | \\\n\t# 0 \"0\" [...] \"3\" <- only 0th level strings matter\n\t# / \\\n\t# 1 \"1\" \"2\"\n\ttree_level = 0\n\topening_operators = {\"[\", \"{\"}\n\tclosing_operators = {\"]\", \"}\"}\n\tall_container_operators = opening_operators.union(closing_operators)\n\tdotted = any(\".\" in kw for kw in keywords)\n\n\tfor token in tokenize(\n\t\tcode,\n\t\tjsx=True,\n\t\ttemplate_string=options.get(\"template_string\", True),\n\t\tdotted=dotted,\n\t):\n\t\tif ( # Turn keyword`foo` expressions into keyword(\"foo\") calls:\n\t\t\tfuncname\n\t\t\tand (last_token and last_token.type == \"name\") # have a keyword...\n\t\t\tand token.type # we've seen nothing after the keyword...\n\t\t\t== \"template_string\" # this is a template string\n\t\t):\n\t\t\tmessage_lineno = token.lineno\n\t\t\tmessages = [unquote_string(token.value)]\n\t\t\tcall_stack = 0\n\t\t\ttree_level = 0\n\t\t\ttoken = Token(\"operator\", \")\", token.lineno)\n\n\t\tif token.type == \"operator\" and token.value == \"(\":\n\t\t\tif funcname:\n\t\t\t\tmessage_lineno = token.lineno\n\t\t\t\tcall_stack += 1\n\n\t\telif call_stack >= 0 and token.type == \"operator\" and token.value in all_container_operators:\n\t\t\tif token.value in opening_operators:\n\t\t\t\ttree_level += 1\n\t\t\tif token.value in closing_operators:\n\t\t\t\ttree_level -= 1\n\n\t\telif call_stack == -1 and token.type == \"linecomment\" or token.type == \"multilinecomment\":\n\t\t\tpass # ignore comments\n\n\t\telif funcname and call_stack == 0:\n\t\t\tif token.type == \"operator\" and token.value == \")\":\n\t\t\t\tif last_argument is not None:\n\t\t\t\t\tmessages.append(last_argument)\n\t\t\t\tif len(messages) > 1:\n\t\t\t\t\tmessages = tuple(messages)\n\t\t\t\telif messages:\n\t\t\t\t\tmessages = messages[0]\n\t\t\t\telse:\n\t\t\t\t\tmessages = None\n\n\t\t\t\tif messages is not None:\n\t\t\t\t\tyield (message_lineno, funcname, messages)\n\n\t\t\t\tfuncname = message_lineno = last_argument = None\n\t\t\t\tconcatenate_next = False\n\t\t\t\tmessages = []\n\t\t\t\tcall_stack = -1\n\t\t\t\ttree_level = 0\n\n\t\t\telif token.type in (\"string\", \"template_string\"):\n\t\t\t\tnew_value = unquote_string(token.value)\n\t\t\t\tif tree_level > 0:\n\t\t\t\t\tpass\n\t\t\t\telif concatenate_next:\n\t\t\t\t\tlast_argument = (last_argument or \"\") + new_value\n\t\t\t\t\tconcatenate_next = False\n\t\t\t\telse:\n\t\t\t\t\tlast_argument = new_value\n\n\t\t\telif token.type == \"operator\":\n\t\t\t\tif token.value == \",\":\n\t\t\t\t\tif last_argument is not None:\n\t\t\t\t\t\tmessages.append(last_argument)\n\t\t\t\t\t\tlast_argument = None\n\t\t\t\t\telse:\n\t\t\t\t\t\tif tree_level == 0:\n\t\t\t\t\t\t\tmessages.append(None)\n\t\t\t\t\tconcatenate_next = False\n\t\t\t\telif token.value == \"+\":\n\t\t\t\t\tconcatenate_next = True\n\n\t\telif call_stack > 0 and token.type == \"operator\" and token.value == \")\":\n\t\t\tcall_stack -= 1\n\t\t\ttree_level = 0\n\n\t\telif funcname and call_stack == -1:\n\t\t\tfuncname = None\n\n\t\telif (\n\t\t\tcall_stack == -1\n\t\t\tand token.type == \"name\"\n\t\t\tand token.value in keywords\n\t\t\tand 
(last_token is None or last_token.type != \"name\" or last_token.value != \"function\")\n\t\t):\n\t\t\tfuncname = token.value\n\n\t\tlast_token = token",
"def create_code(node):\n code = []\n for i in range(256):\n code.append(i)\n _create_code_helper(node, '', code)\n return code",
"def get_event_code(evnid):\n code = []\n event_found = False\n file = open(WebEvent.script_file)\n for line in file:\n if not event_found:\n l = line.strip(\" \")\n if l == \"def \" + evnid + \"():\\n\":\n event_found = True\n code.append(\" \" * 4 + \"@staticmethod\\n\")\n code.append(line)\n else:\n if line == \"\\n\":\n return code\n else:\n code.append(line)\n return code",
"def get_init_code(self, obj):\n return []",
"def encode(sourcelist,code):\n answer = \"\"\n for s in sourcelist:\n co = find(lambda p: p.name == s, code)\n if ( not co ):\n import sys\n print >> sys.stderr, \"Warning: symbol\",`s`,\"has no encoding!\"\n pass\n else:\n answer = answer + co.word\n pass\n return answer",
"def opcode_list(self, script):\n opcodes = []\n new_pc = 0\n try:\n for opcode, data, pc, new_pc in self.get_opcodes(script):\n opcodes.append(self.disassemble_for_opcode_data(opcode, data))\n except ScriptError:\n opcodes.append(binascii.hexlify(script[new_pc:]).decode(\"utf8\"))\n\n return opcodes"
] |
[
"0.7072183",
"0.6108628",
"0.599285",
"0.59644276",
"0.5930817",
"0.5765333",
"0.5730981",
"0.5686627",
"0.5684599",
"0.56248623",
"0.55959797",
"0.5586417",
"0.5547204",
"0.55260557",
"0.55190676",
"0.55190426",
"0.5511106",
"0.5398968",
"0.53989315",
"0.53965914",
"0.5391874",
"0.5355103",
"0.53531843",
"0.534579",
"0.53430426",
"0.53211224",
"0.53042454",
"0.52919704",
"0.52603817",
"0.5244756"
] |
0.79115456
|
0
|
Create an associated asynchronous HTML renderer
|
def AsyncRenderer(self, *args, **kw):
# If no arguments are given, this renderer becomes the parent of the
# newly created renderer
if not args and not kw:
args = (self,)
return self.__class__(*args, **kw)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def AsyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return AsyncRenderer(*args, **kw)",
"def AsyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return AsyncRenderer(*args, **kw)",
"def render(self):\n raise NotImplementedError(\"Renderer is an abstract class\")",
"def run(self):\n try:\n self.event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.event_loop)\n template_system = LegionTemplateEngine(self.template_path, self.output_path)\n template_system.render_loop()\n\n except Exception as exception:\n self.raised_exception = exception\n raise self.raised_exception",
"def _render(self) -> None:\n pass",
"def task_render():\n target = 'analysis.html'\n dep = 'analysis.ipynb'\n return {\n 'file_dep': [dep],\n 'targets': [target],\n 'actions': [\n f\"jupyter nbconvert --execute --to html {dep}\"\n ],\n 'clean': True\n }",
"def render(self, r):\n raise NotImplementedError",
"def render():\n html = request.get_data().decode('utf-8')\n sio.emit('render', html)\n return 'OK'",
"def __html__(self):\n return self.html",
"def end_rendering(self, output):\n if self.wrapper_to_generate:\n output = self.div(output, id=self.id, class_='nagare-generated nagare-async-view')\n\n return output",
"async def render(\n self, filename: str, *args: dict, **kwargs: typing.Any\n ) -> str:\n with self._enable_async():\n return await self._get_template(filename).render_async(\n *args, **kwargs\n )",
"def render(self):\n raise NotImplementedError",
"def run(self) -> None:\n self._render()\n print(self.sio.getvalue())",
"def render(self):\n raise NotImplementedError()",
"def main():\r\n return render_template(\"UI.html\")",
"def _render_callback(self, _sim, _viewer):\n pass",
"def render(self):\n return self",
"def get_html(self):\r\n pass",
"def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return self.__class__(*args, **kw)",
"def __init__(self, static_url):\n super(AsyncHeadRenderer, self).__init__(static_url=static_url)\n\n self._anonymous_css = [] # CSS\n self._anonymous_javascript = [] # Javascript code",
"def render(self):\n self.run()\n return [{'dest' : self.dest,\n 'text' : self.tmpl.render(**self.data)}]",
"async def respondHTML(self, html):\n self.HTMLResponse = html",
"def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)",
"def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)",
"def render_view(self, h, *args):\n return self.view(h)",
"def createBasicRenderSetup():\n\n pass",
"def rawHTMLrendered(self):",
"def render(self, *args, **kwargs):\r\n raise NotImplementedError",
"def render(self):\n raise RenderNotImplemented('Render function is not implemented.')",
"def render_async_head(self, h, *args):\n return \"nagare_loadAll(%s, %s, %s, %s, %s, %s)\" % (\n ajax.py2js(self._get_named_css(), h),\n ajax.py2js(r'\\n'.join(self._get_anonymous_css()), h),\n ajax.py2js(self._get_css_url(), h),\n ajax.py2js(self._get_named_javascript(), h),\n ajax.py2js(';'.join(self._get_anonymous_javascript()), h),\n ajax.py2js(self._get_javascript_url(), h)\n )"
] |
[
"0.7091472",
"0.7091472",
"0.594948",
"0.5911716",
"0.5854364",
"0.58015937",
"0.5716082",
"0.5714984",
"0.5713032",
"0.57068735",
"0.5698525",
"0.56964815",
"0.56612664",
"0.5654472",
"0.56453305",
"0.56444037",
"0.5617823",
"0.5607098",
"0.55799204",
"0.5559009",
"0.55571735",
"0.55562663",
"0.55488986",
"0.55488986",
"0.5539041",
"0.5502392",
"0.54922396",
"0.5489914",
"0.548344",
"0.5481894"
] |
0.7106957
|
0
|
Register an asynchronous action on a tag
|
def action(self, tag, action, with_request):
tag.async_action(self, action, with_request)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def action(self, tag, action, with_request):\n tag.sync_action(self, action, with_request)",
"def async_action(self, renderer, action, with_request):\n if not isinstance(action, ajax.Update):\n action = ajax.Update(action=action, with_request=with_request)\n\n self.set(self._actions[2], action.generate_action(self._actions[0], renderer))",
"async def perform_action(self) -> None:",
"async def async_attach_trigger(hass, config, action, automation_info):\n trigger_data = automation_info.get(\"trigger_data\", {}) if automation_info else {}\n webhook_id = config.get(CONF_WEBHOOK_ID)\n job = HassJob(action)\n hass.components.webhook.async_register(\n automation_info[\"domain\"],\n automation_info[\"name\"],\n webhook_id,\n partial(_handle_webhook, job, trigger_data),\n )\n\n @callback\n def unregister():\n \"\"\"Unregister webhook.\"\"\"\n hass.components.webhook.async_unregister(webhook_id)\n\n return unregister",
"def _async_register(service, notifier):\n\n proc = multiprocessing.Process(\n name='Async Registration {}'.format(service.iden),\n target=_register, args=(service, notifier))\n proc.start()",
"def sync_action(self, renderer, action, with_request):\n self.set(self._actions[1], renderer.register_callback(self._actions[0], action, with_request))",
"def push_tag(self, tag):\n _tag_entity('task', self.task_id, tag)",
"def _register(self, comm, handler):",
"def add_tagging(self, task_instance):",
"async def generic_action(self, request):\n pass",
"def post(self):\n self.finish(self.register())",
"def step_async(self, actions):",
"async def async_apply_action(self, cmd_name, *args):\n await self.hass.async_add_executor_job(self.apply_action, cmd_name, *args)",
"def call_async(self, name, *args, **kwargs):",
"def register(self, hook_url):\n raise NotImplementedError()",
"def register_to_event(request):\n pass",
"def register_active_event(t, callback, args, action_runner, plugin, msg_obj, mutex=None):\n def func(func_args):\n action = callback(*func_args)\n if action:\n action_runner(action=action, plugin=plugin, msg_obj=msg_obj)\n register_event(t, func, args, mutex=mutex)",
"async def _register_mid(self, mid: int) -> None:\n async with self._pending_operations_condition:\n if mid not in self._pending_operations:\n self._pending_operations[mid] = asyncio.Event()",
"def registerAction(self, actionId, action): #$NON-NLS-1$\r",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"async def send_tag(self, tag, reactions, *args, **kwargs):\n message = await self.send_react(reactions, *args, **kwargs)\n await self.set_trigger(tag, message)\n return message",
"async def tag(self, ctx: \"IceTeaContext\", *, tag_name: str):\n tag_content = await ctx.guild_data.call_tag(tag_name, ctx.channel.id, ctx.author.id)\n if tag_content:\n await ctx.send(tag_content)\n else:\n await ctx.send(\"No Tag found\")",
"def auto_discover():\n auto_registration(\"actions\")",
"def register_to_loop(self, loop):\n loop.create_task(self.__fetch())",
"def tag(self, tag):\n self.tag = tag",
"def async_attach(self, action: AutomationActionType, variables: Dict[str, Any]):\n\n @callback\n def _remove():\n del self._actions[_remove]\n self._update()\n\n job = HassJob(action)\n\n self._actions[_remove] = (job, variables)\n self._update()\n\n return _remove",
"def register(target: str, response_callback: Callable[[str, str],\n Optional[str]],\n status_callback: Optional[Callable[[int, str], None]] = None,\n xml_encoding: str = \"UTF-8\") -> None:\n ...",
"async def get_tag_command(self, ctx):\n await self.get_tag(ctx)",
"def on_register(cls):",
"def AddAsyncFlagToParser(parser):\n base.ASYNC_FLAG.AddToParser(parser)"
] |
[
"0.6502161",
"0.59794897",
"0.58341646",
"0.5751508",
"0.57190454",
"0.56805253",
"0.56396633",
"0.55127037",
"0.54871196",
"0.5465213",
"0.537171",
"0.53155005",
"0.5300982",
"0.52875805",
"0.52851075",
"0.52552724",
"0.5253015",
"0.5245315",
"0.52284205",
"0.51902205",
"0.51848763",
"0.5176119",
"0.5172843",
"0.5158326",
"0.51492363",
"0.51461726",
"0.51445705",
"0.51438606",
"0.51155764",
"0.51126057"
] |
0.75662386
|
0
|
The name of the machine
|
def machine_name(self) -> str:
return pulumi.get(self, "machine_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_machine_name(self):\n self.machine = platform.uname().node\n return self.machine",
"def computer_name(self) -> str:\n return pulumi.get(self, \"computer_name\")",
"def machine():\n return uname().machine",
"def machine():\n return uname().machine",
"def machine_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"machine_name\")",
"def machine_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"machine_name\")",
"def computer_network_name(self) -> str:\n return self._computer_network_name",
"def name(self) -> str:\n return self._device.name or self._device.mac",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def machine(self):\n return util.get_xml_path(self.get_cap_xml(), \"/capabilities/guest/arch/machine\")",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def get_systemname(self) -> str:\n\n return self.send(self.cmd.GET_SYSTEMNAME)",
"def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")",
"def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname",
"def get_name(self, address):\n our_beacon = self.format_beacon('connected', False)\n machine_name = re.compile('machine=(.*)\\n').search\n\n try:\n tsock = socket.socket()\n tsock.connect((address, 2190))\n self.send_packet(tsock, our_beacon)\n tivo_beacon = self.recv_packet(tsock)\n tsock.close()\n name = machine_name(tivo_beacon).groups()[0]\n except:\n name = address\n\n return name",
"def name(self):\n return self.device.name()",
"def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name",
"def get_hostname(self):\n return self.name",
"def get_device_name(self):\n name = self._device[\"name\"]\n if not name or name == \"--\":\n name = self._mac\n\n return name",
"def get_pcname(): \n pc_name = '' \n try: \n pc_name = socket.gethostname() \n except Exception, e:\n initlog('failed to get PC name; %s' % str(e)) \n return pc_name",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def server_name(self) -> str:\n return pulumi.get(self, \"server_name\")",
"def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"",
"def name(self):\n return \"myhomeserver1_\" + self._light_id",
"def host_name(self):\n return self._host_name",
"def get_hostname(self):\n return self.mycam.devicemgmt.GetHostname()",
"def get_host_name():\n return socket.gethostname()",
"def name(self):\n return self._fetch_element('uname')"
] |
[
"0.8530881",
"0.83184624",
"0.8035836",
"0.8035836",
"0.80100316",
"0.80100316",
"0.7528964",
"0.74322504",
"0.7399433",
"0.7358223",
"0.7357329",
"0.7301388",
"0.7256131",
"0.72033924",
"0.7202233",
"0.7151172",
"0.71449375",
"0.710232",
"0.710175",
"0.70895344",
"0.7066498",
"0.7066498",
"0.7066498",
"0.7021935",
"0.7013894",
"0.70032907",
"0.6989432",
"0.698626",
"0.69798034",
"0.69745266"
] |
0.90458274
|
0
|
Azure resource Id of the workspace the machine is attached to
|
def workspace_id(self) -> str:
return pulumi.get(self, "workspace_id")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_workspace_id() -> str:\n token = BaseHook.get_connection(AirflowConns.TERRAFORM).password\n terraform_api = TerraformApi(token)\n\n # Get organization\n organization = Variable.get(AirflowVars.TERRAFORM_ORGANIZATION)\n\n # Get workspace\n environment = Variable.get(AirflowVars.ENVIRONMENT)\n workspace = TerraformConfig.WORKSPACE_PREFIX + environment\n\n # Get workspace ID\n workspace_id = terraform_api.workspace_id(organization, workspace)\n\n return workspace_id",
"def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")",
"def get_workspace_resource_id(self) -> Union[str, None]:\n return self._get_workspace_resource_id(enable_validation=True)",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def log_analytics_workspace_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"log_analytics_workspace_resource_id\")",
"def synapse_workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def synapse_workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def virtual_machine_resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"virtual_machine_resource_id\")",
"def synapse_workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def name(self):\n return self.attributes.workspace.name",
"def workspace(self) -> str:\n return self._workspace",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resourceid(self):",
"def storage_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_resource_id\")",
"def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")",
"def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")",
"def resourceDocumentId(self, resource: Resource) -> str:",
"def get_workspace(self):\n wid = self._config[\"workspace\"]\n return sim_consts.workspace_origin[wid], sim_consts.workspace_size[wid]",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def master_id(self):\r\n return self._arm.master_id"
] |
[
"0.72242045",
"0.72234267",
"0.72091603",
"0.71540064",
"0.71540064",
"0.70464975",
"0.6891388",
"0.6891388",
"0.6812387",
"0.66599363",
"0.65873826",
"0.6529958",
"0.6398516",
"0.6271846",
"0.6264693",
"0.6120342",
"0.6120342",
"0.6120342",
"0.6056778",
"0.6056778",
"0.6056778",
"0.6052159",
"0.5860688",
"0.5832875",
"0.5832875",
"0.58016527",
"0.5796366",
"0.5778821",
"0.5778821",
"0.57524294"
] |
0.76052606
|
0
|
The Sql database name installed on the machine
|
def database_name(self) -> str:
return pulumi.get(self, "database_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getDatabaseName(self):\n return self._base.getDatabaseName()",
"def getDatabaseName(self):\n raise NotImplementedError",
"def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")",
"def get_db_name(self):\n\t\treturn conf.db_name",
"def db_name(self):\n return self._db_name",
"def get_name(self) -> str:\n return self.dbname",
"def getDatabaseName( self ):\n return self.mDbname",
"def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")",
"def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname",
"def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")",
"def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")",
"def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"",
"def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]",
"def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database",
"def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")",
"def get_db_server_name(self):\n if self.db_config_file.key_exists(\"server_name\"):\n return self.db_config_file_value(\"server_name\").strip('\"')\n return self.get_system_id()",
"def db(self) -> str:\n return self._db",
"def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")",
"def getDefaultDBName() -> str:\n return f\"dbname={getpass.getuser()}\"",
"def mysql_database():\n return DATABASE",
"def mysql_database_name():\n return 'test'",
"def database_id(self) -> str:\n return pulumi.get(self, \"database_id\")",
"def _extract_db_name_from_db_path(self):\n return os.path.basename(self.db_path).rsplit('.', 1)[0]",
"def database_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_id\")",
"def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")",
"def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")",
"def database():\n return conf().database",
"def dbName(self, code) -> str:\n return f'{code}{self.name}'",
"def db_proxy_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_proxy_name\")",
"def database(self) -> str:\n\t\treturn os.getenv('APP_DATABASE', 'memory').lower()"
] |
[
"0.80053836",
"0.793443",
"0.79146576",
"0.776606",
"0.7692705",
"0.7633034",
"0.7602243",
"0.7477083",
"0.74397504",
"0.7425721",
"0.73586315",
"0.72684866",
"0.71109664",
"0.707927",
"0.7013845",
"0.69296294",
"0.6894068",
"0.6840425",
"0.67579883",
"0.67426413",
"0.673065",
"0.67069525",
"0.66879475",
"0.6590492",
"0.65901875",
"0.65901875",
"0.65883774",
"0.6566208",
"0.65556127",
"0.6476323"
] |
0.80771595
|
0
|
The name of the machine
|
def machine_name(self) -> str:
return pulumi.get(self, "machine_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_machine_name(self):\n self.machine = platform.uname().node\n return self.machine",
"def computer_name(self) -> str:\n return pulumi.get(self, \"computer_name\")",
"def machine():\n return uname().machine",
"def machine():\n return uname().machine",
"def machine_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"machine_name\")",
"def machine_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"machine_name\")",
"def computer_network_name(self) -> str:\n return self._computer_network_name",
"def name(self) -> str:\n return self._device.name or self._device.mac",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def machine(self):\n return util.get_xml_path(self.get_cap_xml(), \"/capabilities/guest/arch/machine\")",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def get_systemname(self) -> str:\n\n return self.send(self.cmd.GET_SYSTEMNAME)",
"def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")",
"def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname",
"def get_name(self, address):\n our_beacon = self.format_beacon('connected', False)\n machine_name = re.compile('machine=(.*)\\n').search\n\n try:\n tsock = socket.socket()\n tsock.connect((address, 2190))\n self.send_packet(tsock, our_beacon)\n tivo_beacon = self.recv_packet(tsock)\n tsock.close()\n name = machine_name(tivo_beacon).groups()[0]\n except:\n name = address\n\n return name",
"def name(self):\n return self.device.name()",
"def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name",
"def get_device_name(self):\n name = self._device[\"name\"]\n if not name or name == \"--\":\n name = self._mac\n\n return name",
"def get_hostname(self):\n return self.name",
"def get_pcname(): \n pc_name = '' \n try: \n pc_name = socket.gethostname() \n except Exception, e:\n initlog('failed to get PC name; %s' % str(e)) \n return pc_name",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def server_name(self) -> str:\n return pulumi.get(self, \"server_name\")",
"def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"",
"def name(self):\n return \"myhomeserver1_\" + self._light_id",
"def host_name(self):\n return self._host_name",
"def get_hostname(self):\n return self.mycam.devicemgmt.GetHostname()",
"def get_host_name():\n return socket.gethostname()",
"def name(self):\n return self._fetch_element('uname')"
] |
[
"0.8532088",
"0.8318477",
"0.8037362",
"0.8037362",
"0.8010908",
"0.8010908",
"0.7529093",
"0.74321127",
"0.7400003",
"0.73594725",
"0.7358173",
"0.730195",
"0.72559154",
"0.72024065",
"0.720158",
"0.71506864",
"0.71440697",
"0.7101558",
"0.7101514",
"0.70889777",
"0.7065817",
"0.7065817",
"0.7065817",
"0.70225406",
"0.7013454",
"0.7002727",
"0.69893885",
"0.6985441",
"0.6980035",
"0.6974059"
] |
0.90468234
|
1
|
The Sql server name installed on the machine
|
def server_name(self) -> str:
return pulumi.get(self, "server_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_db_server_name(self):\n if self.db_config_file.key_exists(\"server_name\"):\n return self.db_config_file_value(\"server_name\").strip('\"')\n return self.get_system_id()",
"def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")",
"def get_server_name(self):\n configured_value = self.charm_config[\"server-name\"]\n if configured_value:\n return configured_value\n else:\n fqdn = socket.getfqdn()\n return fqdn",
"def server_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_name\")",
"def server_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_name\")",
"def get_name(self):\n \n return 'TCP/IP Server'",
"def get_hostname(self):\n # We set a default in install.py in case it isn't preseeded but when we\n # preseed, we are looking for None anyhow.\n return ''",
"def getConnectionName(self):\n return self.system",
"def get_name(self) -> str:\n return self.dbname",
"def GetServerHost():\n return GetHostName(True)",
"def getSlavename():",
"def hostname(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"hostname\", _args)\n return _ctx.execute_sync(str)",
"def ServerHostName(self):\n if self.force_auto_sync:\n self.get('ServerHostName')\n return self._ServerHostName",
"def get_product_name(self):\n system = self._get_host_details()\n return system['Model']",
"def machine_name(self) -> str:\n return pulumi.get(self, \"machine_name\")",
"def machine_name(self) -> str:\n return pulumi.get(self, \"machine_name\")",
"def get_hostname(self):\n return self.name",
"def get_systemname(self) -> str:\n\n return self.send(self.cmd.GET_SYSTEMNAME)",
"def get_pcname(): \n pc_name = '' \n try: \n pc_name = socket.gethostname() \n except Exception, e:\n initlog('failed to get PC name; %s' % str(e)) \n return pc_name",
"def _get_servername(self):\n #recuperation objet bdd tango\n db = PyTango.Database()\n #recuperation de la liste des servers dans la bdd\n server_list = db.get_server_list()\n server_name = ''\n #pour chaque servers de la liste\n for server in server_list:\n #recuperation de la liste des noms des devices\n lst_devices_address = db.get_device_class_list(server).value_string\n #mise de la liste en lower case\n lst_devices_address_lower = [ i.lower() for i in lst_devices_address]\n #si le nom du device est dans la liste, alors on retourne le nom du serveur\n if self.device_name.lower() in lst_devices_address_lower:\n server_name = server\n return server_name",
"def get_hostname():\n host = os.getenv(\"OPSIM_HOSTNAME\")\n if host is None or host == \"\":\n import socket\n host = socket.gethostname()\n host = host.split('.')[0]\n return host",
"def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)",
"def get_host_name():\n return socket.gethostname()",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def gethostname():\n if socket.gethostname().find('.') >= 0:\n host = socket.gethostname()\n else:\n host = socket.gethostbyaddr(socket.gethostname())[0]\n return host",
"def server_site_name(self):\n return dsdb._samdb_server_site_name(self)",
"async def get_hostname(self):\n ngc = await self.middleware.call('network.configuration.config')\n if 'hostname_virtual' in ngc:\n failover_status = await self.middleware.call('failover.status')\n if failover_status == 'MASTER':\n return ngc['hostname_virtual']\n elif failover_status == 'BACKUP':\n return None\n else:\n return ngc['hostname_local']",
"def service_operations_host_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_operations_host_name\")",
"def primary_name_server(self) -> str:\n return pulumi.get(self, \"primary_name_server\")",
"def get_name(cls):\n return DRIVER_NAME"
] |
[
"0.70862",
"0.6715162",
"0.66858387",
"0.66587603",
"0.66587603",
"0.66160357",
"0.66100967",
"0.6593616",
"0.6554033",
"0.65457565",
"0.6545528",
"0.65286964",
"0.65076476",
"0.6498059",
"0.64489233",
"0.64489233",
"0.64475244",
"0.64432335",
"0.64315045",
"0.64294493",
"0.64205307",
"0.6418642",
"0.641461",
"0.6411465",
"0.6394492",
"0.639143",
"0.63803685",
"0.63685",
"0.63682836",
"0.63673145"
] |
0.6821417
|
1
|
Azure resource Id of the workspace the machine is attached to
|
def workspace_id(self) -> str:
return pulumi.get(self, "workspace_id")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_workspace_id() -> str:\n token = BaseHook.get_connection(AirflowConns.TERRAFORM).password\n terraform_api = TerraformApi(token)\n\n # Get organization\n organization = Variable.get(AirflowVars.TERRAFORM_ORGANIZATION)\n\n # Get workspace\n environment = Variable.get(AirflowVars.ENVIRONMENT)\n workspace = TerraformConfig.WORKSPACE_PREFIX + environment\n\n # Get workspace ID\n workspace_id = terraform_api.workspace_id(organization, workspace)\n\n return workspace_id",
"def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")",
"def get_workspace_resource_id(self) -> Union[str, None]:\n return self._get_workspace_resource_id(enable_validation=True)",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def log_analytics_workspace_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"log_analytics_workspace_resource_id\")",
"def synapse_workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def synapse_workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def virtual_machine_resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"virtual_machine_resource_id\")",
"def synapse_workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def name(self):\n return self.attributes.workspace.name",
"def workspace(self) -> str:\n return self._workspace",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resourceid(self):",
"def storage_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_resource_id\")",
"def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")",
"def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")",
"def resourceDocumentId(self, resource: Resource) -> str:",
"def get_workspace(self):\n wid = self._config[\"workspace\"]\n return sim_consts.workspace_origin[wid], sim_consts.workspace_size[wid]",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def master_id(self):\r\n return self._arm.master_id"
] |
[
"0.72242045",
"0.72234267",
"0.72091603",
"0.71540064",
"0.71540064",
"0.70464975",
"0.6891388",
"0.6891388",
"0.6812387",
"0.66599363",
"0.65873826",
"0.6529958",
"0.6398516",
"0.6271846",
"0.6264693",
"0.6120342",
"0.6120342",
"0.6120342",
"0.6056778",
"0.6056778",
"0.6056778",
"0.6052159",
"0.5860688",
"0.5832875",
"0.5832875",
"0.58016527",
"0.5796366",
"0.5778821",
"0.5778821",
"0.57524294"
] |
0.76052606
|
1
|
Name of the company of the partner
|
def partner_name(self) -> str:
return pulumi.get(self, "partner_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def company_name(self):\n if \"companyName\" in self._prop_dict:\n return self._prop_dict[\"companyName\"]\n else:\n return None",
"def _get_name(self):\n partner = self\n name = partner.name or ''\n\n if partner.company_name or partner.parent_id:\n if not name and partner.type in ['invoice', 'delivery', 'other']:\n name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]\n #if not partner.is_company:\n # name = \"%s, %s\" % (partner.commercial_company_name or partner.parent_id.name, name)\n if self._context.get('show_address_only'):\n name = partner._display_address(without_company=True)\n if self._context.get('show_address'):\n name = name + \"\\n\" + partner._display_address(without_company=True)\n name = name.replace('\\n\\n', '\\n')\n name = name.replace('\\n\\n', '\\n')\n if self._context.get('address_inline'):\n name = name.replace('\\n', ', ')\n if self._context.get('show_email') and partner.email:\n name = \"%s <%s>\" % (name, partner.email)\n if self._context.get('html_format'):\n name = name.replace('\\n', '<br/>')\n if self._context.get('show_vat') and partner.vat:\n name = \"%s ‒ %s\" % (name, partner.vat)\n return 'HOLA'",
"def company_name(self) -> Optional[str]:\n return pulumi.get(self, \"company_name\")",
"def get_company_name(self):\n\t\treturn call_sdk_function('PrlLic_GetCompanyName', self.handle)",
"def get_company(self, name):\n return self.website.company.id",
"def get_companyName(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.name for x in groupCompanies]",
"def get_company(self, name):\n return self.instance.company.id",
"def get_company(self, name):\n return self.store.company.id",
"def get_company_domain(self) -> str:\n lower_comp_name = self.company_name.lower()\n domain_prefix = re.sub(\"[^0-9a-zA-Z]+\", \"\", lower_comp_name)\n return domain_prefix + self.top_level_domain",
"def company(self):\n return self._company",
"def company(self):\n return self._company",
"def _cname(self,account_id):\n company = self.pool.get('account.account').browse(self.cr, self.uid, account_id).company_id\n self.caddress = self._cadd(company)\n return company.name",
"def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result",
"def guess_company_name(self, response):\n # TODO here guess the name of the company\n # if og:title or title or smth else\n # if domain in the title then its the name\n # if not\n # take domain\n\n parts = urllib.parse.urlparse(response.url)\n name_parts = parts.netloc.split(\".\")\n if len(name_parts) > 2:\n name = name_parts[1]\n else:\n name = name_parts[0]\n\n site_name = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if site_name:\n return site_name\n else:\n return name.title()",
"def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'",
"def __str__(self):\n return str('%s (%s)' % (self.company, self.owner))",
"def marketing_name(self):\n return \"Custom solution - 2\"",
"def Company_Name(intent, session):\n\n global session_attributes\n session_attributes = {}\n session_attributes['previous_node'] = 'Company_Name'\n\n companyName = intent['slots']['companyName']['value']\n\n card_title = intent['name']\n speech_output = \"I can help you with the \"+companyName+\" four o one K plan. So that I may authenticate you, can you provide your employee number?\"\n\n\n reprompt_text = \"I can help you with the \"+companyName+\" four o one K plan. So that I may authenticate you, can you provide your employee number?\"\n\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))",
"def design_company(self):\n return self._design_company",
"def default_company():\n return Transaction().context.get('company')",
"def __getCompanyName(parsed: BeautifulSoup) -> str:\n\n # Company name container\n name_container = parsed.find('span', class_='companyName')\n\n # Extracting raw text elements\n name_raw_text = [s for s in name_container.children if isinstance(s, str)]\n\n # Getting name (first raw text instance)\n return name_raw_text[0].strip()",
"def company_name():\r\n\r\n cursor.execute('SELECT name from companies \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def get_company(self, cmp):\n if cmp in self.cnames:\n return self.cnames[cmp]\n else:\n return None",
"def account_name(self):\n return self.civic_no_city()",
"def name(self):\n return f\"{self._client.friendly_name} {CLIENT_SUFFIX}\"",
"def get_client_name(self, obj):\n\t\treturn obj.client.name",
"def get_company_id_label(self):\n return self.company_id_label",
"def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")",
"def get_name():\n return \"Boss\"",
"def name(self):\n return f\"{self.client_name} {self._name}\""
] |
[
"0.76758903",
"0.75698215",
"0.7544758",
"0.7507912",
"0.7061919",
"0.6872663",
"0.6865251",
"0.676369",
"0.6688448",
"0.6614327",
"0.6614327",
"0.6613212",
"0.65558517",
"0.6515439",
"0.64176357",
"0.6416026",
"0.64144844",
"0.6400489",
"0.63944924",
"0.63944405",
"0.6386602",
"0.63510936",
"0.63451767",
"0.6300628",
"0.62378883",
"0.61926055",
"0.61495125",
"0.61387074",
"0.6132199",
"0.61195594"
] |
0.78047764
|
1
|
Azure resource ID of the policy definition that turns this assessment calculation on
|
def policy_definition_id(self) -> str:
return pulumi.get(self, "policy_definition_id")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def policy_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"policy_id\")",
"def policy_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"policy_id\")",
"def policyid(self):\n return self._policyid",
"def policy_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"policy_id\")",
"def security_policy_id(self) -> str:\n return pulumi.get(self, \"security_policy_id\")",
"def security_policy_id(self) -> str:\n return pulumi.get(self, \"security_policy_id\")",
"def policy_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"policy_id\")",
"def policy_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy_id\")",
"def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[str]:\n return pulumi.get(self, \"policy\")",
"def get_policy(self):\n\n return",
"def _get_schedule_policy_id(self):\n schedule_policies = SchedulePolicies(self._commcell_object)\n return schedule_policies.get(self.schedule_policy_name).schedule_policy_id",
"def policy_name(self):\n return self._policy_name",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def resourceid(self):",
"def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")",
"def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")"
] |
[
"0.68844306",
"0.687918",
"0.6872656",
"0.6835604",
"0.6645063",
"0.6645063",
"0.66422343",
"0.6623568",
"0.648301",
"0.648301",
"0.648301",
"0.6418869",
"0.6418869",
"0.63896173",
"0.62850535",
"0.62806606",
"0.62457865",
"0.62367773",
"0.62367773",
"0.62367773",
"0.6202891",
"0.6133614",
"0.6133614",
"0.6133614",
"0.61134374",
"0.61134374",
"0.61134374",
"0.61134374",
"0.61134374",
"0.61134374"
] |
0.753271
|
0
|
The severity level of the assessment
|
def severity(self) -> str:
return pulumi.get(self, "severity")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def severity(self):\n return self._severity",
"def severity(self):\n return self._severity",
"def severity(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"severity\")",
"def severity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"severity\")",
"def severity(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"severity\")",
"def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")",
"def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")",
"def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")",
"def severity(self) -> Optional[pulumi.Input['TestIssueSeverity']]:\n return pulumi.get(self, \"severity\")",
"def get_severity(chid):\n return get_timevars(chid).get('severity', 0)",
"def getSeverity(self, *args):\n return _libsbml.SBMLExtension_getSeverity(self, *args)",
"def get_status_severity(self):\n return self.data[\"allMessagesForFrontend\"][\"maxSeverity\"]",
"def severity(self) -> pulumi.Input['EndpointSeverity']:\n return pulumi.get(self, \"severity\")",
"def getSeverity(self):\n return _libsbml.XMLError_getSeverity(self)",
"def normalise_severity(self, severity):\n return \"Info\" if severity == \"Unknown\" else severity",
"async def test_minimum_severity(self):\n self.set_source_parameter(\"severities\", [\"medium\", \"high\"])\n response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)\n self.assert_measurement(response, value=\"0\", entities=[])",
"def severity(self, severity):\n self._severity = severity",
"def error_severity(self) -> Union[MqexsErrorSeverity, str]:\n return self.__error_severity",
"def getSeverityAsString(self):\n return _libsbml.XMLError_getSeverityAsString(self)",
"def minimum_severity_level(self) -> Optional[str]:\n return pulumi.get(self, \"minimum_severity_level\")",
"def getLevel( self ):\n level = self.getEffectiveLevel()\n if level == logging.CRITICAL:\n return 'critical'\n elif level == logging.ERROR:\n return 'error'\n elif level == logging.WARNING:\n return 'warning'\n elif level == logging.INFO:\n return 'info'\n elif level == logging.DEBUG:\n return 'debug'\n elif level == logging.NOTSET:\n return 'notset'\n else:\n return 'unknown ({})'.format( level )",
"def severity(self, severity):\n\n self._severity = severity",
"def level(self):\n return self.__level",
"def level(self):\n return self.__level",
"def _impact_to_severity(impact):\n if impact >= 0.9:\n return Finding.Severity.CRITICAL\n elif impact >= 0.7:\n return Finding.Severity.HIGH\n elif impact >= 0.4:\n return Finding.Severity.MEDIUM\n elif impact >= 0.01:\n return Finding.Severity.LOW\n else:\n return Finding.Severity.SEVERITY_UNSPECIFIED",
"def level(self):\n return self._level",
"def level(self):\n return self._level",
"def level(self):\n return self._level",
"def level(self):\n return self._level",
"def getSeverityOverride(self):\n return _libsbml.XMLErrorLog_getSeverityOverride(self)"
] |
[
"0.82292265",
"0.82292265",
"0.8184784",
"0.801551",
"0.7976034",
"0.77871335",
"0.77871335",
"0.77871335",
"0.76659864",
"0.71346414",
"0.7123284",
"0.7088337",
"0.7083264",
"0.6930347",
"0.6812457",
"0.678891",
"0.6676945",
"0.6643234",
"0.6637242",
"0.6548396",
"0.65459704",
"0.64409345",
"0.64374363",
"0.64374363",
"0.6427625",
"0.6417784",
"0.6417784",
"0.6417784",
"0.6417784",
"0.6397941"
] |
0.8408239
|
1
|
True if this assessment is in preview release status
|
def preview(self) -> Optional[bool]:
return pulumi.get(self, "preview")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_preview(self) -> Optional[bool]:\n return pulumi.get(self, \"is_preview\")",
"def is_previewable(self, **parameters):\n if not hasattr(self, '_is_previewable'):\n self._is_previewable = True\n return self._is_previewable",
"def is_on(self) -> bool:\n current = self.coordinator.data.info.version\n beta = self.coordinator.data.info.version_latest_beta\n stable = self.coordinator.data.info.version_latest_stable\n\n return current is not None and (\n (stable is not None and stable > current)\n or (\n beta is not None\n and (current.alpha or current.beta or current.release_candidate)\n and beta > current\n )\n )",
"def is_stable(self) -> bool:\n return not self.is_prerelease",
"def test_is_preview(self):\r\n self.assertTrue(self.unit.q(css=\".discussion-preview\").present)\r\n self.assertFalse(self.unit.q(css=\".discussion-show\").present)",
"def is_viewed(self):\n return self.has_label(VIEWED_LABEL)",
"def is_reviewed(self, obj) -> bool: # pylint:disable=R0201\n return obj.profile.is_reviewed",
"def in_studio_preview(self):\n # When we're running in Studio Preview mode, the XBlock won't provide us with a user ID.\n # (Note that `self.xmodule_runtime` will still provide an anonymous\n # student ID, so we can't rely on that)\n return self.scope_ids.user_id is None",
"def is_up(self):\n data = self.vxprint()\n return self.name in data and data[self.name].STATE == \"ACTIVE\"",
"def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))",
"def is_devel(self):\r\n\r\n return self.is_debug()",
"def is_developer(self):\n return int(self.developer_status) == 2",
"def is_page_break_preview(self):\n return self.container['is_page_break_preview']",
"def is_vintage(self):\n return self.get_age() >= VINTAGE_AGE",
"def is_vintage(self):\n return self.get_age() >= VINTAGE_AGE",
"def is_visible(self):\n return self.visible_date < timezone.now()",
"def is_versioning_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_versioning_enabled\")",
"def get_visible(\n self,\n *,\n context: Context,\n ) -> bool:\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n\n if match.url_name in diffviewer_url_names:\n return match.url_name != 'view-interdiff'\n\n return super().get_visible(context=context)",
"def in_travis_pr():\n # NOTE: We're a little extra cautious and make sure that the\n # PR environment variable is an integer.\n try:\n int(os.getenv(TRAVIS_PR_ENV, ''))\n return True\n except ValueError:\n return False",
"def publication_status(self):\n content = self.context.get_silva_object()\n status = None\n unapproved_version = content.get_unapproved_version()\n approved_version = content.get_approved_version()\n public_version = content.get_public_version()\n previous_versions = content.get_previous_versions()\n if unapproved_version and unapproved_version == self.context.id:\n status = \"unapproved\"\n elif approved_version and approved_version == self.context.id:\n status = \"approved\"\n elif public_version and public_version == self.context.id:\n status = \"public\"\n else:\n if previous_versions and previous_versions[-1] == self.context.id:\n status = \"last_closed\"\n elif self.context.id in previous_versions:\n status = \"closed\"\n else:\n # this is a completely new version not even registered\n # with the machinery yet\n status = 'unapproved'\n return status",
"def is_featured(self):\n return self._is_featured",
"def is_release():\n return VERSION[-1]",
"def is_vintage(self):\n return self.get_age() >= Guitar.VINTAGE_THRESHOLD",
"def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False",
"def is_approved(self) -> bool:\n return self.state == Order.OrderState.APPROVED.choice_value",
"def is_previous_release_present(self, application_id):\n releases = self.get_releases_by_application(application_id)\n\n if releases.items and releases.total_count > 0:\n return True\n else:\n return False",
"def is_versioning_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_versioning_enabled\")",
"def is_published(self) -> bool:\n return self.published and self.published <= timezone.now()",
"def _has_valid_visa(self):\n if self.visa is not None:\n return self.visa_is_valid()\n else:\n return False",
"def is_on(self) -> bool:\n return self.entity_description.state_fn(self._valve)"
] |
[
"0.7212936",
"0.6533736",
"0.6367383",
"0.63376224",
"0.6307647",
"0.6133312",
"0.5996001",
"0.5985087",
"0.59606266",
"0.59270465",
"0.5891324",
"0.58804035",
"0.5822516",
"0.582101",
"0.582101",
"0.58103555",
"0.57750773",
"0.57650584",
"0.5734912",
"0.57282776",
"0.57165176",
"0.5690312",
"0.5688889",
"0.5685486",
"0.5670782",
"0.56516427",
"0.5622834",
"0.5613782",
"0.5600572",
"0.5581321"
] |
0.71290624
|
1
|
The user impact of the assessment
|
def user_impact(self) -> Optional[str]:
return pulumi.get(self, "user_impact")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_impact(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_impact\")",
"def user_impact(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_impact\")",
"def user_impact(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_impact\")",
"def get_expertise(self, user):\r\n if user.is_anonymous():\r\n return OI_SCORE_ANONYMOUS\r\n \r\n # Non expert and anonymous have the same score\r\n expertise = OI_SCORE_ANONYMOUS\r\n if self.is_expert(user):\r\n expertise = self.expert_set.filter(user=user)[0].score\r\n # Gets expertise from the whole branch, ie including parent\r\n if self.parent:\r\n expertise += self.parent.get_expertise(user)*OI_SCORE_FRACTION_FROM_PARENT\r\n return expertise",
"def get_statistic_for_user(self, attr):\n all_payments = Payment.objects.payments(user=self).exclude(project__isnull=True)\n user_impact = 0\n for payment in all_payments:\n project = payment.project\n if project:\n user_financial_contribution = payment.amount\n project_funding_total = (int)(project.funding_goal)\n project_impact = getattr(project.statistics, attr)\n user_impact_for_project = project_impact * user_financial_contribution * 1.0 / project_funding_total\n user_impact += user_impact_for_project\n return user_impact",
"def test_impact_for_exp_with_no_answers(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n exploration = self._create_exploration(self.EXP_ID_3, user_a_id)\n self._rate_exploration(exploration.id, 5, 3)\n self._run_computation()\n user_stats_model = user_models.UserStatsModel.get(user_a_id)\n self.assertEqual(user_stats_model.impact_score, 0)",
"def measure(self, recommender):\n diff = recommender.predicted_scores.value - recommender.users.actual_user_scores.value\n self.observe((diff ** 2).mean(), copy=False)",
"def measure(self, recommender):\n diff = recommender.predicted_scores.value - recommender.users.actual_user_scores.value\n self.observe((diff ** 2).mean() ** 0.5, copy=False)",
"def sensitivity(self):\n return self.recall",
"def offense(self):\n #return self.stats.strength + self.stats.level\n return self.stats.offense",
"def averaged_risk(self):\n return self._averaged_risk",
"def averaged_risk(self):\n return self._averaged_risk",
"def user_interaction_score(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n return pui",
"def upgrad_one_contribute_score(user_total_click_num):\n return 1/math.log(1 + user_total_click_num)",
"def get_final_score(self, user):\n\n iq_subject_score = self.get_overall_score(user=user)\n\n try:\n speech_score = UserQuizMark.objects.filter(user=user, quiz=self.get_object('speech_training')).latest(\n 'timestamp').marks\n drawing_score = UserQuizMark.objects.filter(user=user, quiz=self.get_object('drawing')).latet(\n 'timestamp').marks\n except UserQuizMark.DoesNotExist:\n raise Http404\n\n avg_speech_drawing_score = speech_score + drawing_score\n\n return (iq_subject_score + avg_speech_drawing_score) / 2",
"def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total",
"def compute_uct(self):\n if self.visits != 0:\n return - self.reward / self.visits + self.C * math.sqrt(math.log(self.parent.visits) / self.visits)\n else:\n return float('inf')",
"def elution_score(self):\n return self.score",
"def getDefense(self):\n\t\treturn(self.maneuverability * self.protection)",
"def defense(self):\n #return self.stats.dexterity + (self.stats.reiatsu * self.stats.density)\n return self.stats.defense",
"def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)",
"def em_var(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical variance.')\n return (self.__sum_of_square_reward -\n self.__total_rewards**2 / self.__total_pulls) / self.__total_pulls",
"def get_experience(self):\n return sum([i.get_experience for i in self.__units])",
"def test_impact_for_exp_with_no_ratings(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n self._create_exploration(self.EXP_ID_1, user_a_id)\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertEqual(user_stats_model, None)",
"def GetCostIncurred(self):\r\n return self.costsIncurred",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100",
"def life_insurance_to_recive_total(self):\n pass",
"def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum",
"def calc_score(self, user_id, item_id): \n p = np.dot(self.U[user_id], self.V[item_id])\n if self.trunc_score_rule==None:pass\n else: p=self.trunc_score_rule(p)\n \n return p",
"def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res"
] |
[
"0.7757892",
"0.7385608",
"0.7385608",
"0.705583",
"0.6925266",
"0.64094913",
"0.6353756",
"0.63442504",
"0.623233",
"0.61492014",
"0.6068278",
"0.6068278",
"0.60317314",
"0.6017876",
"0.59702986",
"0.5966446",
"0.5962166",
"0.5954543",
"0.5910639",
"0.59077024",
"0.58973444",
"0.5838562",
"0.5788258",
"0.57443476",
"0.5688676",
"0.567309",
"0.56652904",
"0.56537163",
"0.56459314",
"0.561943"
] |
0.7439828
|
1
|
Name of the company of the partner
|
def partner_name(self) -> str:
return pulumi.get(self, "partner_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def company_name(self):\n if \"companyName\" in self._prop_dict:\n return self._prop_dict[\"companyName\"]\n else:\n return None",
"def _get_name(self):\n partner = self\n name = partner.name or ''\n\n if partner.company_name or partner.parent_id:\n if not name and partner.type in ['invoice', 'delivery', 'other']:\n name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]\n #if not partner.is_company:\n # name = \"%s, %s\" % (partner.commercial_company_name or partner.parent_id.name, name)\n if self._context.get('show_address_only'):\n name = partner._display_address(without_company=True)\n if self._context.get('show_address'):\n name = name + \"\\n\" + partner._display_address(without_company=True)\n name = name.replace('\\n\\n', '\\n')\n name = name.replace('\\n\\n', '\\n')\n if self._context.get('address_inline'):\n name = name.replace('\\n', ', ')\n if self._context.get('show_email') and partner.email:\n name = \"%s <%s>\" % (name, partner.email)\n if self._context.get('html_format'):\n name = name.replace('\\n', '<br/>')\n if self._context.get('show_vat') and partner.vat:\n name = \"%s ‒ %s\" % (name, partner.vat)\n return 'HOLA'",
"def company_name(self) -> Optional[str]:\n return pulumi.get(self, \"company_name\")",
"def get_company_name(self):\n\t\treturn call_sdk_function('PrlLic_GetCompanyName', self.handle)",
"def get_company(self, name):\n return self.website.company.id",
"def get_companyName(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.name for x in groupCompanies]",
"def get_company(self, name):\n return self.instance.company.id",
"def get_company(self, name):\n return self.store.company.id",
"def get_company_domain(self) -> str:\n lower_comp_name = self.company_name.lower()\n domain_prefix = re.sub(\"[^0-9a-zA-Z]+\", \"\", lower_comp_name)\n return domain_prefix + self.top_level_domain",
"def company(self):\n return self._company",
"def company(self):\n return self._company",
"def _cname(self,account_id):\n company = self.pool.get('account.account').browse(self.cr, self.uid, account_id).company_id\n self.caddress = self._cadd(company)\n return company.name",
"def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result",
"def guess_company_name(self, response):\n # TODO here guess the name of the company\n # if og:title or title or smth else\n # if domain in the title then its the name\n # if not\n # take domain\n\n parts = urllib.parse.urlparse(response.url)\n name_parts = parts.netloc.split(\".\")\n if len(name_parts) > 2:\n name = name_parts[1]\n else:\n name = name_parts[0]\n\n site_name = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if site_name:\n return site_name\n else:\n return name.title()",
"def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'",
"def __str__(self):\n return str('%s (%s)' % (self.company, self.owner))",
"def marketing_name(self):\n return \"Custom solution - 2\"",
"def Company_Name(intent, session):\n\n global session_attributes\n session_attributes = {}\n session_attributes['previous_node'] = 'Company_Name'\n\n companyName = intent['slots']['companyName']['value']\n\n card_title = intent['name']\n speech_output = \"I can help you with the \"+companyName+\" four o one K plan. So that I may authenticate you, can you provide your employee number?\"\n\n\n reprompt_text = \"I can help you with the \"+companyName+\" four o one K plan. So that I may authenticate you, can you provide your employee number?\"\n\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))",
"def design_company(self):\n return self._design_company",
"def default_company():\n return Transaction().context.get('company')",
"def __getCompanyName(parsed: BeautifulSoup) -> str:\n\n # Company name container\n name_container = parsed.find('span', class_='companyName')\n\n # Extracting raw text elements\n name_raw_text = [s for s in name_container.children if isinstance(s, str)]\n\n # Getting name (first raw text instance)\n return name_raw_text[0].strip()",
"def company_name():\r\n\r\n cursor.execute('SELECT name from companies \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def get_company(self, cmp):\n if cmp in self.cnames:\n return self.cnames[cmp]\n else:\n return None",
"def account_name(self):\n return self.civic_no_city()",
"def name(self):\n return f\"{self._client.friendly_name} {CLIENT_SUFFIX}\"",
"def get_client_name(self, obj):\n\t\treturn obj.client.name",
"def get_company_id_label(self):\n return self.company_id_label",
"def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")",
"def get_name():\n return \"Boss\"",
"def name(self):\n return f\"{self.client_name} {self._name}\""
] |
[
"0.76758903",
"0.75698215",
"0.7544758",
"0.7507912",
"0.7061919",
"0.6872663",
"0.6865251",
"0.676369",
"0.6688448",
"0.6614327",
"0.6614327",
"0.6613212",
"0.65558517",
"0.6515439",
"0.64176357",
"0.6416026",
"0.64144844",
"0.6400489",
"0.63944924",
"0.63944405",
"0.6386602",
"0.63510936",
"0.63451767",
"0.6300628",
"0.62378883",
"0.61926055",
"0.61495125",
"0.61387074",
"0.6132199",
"0.61195594"
] |
0.78047764
|
0
|
transforms a list of points from ego_vehicle/lidar/lidar1/ frame to map frame using tf2 and the current transformation
|
def transform_lidar_into_map_coords(self, points):
poses = []
for p in points:
pose = PoseStamped()
pose.header.frame_id = "ego_vehicle/lidar/lidar1"
pose.header.stamp = rospy.Time.now()
pose.pose.position.x = p[0]
pose.pose.position.y = p[1]
pose.pose.position.z = p[2]
poses.append(pose)
try:
trans = self.tf_buffer.lookup_transform('map', 'ego_vehicle/lidar/lidar1', rospy.Time())
return [tf2_geometry_msgs.do_transform_pose(p, trans) for p in poses]
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.loginfo("Error in transformation")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def transform_obj_list(object_list,M,M2 = None):\n \n for i, obj in enumerate(object_list):\n points = obj.all\n num_points = len(points)\n \n \n # add third row\n ones = np.ones([num_points,1])\n points3d = np.concatenate((points,ones),1)\n points3d = points3d[:,[0,1,4]]\n # transform points\n tf_points3d = np.transpose(np.matmul(M,np.transpose(points3d)))\n \n # condense to two-dimensional coordinates\n tf_points = np.zeros([num_points,2])\n tf_points[:,0] = tf_points3d[:,0]/tf_points3d[:,2]\n tf_points[:,1] = tf_points3d[:,1]/tf_points3d[:,2]\n \n object_list[i].all_world = tf_points\n \n if M2 is not None:\n tf_points3d = np.transpose(np.matmul(M2,np.transpose(points3d)))\n \n # condense to two-dimensional coordinates\n tf_points = np.zeros([num_points,2])\n tf_points[:,0] = tf_points3d[:,0]/tf_points3d[:,2]\n tf_points[:,1] = tf_points3d[:,1]/tf_points3d[:,2]\n \n object_list[i].all_gps = tf_points\n \n return object_list",
"def TransformFromPointsTF(left_points, right_points):\n\n lefts = UncenterPoints(left_points) # 3xN\n rights = UncenterPoints(right_points)\n # lefts = left_points\n # rights = right_points\n\n ## Compute scale\n left_norm_square = tf.reduce_sum(tf.square(tf.norm(lefts, axis=0)))\n right_norm_square = tf.reduce_sum(tf.square(tf.norm(rights, axis=0)))\n scale = tf.sqrt(right_norm_square / (left_norm_square+1e-6))\n\n ## Compute rotation\n #rights = tf.Print(rights, [rights], message='rights', summarize=2 * 68)\n M = tf.matmul(lefts, rights, transpose_b=True) # 3x3\n #M = tf.Print(M, [M.shape, M], message=\"M\", summarize=64)\n\n N00 = M[0, 0] + M[1, 1] + M[2, 2]\n N11 = M[0, 0] - M[1, 1] - M[2, 2]\n N22 = -M[0, 0] + M[1, 1] - M[2, 2]\n N33 = -M[0, 0] - M[1, 1] + M[2, 2]\n\n N01 = M[1, 2] - M[2, 1]\n N10 = M[1, 2] - M[2, 1]\n N02 = M[2, 0] - M[0, 2]\n N20 = M[2, 0] - M[0, 2]\n\n N03 = M[0, 1] - M[1, 0]\n N30 = M[0, 1] - M[1, 0]\n N12 = M[0, 1] + M[1, 0]\n N21 = M[0, 1] + M[1, 0]\n\n N13 = M[0, 2] + M[2, 0]\n N31 = M[0, 2] + M[2, 0]\n N23 = M[1, 2] + M[2, 1]\n N32 = M[1, 2] + M[2, 1]\n N = tf.stack([N00,N01,N02,N03,N10,N11,N12,N13,N20,N21,N22,N23,N30,N31,N32,N33], axis=0)\n N = tf.reshape(N, [4,4])\n\n #N = tf.Print(N, [N.shape, N], message=\"N\", summarize=64)\n\n eigen_vals, eigen_vecs = tf.self_adjoint_eig(N)\n quaternion = tf.squeeze((tf.slice(eigen_vecs, [0, 3], [4, 1]))) # 4\n #quaternion = tf_render.Print(quaternion, [quaternion], message='quaternion', summarize=4)\n rotation = Quaternion2Mat(quaternion) # 3x3\n\n ## Compute translation\n left_center = CenterOfPoints(left_points)\n right_center = CenterOfPoints(right_points)\n rot_left_center = tf.squeeze(tf.matmul(rotation, tf.expand_dims(left_center, axis=-1))) # 3\n translation = right_center - scale * rot_left_center\n\n return scale, rotation, translation",
"def publish_tf(self,pose, stamp=None):\n if stamp == None:\n stamp = rospy.Time.now()\n\n # this may cause issues with the TF tree. If so, see the below code.\n # self.pub_tf.sendTransform((pose[0],pose[1],0),tf.transformations.quaternion_from_euler(0, 0, pose[2]), \n # stamp , \"/laser\", \"/map\")\n\n # also publish odometry to facilitate getting the localization pose\n if self.PUBLISH_ODOM:\n odom = Odometry()\n odom.header = Utils.make_header(\"/map\", stamp)\n odom.pose.pose.position.x = pose[0]\n odom.pose.pose.position.y = pose[1]\n odom.pose.pose.orientation = Utils.angle_to_quaternion(pose[2])\n self.odom_pub.publish(odom)\n \n # return # below this line is disabled\n\n \"\"\"\n Our particle filter provides estimates for the \"laser\" frame\n since that is where our laser range estimates are measure from. Thus,\n We want to publish a \"map\" -> \"laser\" transform.\n\n However, the car's position is measured with respect to the \"base_link\"\n frame (it is the root of the TF tree). Thus, we should actually define\n a \"map\" -> \"base_link\" transform as to not break the TF tree.\n \"\"\"\n\n # Get map -> laser transform.\n map_laser_pos = np.array( (pose[0],pose[1],0) )\n map_laser_rotation = np.array( tf.transformations.quaternion_from_euler(0, 0, pose[2]) )\n # Apply laser -> base_link transform to map -> laser transform\n # This gives a map -> base_link transform\n laser_base_link_offset = (0.265, 0, 0)\n map_laser_pos -= np.dot(tf.transformations.quaternion_matrix(tf.transformations.unit_vector(map_laser_rotation))[:3,:3], laser_base_link_offset).T\n \n map_laser_pos[0] -= self.position[0]\n map_laser_pos[1] -= self.position[1]\n\n orientation_list = [self.odom_orientation.x,self.odom_orientation.y,self.odom_orientation.z,self.odom_orientation.w]\n (roll,pitch,odom_base_link_yaw) = tf.transformations.euler_from_quaternion(orientation_list)\n map_odom_yaw = pose[2] - odom_base_link_yaw\n\n map_odom_rotation = np.array(tf.transformations.quaternion_from_euler(0,0,map_odom_yaw))\n\n # Publish transform\n self.pub_tf.sendTransform(map_laser_pos, map_odom_rotation, stamp , \"/odom\", \"/map\")",
"def CoordTrans(frame1, frame2, original_vec, oe=np.zeros(6), \n theta_gst=float('NaN'), lla_gs=np.zeros(3), mu=c.mu_earth, \n r_body=c.r_earth):\n\n # Orbital Elements\n a, e, inc, raan, w, nu = oe\n\n # Warnings\n oe_frames = ['ric', 'ntw', 'pqw']\n if any(frame in oe_frames for frame in (frame1, frame2)):\n if oe.dot(oe) == 0:\n print('ERROR: You forgot to define the orbital elements!')\n\n topocentric_frames = ['sez']\n if any(frame in topocentric_frames for frame in (frame1, frame2)):\n if lla_gs.dot(lla_gs) == 0:\n print('ERROR: You forgot lla for the ground stations!')\n\n # Coordinate System Logic\n if frame1.lower() == 'bci':\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(original_vec, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(original_vec, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(original_vec, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'bcbf':\n if frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(original_vec, r_body=r_body)\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(original_vec, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ric':\n rotated_vec1 = ric2bci(original_vec, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if 
np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ntw':\n rotated_vec1 = ntw2bci(original_vec, e, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'pqw':\n rotated_vec1 = pqw2bci(original_vec, raan, inc, w)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'lla':\n rotated_vec1 = lla2bcbf(original_vec, r_body=r_body)\n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'sez':\n rotated_vec1 = sez2bcbf(original_vec, lla_gs, r_body=r_body)\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n if frame2.lower() == 'bcbf':\n rotated_vec = 
rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec2\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n else:\n print('ERROR: Frame1 is not included in this function!')\n\n return rotated_vec",
"def traj_pipeline(self, prev_trmat=None):\n # image_seq = [image(frame_idx-2), image(frame_idx-1), image(frame_idx)]\n # egomotion update\n egomo = self.est.get_egomotion(self.image_seq)\n\n # egomotion transformation\n assert self.frame_idx >= 2, 'invalid self.frame_idx'\n if prev_trmat is None:\n assert self.frame_idx == 2, 'invalid self.frame_idx'\n # initialization of ego transformation matrix\n init_trmat = egomo_vec2mat(self.init_egomo_vec)\n prev_trmat = np.matmul(init_trmat, egomo_vec2mat(egomo[0])) # frame 0 to 1\n egomo_trmat = np.matmul(prev_trmat, egomo_vec2mat(egomo[1]))\n\n # tracker list update\n for t in self.t_list:\n # skip lost trackers\n if t.get_status()==False:\n continue\n # bounding box & depth\n bbox, depth = t.get_bbox(), t.get_depth()\n # project to 3d camera coordinate\n p3d_cam = cam_proj(self.k_mat, bbox, depth)\n # transform to world coordinate\n p3d = coord_transform(egomo_trmat, p3d_cam)\n t.add_attr_to_est_dict('traj', p3d)\n \n return egomo_trmat",
"def convert_vehicle(nyc3dcars_session, labeler_vehicle):\n\n photo, lat, lon, alt = nyc3dcars_session.query(\n Photo,\n func.ST_Y(Photo.lla),\n func.ST_X(Photo.lla),\n func.ST_Z(Photo.lla)) \\\n .filter_by(id=labeler_vehicle.revision.annotation.pid) \\\n .options(joinedload('dataset')) \\\n .one()\n left = labeler_vehicle.x1\n right = labeler_vehicle.x2\n top = labeler_vehicle.y1\n bottom = labeler_vehicle.y2\n\n camera_lla = numpy.array([[lat], [lon], [alt]])\n camera_enu = pygeo.LLAToENU(camera_lla.T).reshape((3, 3))\n dataset_correction = numpy.array([\n [photo.dataset.t1],\n [photo.dataset.t2],\n [photo.dataset.t3],\n ])\n camera_rotation = numpy.array([\n [photo.r11, photo.r12, photo.r13],\n [photo.r21, photo.r22, photo.r23],\n [photo.r31, photo.r32, photo.r33],\n ])\n\n camera_up = camera_enu.T.dot(\n camera_rotation.T.dot(numpy.array([[0], [1], [0]])))\n offset = numpy.array([[-labeler_vehicle.x], [-labeler_vehicle.z], [0]])\n camera_offset = camera_up * \\\n labeler_vehicle.revision.cameraheight / camera_up[2]\n total_offset = offset - camera_offset\n ecef_camera = pygeo.LLAToECEF(camera_lla.T).T\n ecef_camera += dataset_correction\n ecef_total_offset = camera_enu.dot(total_offset)\n vehicle_ecef = ecef_camera + ecef_total_offset\n\n vehicle_type = labeler_vehicle.type\n model = nyc3dcars_session.query(VehicleType) \\\n .filter_by(label=vehicle_type) \\\n .one()\n\n vehicle_lla = pygeo.ECEFToLLA(vehicle_ecef.T).T\n\n theta = math.radians(-labeler_vehicle.theta)\n mlength = model.length\n mwidth = model.width\n car_a = -math.sin(theta) * 0.3048 * \\\n mlength / 2 + math.cos(theta) * 0.3048 * mwidth / 2\n car_b = math.cos(theta) * 0.3048 * mlength / \\\n 2 + math.sin(theta) * 0.3048 * mwidth / 2\n car_c = math.sin(theta) * 0.3048 * mlength / \\\n 2 + math.cos(theta) * 0.3048 * mwidth / 2\n car_d = -math.cos(theta) * 0.3048 * \\\n mlength / 2 + math.sin(theta) * 0.3048 * mwidth / 2\n car_corner_offset1 = camera_enu.dot(numpy.array([[car_a], [car_b], [0]]))\n car_corner_offset2 = camera_enu.dot(numpy.array([[car_c], [car_d], [0]]))\n\n car_corner1 = pygeo.ECEFToLLA(\n (vehicle_ecef + car_corner_offset1).T).T.flatten()\n car_corner2 = pygeo.ECEFToLLA(\n (vehicle_ecef - car_corner_offset1).T).T.flatten()\n car_corner3 = pygeo.ECEFToLLA(\n (vehicle_ecef + car_corner_offset2).T).T.flatten()\n car_corner4 = pygeo.ECEFToLLA(\n (vehicle_ecef - car_corner_offset2).T).T.flatten()\n\n pg_corner1 = func.ST_SetSRID(\n func.ST_MakePoint(car_corner1[1], car_corner1[0], car_corner1[2]), 4326)\n pg_corner2 = func.ST_SetSRID(\n func.ST_MakePoint(car_corner2[1], car_corner2[0], car_corner2[2]), 4326)\n pg_corner3 = func.ST_SetSRID(\n func.ST_MakePoint(car_corner3[1], car_corner3[0], car_corner3[2]), 4326)\n pg_corner4 = func.ST_SetSRID(\n func.ST_MakePoint(car_corner4[1], car_corner4[0], car_corner4[2]), 4326)\n\n collection = func.ST_Collect(pg_corner1, pg_corner2)\n collection = func.ST_Collect(collection, pg_corner3)\n collection = func.ST_Collect(collection, pg_corner4)\n\n car_polygon = func.ST_ConvexHull(collection)\n\n camera_ecef = pygeo.LLAToECEF(camera_lla.T).T\n vehicle_ecef = pygeo.LLAToECEF(vehicle_lla.T).T\n\n diff = camera_ecef - vehicle_ecef\n\n normalized = diff / numpy.linalg.norm(diff)\n\n vehicle_enu = pygeo.LLAToENU(vehicle_lla.T).reshape((3, 3))\n\n rotated = vehicle_enu.T.dot(normalized)\n\n theta = func.acos(rotated[2][0])\n\n view_phi = func.atan2(rotated[1][0], rotated[0][0])\n\n vehicle_phi = math.radians(-labeler_vehicle.theta)\n\n phi = vehicle_phi - view_phi\n\n out = 
nyc3dcars_session.query(\n theta.label('theta'),\n phi.label('phi')) \\\n .one()\n out.phi = ((out.phi + math.pi) % (2 * math.pi)) - math.pi\n out.theta = ((out.theta + math.pi) % (2 * math.pi)) - math.pi\n view_phi = out.phi\n view_theta = out.theta\n\n left = labeler_vehicle.x1\n right = labeler_vehicle.x2\n top = labeler_vehicle.y1\n bottom = labeler_vehicle.y2\n\n for bbox_session in labeler_vehicle.bbox_sessions:\n if not bbox_session.user.trust:\n continue\n\n print((\n bbox_session.user.username,\n labeler_vehicle.revision.annotation.pid\n ))\n left = bbox_session.x1\n right = bbox_session.x2\n top = bbox_session.y1\n bottom = bbox_session.y2\n break\n\n occlusions = [\n occlusion.category for occlusion in labeler_vehicle.occlusionrankings\n if occlusion.occlusion_session.user.trust and occlusion.category != 5\n ]\n\n if len(occlusions) == 0:\n return\n\n pg_lla = func.ST_SetSRID(\n func.ST_MakePoint(vehicle_lla[1][0], vehicle_lla[0][0], vehicle_lla[2][0]), 4326)\n\n nyc3dcars_vehicle = Vehicle(\n id=labeler_vehicle.id,\n pid=photo.id,\n x=labeler_vehicle.x,\n z=labeler_vehicle.z,\n theta=labeler_vehicle.theta,\n x1=left,\n x2=right,\n y1=top,\n y2=bottom,\n type_id=model.id,\n occlusion=min(occlusions),\n geom=car_polygon,\n lla=pg_lla,\n view_theta=view_theta,\n view_phi=view_phi,\n cropped=labeler_vehicle.cropped,\n )\n nyc3dcars_session.add(nyc3dcars_vehicle)",
"def _tag_pose_callback(self):\n for msg in self.pose_msgs:\n\n detections = msg.detections\n if (len(msg.detections)==0):\n continue\n\n exponential_coordinates = []\n translations = []\n for detection in detections:\n self._T_tag2cam = get_T(detection.pose.pose.pose)\n self._marker_num = detection.id\n current_header = detection.pose.header\n inter_pose = self._world_map[self._marker_num, :]\n inter_pose = np.squeeze(inter_pose)\n\n self._T_tag2world = get_tag2world(inter_pose)\n self._T = np.dot(self._T_tag2world, np.linalg.inv(self._T_tag2cam))\n\n T = np.dot(tf.transformations.inverse_matrix(self.Previous_T), self._T)\n angle, direc, point = tf.transformations.rotation_from_matrix(T)\n translation = tf.transformations.translation_from_matrix(T)\n\n exponential_coordinate = direc*angle\n o = tf.transformations.translation_from_matrix(self._T)\n\n if o[2] < 0.697 and o[0] < -0.9 and o[0] > -4 and o[1] < -0.8 and o[1] > -4:\n if self.Previous_time != None:\n time_interval = detection.pose.header.stamp.to_sec() - self.Previous_time\n angular_velocity = angle / time_interval\n translational_velocity = np.linalg.norm(translation) / time_interval\n\n if (np.abs(angular_velocity) < 0.9) and (translational_velocity < 3):\n exponential_coordinates.append(exponential_coordinate)\n translations.append(translation)\n else:\n exponential_coordinates.append(exponential_coordinate)\n translations.append(translation)\n\n if len(exponential_coordinates):\n exponential_coordinates = np.array(exponential_coordinates)\n exponential_coordinates = np.mean(exponential_coordinates, axis=0)\n\n translations = np.array(translations)\n translations = np.mean(translations, axis=0)\n\n angle = np.linalg.norm(exponential_coordinates)\n direc = exponential_coordinate / angle\n\n T = tf.transformations.rotation_matrix(angle, direc)\n T[:3, 3] = translations\n self._T = np.dot(self.Previous_T, T)\n\n q = tf.transformations.quaternion_from_matrix(self._T)\n o = tf.transformations.translation_from_matrix(self._T)\n if q[0] < 0:\n q = -q;\n\n self.poses.append(np.concatenate([q, o]))\n self.pose_times.append(msg.header.stamp.to_sec())\n\n self.Previous_T = self._T\n self.Previous_time = msg.header.stamp.to_sec()\n\n self.poses = np.array(self.poses)\n self.pose_times = np.array(self.pose_times)",
"def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms",
"def map(inputs, e0,e1,k):\r\n codebook = tf.cast(inputs[0][0:2 ** k], tf.float32)\r\n soft_map = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\r\n for y in inputs[1]:\r\n Pxy_map = pyx(y, codebook, e0, e1)\r\n soft_map = soft_map.write(soft_map.size(), Pxy_map)\r\n\r\n soft_map = soft_map.stack()\r\n return soft_map",
"def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,\n stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):\n\n # Get batch indice for each point\n batch_inds = self.tf_get_batch_inds(stack_lengths)\n\n # Augment input points\n stacked_points, scales, rots = self.tf_augment_input(stacked_points,\n batch_inds,\n config)\n\n # First add a column of 1 as feature for the network to be able to learn 3D shapes\n stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)\n\n # Then use positions or not\n if config.in_features_dim == 1:\n pass\n elif config.in_features_dim == 3:\n stacked_features = tf.concat((stacked_features, stacked_points), axis=1)\n else:\n raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')\n\n # Get the whole input list\n input_list = self.tf_shape_matching_inputs(config,\n stacked_points,\n stacked_features,\n stack_lengths,\n batch_inds)\n\n # Add scale and rotation for testing\n input_list += [scales, rots, obj_inds]\n input_list += [stack_lengths] # in order further on to multiply element-wise in the stack\n input_list += [stacked_evecs, stacked_evecs_trans, stacked_evals]\n input_list += [stacked_evecs_full]\n\n return input_list",
"def transform_sequence(\n reloc_poses: Sequence[np.ndarray],\n gt_poses: Sequence[np.ndarray],\n reference_pose: np.ndarray,\n) -> List[np.ndarray]:\n assert len(reloc_poses) == len(\n gt_poses\n ), \"Reloc and GT pose lists must have the same length.\"\n transformed_sequence = []\n\n # reloc_poses are cam_t -> reloc_origin (wold)\n # gt_poses are cam_t -> current_origin (w)\n # reference_pose is cam_end -> current_origin (w)\n # we want to transform all reloc_poses into cam_end -> reloc_origin (wold)\n e_T_w = np.linalg.inv(reference_pose)\n for reloc_T_t, w_T_t in zip(reloc_poses, gt_poses):\n e_T_t = e_T_w @ w_T_t\n t_T_e = np.linalg.inv(e_T_t)\n reloc_T_e = reloc_T_t @ t_T_e\n\n transformed_sequence.append(reloc_T_e)\n\n return transformed_sequence",
"def fix_map_to_odom_transform(self, robot_pose, timestamp):\n (translation, rotation) = \\\n self.convert_pose_inverse_transform(robot_pose)\n p = PoseStamped(\n pose=self.convert_translation_rotation_to_pose(translation,\n rotation),\n header=Header(stamp=timestamp, frame_id='base_link'))\n self.tf_listener.waitForTransform('base_link',\n 'odom',\n timestamp,\n rospy.Duration(2.0)) # Extended duration due to tf timeout error\n self.odom_to_map = self.tf_listener.transformPose('odom', p)\n (self.translation, self.rotation) = \\\n self.convert_pose_inverse_transform(self.odom_to_map.pose)",
"def transform_points(Points,R,t):\r\n return [transform_point(p,R,t) for p in Points]",
"def make_features(targs_pb, pf):\n camera, to_uvd, to_world, keys_uvd, _, visible, _ = utils.get_contents_pb(\n targs_pb.kp_target)\n num_kp = len(keys_uvd)\n # Restrict to max projection targets\n proj_targs = [\n utils.get_contents_pb(targ_pb) for targ_pb in targs_pb.proj_targets\n ][:utils.MAX_TARGET_FRAMES]\n targets_keys_uvd = []\n targets_to_uvd = []\n for proj_targ in proj_targs:\n _, to_uvd, _, keys_uvd, _, _, _ = proj_targ\n targets_keys_uvd.append(keys_uvd)\n targets_to_uvd.append(to_uvd)\n # Add dummy targets if necessary.\n num_targets = len(proj_targs)\n for _ in range(utils.MAX_TARGET_FRAMES - num_targets):\n targets_keys_uvd.append(utils.dummy_keys_uvd(num_kp))\n targets_to_uvd.append(utils.dummy_to_uvd())\n\n def feat_int(num):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[num]))\n\n def feat_floats(floats):\n return tf.train.Feature(float_list=tf.train.FloatList(value=floats))\n\n feats = {\n 'to_world_' + pf:\n feat_floats(to_world.flatten()),\n 'to_uvd_' + pf:\n feat_floats(to_uvd.flatten()),\n 'camera_' + pf:\n feat_floats(utils.cam_pb_to_array(camera)),\n 'keys_uvd_' + pf:\n feat_floats(np.array(keys_uvd).flatten()),\n 'visible_' + pf:\n feat_floats(visible),\n 'num_kp_' + pf:\n feat_int(num_kp),\n 'num_targets_' + pf:\n feat_int(num_targets),\n 'targets_to_uvd_' + pf:\n feat_floats(np.array(targets_to_uvd).flatten()),\n 'targets_keys_uvd_' + pf:\n feat_floats(np.array(targets_keys_uvd).flatten()),\n 'mirrored':\n feat_int(int(targs_pb.mirrored)),\n }\n return feats",
"def generate(path, cam_orig, cam_new, fnames_left, fnames_right,\n fnames_target_left, fnames_target_right):\n with tf.io.TFRecordWriter(path) as tfrecord_writer:\n with tf.Graph().as_default():\n im0 = tf.compat.v1.placeholder(dtype=tf.uint8)\n im1 = tf.compat.v1.placeholder(dtype=tf.uint8)\n encoded0 = tf.image.encode_png(im0)\n encoded1 = tf.image.encode_png(im1)\n\n with tf.compat.v1.Session() as sess:\n for fleft, fright, ftleft, ftright in zip(fnames_left, fnames_right,\n fnames_target_left,\n fnames_target_right):\n assert (os.path.basename(\n os.path.splitext(fleft)[0]) == os.path.basename(\n os.path.splitext(ftleft)[0]))\n assert (os.path.basename(\n os.path.splitext(fright)[0]) == os.path.basename(\n os.path.splitext(ftright)[0]))\n print(fleft)\n image_left = utils.read_image(fleft)\n image_right = utils.read_image(fright)\n targs_left = utils.read_target_pb(ftleft)\n targs_right = utils.read_target_pb(ftright)\n image_left = utils.resize_image(image_left, cam_new, cam_orig,\n targs_left)\n image_right = utils.resize_image(image_right, cam_new, cam_orig,\n targs_right)\n st0, st1 = sess.run([encoded0, encoded1],\n feed_dict={\n im0: image_left,\n im1: image_right\n })\n feats = {'img_L': bytes_feature(st0), 'img_R': bytes_feature(st1)}\n feats.update(make_features(targs_left, 'L'))\n feats.update(make_features(targs_right, 'R'))\n example = tf.train.Example(features=tf.train.Features(feature=feats))\n tfrecord_writer.write(example.SerializeToString())",
"def get_goal_ee_pose(self):\n #self.target_endpoint = #magic tf call that I can add ie the pose of the palm from camera aruco detection\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame_camera', rospy.Time()) # ee_frame_camera_flipped\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.target_endpoint = np.array(point)\n # rospy.logerr(self.target_endpoint)",
"def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D ---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes",
"def predict_poses(self, inputs, features):\n outputs = dict()\n # Compute the pose to each source frame via a separate forward pass through the pose network.\n # select what features the pose network takes as input\n if self.args.pose_model_type == \"shared\":\n pose_feats = {frame_id: features[frame_id] for frame_id in self.args.frame_idxs}\n else:\n pose_feats = {frame_id: inputs[(\"color_aug\", frame_id, 0)] for frame_id in self.args.frame_idxs}\n\n for frame_id in self.args.frame_idxs[1:]:\n # To maintain ordering we always pass frames in temporal order\n if frame_id == -1:\n pose_inputs = [pose_feats[frame_id], pose_feats[0]]\n else:\n pose_inputs = [pose_feats[0], pose_feats[frame_id]]\n\n if self.args.pose_model_type == \"separate\":\n pose_inputs = [self.models[\"pose_encoder\"](torch.cat(pose_inputs, 1))]\n\n axisangle, translation = self.models[\"pose\"](pose_inputs)\n\n # Normalize the translation vec and multiply by the displacement magnitude obtained from speed\n # of the vehicle to scale it to the real world translation\n translation_magnitude = translation[:, 0].squeeze(1).norm(p=\"fro\",\n dim=1).unsqueeze(1).unsqueeze(2)\n translation_norm = translation[:, 0] / translation_magnitude\n translation_norm *= inputs[(\"displacement_magnitude\", frame_id)].unsqueeze(1).unsqueeze(2)\n translation = translation_norm\n\n outputs[(\"axisangle\", 0, frame_id)] = axisangle\n outputs[(\"translation\", 0, frame_id)] = translation\n # Invert the matrix if the frame id is negative\n outputs[(\"cam_T_cam\", 0, frame_id)] = pose_vec2mat(axisangle[:, 0],\n translation,\n invert=(frame_id < 0),\n rotation_mode=self.args.rotation_mode)\n return outputs",
"def fix_map_to_odom_transform(self, msg):\n (translation, rotation) = convert_pose_inverse_transform(self.robot_pose)\n p = PoseStamped(pose=convert_translation_rotation_to_pose(translation, rotation),\n header=Header(stamp=msg.header.stamp, frame_id=self.base_frame))\n self.odom_to_map = self.tf_listener.transformPose(self.odom_frame, p)\n (self.translation, self.rotation) = convert_pose_inverse_transform(self.odom_to_map.pose)",
"def preprocess(args, id2info, mapping):\n polyline_spans = []\n keys = list(id2info.keys())\n assert 'AV' in keys\n assert 'AGENT' in keys\n keys.remove('AV')\n keys.remove('AGENT')\n keys = ['AGENT', 'AV'] + keys\n vectors = []\n two_seconds = mapping['two_seconds']\n mapping['trajs'] = []\n mapping['agents'] = []\n for id in keys:\n polyline = {}\n\n info = id2info[id]\n start = len(vectors)\n if args.no_agents:\n if id != 'AV' and id != 'AGENT':\n break\n\n agent = []\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n agent.append((line[X], line[Y]))\n\n if args.visualize:\n traj = np.zeros([args.hidden_size])\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n traj = traj[:i * 2].copy()\n break\n traj[i * 2], traj[i * 2 + 1] = line[X], line[Y]\n if i == len(info) - 1:\n traj = traj[:(i + 1) * 2].copy()\n traj = traj.reshape((-1, 2))\n mapping['trajs'].append(traj)\n\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n x, y = line[X], line[Y]\n if i > 0:\n # print(x-line_pre[X], y-line_pre[Y])\n vector = [line_pre[X], line_pre[Y], x, y, line[TIMESTAMP], line[OBJECT_TYPE] == 'AV',\n line[OBJECT_TYPE] == 'AGENT', line[OBJECT_TYPE] == 'OTHERS', len(polyline_spans), i]\n vectors.append(get_pad_vector(vector))\n line_pre = line\n\n end = len(vectors)\n if end - start == 0:\n assert id != 'AV' and id != 'AGENT'\n else:\n mapping['agents'].append(np.array(agent))\n\n polyline_spans.append([start, end])\n\n assert_(len(mapping['agents']) == len(polyline_spans))\n\n assert len(vectors) <= max_vector_num\n\n t = len(vectors)\n mapping['map_start_polyline_idx'] = len(polyline_spans)\n if args.use_map:\n vectors, polyline_spans = get_sub_map(args, mapping['cent_x'], mapping['cent_y'], mapping['city_name'],\n vectors=vectors,\n polyline_spans=polyline_spans, mapping=mapping)\n\n # logging('len(vectors)', t, len(vectors), prob=0.01)\n\n matrix = np.array(vectors)\n # matrix = np.array(vectors, dtype=float)\n # del vectors\n\n # matrix = torch.zeros([len(vectors), args.hidden_size])\n # for i, vector in enumerate(vectors):\n # for j, each in enumerate(vector):\n # matrix[i][j].fill_(each)\n\n labels = []\n info = id2info['AGENT']\n info = info[mapping['agent_pred_index']:]\n if not args.do_test:\n if 'set_predict' in args.other_params:\n pass\n else:\n assert len(info) == 30\n for line in info:\n labels.append(line[X])\n labels.append(line[Y])\n\n if 'set_predict' in args.other_params:\n if 'test' in args.data_dir[0]:\n labels = [0.0 for _ in range(60)]\n\n if 'goals_2D' in args.other_params:\n point_label = np.array(labels[-2:])\n mapping['goals_2D_labels'] = np.argmin(get_dis(mapping['goals_2D'], point_label))\n\n if 'lane_scoring' in args.other_params:\n stage_one_label = 0\n polygons = mapping['polygons']\n min_dis = 10000.0\n for i, polygon in enumerate(polygons):\n temp = np.min(get_dis(polygon, point_label))\n if temp < min_dis:\n min_dis = temp\n stage_one_label = i\n\n mapping['stage_one_label'] = stage_one_label\n\n mapping.update(dict(\n matrix=matrix,\n labels=np.array(labels).reshape([30, 2]),\n polyline_spans=[slice(each[0], each[1]) for each in polyline_spans],\n labels_is_valid=np.ones(args.future_frame_num, dtype=np.int64),\n eval_time=30,\n ))\n\n return mapping",
"def translate_frame(frame, x, y):\n trans_mat = np.float32([[1, 0, x], [0, 1, y]])\n dimensions = (frame.shape[1], frame.shape[0])\n return cv.warpAffine(frame, trans_mat, dimensions)",
"def move_to_point_and_extract(coords_from_to: list,\n gps: adapters.GPSUbloxAdapter,\n vesc_engine: adapters.VescAdapterV4,\n smoothie: adapters.SmoothieAdapter,\n camera: adapters.CameraAdapterIMX219_170,\n periphery_det: detection.YoloOpenCVDetection,\n precise_det: detection.YoloOpenCVDetection,\n logger_full: utility.Logger,\n report_field_names,\n trajectory_saver: utility.TrajectorySaver,\n working_zone_polygon,\n img_output_dir,\n nav: navigation.GPSComputing,\n data_collector: datacollection.DataCollector,\n log_cur_dir,\n image_saver: utility.ImageSaver,\n notification: NotificationClient,\n extraction_manager_v3: ExtractionManagerV3,\n ui_msg_queue: posix_ipc.MessageQueue,\n SI_speed: float,\n wheels_straight: bool,\n navigation_prediction: navigation.NavigationPrediction,\n future_points: list,\n allow_extractions: bool,\n x_scan_poly: list,\n cur_field):\n\n if config.ALLOW_FIELD_LEAVING_PROTECTION and cur_field is not None and len(cur_field) > 2:\n enable_field_leaving_protection = True\n else:\n enable_field_leaving_protection = False\n if config.ALLOW_FIELD_LEAVING_PROTECTION:\n if cur_field is None:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field is None\"\n print(msg)\n logger_full.write(msg)\n elif len(cur_field) < 3:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field contains \" \\\n f\"{len(cur_field)} points (required ar least 3 points)\"\n print(msg)\n logger_full.write(msg)\n\n extract = SI_speed > 0 and allow_extractions\n\n vesc_speed = SI_speed * config.MULTIPLIER_SI_SPEED_TO_RPM\n speed_fast = config.SI_SPEED_FAST * config.MULTIPLIER_SI_SPEED_TO_RPM\n vesc_speed_fast = speed_fast if SI_speed >= 0 else -speed_fast\n navigation_prediction.set_SI_speed(SI_speed)\n\n raw_angles_history = []\n detections_period = []\n navigations_period = []\n stop_helping_point = nav.get_coordinate(\n coords_from_to[1], coords_from_to[0], 90, 1000)\n learn_go_straight_index = 0\n learn_go_straight_history = []\n\n last_skipped_point = coords_from_to[0]\n start_Nav_while = True\n last_correct_raw_angle = 0\n point_status = \"origin\"\n last_corridor_side = 0\n current_corridor_side = 1\n almost_start = 0\n\n prev_maneuver_time = time.time()\n working_mode_slow = 1\n working_mode_fast = 2\n working_mode_switching = 3\n current_working_mode = working_mode_slow\n last_working_mode = 0\n # True if robot is close to one of current movement vector points, False otherwise; False if speed limit near points is disabled\n close_to_end = config.USE_SPEED_LIMIT\n bumper_is_pressed = None\n\n # message queue sending temporary performance tracker\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf = {\n \"max_time\": 0,\n \"min_time\": float(\"inf\"),\n \"total_time\": 0,\n \"total_sends\": 0,\n \"timeouts_exceeded\": 0\n }\n\n # x movements during periphery scans\n x_scan_cur_idx = 0\n x_scan_idx_increasing = True\n\n # set camera to the Y min\n res = smoothie.custom_separate_xy_move_to(X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n # TODO: maybe should add sleep time as camera currently has delay\n\n if config.AUDIT_MODE:\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n 
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n try:\n notificationQueue = posix_ipc.MessageQueue(\n config.QUEUE_NAME_UI_NOTIFICATION)\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n notificationQueue = None\n\n degraded_navigation_mode = False\n\n number_navigation_cycle_without_gps = 0\n\n point_reading_t = last_send_gps_time = slow_mode_time = time.time()\n\n have_time_for_inference = True\n predictor_next_gps_expected_ts = float(\"inf\")\n\n # main navigation control loop\n while True:\n # gps point reading time predictor\n if have_time_for_inference and config.ALLOW_GPS_TIME_PREDICTIONS_LIMITING_INFERENCE:\n if time.time() + config.INFERENCE_MAX_TICK_TIME > predictor_next_gps_expected_ts:\n have_time_for_inference = False\n\n if have_time_for_inference:\n # EXTRACTION CONTROL\n start_t = time.time()\n frame = camera.get_image()\n frame_t = time.time()\n\n per_det_start_t = time.time()\n if extract:\n plants_boxes = periphery_det.detect(frame)\n else:\n plants_boxes = list()\n per_det_end_t = time.time()\n detections_period.append(per_det_end_t - start_t)\n\n if config.SAVE_DEBUG_IMAGES:\n image_saver.save_image(\n frame,\n img_output_dir,\n label=\"PE_view_M=\" + str(current_working_mode),\n plants_boxes=plants_boxes)\n if config.ALLOW_GATHERING and current_working_mode == working_mode_slow and \\\n image_saver.get_counter(\"gathering\") < config.DATA_GATHERING_MAX_IMAGES:\n image_saver.save_image(frame, config.DATA_GATHERING_DIR,\n plants_boxes=plants_boxes, counter_key=\"gathering\")\n\n if extract:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. time: \" + \\\n str(per_det_end_t - per_det_start_t)\n else:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. (extractions are off) time: \" + \\\n str(per_det_end_t - per_det_start_t)\n logger_full.write(msg + \"\\n\")\n\n # MOVEMENT AND ACTIONS MODES\n if config.AUDIT_MODE:\n dc_start_t = time.time()\n\n # count detected plant boxes for each type\n plants_count = dict()\n for plant_box in plants_boxes:\n plant_box_name = plant_box.get_name()\n if plant_box_name in plants_count:\n plants_count[plant_box_name] += 1\n else:\n plants_count[plant_box_name] = 1\n\n # save info into data collector\n for plant_label in plants_count:\n data_collector.add_detections_data(plant_label,\n math.ceil((plants_count[plant_label]) / config.AUDIT_DIVIDER))\n\n # flush updates into the audit output file and log measured time\n if len(plants_boxes) > 0:\n data_collector.save_all_data(\n log_cur_dir + config.AUDIT_OUTPUT_FILE)\n\n dc_t = time.time() - dc_start_t\n msg = \"Last scan weeds detected: \" + str(len(plants_boxes)) + \\\n \", audit processing tick time: \" + str(dc_t)\n logger_full.write(msg + \"\\n\")\n else:\n # slow mode\n if current_working_mode == working_mode_slow:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : slow\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Stopping the robot because we have detected plant(s).\"\n logger_full.write_and_flush(msg+\"\\n\")\n data_collector.add_vesc_moving_time_data(\n 
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # TODO this 0 rpm \"movement\" is to prevent robot movement during extractions, need to add this in future to rest speed modes too\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # TODO remove thread init from here!\n voltage_thread = threading.Thread(\n target=send_voltage_thread_tf,\n args=(vesc_engine, ui_msg_queue),\n daemon=True)\n voltage_thread.start()\n\n # single precise center scan before calling for PDZ scanning and extractions\n if config.ALLOW_PRECISE_SINGLE_SCAN_BEFORE_PDZ and not config.ALLOW_X_MOVEMENT_DURING_SCANS:\n time.sleep(config.DELAY_BEFORE_2ND_SCAN)\n frame = camera.get_image()\n plants_boxes = precise_det.detect(frame)\n\n # do PDZ scan and extract all plants if single precise scan got plants in working area\n if ExtractionManagerV3.any_plant_in_zone(plants_boxes, working_zone_polygon):\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n else:\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Extract cycle are finish.\"\n logger_full.write_and_flush(msg+\"\\n\")\n\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n\n msg = \"Applying force step forward after extractions cycle(s)\"\n logger_full.write(msg + \"\\n\")\n if config.VERBOSE:\n print(msg)\n vesc_engine.set_time_to_move(config.STEP_FORWARD_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(\n config.SI_SPEED_STEP_FORWARD * config.MULTIPLIER_SI_SPEED_TO_RPM,\n vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n vesc_engine.wait_for_stop(vesc_engine.PROPULSION_KEY)\n\n elif config.SLOW_FAST_MODE and time.time() - slow_mode_time > config.SLOW_MODE_MIN_TIME:\n # move cork to fast mode scan position\n if config.VERBOSE:\n msg = \"SLOW MODE: moving cork to fast mode position\\n\"\n logger_full.write(msg)\n\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm((config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR,\n \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Keeping in slow mode as failed to move camera to fast mode scan position, smoothie's response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n else:\n msg = \"Switching from 'slow mode' to 'switching mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_switching\n\n # TODO a bug: will not start moving if config.SLOW_MODE_MIN_TIME == 0 or too low (switch speed applies right after slow mode weeds extractions)\n if not vesc_engine.is_moving(vesc_engine.PROPULSION_KEY):\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # switching (from slow to fast) mode\n elif current_working_mode == 
working_mode_switching:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : switching to fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n\n sm_cur_pos = smoothie.get_smoothie_current_coordinates(\n convert_to_mms=False)\n if abs(sm_cur_pos[\"X\"] - (config.X_MAX - config.X_MIN) / 2) < 0.001 and \\\n abs(sm_cur_pos[\"Y\"] - (config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR) < 0.001:\n msg = \"Switching from 'switching mode' to 'fast mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_fast\n\n # fast mode\n elif current_working_mode == working_mode_fast:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write_and_flush(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n msg = \"Switching from 'fast mode' to 'slow mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n # TODO dont need anymore? 
as rpm is set at the end of slow mode\n # vesc_engine.set_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n elif close_to_end:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed:\n msg = f\"Applying slow speed {vesc_speed} at 'fast mode' \" \\\n f\"(was {cur_vesc_rpm}) \" \\\n f\"because of close_to_end flag trigger\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n else:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed_fast:\n msg = f\"Applying fast speed {vesc_speed_fast} at 'fast mode' (was {cur_vesc_rpm})\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n\n # NAVIGATION CONTROL\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n nav_start_t = time.time()\n\n if start_Nav_while:\n navigation_period = 1\n else:\n navigation_period = nav_start_t - prev_maneuver_time\n\n navigations_period.append(navigation_period)\n # time reference to decide the number of detection before resuming gps.get\n prev_maneuver_time = nav_start_t\n # print(\"tock\")\n\n if start_Nav_while:\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n start_Nav_while = False\n\n # mu_navigations_period, sigma_navigations_period = utility.mu_sigma(navigations_period)\n\n navigation_prediction.set_current_lat_long(cur_pos)\n\n # skip same points (non-blocking reading returns old point if new point isn't available yet)\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # stop robot if there's no new points for a while\n if time.time() - point_reading_t > config.GPS_POINT_TIME_BEFORE_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = f\"Stopping the robot due to exceeding time 'GPS_POINT_TIME_BEFORE_STOP=\" \\\n f\"{config.GPS_POINT_TIME_BEFORE_STOP}' limit without new gps points from adapter\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new GPS \" \\\n \"point (new points filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n else:\n msg = \"New GPS point received, continuing movement\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n else:\n continue\n\n # gps points reading time predictor\n predictor_next_gps_expected_ts = cur_pos_obj.receiving_ts + config.GPS_POINT_WAIT_TIME_MAX\n have_time_for_inference = True\n\n # points filter by quality flag\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # 
restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n\n # stop robot due to bad point quality if allowed\n if config.ALLOW_GPS_BAD_QUALITY_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n logger_full.write_and_flush(\n \"Stopping the robot for lack of quality gps 4, waiting for it...\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (quality filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point\n if cur_pos[2] != \"4\":\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(\n logger_full)\n else:\n msg = \"The gps has regained quality 4, starting movement\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n # points filter by distance\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if config.ALLOW_GPS_PREV_CUR_DIST_STOP and prev_cur_distance > config.PREV_CUR_POINT_MAX_DIST:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = f\"Stopping the robot due to GPS points filter by distance (assuming current position point \" \\\n f\"{str(cur_pos)} is wrong as distance between current position and prev. 
position {str(prev_pos)}\" \\\n f\" is bigger than config.PREV_CUR_POINT_MAX_DIST={str(config.PREV_CUR_POINT_MAX_DIST)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = distance_wait_start_ts = time.time()\n\n while True:\n if time.time() - distance_wait_start_ts > config.GPS_DIST_WAIT_TIME_MAX:\n msg = f\"Stopping waiting for good prev-cur distance due to timeout, using current point \" \\\n f\"{cur_pos} and starting moving again\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (distance filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point or ignore point quality if bad quality stop is not allowed\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n continue\n\n # check if distance became ok\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if prev_cur_distance <= config.PREV_CUR_POINT_MAX_DIST:\n msg = f\"Starting moving again after GPS points filter by distance as distance become OK \" \\\n f\"({str(prev_cur_distance)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n point_reading_t = time.time()\n\n trajectory_saver.save_point(cur_pos)\n if ui_msg_queue is not None and time.time()-last_send_gps_time >= 1:\n try:\n ui_msg_queue_send_ts = time.time()\n ui_msg_queue.send(json.dumps(\n {\"last_gps\": cur_pos}), timeout=config.QUEUE_WAIT_TIME_MAX)\n last_send_gps_time = time.time()\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_send_et = last_send_gps_time - ui_msg_queue_send_ts\n if ui_msg_queue_send_et < ui_msg_queue_perf[\"min_time\"]:\n ui_msg_queue_perf[\"min_time\"] = ui_msg_queue_send_et\n if ui_msg_queue_send_et > ui_msg_queue_perf[\"max_time\"]:\n ui_msg_queue_perf[\"max_time\"] = ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_time\"] += ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_sends\"] += 1\n except posix_ipc.BusyError:\n msg = f\"Current position wasn't sent to ui_msg_queue likely due to sending timeout \" \\\n f\"(max wait time: config.QUEUE_WAIT_TIME_MAX={config.QUEUE_WAIT_TIME_MAX}\"\n logger_full.write(msg + \"\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"timeouts_exceeded\"] += 1\n\n if config.CONTINUOUS_INFORMATION_SENDING and not degraded_navigation_mode:\n notification.set_current_coordinate(cur_pos)\n\n distance = nav.get_distance(cur_pos, coords_from_to[1])\n\n last_corridor_side = current_corridor_side\n perpendicular, current_corridor_side = nav.get_deviation(\n coords_from_to[0], coords_from_to[1], cur_pos)\n\n # 
stop the robot if it has left the field\n if enable_field_leaving_protection:\n for pt_idx in range(len(cur_field)):\n last_point = pt_idx + 1 == len(cur_field)\n\n if last_point:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[0], cur_pos)\n else:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[pt_idx + 1], cur_pos)\n\n if side == -1 and deviation > config.LEAVING_PROTECTION_DISTANCE_MAX:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = f\"Robot is stopped due to leaving the field. Cur pos: '{str(cur_pos)}'; \" \\\n f\"Field comparison vector - P1: '{str(cur_field[pt_idx])}', \" \\\n f\"P2: '{str(cur_field[0] if last_point else cur_field[pt_idx + 1])}'\"\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n notification.set_robot_state(RobotStates.OUT_OF_SERVICE)\n exit()\n\n # check if arrived\n _, side = nav.get_deviation(\n coords_from_to[1], stop_helping_point, cur_pos)\n # if distance <= config.COURSE_DESTINATION_DIFF: # old way\n if side != 1: # TODO: maybe should use both side and distance checking methods at once\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # msg = \"Arrived (allowed destination distance difference \" + str(config.COURSE_DESTINATION_DIFF) + \" mm)\"\n # TODO: service will reload script even if it done his work?\n msg = \"Arrived to \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # put the wheel straight\n if wheels_straight:\n response = smoothie.custom_move_to(A_F=config.A_F_MAX, A=0)\n if response != smoothie.RESPONSE_OK: # TODO: what if response is not ok?\n msg = \"Couldn't turn wheels to center (0), smoothie response:\\n\" + \\\n response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n break\n\n # TODO check for bug: arrival check applies single speed for all path (while multiple speeds are applied)\n # check if can arrived\n if vesc_engine.get_current_rpm(vesc_engine.PROPULSION_KEY) / config.MULTIPLIER_SI_SPEED_TO_RPM * \\\n config.MANEUVERS_FREQUENCY > nav.get_distance(cur_pos, coords_from_to[1]):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = \"Will have arrived before the next point to \" + \\\n str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n break\n\n # reduce speed if near the target point\n if config.USE_SPEED_LIMIT:\n distance_from_start = nav.get_distance(coords_from_to[0], cur_pos)\n close_to_end = distance < config.DECREASE_SPEED_TRESHOLD or distance_from_start < config.DECREASE_SPEED_TRESHOLD\n\n msg = \"Distance to B: \" + str(distance)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n msg = \"Prev: \" + str(prev_pos) + \" Cur: \" + str(cur_pos) + \" A: \" + str(coords_from_to[0]) \\\n + \" B: \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # pass by cur points which are very close to prev point to prevent angle errors when robot is staying\n # (too close points in the same position can produce false huge angles)\n\n 
navigation_prediction.run_prediction(coords_from_to, cur_pos)\n\n # raw_angle_cruise = nav.get_angle(coords_from_to[0], cur_pos, cur_pos, coords_from_to[1])\n # raw_angle_legacy = nav.get_angle(prev_pos, cur_pos, cur_pos, coords_from_to[1])\n raw_angle_centroid = nav.get_angle(\n prev_pos, cur_pos, coords_from_to[0], coords_from_to[1])\n raw_angle_cruise = - current_corridor_side * math.log(1+perpendicular)\n\n if nav.get_distance(coords_from_to[0], coords_from_to[1]) < config.CORNER_THRESHOLD and nav.get_distance(coords_from_to[1], future_points[0][0]) < config.CORNER_THRESHOLD:\n # if abs(raw_angle_legacy)>config.LOST_THRESHOLD:\n centroid_factor = config.CENTROID_FACTOR_LOST\n cruise_factor = 1/centroid_factor\n else:\n centroid_factor = config.CENTROID_FACTOR_ORIENTED\n cruise_factor = 1\n\n raw_angle = raw_angle_centroid*centroid_factor + raw_angle_cruise*cruise_factor\n\n # raw_angle = butter_lowpass_filter(raw_angle, 0.5, 4, 6)\n\n if config.LEARN_GO_STRAIGHT:\n if config.MIN_PERPENDICULAR_GO_STRAIGHT >= perpendicular:\n learn_go_straight_index += 1\n learn_go_straight_history.append(raw_angle)\n if len(learn_go_straight_history) >= config.VALUES_LEARN_GO_STRAIGHT:\n learn_go_straight = sum(\n learn_go_straight_history)/len(learn_go_straight_history)\n msg = f\"Average angle applied to the wheel for the robot to have found : {learn_go_straight}.\"\n logger_full.write_and_flush(msg + \"\\n\")\n # TODO opening and closing file 4 times per second\n with open(config.LEARN_GO_STRAIGHT_FILE, \"w+\") as learn_go_straight_file:\n learn_go_straight_file.write(str(learn_go_straight))\n else:\n learn_go_straight_index = 0\n\n # NAVIGATION STATE MACHINE\n if prev_cur_distance < config.PREV_CUR_POINT_MIN_DIST:\n raw_angle = last_correct_raw_angle\n # print(\"The distance covered is low\")\n point_status = \"skipped\"\n\n # register the last position where the robot almost stop\n # in order to disable the deviation servo for a config.POURSUIT_LIMIT length and then resume in cruise\n last_skipped_point = cur_pos\n else:\n last_correct_raw_angle = raw_angle\n point_status = \"correct\"\n\n almost_start = nav.get_distance(last_skipped_point, cur_pos)\n\n # sum(e)\n if len(raw_angles_history) >= config.WINDOW:\n raw_angles_history.pop(0)\n raw_angles_history.append(raw_angle)\n # print(\"len(raw_angles_history):\",len(raw_angles_history))\n sum_angles = sum(raw_angles_history)\n if sum_angles > config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is bigger than max allowed value \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # Get Ready to go down as soon as the angle get negatif\n raw_angles_history[len(raw_angles_history) -\n 1] -= sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = config.SUM_ANGLES_HISTORY_MAX\n elif sum_angles < -config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is less than min allowed value \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # get Ready to go up as soon as the angle get positive:\n raw_angles_history[len(raw_angles_history)-1] += - \\\n sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = -config.SUM_ANGLES_HISTORY_MAX\n\n # KP = 0.2*0,55\n # KI = 0.0092*0,91\n\n KP = getSpeedDependentConfigParam(\n config.KP, SI_speed, \"KP\", logger_full)\n KI = getSpeedDependentConfigParam(\n 
config.KI, SI_speed, \"KI\", logger_full)\n\n angle_kp_ki = raw_angle * KP + sum_angles * KI\n\n # smoothie -Value == left, Value == right\n target_angle_sm = angle_kp_ki * -config.A_ONE_DEGREE_IN_SMOOTHIE\n # target_angle_sm = 0 #Debug COVID_PLACE\n ad_wheels_pos = smoothie.get_adapter_current_coordinates()[\"A\"]\n # sm_wheels_pos = smoothie.get_smoothie_current_coordinates()[\"A\"]\n sm_wheels_pos = \"off\"\n\n # compute order angle (smoothie can't turn for huge values immediately also as cancel movement,\n # so we need to do nav. actions in steps)\n order_angle_sm = target_angle_sm - ad_wheels_pos\n\n # check for out of update frequency and smoothie execution speed range (for nav wheels)\n if order_angle_sm > config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE:\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND +\n config.A_ONE_DEGREE_IN_SMOOTHIE) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE\n elif order_angle_sm < -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE):\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(-(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)\n\n # convert to global smoothie coordinates\n order_angle_sm += ad_wheels_pos\n\n # checking for out of smoothie supported range\n if order_angle_sm > config.A_MAX:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MAX = \" + \\\n str(config.A_MAX) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MAX\n elif order_angle_sm < config.A_MIN:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MIN = \" + \\\n str(config.A_MIN) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MIN\n\n # cork x movement during periphery scans control\n if config.ALLOW_X_MOVEMENT_DURING_SCANS:\n if x_scan_idx_increasing:\n x_scan_cur_idx += 1\n if x_scan_cur_idx >= len(config.X_MOVEMENT_CAMERA_POSITIONS):\n x_scan_idx_increasing = False\n x_scan_cur_idx -= 2\n else:\n x_scan_cur_idx -= 1\n if x_scan_cur_idx < 0:\n x_scan_idx_increasing = True\n x_scan_cur_idx += 2\n # TODO do we check SI_speed earlier and do proper calculations and angle validations if here we'll get here a negative order angle instead of positive?\n response = smoothie.custom_move_to(\n A_F=config.A_F_MAX,\n A=order_angle_sm if SI_speed >= 0 else -order_angle_sm,\n X_F=config.X_MOVEMENT_CAMERA_X_F[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None,\n X=config.X_MOVEMENT_CAMERA_POSITIONS[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None\n )\n\n if response != smoothie.RESPONSE_OK:\n msg = \"Couldn't turn wheels! 
Smoothie response:\\n\" + response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # TODO opening and closing file too often (likely 4 times per second)\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n\n raw_angle = round(raw_angle, 2)\n angle_kp_ki = round(angle_kp_ki, 2)\n order_angle_sm = round(order_angle_sm, 2)\n sum_angles = round(sum_angles, 2)\n distance = round(distance, 2)\n ad_wheels_pos = round(ad_wheels_pos, 2)\n perpendicular = round(perpendicular, 2)\n # sm_wheels_pos = round(sm_wheels_pos, 2)\n gps_quality = cur_pos[2]\n corridor = \"\"\n if current_corridor_side == -1:\n corridor = \"left\"\n elif current_corridor_side == 1:\n corridor = \"right\"\n\n raw_angle_cruise = round(raw_angle_cruise, 2)\n\n msg = str(gps_quality).ljust(5) + \\\n str(raw_angle).ljust(8) + \\\n str(angle_kp_ki).ljust(8) + \\\n str(order_angle_sm).ljust(8) + \\\n str(sum_angles).ljust(8) + \\\n str(distance).ljust(13) + \\\n str(ad_wheels_pos).ljust(8) + \\\n str(sm_wheels_pos).ljust(9) + \\\n point_status.ljust(12) + \\\n str(perpendicular).ljust(10) + \\\n corridor.ljust(9) + \\\n str(centroid_factor).ljust(16) + \\\n str(cruise_factor).ljust(14)\n print(msg)\n logger_full.write(msg + \"\\n\")\n\n # TODO vesc sensors are being asked 4 times per second\n # send voltage and track bumper state\n vesc_data = vesc_engine.get_sensors_data(\n report_field_names, vesc_engine.PROPULSION_KEY)\n if vesc_data is not None and \"input_voltage\" in vesc_data:\n if bumper_is_pressed is None:\n bumper_is_pressed = not vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE\n if bumper_is_pressed:\n msg = f\"Bumper is pressed initially before starting moving to point. \" \\\n f\"({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif not bumper_is_pressed and vesc_data[\"input_voltage\"] < config.VESC_BUMBER_TRIGGER_VOLTAGE:\n bumper_is_pressed = True\n msg = f\"Bumper was pressed. ({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif bumper_is_pressed and vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE:\n bumper_is_pressed = False\n msg = f\"Bumper was unpressed. ({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n\n if config.CONTINUOUS_INFORMATION_SENDING:\n notification.set_input_voltage(vesc_data[\"input_voltage\"])\n\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n\n msg = \"Nav calc time: \" + str(time.time() - nav_start_t)\n logger_full.write(msg + \"\\n\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"avg_time\"] = ui_msg_queue_perf[\"total_time\"] / \\\n ui_msg_queue_perf[\"total_sends\"]\n msg = f\"Position sending performance report: {ui_msg_queue_perf}\"\n if config.VERBOSE:\n print(msg)\n logger_full.write(msg + \"\\n\")",
"def handle_pose(msg):\n global sensor_cfg\n global no_position\n global body_frame\n global frame_cfg\n\n quat = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])\n pos = np.array([msg.pose.position.x*1000, msg.pose.position.y*1000, msg.pose.position.z*1000])\n\n if position_mode == \"zero_pos\":\n pos = np.array([0, 0, 0])\n elif position_mode == \"relative\":\n pos = pos - parent_position\n\n br = tf.TransformBroadcaster()\n\n br.sendTransform(pos,\n quat,\n msg.header.stamp,\n body_frame,\n msg.header.frame_id)\n\n for k in frame_cfg:\n br.sendTransform(np.array([float(x) for x in frame_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in frame_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)\n\n for k in sensor_cfg:\n br.sendTransform(np.array([float(x) for x in sensor_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in sensor_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)\n\n for k in thruster_cfg:\n br.sendTransform(np.array([float(x) for x in sensor_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in sensor_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)",
"def rotate(frame, df_row, df_feats):\n\n instance = df_row[df_feats[0]]\n user_id = int(df_row[df_feats[1]])\n root_dir = df_row[df_feats[2]]\n rotated_dir = os.path.join(root_dir, 'rotated')\n rotated_dir = os.path.join(rotated_dir, str(user_id))\n file_type = '.jpg'\n\n # flip image about x-axis\n frame_flipped = tf.image.rot90(frame)\n frame_flipped = tf.image.rot90(frame_flipped)\n frame_flipped = np.array(frame_flipped)\n frame_flipped = cv2.cvtColor(frame_flipped, cv2.COLOR_BGR2GRAY)\n flipped_path = os.path.join(rotated_dir, instance + '_flipped' + file_type)\n\n datagen = ImageDataGenerator(horizontal_flip=True)\n frame_ext = frame.reshape((1,) + frame.shape) \n for frame_mirrored in datagen.flow(frame_ext, batch_size=1):\n # mirror image about y-axis\n frame_mirrored = frame_mirrored.reshape((144,256,3))\n\n # flip and mirror image\n frame_mirrored_flipped = tf.image.rot90(frame_mirrored)\n frame_mirrored_flipped = tf.image.rot90(frame_mirrored_flipped)\n frame_mirrored_flipped = np.array(frame_mirrored_flipped)\n frame_mirrored_flipped = cv2.cvtColor(frame_mirrored_flipped, cv2.COLOR_BGR2GRAY)\n mirrored_flipped_path = os.path.join(rotated_dir, instance + '_mirrored_flipped' + file_type)\n\n frame_mirrored = cv2.cvtColor(frame_mirrored, cv2.COLOR_BGR2GRAY)\n mirrored_path = os.path.join(rotated_dir, instance + '_mirrored' + file_type)\n break # break to avoid generating multiple copies of mirrored images\n\n # package results in dictionary\n rotate_dict = {}\n rotate_dict['flipped'] = {}\n rotate_dict['flipped']['path'] = flipped_path\n rotate_dict['flipped']['frame'] = frame_flipped \n rotate_dict['mirrored'] = {}\n rotate_dict['mirrored']['path'] = mirrored_path\n rotate_dict['mirrored']['frame'] = frame_mirrored\n rotate_dict['mirrored_flipped'] = {}\n rotate_dict['mirrored_flipped']['path'] = mirrored_flipped_path\n rotate_dict['mirrored_flipped']['frame'] = frame_mirrored_flipped\n\n return rotate_dict",
"def convert_mtr_to_kittimot_format(data_list: List[Union[str, int, float]], frame_id: int) -> List[Union[str, int, float]]:\n annotation_list = []\n track_id = -1\n for data in data_list:\n annotation = [frame_id, -1]\n # print(\"type: \", str2id(bboxes['object_id']))\n object_type = data[0]\n truncated = -1\n occluded = -1\n alpha = -1\n bbox2d = [-1, -1, -1, -1]\n dimensions = data[1:4]\n location = data[4:7]\n rotation_y = data[7]\n\n annotation.append(object_type)\n annotation.append(truncated)\n annotation.append(occluded)\n annotation.append(alpha)\n annotation += bbox2d\n annotation += dimensions\n annotation += location\n annotation.append(rotation_y)\n annotation_list.append(annotation)\n return annotation_list\n\n\n\n \"\"\"\n convert KITTI MOTS format to AB3DMOT format\n\n \n @params:\n data_list: a list containing data in KITTI MOTs format\n \"\"\"",
"def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = 
np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d",
"def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p",
"def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p",
"def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image",
"def tf_map(stacked_points, stacked_normals, labels, obj_inds, stack_lengths):\n\n # Get batch indice for each point\n batch_inds = self.tf_get_batch_inds(stack_lengths)\n\n # Augment input points\n stacked_points, scales, rots = self.tf_augment_input(stacked_points,\n batch_inds,\n config)\n\n # First add a column of 1 as feature for the network to be able to learn 3D shapes\n stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)\n\n # Then use positions or not\n if config.in_features_dim == 1:\n pass\n elif config.in_features_dim == 3:\n stacked_features = tf.concat((stacked_features, stacked_points), axis=1)\n elif config.in_features_dim == 4:\n stacked_features = tf.concat((stacked_features, stacked_normals), axis=1)\n elif config.in_features_dim == 5:\n angles = tf.asin(tf.abs(stacked_normals)) * (2 / np.pi)\n stacked_features = tf.concat((stacked_features, angles), axis=1)\n elif config.in_features_dim == 7:\n stacked_features = tf.concat((stacked_features, stacked_points, stacked_normals), axis=1)\n else:\n raise ValueError('Only accepted input dimensions are 1, 4 and 7 (without and with XYZ)')\n\n # Get the whole input list\n input_list = self.tf_classification_inputs(config,\n stacked_points,\n stacked_features,\n labels,\n stack_lengths,\n batch_inds)\n\n # Add scale and rotation for testing\n input_list += [scales, rots, obj_inds]\n\n return input_list"
] |
[
"0.62087893",
"0.5920398",
"0.57568246",
"0.5627396",
"0.5621936",
"0.56183314",
"0.56157327",
"0.55580515",
"0.5551266",
"0.5515611",
"0.5498045",
"0.5467242",
"0.54564303",
"0.5456426",
"0.5451013",
"0.5434744",
"0.54346627",
"0.5427823",
"0.54127014",
"0.5392363",
"0.5385914",
"0.53804934",
"0.5369227",
"0.5358936",
"0.5341871",
"0.5334845",
"0.5332696",
"0.5332696",
"0.5330773",
"0.53288066"
] |
0.64119387
|
0
|
calculates distance of the closest point in points and returns its distance
|
def calc_dist(self, points):
    # XY-plane (Euclidean) offsets from the robot's current pose to every given point
    dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]
    dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]
    dist = np.hypot(dist_x, dist_y)
    if len(dist) > 0:
        # distance to the closest point
        return min(dist)
    else:
        # no points given, so there is no closest distance to report
        return 0
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2",
"def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))",
"def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]",
"def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))",
"def closest_point(point, points):\n return points[cdist([point], points).argmin()]",
"def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5",
"def closest_dist(x, y, x_list, y_list):\n points = np.array([x, y]).T\n points_list = np.array([x_list, y_list]).T\n\n dpt0 = points_list[:, 0] - points[:, 0, np.newaxis]\n dpt1 = points_list[:, 1] - points[:, 1, np.newaxis]\n\n return np.argmin((dpt0*dpt0 + dpt1*dpt1), axis=1)",
"def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)",
"def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))",
"def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))",
"def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )",
"def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)",
"def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance",
"def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)",
"def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])",
"def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5",
"def _distance(point_a: tuple, point_b: tuple):\n # rgb values\n x1, y1, z1 = point_a\n x2, y2, z2 = point_b\n\n # distances\n dx = x1 - x2\n dy = y1 - y2\n dz = z1 - z2\n\n # final distance\n return sqrt(dx**2 + dy**2 + dz**2)",
"def find_closest_points(points):\n closest_dist = float(\"inf\")\n closest_points = None, None\n for y, point_one in enumerate(points):\n for x, point_two in enumerate(points):\n if x > y:\n dist= distance_between(point_one.points,point_two.points)\n if dist < closest_dist:\n closest_dist = dist\n closest_points= point_one, point_two\n\n return closest_points",
"def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)",
"def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))",
"def closest_points(self, points, maxdist=None):\n return [self.closest_point(point, maxdist) for point in points]",
"def _distance(self, new_pt):\n\t\tnew_pt = np.resize(new_point, (self.n_row, new_pt.shape[0]))\n\t\tdist = euclidean_distance(self.data[:,0:-1], new_pt)\n\n\t\treturn dist",
"def distance_to(self, p):\n closest_pt = self.closest_point_to(p)\n return np.linalg.norm(p - closest_pt)",
"def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)",
"def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5",
"def get_distance(point_1, point_2):\n result = ((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2) ** 0.5\n return result",
"def distance(point0, point1):\n if point0 is None or point1 is None:\n return None\n diff = np.subtract(point0, point1)\n return np.sqrt(diff[0] ** 2 + diff[1] ** 2)",
"def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)",
"def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance",
"def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))"
] |
[
"0.7559835",
"0.75238806",
"0.7506208",
"0.73734766",
"0.7328789",
"0.73241305",
"0.7315014",
"0.7296735",
"0.7279474",
"0.7258013",
"0.7245505",
"0.722329",
"0.7189812",
"0.7188848",
"0.7178517",
"0.7162702",
"0.71596414",
"0.7153017",
"0.7134312",
"0.71279836",
"0.70956284",
"0.70926195",
"0.70709836",
"0.704684",
"0.70215166",
"0.6994755",
"0.6988923",
"0.6949681",
"0.6943954",
"0.69384086"
] |
0.7669649
|
0
|
Gets right and left Lanelet of current_lanelet
|
def get_right_and_left_lanelet(self):
    if self.scenario is not None:
        # all lanelets of the lanelet network that contain the current position
        possible_lanelet_ids = self.scenario.lanelet_network.find_lanelet_by_position([np.array(list(self.current_pos))])[0]
        self.current_lanelet = None
        self.right_lanelet = None
        self.left_lanelet = None
        for lane_id in possible_lanelet_ids:
            self.current_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(lane_id)
            if self.current_lanelet is not None:
                # resolve the adjacent lanelets of the matched lanelet, if present
                if self.current_lanelet.adj_left is not None:
                    self.left_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(self.current_lanelet.adj_left)
                if self.current_lanelet.adj_right is not None:
                    self.right_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(self.current_lanelet.adj_right)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_lane(self):\n return self.lane",
"def _get_potential_right_way(self, lanelet):\n if lanelet.adj_right:\n if lanelet.adj_right_same_direction:\n potential_right_way = self.left_ways.get(lanelet.adj_right)\n else:\n potential_right_way = self.right_ways.get(lanelet.adj_right)\n if potential_right_way:\n adj_right = self.lanelet_network.find_lanelet_by_id(lanelet.adj_right)\n vertices = (\n adj_right.left_vertices\n if lanelet.adj_right_same_direction\n else adj_right.right_vertices[::-1]\n )\n if _vertices_are_equal(lanelet.right_vertices, vertices):\n return potential_right_way\n\n return None",
"def _get_potential_left_way(self, lanelet):\n if lanelet.adj_left:\n if lanelet.adj_left_same_direction:\n potential_left_way = self.right_ways.get(lanelet.adj_left)\n else:\n potential_left_way = self.left_ways.get(lanelet.adj_left)\n if potential_left_way:\n adj_left = self.lanelet_network.find_lanelet_by_id(lanelet.adj_left)\n vertices = (\n adj_left.right_vertices\n if lanelet.adj_left_same_direction\n else adj_left.left_vertices[::-1]\n )\n if _vertices_are_equal(lanelet.left_vertices, vertices):\n return potential_left_way\n\n return None",
"def right_or_left(self):\n self.scan()\n\n max = 0\n side = 'l'\n\n #analyze scan results\n for angle in self.scan_data:\n #RIGHT SIDE\n if angle < self.MIDPOINT:\n if self.scan_data[angle] > max:\n max = self.scan_data[angle]\n side = 'r'\n #LEFT SIDE\n else:\n if self.scan_data[angle] > max:\n max = self.scan_data[angle]\n side = 'l'\n\n return side",
"def get_left(self):\n return -self.l_motor.get()",
"def get_right(self):\n return self.right",
"def get_right(self):\n return self.__right",
"def left_right(steps):\n lengths = lens(steps)\n a_side = []\n b_side = []\n for i in range(len(lengths)):\n if i % 2 == 0:\n a_side.append(steps[i])\n else:\n b_side.append(steps[i])\n return a_side, b_side",
"def right(self):\n return self.left + self.width",
"def get_right(self):\n return self.r_motor.get()",
"def left(self):\n x, y = (self.loc[0] - 1, self.loc[1])\n\n if x < 0:\n return None # None\n\n return self.garden.cells[y][x]",
"def _get_right_hand_side(self, curr_inlet_area: float, curr_ma: float):\n velo_bit = \\\n self.input_p0 * curr_inlet_area * curr_ma / \\\n (self.gas_const * self.input_t0 / self.kappa) ** 0.5\n total_temp_ratio = 1 + (self.kappa - 1) / 2 * curr_ma ** 2\n exponent = -1 / 2 * (self.kappa + 1) / (self.kappa - 1)\n res = velo_bit * total_temp_ratio ** exponent\n return res",
"def getRight(self):\n return self.right",
"def right(self):\n return self.r",
"def get_left(self):\n return self.left",
"def left(self):\n return self.l",
"def right(self):\n\t\treturn self._right",
"def display_right_to_left(self):\n return self.container['display_right_to_left']",
"def left_rotation(self, ang_vel):\n vel = self.om_right_max * self.R - ang_vel * self.L\n om_left = (vel - ang_vel * self.L) / self.R -1\n return vel, om_left",
"def _get_shared_last_nodes_from_other_lanelets(\n self, lanelet: Lanelet\n ) -> Tuple[str, str]:\n if lanelet.successor:\n for lanelet_id in lanelet.successor:\n last_left_node, last_right_node = self.first_nodes.get(\n lanelet_id, (None, None)\n )\n if last_left_node:\n return last_left_node, last_right_node\n for succ_id in lanelet.successor:\n succ = self.lanelet_network.find_lanelet_by_id(succ_id)\n for pred_id in succ.predecessor:\n last_left_node, last_right_node = self.last_nodes.get(\n pred_id, (None, None)\n )\n if last_left_node:\n return last_left_node, last_right_node\n\n return None, None",
"def get_left(self):\n return self.__left",
"def getupperleft(self):\n return (self.rect.x, self.rect.y)",
"def right(self):\n return self._right",
"def right(self):\n return self._right",
"def getL(self):\r\n return self.L",
"def getLeft(self):\n return self.left",
"def _convert_lanelet(self, lanelet: Lanelet):\n\n # check if there are shared ways\n right_way_id = self._get_potential_right_way(lanelet)\n left_way_id = self._get_potential_left_way(lanelet)\n\n left_nodes, right_nodes = self._create_nodes(lanelet, left_way_id, right_way_id)\n\n self.first_nodes[lanelet.lanelet_id] = (left_nodes[0], right_nodes[0])\n self.last_nodes[lanelet.lanelet_id] = (left_nodes[-1], right_nodes[-1])\n\n if not left_way_id:\n left_way = Way(self.id_count, *left_nodes)\n self.osm.add_way(left_way)\n left_way_id = left_way.id_\n if not right_way_id:\n right_way = Way(self.id_count, *right_nodes)\n self.osm.add_way(right_way)\n right_way_id = right_way.id_\n\n self.left_ways[lanelet.lanelet_id] = left_way_id\n self.right_ways[lanelet.lanelet_id] = right_way_id\n self.osm.add_way_relation(WayRelation(self.id_count, left_way_id, right_way_id))",
"def left_or_right(self):\n #traversal\n left_total = 0\n left_count = 0\n right_total = 0\n right_count = 0\n self.scan()\n for ang, dist in self.scan_data.items():\n if ang < self.MIDPOINT:\n right_total += dist\n right_count += 1\n print(\"Angle: %d // dist: %d // right_count: %d\" % (ang, dist, right_count))\n else:\n left_total += dist\n left_count += 1\n left_avg = left_total / left_count\n right_avg = right_total / right_count\n if left_avg > right_avg:\n self.turn_by_deg(-45)\n else:\n self.turn_by_deg(45)\n # if robot is facing the wrong way it will turn it around\n self.exit_bias()",
"def right(self):\n return self.__r",
"def right_rotation(self, ang_vel):\n vel = self.om_left_max * self.R + ang_vel * self.L\n om_right = (vel + ang_vel * self.L) / self.R \n return vel, om_right"
] |
[
"0.69697505",
"0.6693879",
"0.65481895",
"0.6400882",
"0.6338087",
"0.6309185",
"0.6169792",
"0.61585784",
"0.61184555",
"0.6088736",
"0.60837555",
"0.60834515",
"0.6081923",
"0.60523117",
"0.6044031",
"0.6035172",
"0.60120726",
"0.59962624",
"0.5978671",
"0.59441257",
"0.589565",
"0.5876493",
"0.58744735",
"0.58744735",
"0.5865079",
"0.58209443",
"0.5796647",
"0.57756007",
"0.57753843",
"0.57734597"
] |
0.879685
|
0
|
Check_lanelet_free. The function is registered as a ROSService. The service definition file is located in
|
def check_lanelet_free(self, req):
lanelet_id = req.lanelet_id
if lanelet_id != 0:
lanelet = self.scenario.lanelet_network.find_lanelet_by_id(lanelet_id)
if self.points is None:
return False
points = list(self.points)
if len(points) == 0:
return False
transformed_lidar_poses = self.transform_lidar_into_map_coords(points)
if lanelet is not None:
filtered_poses = self.filter_lidar_poses(lanelet, transformed_lidar_poses)
if len(filtered_poses) > 0:
dist = self.calc_dist(filtered_poses)
if dist > 0 and dist < self.max_dist_lidar:
return False
else:
return True
else:
# if there are no points on lanelet, checks successor
filtered_poses = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(lanelet.successor[0]), transformed_lidar_poses)
if len(filtered_poses) > 0:
dist = self.calc_dist(filtered_poses)
if dist > 0 and dist < self.max_dist_lidar:
return False
else:
return True
else:
# if there are no points on lanelet and lanelet.successor, checks predecessor
filtered_poses = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(lanelet.predecessor[0]), transformed_lidar_poses)
if len(filtered_poses) > 0:
dist = self.calc_dist(filtered_poses)
if dist > 0 and dist < self.max_dist_lidar:
return False
else:
return True
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()",
"def XCAFDoc_ShapeTool_IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)",
"def check_service(self, url: str, check_wfs_member: bool = False, check_image: bool = False):\n service_status = self.check_status(url, check_wfs_member=check_wfs_member, check_image=check_image)\n if service_status.success is True:\n self.handle_service_success(service_status)\n else:\n self.handle_service_error(service_status)",
"def test_live_migration_dest_check_service_works_correctly(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n s_ref = self._create_compute_service(host='somewhere',\n memory_mb_used=5)\n\n ret = self.scheduler.driver._live_migration_dest_check(self.context,\n i_ref,\n 'somewhere',\n False)\n self.assertTrue(ret is None)\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])",
"def verifyActionCenterFirewall():\n pass",
"def check_fleet(self):\n if len(self.ships) > 0:\n response = False\n for ship in self.ships:\n if ship.afloat == True:\n response = True\n return response",
"def check_stellar_service(self):\n _THREEFOLDFOUNDATION_TFTSTELLAR_SERVICES = {\n \"TEST\": \"https://testnet.threefold.io/threefoldfoundation/transactionfunding_service/fund_transaction\",\n \"STD\": \"https://tokenservices.threefold.io/threefoldfoundation/transactionfunding_service/fund_transaction\",\n }\n _HORIZON_NETWORKS = {\"TEST\": \"https://horizon-testnet.stellar.org\", \"STD\": \"https://horizon.stellar.org\"}\n\n services_status = True\n\n # urls of services according to identity explorer\n if \"testnet\" in j.core.identity.me.explorer_url:\n stellar_url = _HORIZON_NETWORKS[\"TEST\"]\n tokenservices_url = _THREEFOLDFOUNDATION_TFTSTELLAR_SERVICES[\"TEST\"]\n else:\n stellar_url = _HORIZON_NETWORKS[\"STD\"]\n tokenservices_url = _THREEFOLDFOUNDATION_TFTSTELLAR_SERVICES[\"STD\"]\n\n # check stellar service\n try:\n j.tools.http.get(stellar_url)\n except:\n services_status = False\n\n # check token services\n try:\n j.tools.http.get(tokenservices_url)\n except:\n services_status = False\n\n return services_status",
"def test_BiplaneRegistration1(self):\n\n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import urllib\n downloads = (\n ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\n )\n\n for url,name,loader in downloads:\n filePath = slicer.app.temporaryPath + '/' + name\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\n logging.info('Requesting download %s from %s...\\n' % (name, url))\n urllib.urlretrieve(url, filePath)\n if loader:\n logging.info('Loading %s...' % (name,))\n loader(filePath)\n self.delayDisplay('Finished with download and loading')\n\n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = BiplaneRegistrationLogic()\n self.assertIsNotNone( logic.hasImageData(volumeNode) )\n self.delayDisplay('Test passed!')",
"def check_availability(self):\n pass",
"def _check_rac_listener(cfg, warning=None, critical=None):\n bin_name = \"lsnrctl\"\n _check_attrs(cfg, [\"sid\", \"oh\"])\n bin_name = os.path.join(cfg.oh, \"bin\", bin_name)\n regex = re.compile(r'Instance \"{0}\\d*\", status READY, has 1 handler\\(s\\) for this service...'.format(cfg.sid))\n\n try:\n os.environ[\"ORACLE_HOME\"] = cfg.oh\n args = bin_name + \" status\"\n cp = subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE)\n if cp.stdout is None:\n print(\"None result from lsnrctl status\")\n return UNKNOWN\n out = str(cp.stdout, \"utf-8\")\n ready = False\n msg = \"Service {0} has 0 listener status is READY\".format(cfg.sid)\n for l in out.split(os.linesep):\n if regex.search(l.lstrip().rstrip()):\n ready = True\n msg = l\n break\n\n print(msg)\n return OK if ready else CRITICAL\n except subprocess.CalledProcessError as err:\n print(err.output)\n return UNKNOWN",
"def test_get_eligible_shipment_services_old(self):\n pass",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()",
"def railway_service(osm_path): \n return retrieve(osm_path,'lines',['railway','service'],**{\"service\":[\" IS NOT NULL\"]})",
"def print_service_available():\n if WithingsDataManager.service_available is not True:\n _LOGGER.info(\"Looks like the service is available again\")\n WithingsDataManager.service_available = True\n return True",
"def tr_check_availability(agent_directory, agent_full_name, slot_range):\r\n tr_create_booking_register(agent_directory, agent_full_name) # CHANGE THIS WHEN POSSIBLE. IT IS ERRASING ALL BOOKINGS. NOW THE SYSTEM IS NOT CONSTRAINT IN TR RESOURCES.\r\n tr_booking_df = pd.read_csv(f'{agent_directory}''/'f'{agent_full_name}_booking.csv', header=0, delimiter=\",\", engine='python')\r\n tr_booking_df['booking_type'] = tr_booking_df['booking_type'].fillna(\"\")\r\n # Creates 2 lists: booked_slots_list & free_slots_list and checks availability.\r\n free_slots_list = []\r\n booked_slots_list = []\r\n prebooked_slots_list = []\r\n for x in slot_range:\r\n if tr_booking_df.loc[x - 1, 'booking_type'] == \"pre-book\":\r\n prebooked_slots_list.append(x)\r\n elif tr_booking_df.loc[x - 1, 'booking_type'] == \"booked\":\r\n booked_slots_list.append(x)\r\n else:\r\n free_slots_list.append(x)\r\n # Checks availability\r\n if len(booked_slots_list) >= 1:\r\n tr_msg_ca_body = \"negative\"\r\n else:\r\n tr_msg_ca_body = \"positive\"\r\n return tr_msg_ca_body",
"async def _pilot_fleet(self, fleet_id: int) -> None:\n raise NotImplementedError()",
"def IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)",
"def ping():\n \"\"\"Get the estimator object for this instance, loading it if it's not already loaded.\"\"\"\n checker = os.listdir('/opt/ml')\n health = checker is not None # health check here\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')",
"def unable_service(req):\n\tglobal active_\n \n\tactive_ = req.data\n\tres = SetBoolResponse()\n\tres.success = True\n\tres.message = 'Done!'\n\n\treturn res",
"def test_get_eligible_shipment_services(self):\n pass",
"def is_available():",
"def check_flights(booking_token):\n parameters = {'v': 2, # default\n 'pnum': 1, # passenger number\n 'bnum': 0, # number of bags\n 'booking_token': booking_token\n }\n response = requests.get(CHECK_FLIGHTS_ENGINE, params=parameters).json()\n print(response)\n checked = response['flights_checked']\n invalid = response['flights_invalid']\n return checked, invalid",
"def __CheckFilesAndData(self):\n APIChoose = tasks.CheckPointFactory_Connection(self.ServerInfo['MgmtServerData'].MgmtR80ApiVersion)\n #conn = tasks.CheckPointAPI(self.ServerInfo['MgmtServerData'].ServerIP,\n # self.ServerInfo['MgmtServerData'].MgmtPort)\n conn = APIChoose(self.ServerInfo['MgmtServerData'].ServerIP, self.ServerInfo['MgmtServerData'].MgmtPort)\n fileTCPPorts = Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathTCPPorts)\n fileUDPPorts= Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathUDPPorts)\n fileObjects = Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathNetObjects)\n fileNetworks = Path(self.ServerInfo['MgmtObjects'].MGMTServerFilePathNetworksObjects)\n #Si no existen los archivos\n print(fileUDPPorts)\n conn.ChkpLogin(self.ServerInfo['MgmtServerUser'].R80User, self.ServerInfo['MgmtServerUser'].R80Password)\n if not(fileTCPPorts.is_file() and fileObjects.is_file() \\\n and fileUDPPorts.is_file() and fileNetworks.is_file()):\n #ENTRA CON TRUE\n fileTCPPorts.touch()\n fileObjects.touch()\n fileUDPPorts.touch()\n fileNetworks.touch()\n #tcpPorts = json.dumps(conn.ChkpShowServicesTCP())\n tcpPorts = json.dumps(conn.ChkpShowFullServicesTCP())\n udpPorts = json.dumps(conn.ChkpShowFullServicesUDP())\n fileTCPPorts.write_text(tcpPorts)\n fileUDPPorts.write_text(udpPorts)\n hosts = json.dumps(conn.ChkpShowFullHosts())\n fileObjects.write_text(hosts)\n networks = json.dumps(conn.ChkpShowFullNetworks())\n fileNetworks.write_text(networks)\n else:\n #Existen los archivos tenemos que verificar la ultima version de la API si no actualizarlos\n DBChkpVersion = self.ServerInfo['MgmtServerData'].LastPublishSession\n RemoteVersion = conn.ChkpShowLastPublishedSession()\n RemoteVersion = RemoteVersion['publish-time']['posix']\n #Si las versiones de Base de datos son distintas vamos por todo nuevamente\n if DBChkpVersion != RemoteVersion:\n print('Versiones diferentes actualizando la versiones')\n #tcpPorts = json.dumps(conn.ChkpShowServicesTCP())\n tcpPorts = json.dumps(conn.ChkpShowFullServicesTCP())\n udpPorts = json.dumps(conn.ChkpShowFullServicesUDP())\n fileTCPPorts.write_text(tcpPorts)\n fileUDPPorts.write_text(udpPorts)\n hosts = json.dumps(conn.ChkpShowFullHosts())\n fileObjects.write_text(hosts)\n networks = json.dumps(conn.ChkpShowFullNetworks())\n fileNetworks.write_text(networks)\n self.ServerInfo['MgmtServerData'].LastPublishSession = RemoteVersion\n self.ServerInfo['MgmtServerData'].save()\n else:\n print('Mismas versiones nada que modificar')\n conn.LogOutCheckPoint()",
"def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.add_landmarks\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.add_landmarks\"] = False\n\n EKFSLAM.EKFSLAM.add_landmarks(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.add_landmarks\"], \"The function uses the solution\"",
"def test_twms_get_tile_service(self):\n ref_hash = '7555d5ad3cca96aa8cbc8a36f5e04f19'\n req_url = r'http://localhost/reproject/test/twms/twms.cgi?Request=GetTileService'\n if DEBUG:\n print('\\nTesting TWMS GetTileService')\n print('URL: ' + req_url)\n response = get_url(req_url)\n\n # Check if the response is valid XML\n try:\n XMLroot = ElementTree.XML(response.read())\n XMLdict = XmlDictConfig(XMLroot)\n xml_check = True\n except:\n xml_check = False\n self.assertTrue(xml_check, 'GetTileService response is not a valid XML file. URL: ' + req_url)\n\n refXMLtree = ElementTree.parse(os.path.join(os.getcwd(), 'mod_reproject_test_data/GetTileService.xml'))\n refXMLroot = refXMLtree.getroot()\n refXMLdict = XmlDictConfig(refXMLroot)\n\n check_result = check_dicts(XMLdict, refXMLdict)\n self.assertTrue(check_result, 'TWMS Get GetTileService Request does not match what\\'s expected. URL: ' + req_url)",
"def status_pf(module):\n rc, out, err = module.run_command(['service', 'pf', 'status'])\n\n # Obtain current status of pf\n if 'Enabled' in out:\n return True\n else:\n return False",
"def check_wfs(self, service: Service):\n wfs_helper = WfsHelper(service)\n version = service.service_type.version\n\n if wfs_helper.get_capabilities_url is not None:\n self.check_get_capabilities(wfs_helper.get_capabilities_url)\n\n if version == OGCServiceVersionEnum.V_2_0_0.value:\n wfs_helper.set_2_0_0_urls()\n if wfs_helper.list_stored_queries is not None:\n self.check_service(wfs_helper.list_stored_queries)\n\n if version == OGCServiceVersionEnum.V_2_0_2.value:\n wfs_helper.set_2_0_2_urls()\n if wfs_helper.list_stored_queries is not None:\n self.check_service(wfs_helper.list_stored_queries)",
"def check_violation(route, vehicle_type):\r\n if len(route) == 2: # [0, 0] route\r\n return True, 0, 0, 0\r\n else:\r\n accu_res = [0, 0, 0] # 0-leaving time, 1-accumulated distance, 2-volume\r\n if vehicle_type == 2:\r\n veh_cap = small_veh\r\n elif vehicle_type == 3:\r\n veh_cap = medium_veh\r\n elif vehicle_type == 5:\r\n veh_cap = large_veh\r\n else:\r\n veh_cap = large_veh\r\n print('Input wrong vehicle type!', vehicle_type)\r\n # small_veh = [1, 12, 10, 400000, 0.012, 200]\r\n fixed_cost = veh_cap[5]\r\n trans_cost = 0\r\n # wait_cost = 0\r\n if time_mat[0, route[1]] < num_timez[route[1]][0]:\r\n accu_res[0] = num_timez[route[1]][0] - time_mat[0, route[1]] # vehicle leaving depot time\r\n depart_time = accu_res[0] # departing from depot time\r\n else:\r\n depart_time = 0\r\n for i in range(len(route) - 1):\r\n last_cust = route[i]\r\n curr_cust = route[i+1]\r\n # checking leaving time\r\n arr_time = accu_res[0] + time_mat[last_cust, curr_cust]\r\n if arr_time < num_timez[curr_cust][0]:\r\n accu_res[0] = num_timez[curr_cust][0] + oprt_t\r\n wait_time = num_timez[curr_cust][0] - arr_time\r\n # wait_cost += (wait_time / 60. * wait_cost0)\r\n elif arr_time <= num_timez[curr_cust][1]:\r\n accu_res[0] = arr_time + oprt_t\r\n else:\r\n # print('Infeasible route!(Service Time Error.)')\r\n return False, 1000000, 0, 0\r\n\r\n # checking vehicle max distance\r\n trans_cost += (dist_mat[last_cust, curr_cust] * veh_cap[4])\r\n\r\n accu_res[1] += dist_mat[last_cust, curr_cust]\r\n\r\n if accu_res[0] - oprt_t - depart_time > veh_cap[3]:\r\n # print('Infeasible route!(Max Time Error.)')\r\n return False, 1000000, 0, 0\r\n\r\n # checking vehicle max volume\r\n accu_res[2] += (num_demd[curr_cust][0] * bskt_vol + num_demd[curr_cust][1] * trsf_vol + (num_demd[curr_cust][2]\r\n + num_demd[curr_cust][3]) * milk_vol + num_demd[curr_cust][4] * paper_bskt)\r\n\r\n if accu_res[2] > veh_cap[2]:\r\n # print('Infeasible route!(Max Weight/Volume Error.)', accu_res[2])\r\n return False, 1000000, 0, 0\r\n route_cost = fixed_cost + accu_res[1] * veh_cap[4]\r\n route_dist = accu_res[1]\r\n route_time = accu_res[0] - oprt_t - depart_time\r\n # print fixed_cost, trvl_cost, trvl_dist\r\n return True, route_cost, route_time, depart_time + 600",
"def test_ipam_vrfs_delete(self):\n pass"
] |
[
"0.53509796",
"0.49458748",
"0.49050424",
"0.48977044",
"0.48960072",
"0.488118",
"0.48679265",
"0.48391494",
"0.47779852",
"0.47708476",
"0.47547635",
"0.47248513",
"0.47162852",
"0.46940163",
"0.4656274",
"0.4647027",
"0.4643872",
"0.4643811",
"0.4625",
"0.46155718",
"0.46095392",
"0.46045116",
"0.4603134",
"0.45976663",
"0.45925093",
"0.45774683",
"0.45679122",
"0.45647123",
"0.4555779",
"0.45511448"
] |
0.67785156
|
0
|
Function to extract rider level temporal patterns
|
def _extract_temporal_patterns(self):
    # extract hour and day of week
    self.df_transaction['hour'] = self.df_transaction['trxtime'].apply(lambda x: x.hour)
    self.df_transaction['day_of_week'] = self.df_transaction['trxtime'].apply(lambda x: x.dayofweek) # monday=0, sunday=6
    # counting daily pattern by rider ID
    groups = self.df_transaction.groupby(['riderID', 'day_of_week', 'hour']).agg(['count']).iloc[:, 0]
    df_group = pd.DataFrame(groups).reset_index()
    df_group.columns = ['riderID', 'day_of_week', 'hour', 'count']
    rider_id = self.df_transaction['riderID'].unique()
    N = len(rider_id)
    # construct key dataframe to merge with grouped df
    # this key_df makes sure that each rider has 168 hours
    day_id = np.array([x for x in range(0, 7)])
    day = [x for x in np.repeat(day_id, [24], axis=0)] * N
    hour = [x for x in range(0, 24)] * 7 * N
    hr_col_names = [i for i in range(1, 169)] * N
    riders = [x for x in np.repeat(rider_id, [168], axis=0)]
    key_df = pd.DataFrame(data={'riderID': riders, 'day_of_week': day, 'hour': hour, 'hr_col_names': hr_col_names})
    # left join key_df and group_df to make sure all riders have 168 hours
    # the nan's represent where the rider in df_group has no count information in that hour
    join_df = pd.merge(key_df, df_group, how='left', on=['riderID', 'day_of_week', 'hour']).replace({np.nan: 0})
    df_rider_temporal_count = join_df.pivot(index='riderID', columns='hr_col_names', values='count').reset_index()
    df_rider_temporal_count.reset_index(drop=True, inplace=True)
    # add hr_ prefix to temporal pattern
    new_col_names = [(0, 'riderID')]
    hr_col_names = [(i, 'hr_' + str(i)) for i in df_rider_temporal_count.iloc[:, 1:].columns.values]
    new_col_names.extend(hr_col_names)
    df_rider_temporal_count.rename(columns=dict(new_col_names), inplace=True)
    # add weekend vs weekday count/proportion for higher level features
    weekday_col_names = ['hr_' + str(i) for i in range(1, 121)]
    weekend_col_names = ['hr_' + str(i) for i in range(121, 169)]
    df_rider_temporal_count['weekday'] = df_rider_temporal_count[weekday_col_names].sum(axis=1)
    df_rider_temporal_count['weekend'] = df_rider_temporal_count[weekend_col_names].sum(axis=1)
    # collapse 168 hourly pattern into 24 hr weekend and 24 hr weekday (48 total) + 2 hour max
    wkday_24_hr_col_names = ['wkday_24_' + str(i) for i in range(1, 25)]
    wkend_24_hr_col_names = ['wkend_24_' + str(i) for i in range(1, 25)]
    weekday = np.array(df_rider_temporal_count[weekday_col_names])
    weekday = weekday.reshape((len(weekday), 5, 24)).sum(axis=1)
    weekday = pd.DataFrame(weekday, columns=wkday_24_hr_col_names)
    weekend = np.array(df_rider_temporal_count[weekend_col_names])
    weekend = weekend.reshape((len(weekend), 2, 24)).sum(axis=1)
    weekend = pd.DataFrame(weekend, columns=wkend_24_hr_col_names)
    hr_col_names = ['hr_' + str(i) for i in range(1, 169)]
    df_rider_temporal_count = pd.concat([df_rider_temporal_count, weekday, weekend], axis=1)
    df_rider_temporal_count['hr_row_sum'] = df_rider_temporal_count[hr_col_names].iloc[:, :].sum(axis=1)
    df_rider_temporal_count['flex_wkday_24'] = weekday.max(axis=1).div(df_rider_temporal_count['hr_row_sum'])
    df_rider_temporal_count['flex_wkend_24'] = weekend.max(axis=1).div(df_rider_temporal_count['hr_row_sum'])
    # get the top 2 frequency hr in weekday
    wkday_rank = weekday.apply(np.argsort, axis=1)
    ranked_wkday_cols = weekday.columns.to_series()[wkday_rank.values[:,::-1][:,:2]]
    df_rider_temporal_count['max_wkday_24_1'] = pd.DataFrame(ranked_wkday_cols[:, 0])[0].apply(lambda x: str(x).split('_')[-1])
    df_rider_temporal_count['max_wkday_24_2'] = pd.DataFrame(ranked_wkday_cols[:, 1])[0].apply(lambda x: str(x).split('_')[-1])
    df_rider_temporal_count['max_wkend_24_1'] = weekend.idxmax(axis=1).apply(lambda x: x.split('_')[-1])
    return df_rider_temporal_count
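
A minimal sketch of the 168-slot weekly profile the function above builds, under the assumption that each (day_of_week, hour) pair maps to slot day_of_week*24 + hour + 1 (Monday 00:00 in hr_1, Sunday 23:00 in hr_168), which is what the key_df merge and pivot produce. The rider IDs and counts are made up for illustration, and the reindex step stands in for the merge + replace({np.nan: 0}) above.

import pandas as pd

# Made-up per-(rider, day, hour) transaction counts.
counts = pd.DataFrame({
    'riderID': ['r1', 'r1', 'r2'],
    'day_of_week': [0, 5, 2],   # Monday, Saturday, Wednesday
    'hour': [8, 14, 17],
    'count': [3, 1, 2],
})
# Slot index matching the hr_1..hr_168 layout of key_df above.
counts['slot'] = counts['day_of_week'] * 24 + counts['hour'] + 1

# Reindex against all 168 slots per rider so hours with no activity become 0.
all_slots = pd.MultiIndex.from_product(
    [counts['riderID'].unique(), range(1, 169)], names=['riderID', 'slot'])
profile = (counts.set_index(['riderID', 'slot'])['count']
           .reindex(all_slots, fill_value=0)
           .unstack('slot'))
profile.columns = ['hr_' + str(c) for c in profile.columns]
print(profile.loc['r1', 'hr_9'])   # Monday 08:00 -> slot 0*24 + 8 + 1 = 9 -> 3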
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __extract_pattern_nodes(graph):\n tp_nodes = graph.subjects(RDF.type, AGORA.TriplePattern)\n for tpn in tp_nodes:\n subject = list(graph.objects(tpn, AGORA.subject)).pop()\n predicate = list(graph.objects(tpn, AGORA.predicate)).pop()\n obj = list(graph.objects(tpn, AGORA.object)).pop()\n subject_str = list(graph.objects(subject, RDFS.label)).pop().toPython()\n predicate_str = graph.qname(predicate)\n if (obj, RDF.type, AGORA.Variable) in graph:\n object_str = list(graph.objects(obj, RDFS.label)).pop().toPython()\n else:\n object_str = list(graph.objects(obj, AGORA.value)).pop().toPython()\n __plan_patterns[tpn] = '{} {} {}'.format(subject_str, predicate_str, object_str)",
"def paths_to_ymd_string_list(paths, pattern):\n se = re.compile(pattern).search\n return [ymd_tuple_to_string(m.group(1,2,3)) for p in paths for m in [se(p)] if m]",
"def pattern_to_notes(pattern):\n pattern_notes = []\n for (i, m) in zip(range(0, 6), FRET_POSITION.findall(pattern)):\n pos = m[0].strip()\n if pos != 'x':\n pattern_notes.append(FRET_NOTES[i][int(pos) % 12])\n return pattern_notes",
"def get_words_with_end_times(subtitle_file_path):\n\n with open(subtitle_file_path) as subtitle_file:\n\n # Remove first 4 lines (containing meta information)\n for j in range(0, 4):\n subtitle_file.readline()\n\n text = subtitle_file.read()\n\n # Check if the subtitle file supports individual word times\n if text.find(\"<c>\") == -1:\n print(\"Individual word times are not supported for file: \" + subtitle_file_path)\n return None, None\n\n chunks = text.split(\" \\n\\n\") # split into chunks for easier data processing\n\n words = list()\n word_end_times = list()\n\n for chunk in chunks:\n chunk_lines = chunk.split(\"\\n\")\n words_line = chunk_lines[2]\n\n words_in_chunk = []\n word_end_times_in_chunk = []\n\n first_word_end_index = words_line.find(\"<\")\n if first_word_end_index != -1:\n first_word = words_line[\n 0:first_word_end_index] # get the first word (can't be found using method below)\n\n words_in_chunk = re.findall(\"<c> [\\S]*</c>\", words_line) # get all words\n words_in_chunk = [w[4:-4] for w in words_in_chunk] # strip <c> and <c/>\n\n word_end_times_in_chunk = re.findall(\"<\\d\\d:\\d\\d:\\d\\d.\\d\\d\\d>\", words_line) # get all word end times\n word_end_times_in_chunk = [t[1:-1] for t in word_end_times_in_chunk] # strip < and >\n else:\n # Only one word\n first_word = words_line\n\n last_time = chunk_lines[4][17:29] # end time for the last word\n\n words_in_chunk.insert(0, first_word)\n word_end_times_in_chunk.append(last_time)\n\n words.extend(words_in_chunk)\n word_end_times.extend(word_end_times_in_chunk)\n\n # For the last chunk we have to get the word end time from somewhere else\n first_line_in_last_chunk = chunks[-1].split(\"\\n\")[0]\n last_time = first_line_in_last_chunk[17:29]\n word_end_times.pop()\n word_end_times.append(last_time)\n\n if len(words) != len(word_end_times):\n print(\"Warning: word count does not match times count\")\n\n return words, word_end_times",
"def parse_tensorboard_time_series_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)/timeSeries/(?P<time_series>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}",
"def get_pattern():\n return '-activity'",
"def patterns() -> List[Dict[str, Any]]:\n patterns = [\n {\"label\": \"DRUG\", \"pattern\": \"Zithromax\", \"type\": \"fuzzy\", \"id\": \"Antibiotic\"},\n {\"label\": \"GPE\", \"pattern\": \"Mahwahe\", \"type\": \"fuzzy\"},\n {\"label\": \"GPE\", \"pattern\": \"Mahwah\", \"type\": \"fuzzy\"},\n {\n \"label\": \"NAME\",\n \"pattern\": \"Grant Andersen\",\n \"type\": \"fuzzy\",\n \"kwargs\": {\"fuzzy_func\": \"token_sort\"},\n \"id\": \"Developer\",\n },\n {\n \"label\": \"NAME\",\n \"pattern\": \"Garth Andersen\",\n \"type\": \"fuzzy\",\n \"kwargs\": {\"fuzzy_func\": \"token_sort\"},\n \"id\": \"Developer\",\n },\n {\n \"label\": \"STREET\",\n \"pattern\": \"street_addresses\",\n \"type\": \"regex\",\n \"kwargs\": {\"predef\": True},\n },\n {\n \"label\": \"GPE\",\n \"pattern\": \"(?i)[U](nited|\\\\.?) ?[S](tates|\\\\.?)\",\n \"type\": \"regex\",\n \"id\": \"USA\",\n },\n {\"label\": \"GPE\", \"pattern\": \"(?:USR){e<=1}\", \"type\": \"regex\", \"id\": \"USA\"},\n {\"label\": \"GPE\", \"pattern\": \"(?:USSR){d<=1, s<=1}\", \"type\": \"regex\"},\n {\n \"label\": \"BAND\",\n \"pattern\": [{\"LOWER\": {\"FREGEX\": \"(converge){e<=1}\"}}],\n \"type\": \"token\",\n },\n {\n \"label\": \"BAND\",\n \"pattern\": [\n {\"TEXT\": {\"FUZZY\": \"Protest\"}},\n {\"IS_STOP\": True},\n {\"TEXT\": {\"FUZZY\": \"Hero\"}},\n ],\n \"type\": \"token\",\n \"id\": \"Metal\",\n },\n ]\n return patterns # type: ignore",
"def _get_wild_tasks(self, pattern):\n wild_list = []\n for t_name in self._def_order:\n if fnmatch.fnmatch(t_name, pattern):\n wild_list.append(t_name)\n return wild_list",
"def _get_temporal_siblings(self, x, y, path, pattern):\n\t\tpattern = \"%s/*/%s\" % (path, pattern)\n\n\t\tfor fn in glob.iglob(pattern):\n\t\t\t# parse out the time, construct cell ID\n\t\t\t(kind, _) = fn.split('/')[-2:]\n\t\t\tt = self.t0 if kind == 'static' else float(kind[1:])\n\n\t\t\tcell_id = self._cell_id_for_xyt(x, y, t)\n\t\t\tyield cell_id",
"def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns",
"def timeviewUrls(pattern, view, kwargs=None, name=None):\n results = [(pattern, view, kwargs, name)]\n tail = ''\n mtail = re.search('(/+\\+?\\\\*?\\??\\$?)$', pattern)\n if mtail:\n tail = mtail.group(1)\n pattern = pattern[:len(pattern) - len(tail)]\n for filter in ('/(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})/' + \\\n '(?P<hour>\\d\\d)-(?P<minute>\\d\\d)',\n '/(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})'):\n results += [(pattern + filter + tail, view, kwargs)]\n return results",
"def process_time_string(timestr):\n timestr = timestr.strip()\n toks = timestr.split('+')\n timeslices = []\n for t in toks:\n tm = t.strip()\n mobj = re.search('\\\\*', tm)\n if mobj == None:\n timeslices += [int(tm)]\n else:\n tms = tm.split('*')\n timeslices += int(tms[0]) * [int(tms[1])]\n\n return timeslices",
"def time_features_from_frequency_str(cls, freq_str: str) -> List[str]:\n\n features_by_offsets = {\n offsets.YearBegin: [],\n offsets.YearEnd: [],\n offsets.MonthBegin: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n ],\n offsets.MonthEnd: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n ],\n offsets.Week: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\",\n ],\n offsets.Day: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n ],\n offsets.BusinessDay: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n ],\n offsets.Hour: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n \"Hour\",\n ],\n offsets.Minute: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n \"Hour\",\n \"Minute\",\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return feature\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y, YS - yearly\n alias: A\n M, MS - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)",
"def get_time_intervals(subtitle, regex=r\"\\S+\"):\n # module webvtt only gets the line-captions, not individual words, so it's time for some regex!\n # The time corresponding to a word is when that word *starts*\n # words = (match.group(\"word\") for match in WORD_REGEX.finditer(subtitle))\n time_words = [\n (m.group(\"time1\") or m.group(\"time2\"), m.group(\"word1\") or m.group(\"word2\"))\n for m in TIME_WORD_REGEX.finditer(subtitle)\n ]\n # Incorrect format, which would lead to trouble zipping\n if len(time_words) == 0:\n return []\n times, words = zip(*time_words)\n # Quickly search lines starting from the end\n final_time = next(match.group(\"time\") for line in reversed(subtitle.split(\"\\n\")) for match in [re.search(FINAL_TIME_REGEX, line)] if match)\n # timestamps[i] is the time the word starts at if i is the index of the start of a word\n timestamps = {}\n word_indices = {}\n # Build search_str while computing time_stamps by adding one word and one space at a time\n search_str = ''\n words = list(words)\n last_time = 0\n for j, (word, time) in enumerate(zip(words, times)):\n i = len(search_str)\n time = parse_time(time)\n timestamps[i] = time\n if time - last_time > 10:\n search_str += '[ … ] '\n last_time = time\n word_indices[i] = j\n search_str += word + ' '\n i = len(search_str)\n timestamps[i] = final_time\n word_indices[i] = len(words)\n # Trim off the final space\n # Inconsistent capitalization, so just lowercase\n search_str = search_str[:-1].lower()\n for match in re.finditer(regex, search_str):\n start, end = match.span()\n start_index = max(i for i in timestamps if i<=start)\n end_index = min(i for i in timestamps if i>end)\n yield Interval(\n timestamps[start_index],\n timestamps[end_index],\n word_indices[start_index],\n word_indices[end_index],\n match.group(0)\n )",
"def patterns(self) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self.fuzzy_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern.text, \"type\": \"fuzzy\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, patterns in self.regex_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern, \"type\": \"regex\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n return all_patterns",
"def __extract_patterns_and_spaces(self):\n\n def __decorate_nodes(nodes, space):\n \"\"\"\n Performs a backward search from a list of pattern nodes and assigns a set of search spaces\n to all encountered nodes.\n :param nodes: List of pattern nodes that belongs to a search space\n :param space: List of search space id\n :return:\n \"\"\"\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)\n\n # Extract all search spaces in the plan and build a dictionary of subjects-to-ignore per each of them.\n # Ignored subjects are those that won't be dereferenced due to a explicit graph pattern (object) filter,\n # e.g. ?s doap:name \"jenkins\" -> All ?s that don't match the filter will be ignored.\n self.__spaces = set(self.__plan_graph.subjects(RDF.type, AGORA.SearchSpace))\n self.__subjects_to_ignore = dict([(sp, set([])) for sp in self.__spaces])\n\n patterns = list(self.__plan_graph.subjects(RDF.type, AGORA.TriplePattern))\n for tp in patterns:\n # A triple pattern belongs to a UNIQUE search space\n space = list(self.__plan_graph.subjects(AGORA.definedBy, tp)).pop()\n self.__patterns[tp] = {'space': space}\n\n # Depending on the format of each triple pattern (either '?s a Concept' or '?s prop O'),\n # it is required to extract different properties.\n tp_pred = list(self.__plan_graph.objects(tp, predicate=AGORA.predicate)).pop()\n\n if tp_pred == RDF.type: # ?s a Concept\n self.__patterns[tp]['type'] = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n try:\n check_type = list(self.__plan_graph.objects(tp, predicate=AGORA.checkType)).pop().toPython()\n except IndexError:\n check_type = True\n self.__patterns[tp]['check'] = check_type\n else: # ?s prop O\n self.__patterns[tp]['property'] = tp_pred\n tp_obj = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n if (tp_obj, RDF.type, AGORA.Literal) in self.__plan_graph: # In case O is a Literal\n self.__patterns[tp]['filter_object'] = list(self.__plan_graph.objects(tp_obj, AGORA.value)).pop()\n elif isinstance(tp_obj, URIRef):\n self.__patterns[tp]['filter_object'] = tp_obj\n\n tp_sub = list(self.__plan_graph.objects(tp, predicate=AGORA.subject)).pop()\n if isinstance(tp_sub, URIRef):\n self.__patterns[tp]['filter_subject'] = tp_sub\n\n # Get all pattern nodes (those that have a byPattern properties) of the search plan and search backwards\n # in order to set the scope of each search space.\n nodes = list(self.__plan_graph.subjects(AGORA.byPattern, tp))\n for n in nodes:\n if n not in self.__node_patterns:\n self.__node_patterns[n] = set([])\n self.__node_patterns[n].add(tp)\n __decorate_nodes(nodes, space)",
"def get_timeseries(self,sn,strict=False,**kwargs):\n\n\t\tself.slice(sn)\n\t\tspot = kwargs.get('spot',self.cursor)\n\t\t#---! get the default spot and get the edr part \n\t\tassert (spot[0],'edr') in self.toc\n\t\tedrtree = self.toc[(spot[0],'edr')][sn]\n\t\t#---naming convention\n\t\tsequence = [((sn,step,part),tuple([edrtree[step][part][key] \n\t\t\tfor key in ['start','stop']]))\n\t\t\tfor step in edrtree \n\t\t\tfor part in edrtree[step]]\n\t\t#---return a list of keys,times pairs\n\t\treturn sequence\n\t\t#---! discarded logic below\n\t\t#seq_key_fn = [((sn,sub,fn),self.fullpath(sn,sub,fn)) for sub in subs for fn in self.toc[sn][sub]]\n\t\t#seq_time_fn = [(self.edr_times[self.xtc_files.index(fn)],key) for key,fn in seq_key_fn\n\t\t#\tif not strict or (None not in self.edr_times[self.xtc_files.index(fn)])]\n\t\t#return seq_time_fn",
"def get_index_patterns(cluster):\n es_name = cluster['es']['url']\n es = cluster['es']['client']\n\n if not es.indices.exists('.kibana'):\n raise Exception('.kibana index on {es_name} is missing!')\n\n patterns_gen = scan(\n es,\n index='.kibana',\n doc_type='index-pattern',\n _source_include=['timeFieldName'])\n\n patterns = {}\n for doc in patterns_gen:\n if not doc['_id'].startswith(\n '.') and doc['_id'] not in cluster['exclude']:\n patterns[doc['_id']] = doc['_source'].get('timeFieldName')\n\n log.debug('fetched_patterns', patterns=patterns)\n\n return patterns",
"def pattern(self):\n return self.get_data(\"pattern\")",
"def DiscoverPatterns(parameters, graph):\n patternCount = 0\n # get initial one-edge patterns\n parentPatternList = GetInitialPatterns(graph, parameters.temporal)\n if DEBUGFLAG:\n print(\"Initial patterns (\" + str(len(parentPatternList)) + \"):\")\n for pattern in parentPatternList:\n pattern.print_pattern(' ')\n discoveredPatternList = []\n while ((patternCount < parameters.limit) and parentPatternList):\n print(str(parameters.limit - patternCount) + \" patterns left\")\n childPatternList = []\n # extend each pattern in parent list (***** todo: in parallel)\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if ((len(parentPattern.instances) > 1) and (patternCount < parameters.limit)):\n patternCount += 1\n extendedPatternList = Pattern.ExtendPattern(parentPattern, parameters.temporal)\n while (extendedPatternList):\n extendedPattern = extendedPatternList.pop(0)\n if DEBUGFLAG:\n print(\"Extended Pattern:\")\n extendedPattern.print_pattern(' ')\n if (len(extendedPattern.definition.edges) <= parameters.maxSize):\n # evaluate each extension and add to child list\n extendedPattern.evaluate(graph)\n if ((not parameters.prune) or (extendedPattern.value >= parentPattern.value)):\n Pattern.PatternListInsert(extendedPattern, childPatternList, parameters.beamWidth, parameters.valueBased)\n # add parent pattern to final discovered list\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n parentPatternList = childPatternList\n # insert any remaining patterns in parent list on to discovered list\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n return discoveredPatternList",
"def getPattern(self):\n return self.pattern",
"def pattern_gen():\n pattern = \"\"\n\n return pattern",
"def get_motion_patterns(self, state):\n\t\tpatterns = self.motion_primitives\n\t\tn_p=[]\n\t\tfor i in range(len(patterns)):\n\t\t\tp=patterns[i]\n\t\t\tp[:,0] += state[0]\n\t\t\tp[:,1] += state[1]\n\t\t\tn_p.append(p)\n\n\t\treturn n_p",
"def exposuretimes(self):\n all = self.allexposuretimes\n return [all[layer-1] for layer in self.__layers]",
"def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())",
"def _parseStage1(pattern):\n result = []\n counter = 0\n pattern =\\\n pattern.replace('(', \" ( \").replace(')', \" ) \").replace('|', \" | \")\n pattern =\\\n pattern.replace('[', \" [ \").replace(']', \" ] \").replace('*', \" * \")\n pattern = pattern.strip().split()\n if pattern[0] != '(':\n pattern = ['('] + pattern\n pattern = [')'] + pattern\n\n bPattern, _ = closeBrackets(pattern)\n return bPattern",
"def GetInitialPatterns(graph, temporal = False):\n initialPatternList = []\n candidateEdges = graph.edges.values()\n while candidateEdges:\n edge1 = candidateEdges.pop(0)\n matchingEdges = [edge1]\n nonmatchingEdges = []\n graph1 = Graph.CreateGraphFromEdge(edge1)\n if temporal:\n graph1.TemporalOrder()\n for edge2 in candidateEdges:\n graph2 = Graph.CreateGraphFromEdge(edge2)\n if temporal:\n graph2.TemporalOrder()\n if Graph.GraphMatch(graph1,graph2):\n matchingEdges.append(edge2)\n else:\n nonmatchingEdges.append(edge2)\n if len(matchingEdges) > 1:\n # Create initial pattern\n pattern = Pattern.Pattern()\n pattern.definition = Graph.CreateGraphFromEdge(matchingEdges[0])\n if temporal:\n pattern.definition.TemporalOrder()\n pattern.instances = []\n for edge in matchingEdges:\n pattern.instances.append(Pattern.CreateInstanceFromEdge(edge))\n pattern.evaluate(graph)\n initialPatternList.append(pattern)\n candidateEdges = nonmatchingEdges\n return initialPatternList",
"def extract_annotation_temporal(self, text, annotationStartPos, annotationEndPos, annotationType, \n expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n sentences = util.sentence_tokenize(text)\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n locsSentStarts.append(len(text))\n self.sentence_startPos = locsSentStarts\n \n AnnSent = None\n for sentnum, pos in enumerate(self.sentence_startPos):\n if annotationStartPos>=pos and annotationStartPos<=self.sentence_startPos[sentnum+1]-1:\n AnnSent = sentnum\n break\n \n featText = text[annotationStartPos:annotationEndPos]\n tags = self.regexp_tagger.tag(nltk.word_tokenize(featText))\n feat = Feature((annotationType, featText, AnnSent, tags, annotationStartPos, annotationEndPos))\n \n featurelist = [feat]\n\n taggedSentences = [] \n for sentnumber, sentence in enumerate(sentences):\n\n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n expDateInput = self.parse_time_string(expDateStr)\n onsetDateInput = self.parse_time_string(onsetDateStr) \n receiveDate = self.parse_time_string(refExpDateStr) \n \n self.exposureDate = expDateInput\n self.onsetDate = onsetDateInput\n self.receiveDate = receiveDate\n self.exposureDateConfidence = 0\n self.onsetDateConfidence = 0\n \n ##: Obtain timex list\n timexList = timexan.annotateTimexes(self.text, expDateInput) \n \n self.sentence_full_tags = self.create_sentence_full_tags(featurelist, timexList)\n \n timexList = self.preprocess_timex_list(timexList, featurelist)\n \n ###: divide features that contain multiple timexes\n featurelist = self.divide_feature_containing_multiple_timexes(featurelist, timexList)\n \n featurelist = self.create_feature_timex_association(featurelist, timexList)\n \n timexList = self.construct_timeline(timexList, featurelist)\n \n featurelist = self.process_feature_durations(featurelist)\n \n featurelist = self.postprocess_features(featurelist)\n \n feature = featurelist[0]\n tlink = feature.getTlink()\n if not tlink:\n return ('', '')\n \n timexes = [t for t in tlink.getTimexes() if t.getDateTime()]\n if not timexes:\n return ('', '')\n \n if len(timexes)==1:\n tStart = timexes[0].getDateTime()\n tEnd = tStart\n else:\n tStart = timexes[0].getDateTime()\n tEnd = timexes[1].getDateTime()\n \n strTimeStart = tStart.isoformat().split('T')[0]\n strTimeEnd = tEnd.isoformat().split('T')[0]\n \n return (strTimeStart, strTimeEnd)",
"def test_radar_request_radvor_re_timerange(default_settings, station_reference_pattern_sorted_prefixed):\n\n timestamp = dt.datetime.utcnow() - dt.timedelta(days=1)\n\n request = DwdRadarValues(\n parameter=DwdRadarParameter.RE_REFLECTIVITY,\n start_date=timestamp,\n end_date=dt.timedelta(minutes=3 * 5),\n settings=default_settings,\n )\n\n # Verify number of elements.\n results = list(request.query())\n\n if len(results) == 0:\n raise pytest.skip(\"Data currently not available\")\n\n assert len(results) == 3 * 25\n\n buffer = results[0].data\n requested_header = wrl.io.read_radolan_header(buffer)\n month_year = request.start_date.strftime(\"%m%y\")\n\n pattern = (\n f\"RE......10000{month_year}BY 162....VS 5SW P30000.HPR E-03INT 60GP 900x 900VV 000MF 00000008QN 016MS\"\n f\"...<{station_reference_pattern_sorted_prefixed}>\"\n )\n assert re.match(pattern, requested_header[:200]), requested_header[:200]",
"def _get_tomes_pattern(self, pattern, row_number):\n \n # remove excess whitespace.\n pattern = pattern.strip()\n\n # test for incorrect TOMES Pattern usage.\n if pattern == \"\":\n msg = \"TOMES pattern in row {} is empty; falling back to empty output.\".format(\n row_number)\n self.logger.warning(msg)\n return [] \n\n # make sure a trailing comma exists; this prevents itertools.product() from splitting\n # something like \"TOMES_PATTERN:{'[A|B]'}\" from being split as a simple string: '{' +\n # 'A' + '|', etc.\n if pattern[-1] != \",\":\n pattern += \",\"\n \n # interpret the TOMES pattern.\n patterns = []\n try:\n pattern = eval(pattern, {\"__builtins__\": {}}, {})\n patterns = [i for i in itertools.product(*pattern)]\n patterns.reverse()\n except (NameError, SyntaxError, TypeError) as err:\n self.logger.error(err)\n msg = \"Invalid TOMES pattern syntax in row {}; falling back to empty output.\\\n \".format(row_number)\n self.logger.warning(msg)\n\n self.logger.info(\"TOMES Pattern yielded {} sub-patterns.\".format(len(patterns)))\n return patterns"
] |
[
"0.5510267",
"0.52542937",
"0.525195",
"0.5220616",
"0.5148889",
"0.5143544",
"0.5133086",
"0.51217085",
"0.5068871",
"0.5026059",
"0.5012949",
"0.50099546",
"0.49786222",
"0.49588096",
"0.49438012",
"0.49356502",
"0.4911912",
"0.49105886",
"0.49089172",
"0.4899446",
"0.48757088",
"0.48543927",
"0.48516378",
"0.48511806",
"0.48313004",
"0.48260546",
"0.48188382",
"0.47996852",
"0.47916314",
"0.47895136"
] |
0.6346698
|
0
|
Function to extract rider level geographical patterns
|
def _extract_geographical_patterns(self):
    # take onehot encoding of zipcodes
    onehot = pd.get_dummies(self.df_transaction['zipcode'], prefix='zipcode')
    rider_id = pd.DataFrame(data={'riderID': self.df_transaction['riderID']})
    frames = [rider_id, onehot]
    df_onehot = pd.concat(frames, axis=1)
    # count zipcodes
    df_rider_geo_count = df_onehot.groupby(['riderID'])[list(onehot.columns.values)].sum().reset_index()
    df_rider_geo_count['geo_row_sum'] = df_rider_geo_count.iloc[:, 1:].sum(axis=1)
    return df_rider_geo_count
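
A minimal, self-contained sketch of the same one-hot-and-sum pattern on made-up data, showing the per-rider zipcode count matrix and the geo_row_sum column the function above returns.

import pandas as pd

# Hypothetical transactions: rider r1 taps twice in 02139, r2 in 02139 and twice in 02115.
df_transaction = pd.DataFrame({
    'riderID': ['r1', 'r1', 'r2', 'r2', 'r2'],
    'zipcode': ['02139', '02139', '02139', '02115', '02115'],
})
onehot = pd.get_dummies(df_transaction['zipcode'], prefix='zipcode')
df_onehot = pd.concat([df_transaction[['riderID']], onehot], axis=1)
geo_count = df_onehot.groupby('riderID')[list(onehot.columns)].sum().reset_index()
geo_count['geo_row_sum'] = geo_count.iloc[:, 1:].sum(axis=1)
print(geo_count)
# riderID: r1 -> zipcode_02115=0, zipcode_02139=2, geo_row_sum=2
#          r2 -> zipcode_02115=2, zipcode_02139=1, geo_row_sum=3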
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_regions_mask(self, input):",
"def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]",
"def get_zone(text_reg):\n posi_zone = []\n gray_zone = []\n for txt in text_reg:\n x1, y1, x2, y2 = txt[0], txt[1], txt[2], txt[3]\n x3, y3, x4, y4 = txt[4], txt[5], txt[6], txt[7]\n line_1_2_len = np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))\n line_1_4_len = np.sqrt(np.square(x1 - x4) + np.square(y1 - y4))\n if line_1_2_len <= line_1_4_len:\n # short side is line_1_2\n mid_point_1_2 = [(x1 + x2) / 2, (y1 + y2) / 2]\n mid_point_m_1 = [(x1 + mid_point_1_2[0]) / 2, (y1 + mid_point_1_2[1]) / 2]\n mid_point_m_2 = [(x2 + mid_point_1_2[0]) / 2, (y2 + mid_point_1_2[1]) / 2]\n\n mid_point_3_4 = [(x3 + x4) / 2, (y3 + y4) / 2]\n mid_point_m_3 = [(x3 + mid_point_3_4[0]) / 2, (y3 + mid_point_3_4[1]) / 2]\n mid_point_m_4 = [(x4 + mid_point_3_4[0]) / 2, (y4 + mid_point_3_4[1]) / 2]\n\n gray_zone.append([x1, y1, mid_point_m_1[0], mid_point_m_1[1], mid_point_m_4[0], mid_point_m_4[1], x4, y4])\n gray_zone.append([mid_point_m_2[0], mid_point_m_2[1], x2, y2, x3, y3, mid_point_m_3[0], mid_point_m_3[1]])\n posi_zone.append([mid_point_m_1[0], mid_point_m_1[1], mid_point_m_2[0], mid_point_m_2[1],\n mid_point_m_3[0], mid_point_m_3[1], mid_point_m_4[0], mid_point_m_4[1]])\n else:\n # short side is line_1_4\n mid_point_1_4 = [(x1 + x4) / 2, (y1 + y4) / 2]\n mid_point_m_1 = [(x1 + mid_point_1_4[0]) / 2, (y1 + mid_point_1_4[1]) / 2]\n mid_point_m_4 = [(x4 + mid_point_1_4[0]) / 2, (y4 + mid_point_1_4[1]) / 2]\n\n mid_point_2_3 = [(x2 + x3) / 2, (y2 + y3) / 2]\n mid_point_m_2 = [(x2 + mid_point_2_3[0]) / 2, (y2 + mid_point_2_3[1]) / 2]\n mid_point_m_3 = [(x3 + mid_point_2_3[0]) / 2, (y3 + mid_point_2_3[1]) / 2]\n gray_zone.append([x1, y1, x2, y2, mid_point_m_2[0], mid_point_m_2[1], mid_point_m_1[0], mid_point_m_1[1]])\n gray_zone.append([mid_point_m_4[0], mid_point_m_4[1], mid_point_m_3[0], mid_point_m_3[1], x3, y3, x4, y4])\n posi_zone.append([mid_point_m_1[0], mid_point_m_1[1], mid_point_m_2[0], mid_point_m_2[1],\n mid_point_m_3[0], mid_point_m_3[1], mid_point_m_4[0], mid_point_m_4[1]])\n\n return gray_zone, posi_zone",
"def best_coords(self):\n lat, lon = None, None\n for term in self.terms:\n # print(term)\n # print(term['weight'])\n geo = term.get(\"geo\")\n if geo:\n osm = geo['osm']\n gm = geo['gm']\n geo_data = None\n if osm:\n geo_data = osm\n elif gm:\n geo_data = gm\n if geo_data:\n g = geo_data[0]\n lat, lon = g['latitude'], g['longitude']\n break\n return lat, lon, self.region",
"def test_get_zr_location_structure(self):\n pass",
"def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue",
"def dflagize_subregional(text: str) -> str:\r\n\r\n return standard.dflagize_subregional(text)",
"def standard_map_peninsula():\n geogr = \"\"\"\\\n OOOOOOOOOOOOOOOOOOOOO\n OOOOOOOOSMMMMJJJJJJJO\n OSSSSSJJJJMMJJJJJJJOO\n OSSSSSSSSSMMJJJJJJOOO\n OSSSSSJJJJJJJJJJJJOOO\n OSSSSSJJJDDJJJSJJJOOO\n OSSJJJJJDDDJJJSSSSOOO\n OOSSSSJJJDDJJJSOOOOOO\n OSSSJJJJJDDJJJJJJJOOO\n OSSSSJJJJDDJJJJOOOOOO\n OOSSSSJJJJJJJJOOOOOOO\n OOOSSSSJJJJJJJOOOOOOO\n OOOOOOOOOOOOOOOOOOOOO\"\"\"\n island = isle.Island(geogr)\n occupants = [{'loc': (1, 19),\n 'pop': [{'species': 'Herbivore', 'age': 9, 'weight': 10},\n {'species': 'Carnivore', 'age': 9, 'weight': 10}]}]\n island.populate_island(occupants)\n return island",
"def get_valid_regions(self):\n pass",
"def get_valid_locations(location_list, grid, shape):",
"def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly",
"def get_all_locations(self):",
"def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats",
"def findGeostructures(\r\n res: float|List[float, ...], /, \r\n db_properties=['electrical_props', '__description']\r\n ):\r\n \r\n structures = find_similar_structures(res)\r\n if len(structures) !=0 or structures is not None:\r\n if structures[0].find('/')>=0 : \r\n ln = structures[0].split('/')[0].lower() \r\n else: ln = structures[0].lower()\r\n return ln, res\r\n else: \r\n valEpropsNames = GeoBase.getProperties(db_properties)\r\n indeprops = db_properties.index('electrical_props')\r\n for ii, elecp_value in enumerate(valEpropsNames[indeprops]): \r\n if elecp_value ==0.: continue \r\n elif elecp_value !=0 : \r\n try : \r\n iter(elecp_value)\r\n except : pass \r\n else : \r\n if min(elecp_value)<= res<= max(elecp_value):\r\n ln= valEpropsNames[indeprops][ii]\r\n return ln, res",
"def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! 
This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]",
"def find_features_geojson(self, geojson_tagset):\n kreis_region_bund_list = []\n only_regs_set = set()\n for feature in geojson_tagset:\n bundesl = feature.properties.get('NAME_1')\n region = feature.properties.get('NAME_2')\n kreis = feature.properties.get('NAME_3')\n\n kreis_region_bund_list.append((kreis, region, bundesl))\n #Check: does \"Göttingen\" appear in this list as a region? Why does Goettingen need to be a region?)\n return kreis_region_bund_list",
"def region(location):\n x, y = location\n if y < 89:\n return \"left-door\" if x < 56 else \"middle-platform\" if x < 102 else \"right-door\"\n elif y < 124:\n return \"left-platform\" if x < 39 else \"belt\" if x < 79 else \"middle-ladder\" if x < 81 else \"belt\" if x < 111 else \"rope\" if x < 113 else \"right-platform\"\n elif y < 127:\n return \"left-platform\" if x < 39 else \"belt\" if x < 111 else \"rope\" if x < 113 else \"right-platform\"\n elif y < 150:\n return \"left-ladder\" if x < 32 else \"floor\" if x < 111 else \"rope\" if x < 113 else \"floor\" if x < 128 else \"right-ladder\"\n elif y < 168:\n return \"left-ladder\" if x < 32 else \"floor\" if x < 128 else \"right-ladder\"\n else:\n return \"floor\"",
"def geotif_image(self, tile_bounds, image_bounds, imagepath,image_gdal):\n i_srid=3857\n s_srid=\"WGS 84 / Pseudo-Mercator\"\n # i_srid=3395\n # s_srid=\"WGS 84 / World Mercator\"\n # 4326 Wsg84\n # Upper Left ( -8.4375000, 77.1571625) ( 8d26'15.00\"W, 77d 9'25.79\"N)\n # Lower Left ( -8.4375000, 35.4606700) ( 8d26'15.00\"W, 35d27'38.41\"N)\n # Upper Right ( 80.1562500, 77.1571625) ( 80d 9'22.50\"E, 77d 9'25.79\"N)\n # Lower Right ( 80.1562500, 35.4606700) ( 80d 9'22.50\"E, 35d27'38.41\"N)\n # Center ( 35.8593750, 56.3089162) ( 35d51'33.75\"E, 56d18'32.10\"N)\n # 3857 'WGS 84 / Pseudo-Mercator'\n # Upper Left ( -939258.204,13932330.020) ( 8d26'15.00\"W, 77d 9'25.79\"N)\n # Lower Left ( -939258.204, 4226661.916) ( 8d26'15.00\"W, 35d27'38.41\"N)\n # Upper Right ( 8922952.934,13932330.020) ( 80d 9'22.50\"E, 77d 9'25.79\"N)\n # Lower Right ( 8922952.934, 4226661.916) ( 80d 9'22.50\"E, 35d27'38.41\"N)\n # Center ( 3991847.365, 9079495.968) ( 35d51'33.75\"E, 62d54'54.84\"N)\n # 3395 'WGS 84 / World Mercator'\n # Upper Left ( -939258.204,13932330.020) ( 8d26'15.00\"W, 77d14'24.81\"N)\n # Lower Left ( -939258.204, 4226661.916) ( 8d26'15.00\"W, 35d38'33.56\"N)\n # Upper Right ( 8922952.934,13932330.020) ( 80d 9'22.50\"E, 77d14'24.81\"N)\n # Lower Right ( 8922952.934, 4226661.916) ( 80d 9'22.50\"E, 35d38'33.56\"N)\n # Center ( 3991847.365, 9079495.968) ( 35d51'33.75\"E, 63d 4'14.87\"N)\n bounds_west,bounds_south,bounds_east,bounds_north=tile_bounds\n bounds_wsg84=\"bounds_wsg84: %f,%f,%f,%f\"% (bounds_west,bounds_south,bounds_east,bounds_north)\n mercator = GlobalMercator()\n tile_bounds=mercator.BoundsToMeters(tile_bounds)\n mbtiles_name=\"\";\n mbtiles_description=\"\"\n s_TIFFTAG_DOCUMENTNAME=\"\"\n s_TIFFTAG_IMAGEDESCRIPTION=\"\"\n s_TIFFTAG_SOFTWARE=\"\"\n s_TIFFTAG_DATETIME=\"\"\n s_TIFFTAG_ARTIST=\"\"\n s_TIFFTAG_HOSTCOMPUTER=\"\"\n s_TIFFTAG_COPYRIGHT=\"\"\n if self.metadata_input:\n metadata=dict(self.metadata_input)\n mbtiles_name=metadata.get('name','')\n mbtiles_description=metadata.get('description','')\n if self._metadata:\n for metadata_list in self._metadata:\n metadata=dict(metadata_list[0])\n mbtiles_name=metadata.get('name',mbtiles_name)\n mbtiles_description=metadata.get('description',mbtiles_description)\n s_TIFFTAG_DOCUMENTNAME=metadata.get('TIFFTAG_DOCUMENTNAME',mbtiles_name)\n s_TIFFTAG_IMAGEDESCRIPTION=metadata.get('TIFFTAG_IMAGEDESCRIPTION',mbtiles_description)\n s_TIFFTAG_SOFTWARE=metadata.get('TIFFTAG_SOFTWARE','')\n s_TIFFTAG_DATETIME=metadata.get('TIFFTAG_DATETIME','')\n s_TIFFTAG_ARTIST=metadata.get('TIFFTAG_ARTIST','')\n s_TIFFTAG_HOSTCOMPUTER=metadata.get('TIFFTAG_HOSTCOMPUTER','')\n s_TIFFTAG_COPYRIGHT=metadata.get('TIFFTAG_COPYRIGHT','')\n if s_TIFFTAG_DOCUMENTNAME == \"\":\n s_TIFFTAG_DOCUMENTNAME=mbtiles_name\n if s_TIFFTAG_IMAGEDESCRIPTION == \"\":\n s_TIFFTAG_IMAGEDESCRIPTION=mbtiles_description\n tiff_metadata=[]\n if s_TIFFTAG_DOCUMENTNAME != \"\":\n tiff_metadata.append(('TIFFTAG_DOCUMENTNAME',s_TIFFTAG_DOCUMENTNAME))\n if s_TIFFTAG_IMAGEDESCRIPTION != \"\":\n tiff_metadata.append(('TIFFTAG_IMAGEDESCRIPTION',s_TIFFTAG_IMAGEDESCRIPTION))\n if s_TIFFTAG_SOFTWARE != \"\":\n tiff_metadata.append(('TIFFTAG_SOFTWARE',s_TIFFTAG_SOFTWARE))\n else:\n tiff_metadata.append(('TIFFTAG_SOFTWARE',bounds_wsg84))\n if s_TIFFTAG_DATETIME != \"\":\n tiff_metadata.append(('TIFFTAG_DATETIME',s_TIFFTAG_DATETIME))\n if s_TIFFTAG_ARTIST != \"\":\n tiff_metadata.append(('TIFFTAG_ARTIST',s_TIFFTAG_ARTIST))\n if s_TIFFTAG_HOSTCOMPUTER != \"\":\n 
tiff_metadata.append(('TIFFTAG_HOSTCOMPUTER',s_TIFFTAG_HOSTCOMPUTER))\n if s_TIFFTAG_COPYRIGHT != \"\":\n tiff_metadata.append(('TIFFTAG_COPYRIGHT',s_TIFFTAG_COPYRIGHT))\n # this assumes the projection is Geographic lat/lon WGS 84\n xmin,ymin,xmax,ymax=tile_bounds\n image_width,image_height=image_bounds\n # Upper Left ( 20800.000, 22000.000)\n # Lower Right ( 24000.000, 19600.000)\n # Size is 15118, 11339\n # (24000-20800)/15118 = 3200 = 0,21166821 [xres]\n # (19600-22000)/11339 = 2400 = −0,211658876 [yres]\n # geo_transform = (20800.0, 0.2116682100807, 0.0, 22000.0, 0.0, -0.21165887644413)\n geo_transform = [xmin, (xmax-xmin)/image_width, 0, ymax, 0, (ymin-ymax)/image_height ]\n spatial_projection = osr.SpatialReference()\n spatial_projection.ImportFromEPSG(i_srid)\n logger.info(_(\"-I-> geotif_image: Saving as GeoTiff - image[%s] compression[%s]\") % (imagepath,self.tiff_compression))\n image_dataset = gdal.Open(image_gdal, gdal.GA_Update )\n image_dataset.SetProjection(spatial_projection.ExportToWkt())\n image_dataset.SetGeoTransform(geo_transform)\n driver = gdal.GetDriverByName(\"GTiff\")\n output_dataset = driver.CreateCopy(imagepath,image_dataset, 0, self.tiff_compression )\n if tiff_metadata:\n logger.info(_(\"-I-> geotif_image: tiff_metadata[%s]\") % tiff_metadata)\n output_dataset.SetMetadata(dict(tiff_metadata))\n # Once we're done, close properly the dataset\n output_dataset = None\n image_dataset = None\n os.remove(image_gdal)\n logger.info(_(\"-I-> geotif_image: Saved resulting image to '%s' as GeoTiff- bounds[%s]\") % (imagepath,tile_bounds))",
"def get_shapes4country(country='South Africa'):\n # location of data\n URL = \"http://www.naturalearthdata.com/downloads/10m-cultural-vectors\"\n URL += \"/10m-admin-1-states-provinces/\"\n # Shapefiles locally?\n # TODO - update to download automatically and store in AC_tools' data directory\n shapefiles = 'ne_10m_admin_1_states_provinces_lakes'\n# shapefiles = 'ne_10m_admin_1_states_provinces'\n folder = '/mnt/lustre/users/ts551/labbook/Python_progs/'\n folder += '/AC_tools/data/shapefiles/{}'.format(shapefiles, shapefiles)\n states = geopandas.read_file(folder)\n # Just select state of interest\n choosen_states = states.query(\"admin == '{}'\".format(country))\n choosen_states = choosen_states.reset_index(drop=True)\n # Get the shapes\n shapes = zip(choosen_states.geometry, range(len(choosen_states)))\n return shapes",
"def get_location(body, returnthis):\n m = re.search(r\"\\bsteemitworldmap\\b\\s([-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?))\\s\\blat\\b\\s([-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?))\", body)\n if m:\n try:\n latitude = m.group(1)\n longitude = m.group(5)\n geolocator = Nominatim(user_agent=\"travelfeed/0.1\")\n rawlocation = geolocator.reverse(latitude+\", \"+longitude, language=\"en\", timeout=10).raw\n address = rawlocation['address']\n state = address.get('state', None)\n if state == None: #Not every location has a state/region/... set!\n state = address.get('region', None)\n if state == None:\n state = address.get('state_district', None)\n if state == None:\n state = address.get('county', None)\n if state == None:\n state = \"\"\n country_code = str(address[\"country_code\"]).upper()\n country_object = pycountry.countries.get(alpha_2=country_code)\n try:\n country = country_object.common_name #Some countries such as Taiwan or Bolivia have a common name that is used instead of the official name\n except:\n country = country_object.name\n continent_code = pycountry_convert.country_alpha2_to_continent_code(country_code)\n if continent_code == \"AF\":\n continent = \"Africa\"\n elif continent_code == \"NA\":\n continent = \"North America\"\n elif continent_code == \"OC\":\n continent = \"Oceania\"\n elif continent_code == \"AN\":\n continent = \"Antarctica\"\n elif continent_code == \"AS\":\n continent = \"Asia\"\n elif continent_code == \"EU\":\n continent = \"Europe\"\n elif continent_code == \"SA\":\n continent = \"South America\"\n if returnthis == None:\n location = state+\", \"+country+\", \"+continent\n return location\n if returnthis == \"continentcode\":\n return continent_code\n except Exception as error:\n logger.warning(\"Could not determine location: \"+repr(error))\n return None\n else:\n return None",
"def find_map(start, end, *otherlocs):\n small = \"200x200\"\n large = \"512x512\"\n start = start.replace(\" \",\"+\")\n end = end.replace(\" \",\"+\")\n small_url = g_api_base_url + static_url + small + map_type_url + small_marker_url + start + map_concat + end\n big_url = g_api_base_url + static_url + large + map_type_url + marker_url + start + map_concat + end\n for loc in otherlocs:\n loc = loc.replace(\" \", \"+\")\n small_url += loc\n big_url += loc\n small_url += goog_static_map_key\n big_url += goog_static_map_key\n return small_url, big_url",
"def get_polygons(annotation):\n print(f\"Loadding: {annotation}\")\n tree = ET.parse(annotation)\n root = tree.getroot()\n polygons = {}\n for obj in root.findall('object'):\n name = obj.find('name').text\n id_ = obj.find('id').text\n polygon = []\n for pt in obj.find('polygon').findall('pt'):\n polygon.append([pt.find('x').text, pt.find('y').text])\n if name in polygons:\n x_ref= int(polygons[name]['left'][0][0])\n x = int(polygon[0][0])\n if x > x_ref:\n polygons[name]['right'] = polygons[name]['left']\n id_ = 'left'\n else:\n id_ = 'right'\n else:\n polygons[name] = {}\n id_ = 'left'\n polygons[name][id_] = polygon\n for i in list(polygons.keys()):\n if not('right' in polygons[i]):\n print(i,' only has one polygon: ',polygons[i]['left'])\n y = input('Do you wish to label it as \\'right\\'? (leave empy if No): ')\n if (y):\n polygons[i]['right'] = polygons[i]['left']\n polygons[i].pop('left')\n return polygons",
"def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'",
"def filter_geolevels(self):\n return self.filter_nodes('/DistrictBuilder/GeoLevels/GeoLevel')",
"def filter_plants_by_region_id(region_id, year, host='localhost', area=0.5):\n\n state_dict = {\n 'Alabama':'AL',\n 'Alaska':'AK',\n 'Arizona':'AZ',\n 'Arkansas':'AR',\n 'California':'CA',\n 'Colorado':'CO',\n 'Connecticut':'CT',\n 'Delaware':'DE',\n 'Florida':'FL',\n 'Georgia':'GA',\n 'Hawaii':'HI',\n 'Idaho':'ID',\n 'Illinois':'IL',\n 'Indiana':'IN',\n 'Iowa':'IA',\n 'Kansas':'KS',\n 'Kentucky':'KY',\n 'Louisiana':'LA',\n 'Maine':'ME',\n 'Maryland':'MD',\n 'Massachusetts':'MA',\n 'Michigan':'MI',\n 'Minnesota':'MN',\n 'Mississippi':'MS',\n 'Missouri':'MO',\n 'Montana':'MT',\n 'Nebraska':'NE',\n 'Nevada':'NV',\n 'New Hampshire':'NH',\n 'New Jersey':'NJ',\n 'New Mexico':'NM',\n 'New York':'NY',\n 'North Carolina':'NC',\n 'North Dakota':'ND',\n 'Ohio':'OH',\n 'Oklahoma':'OK',\n 'Oregon':'OR',\n 'Pennsylvania':'PA',\n 'Rhode Island':'RI',\n 'South Carolina':'SC',\n 'South Dakota':'SD',\n 'Tennessee':'TN',\n 'Texas':'TX',\n 'Utah':'UT',\n 'Vermont':'VT',\n 'Virginia':'VA',\n 'Washington':'WA',\n 'West Virginia':'WV',\n 'Wisconsin':'WI',\n 'Wyoming':'WY'\n }\n\n print \"Getting region name from database...\"\n query = \"SELECT regionabr FROM ventyx_nerc_reg_region WHERE gid={}\".format(\n region_id)\n region_name = connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)['regionabr'][0]\n counties_path = os.path.join('other_data', '{}_counties.tab'.format(region_name))\n \n if not os.path.exists(counties_path):\n # assign county if (area)% or more of its area falls in the region\n query = \"SELECT name, state\\\n FROM ventyx_nerc_reg_region regions CROSS JOIN us_counties cts\\\n JOIN (SELECT DISTINCT state, state_fips FROM us_states) sts \\\n ON (sts.state_fips=cts.statefp) \\\n WHERE regions.gid={} AND\\\n ST_Area(ST_Intersection(cts.the_geom, regions.the_geom))/\\\n ST_Area(cts.the_geom)>={}\".format(region_id, area)\n print \"\\nGetting counties and states for the region from database...\"\n region_counties = pd.DataFrame(connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)).rename(columns={'name':'County','state':'State'})\n region_counties.replace(state_dict, inplace=True)\n region_counties.to_csv(counties_path, sep='\\t', index=False)\n else:\n print \"Reading counties from .tab file...\"\n region_counties = pd.read_csv(counties_path, sep='\\t', index_col=None)\n\n generators = pd.read_csv(\n os.path.join('processed_data','generation_projects_{}.tab'.format(year)), sep='\\t')\n generators.loc[:,'County'] = generators['County'].map(lambda c: str(c).title())\n\n print \"\\nRead in data for {} generators, of which:\".format(len(generators))\n print \"--{} are existing\".format(len(generators[generators['Operational Status']=='Operable']))\n print \"--{} are proposed\".format(len(generators[generators['Operational Status']=='Proposed']))\n\n generators_with_assigned_region = generators.loc[generators['Nerc Region'] == region_name]\n generators = generators[generators['Nerc Region'].isnull()]\n generators_without_assigned_region = pd.merge(generators, region_counties, how='inner', on=['County','State'])\n generators = pd.concat([\n generators_with_assigned_region,\n generators_without_assigned_region],\n axis=0)\n generators.replace(\n to_replace={'Energy Source':coal_codes, 'Energy Source 2':coal_codes,\n 'Energy Source 3':coal_codes}, value='COAL', inplace=True)\n generators_columns = list(generators.columns)\n\n existing_gens = generators[generators['Operational Status']=='Operable']\n proposed_gens = generators[generators['Operational 
Status']=='Proposed']\n\n print \"=======\"\n print \"Filtered to {} projects in the {} region, of which:\".format(\n len(generators), region_name)\n print \"--{} are existing with {:.0f} GW of capacity\".format(\n len(existing_gens), existing_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"--{} are proposed with {:.0f} GW of capacity\".format(\n len(proposed_gens), proposed_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"=======\"\n\n return generators",
"def _extract_landmarks(dig):\n coords = dict()\n landmarks = {d['ident']: d for d in dig\n if d['kind'] == FIFF.FIFFV_POINT_CARDINAL}\n if landmarks:\n if FIFF.FIFFV_POINT_NASION in landmarks:\n coords['NAS'] = landmarks[FIFF.FIFFV_POINT_NASION]['r'].tolist()\n if FIFF.FIFFV_POINT_LPA in landmarks:\n coords['LPA'] = landmarks[FIFF.FIFFV_POINT_LPA]['r'].tolist()\n if FIFF.FIFFV_POINT_RPA in landmarks:\n coords['RPA'] = landmarks[FIFF.FIFFV_POINT_RPA]['r'].tolist()\n return coords",
"def get_countries_per_region(region='all'):\n\n iso3n = {'NA1': [660, 28, 32, 533, 44, 52, 84, 60, 68, 132, 136,\n 152, 170, 188, 192, 212, 214, 218, 222, 238, 254,\n 308, 312, 320, 328, 332, 340, 388, 474, 484, 500,\n 558, 591, 600, 604, 630, 654, 659, 662, 670, 534,\n 740, 780, 796, 858, 862, 92, 850], \\\n 'NA2': [124, 840], \\\n 'NI': [4, 51, 31, 48, 50, 64, 262, 232,\n 231, 268, 356, 364, 368, 376, 400, 398, 414, 417,\n 422, 462, 496, 104, 524, 512, 586, 634, 682, 706,\n 144, 760, 762, 795, 800, 784, 860, 887], \\\n 'OC': [16, 36, 184, 242, 258, 316, 296, 584, 583, 520,\n 540, 554, 570, 574, 580, 585, 598, 612, 882, 90,\n 626, 772, 776, 798, 548, 876], \\\n 'SI': [174, 180, 748, 450, 454, 466, 480, 508, 710, 834,\n 716], \\\n 'WP1': [116, 360, 418, 458, 764, 704], \\\n 'WP2': [608], \\\n 'WP3': [156], \\\n 'WP4': [344, 392, 410, 446, 158], \\\n 'ROW': [8, 12, 20, 24, 10, 40, 112, 56, 204, 535, 70, 72,\n 74, 76, 86, 96, 100, 854, 108, 120, 140, 148, 162,\n 166, 178, 191, 531, 196, 203, 384, 208, 818, 226,\n 233, 234, 246, 250, 260, 266, 270, 276, 288, 292,\n 300, 304, 831, 324, 624, 334, 336, 348, 352, 372,\n 833, 380, 832, 404, 408, 983, 428, 426, 430, 434,\n 438, 440, 442, 470, 478, 175, 498, 492, 499, 504,\n 516, 528, 562, 566, 807, 578, 275, 616, 620, 642,\n 643, 646, 638, 652, 663, 666, 674, 678, 686, 688,\n 690, 694, 702, 703, 705, 239, 728, 724, 729, 744,\n 752, 756, 768, 788, 792, 804, 826, 581, 732, 894,\n 248]}\n iso3a = {'NA1': ['AIA', 'ATG', 'ARG', 'ABW', 'BHS', 'BRB', 'BLZ', 'BMU',\n 'BOL', 'CPV', 'CYM', 'CHL', 'COL', 'CRI', 'CUB', 'DMA',\n 'DOM', 'ECU', 'SLV', 'FLK', 'GUF', 'GRD', 'GLP', 'GTM',\n 'GUY', 'HTI', 'HND', 'JAM', 'MTQ', 'MEX', 'MSR', 'NIC',\n 'PAN', 'PRY', 'PER', 'PRI', 'SHN', 'KNA', 'LCA', 'VCT',\n 'SXM', 'SUR', 'TTO', 'TCA', 'URY', 'VEN', 'VGB', 'VIR'], \\\n 'NA2': ['CAN', 'USA'], \\\n 'NI': ['AFG', 'ARM', 'AZE', 'BHR', 'BGD', 'BTN', 'DJI', 'ERI',\n 'ETH', 'GEO', 'IND', 'IRN', 'IRQ', 'ISR', 'JOR', 'KAZ',\n 'KWT', 'KGZ', 'LBN', 'MDV', 'MNG', 'MMR', 'NPL', 'OMN',\n 'PAK', 'QAT', 'SAU', 'SOM', 'LKA', 'SYR', 'TJK', 'TKM',\n 'UGA', 'ARE', 'UZB', 'YEM'], \\\n 'OC': ['ASM', 'AUS', 'COK', 'FJI', 'PYF', 'GUM', 'KIR', 'MHL',\n 'FSM', 'NRU', 'NCL', 'NZL', 'NIU', 'NFK', 'MNP', 'PLW',\n 'PNG', 'PCN', 'WSM', 'SLB', 'TLS', 'TKL', 'TON', 'TUV',\n 'VUT', 'WLF'], \\\n 'SI': ['COM', 'COD', 'SWZ', 'MDG', 'MWI', 'MLI', 'MUS', 'MOZ',\n 'ZAF', 'TZA', 'ZWE'], \\\n 'WP1': ['KHM', 'IDN', 'LAO', 'MYS', 'THA', 'VNM'], \\\n 'WP2': ['PHL'], \\\n 'WP3': ['CHN'], \\\n 'WP4': ['HKG', 'JPN', 'KOR', 'MAC', 'TWN'], \\\n 'ROW': ['ALB', 'DZA', 'AND', 'AGO', 'ATA', 'AUT', 'BLR', 'BEL',\n 'BEN', 'BES', 'BIH', 'BWA', 'BVT', 'BRA', 'IOT', 'BRN',\n 'BGR', 'BFA', 'BDI', 'CMR', 'CAF', 'TCD', 'CXR', 'CCK',\n 'COG', 'HRV', 'CUW', 'CYP', 'CZE', 'CIV', 'DNK', 'EGY',\n 'GNQ', 'EST', 'FRO', 'FIN', 'FRA', 'ATF', 'GAB', 'GMB',\n 'DEU', 'GHA', 'GIB', 'GRC', 'GRL', 'GGY', 'GIN', 'GNB',\n 'HMD', 'VAT', 'HUN', 'ISL', 'IRL', 'IMN', 'ITA', 'JEY',\n 'KEN', 'PRK', 'XKX', 'LVA', 'LSO', 'LBR', 'LBY', 'LIE',\n 'LTU', 'LUX', 'MLT', 'MRT', 'MYT', 'MDA', 'MCO', 'MNE',\n 'MAR', 'NAM', 'NLD', 'NER', 'NGA', 'MKD', 'NOR', 'PSE',\n 'POL', 'PRT', 'ROU', 'RUS', 'RWA', 'REU', 'BLM', 'MAF',\n 'SPM', 'SMR', 'STP', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP',\n 'SVK', 'SVN', 'SGS', 'SSD', 'ESP', 'SDN', 'SJM', 'SWE',\n 'CHE', 'TGO', 'TUN', 'TUR', 'UKR', 'GBR', 'UMI', 'ESH',\n 'ZMB', 'ALA']}\n if_id = {'NA1': 1, 'NA2': 2, 'NI': 3, 'OC': 4, 'SI': 5, \\\n 'WP1': 6, 'WP2': 7, 'WP3': 8, 'WP4': 9, 'ROW': 10}\n region_name = dict()\n region_name['NA1'] 
= 'Caribbean and Mexico'\n region_name['NA2'] = 'USA and Canada'\n region_name['NI'] = 'North Indian'\n region_name['OC'] = 'Oceania'\n region_name['SI'] = 'South Indian'\n region_name['WP1'] = 'South East Asia'\n region_name['WP2'] = 'Philippines'\n region_name['WP3'] = 'China Mainland'\n region_name['WP4'] = 'North West Pacific'\n\n if region == 'all':\n return region_name, if_id, iso3n, iso3a,\n else:\n return region_name[region], if_id[region], iso3n[region], iso3a[region]",
"def fix_location(r):\n \n # all is fine: just change zipcode datatype to str\n if not np.isnan(r['zip']) and not np.isnan(r['lat']):\n return [str(int(r['zip'])), r['lng'], r['lat']]\n \n # try to locate within zipcode polygons\n if not np.isnan(r['lat']):\n query = \"\"\"\n SELECT t.geoid as zip, {} as lng, {} as lat\n FROM us_zcta5 t JOIN usps_zcta5 z ON t.geoid = z.zip\n WHERE ST_Contains(t.shape, ST_GeomFromText('POINT({} {})', 2))\n \"\"\"\n res = pd.read_sql(query.format(r['lng'], r['lat'], r['lng'], r['lat']), con = con)\n if len(res) == 1:\n return res.values[0].tolist()\n\n # use zipcode center as location proxy: geocoding is prefered in this case, but might be quite expensive\n if not np.isnan(r['zip']):\n res = zipcodes[zipcodes['zip'] == str(int(r['zip']))]\n if len(res) == 1:\n return res.values[0].tolist()[:3]\n\n return [None, None, None]",
"def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]",
"def railway_areas(osm_path): \n return retrieve(osm_path,'multipolygons',['railway','landuse'],**{'railway':[\"='platform' or \",\"='station' or \",\"='tram_stop'\"],'landuse':[\"='railway'\"]})"
] |
[
"0.5798934",
"0.5676824",
"0.55530095",
"0.5447797",
"0.54079866",
"0.5355692",
"0.53391874",
"0.53265494",
"0.52204746",
"0.520599",
"0.51960975",
"0.51955533",
"0.51670206",
"0.51228994",
"0.51193917",
"0.50905097",
"0.5084017",
"0.5071909",
"0.50690395",
"0.5055916",
"0.5048287",
"0.504534",
"0.50393724",
"0.5016364",
"0.50127727",
"0.50078946",
"0.49998593",
"0.4996495",
"0.49961895",
"0.4986709"
] |
0.67227197
|
0
|
Function to extract one purchasing feature
|
def _get_one_purchase_feature(self, feature):
# take onehot encoding of the purchasing feature columns
onehot = pd.get_dummies(self.df_transaction[feature], prefix=feature)
rider_id = pd.DataFrame(data={'riderID': self.df_transaction['riderID']})
frames = [rider_id, onehot]
df_onehot = pd.concat(frames, axis=1)
    # count, per rider, how many purchases fall into each one-hot category
df_onehot_count = df_onehot.groupby(['riderID'])[list(onehot.columns.values)].sum().reset_index()
return df_onehot_count
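
For reference, a minimal standalone sketch of the transformation in the document above, assuming a pandas DataFrame with a `riderID` column and one purchasing column; the tiny example table and the `tariff` column name are fabricated for illustration only.

import pandas as pd

# Illustrative transaction table (fabricated for this sketch); 'tariff' stands
# in for one purchasing feature column.
df_transaction = pd.DataFrame({'riderID': [1, 1, 2],
                               'tariff':  ['monthly', 'single', 'single']})

feature = 'tariff'
# One-hot encode the selected purchasing feature column.
onehot = pd.get_dummies(df_transaction[feature], prefix=feature)
rider_id = pd.DataFrame({'riderID': df_transaction['riderID']})
df_onehot = pd.concat([rider_id, onehot], axis=1)
# Count, per rider, how many purchases fall into each one-hot category.
df_onehot_count = df_onehot.groupby(['riderID'])[list(onehot.columns)].sum().reset_index()
print(df_onehot_count)
# riderID 1 -> tariff_monthly=1, tariff_single=1; riderID 2 -> tariff_single=1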
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def purchase(self, item_type):",
"def after_purchase():\n\n return [\"MCAR\", \"MD\", \"NI\", \"MAR\",\"MAR\"]",
"def extract_feature(self, article) :\n pass",
"def productactivate():\n pass",
"def _extract_ticket_purchasing_patterns(self):\n list_df_purchase_count = []\n\n for feature in self.purchase_features:\n feature_count = self._get_one_purchase_feature(feature)\n list_df_purchase_count.append(feature_count.drop(['riderID'], axis=1))\n df_purchase_count = pd.concat(list_df_purchase_count, axis=1)\n\n # append the riderID columns\n df_purchase_count.insert(0, 'riderID', feature_count['riderID'])\n\n return df_purchase_count",
"def get_active_features(summary_df, slots_offered): # prev -> getActiveFeatures\n disc_cols = [col+'_Discount' for col in slots_offered]\n eco_cols = [col+'_Eco' for col in slots_offered]\n gr_cols = [col+'_Eco' for col in slots_offered]\n features = summary_df.loc[:, disc_cols+eco_cols+gr_cols]\n features = features.loc[:, features.sum(axis=0) > 0]\n for i in reversed(['NO_PURCHASE']+slots_offered):\n features.insert(0, i+'_Asc', value=1)\n return features, disc_cols, eco_cols, gr_cols",
"def _get_feature_support(self):\r\n res = super(AcquirerbKash, self)._get_feature_support()\r\n res['fees'].append('bkash')\r\n return res",
"def feature():\n pass",
"def test_bundle_purchase_method(self):\n template = self.product_apple_bundle\n self.assertEqual(template.purchase_method, 'purchase', 'Product: the Control Policy is On ordered quantities')",
"def get_feature(self, feature_name: FeatureName) -> interface.FeatureInfo:\n if feature_name == FeatureName.PushUpdates:\n # Multiple protocols can register a push updater implementation, but only\n # one of them will ever be used (i.e. relaying is not done on method\n # level). So if at least one push updater is available, then we can return\n # \"Available\" here.\n if self._push_updater_relay.count >= 1:\n return interface.FeatureInfo(FeatureState.Available)\n if feature_name in self._feature_map:\n return self._feature_map[feature_name][1].get_feature(feature_name)\n return interface.FeatureInfo(FeatureState.Unsupported)",
"def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0",
"def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]",
"def get_feature_set_PA(tweet):\n features= {}\n return features",
"def getUserHistFeatures(transaction_list, coupon_dict, model_start_date, purchase_date):\n feat_header = [\"NoOfPurchases\", \"DaysSinceLastPurchase\", \"NoOfPurchasesLastweek\", \"NoOfPurchasesLast15Days\", \"NoOfPurchasesLast30Days\", \"NoOfPurchasesLast60Days\", \"NoOfPurchasesLast90Days\", \"NoOfPurchasesLast180Days\", \"DaysSincePrevPurchase\", \"NoOfPurchasesPrevweek\", \"NoOfPurchasesPrev15Days\", \"NoOfPurchasesPrev30Days\", \"NoOfPurchasesPrev60Days\", \"NoOfPurchasesPrev90Days\", \"NoOfPurchasesPrev180Days\"]\n\n # getting number of purchases #\n feat_list = [len(transaction_list)]\n\n # initializing variables #\n purchase_small_area_name_dict = {}\n puchase_date_list = []\n capsule_text_dict = {}\n genre_name_dict = {}\n price_rate_list = []\n catalog_price_list = []\n discount_price_list = []\n dispperiod_list = []\n valid_period_list = []\n usable_date_mon_list = {}\n usable_date_tue_list = {}\n usable_date_wed_list = {}\n usable_date_thu_list = {}\n usable_date_fri_list = {}\n usable_date_sat_list = {}\n usable_date_sun_list = {}\n usable_date_hol_list = {}\n usable_date_before_hol_list = {}\n coupon_large_area_name_dict = {}\n coupon_small_area_name_dict = {}\n coupon_ken_name_dict = {}\n days_since_last_purchase = 9999\n last_week_purchase = 0\n last_fifteendays_purchase = 0\n last_thirtydays_purchase = 0\n last_sixtydays_purchase = 0\n last_nintydays_purchase = 0\n\tlast_oneeightydays_purchase = 0\n\tdays_since_prev_purchase = 9999\n\tprev_week_purchase = 0\n prev_fifteendays_purchase = 0\n prev_thirtydays_purchase = 0\n prev_sixtydays_purchase = 0\n prev_nintydays_purchase = 0\n prev_oneeightydays_purchase = 0\n for transaction in transaction_list:\n diff_days = (model_start_date - datetime.datetime.strptime(transaction['I_DATE'], \"%Y-%m-%d %H:%M:%S\").date()).days\n if diff_days < days_since_last_purchase:\n days_since_last_purchase = diff_days\n if diff_days <= 7:\n last_week_purchase += 1\n if diff_days <= 15:\n last_fifteendays_purchase += 1\n if diff_days <= 30:\n last_thirtydays_purchase += 1\n if diff_days <= 60:\n last_sixtydays_purchase += 1\n if diff_days <= 90:\n last_nintydays_purchase += 1\n\t\tif diff_days <= 180:\n last_oneeightydays_purchase += 1\n\t\t\n\t\tdiff_days = (purchase_date - datetime.datetime.strptime(transaction['I_DATE'], \"%Y-%m-%d %H:%M:%S\").date()).days\n if diff_days < days_since_last_purchase:\n days_since_prev_purchase = diff_days\n if diff_days <= 7:\n prev_week_purchase += 1\n if diff_days <= 15:\n prev_fifteendays_purchase += 1\n if diff_days <= 30:\n prev_thirtydays_purchase += 1\n if diff_days <= 60:\n prev_sixtydays_purchase += 1\n if diff_days <= 90:\n prev_nintydays_purchase += 1\n if diff_days <= 180:\n prev_oneeightydays_purchase += 1\n\n coupon_id_dict = coupon_dict[ transaction['COUPON_ID_hash'] ]\n purchase_small_area_name_dict[transaction['SMALL_AREA_NAME']] = purchase_small_area_name_dict.get( transaction['SMALL_AREA_NAME'],0) + 1\n capsule_text_dict[ coupon_id_dict['CAPSULE_TEXT'] ] = capsule_text_dict.get( coupon_id_dict['CAPSULE_TEXT'], 0) + 1\n genre_name_dict[ coupon_id_dict['GENRE_NAME'] ] = genre_name_dict.get( coupon_id_dict['GENRE_NAME'],0 ) + 1\n coupon_large_area_name_dict[ coupon_id_dict['large_area_name'] ] = coupon_large_area_name_dict.get( coupon_id_dict['large_area_name'],0 ) + 1\n coupon_small_area_name_dict[ coupon_id_dict['small_area_name'] ] = coupon_small_area_name_dict.get( coupon_id_dict['small_area_name'],0 ) + 1\n coupon_ken_name_dict[ coupon_id_dict['ken_name'] ] = coupon_ken_name_dict.get( 
coupon_id_dict['ken_name'],0 ) + 1\n price_rate_list.append( float(coupon_id_dict['PRICE_RATE']) )\n catalog_price_list.append( float(coupon_id_dict['CATALOG_PRICE']) )\n discount_price_list.append( float(coupon_id_dict['DISCOUNT_PRICE']) )\n dispperiod_list.append( float(coupon_id_dict['DISPPERIOD']) )\n if coupon_id_dict['VALIDPERIOD'] not in ('','NA'):\n valid_period_list.append( float(coupon_id_dict['VALIDPERIOD']) )\n if coupon_id_dict['USABLE_DATE_MON'] not in ('','NA'):\n usable_date_mon_list[ float(coupon_id_dict['USABLE_DATE_MON']) ] = usable_date_mon_list.get( float(coupon_id_dict['USABLE_DATE_MON']),0 ) + 1\n usable_date_tue_list[ float(coupon_id_dict['USABLE_DATE_TUE']) ] = usable_date_tue_list.get( float(coupon_id_dict['USABLE_DATE_TUE']),0 ) + 1\n usable_date_wed_list[ float(coupon_id_dict['USABLE_DATE_WED']) ] = usable_date_wed_list.get( float(coupon_id_dict['USABLE_DATE_WED']),0 ) + 1\n usable_date_thu_list[ float(coupon_id_dict['USABLE_DATE_THU']) ] = usable_date_thu_list.get( float(coupon_id_dict['USABLE_DATE_THU']),0 ) + 1\n usable_date_fri_list[ float(coupon_id_dict['USABLE_DATE_FRI']) ] = usable_date_fri_list.get( float(coupon_id_dict['USABLE_DATE_FRI']),0 ) + 1\n usable_date_sat_list[ float(coupon_id_dict['USABLE_DATE_SAT']) ] = usable_date_sat_list.get( float(coupon_id_dict['USABLE_DATE_SAT']),0 ) + 1\n usable_date_sun_list[ float(coupon_id_dict['USABLE_DATE_SUN']) ] = usable_date_sun_list.get( float(coupon_id_dict['USABLE_DATE_SUN']),0 ) + 1\n usable_date_hol_list[ float(coupon_id_dict['USABLE_DATE_HOLIDAY']) ] = usable_date_hol_list.get( float(coupon_id_dict['USABLE_DATE_HOLIDAY']),0 ) + 1\n usable_date_before_hol_list[ float(coupon_id_dict['USABLE_DATE_BEFORE_HOLIDAY']) ] = usable_date_before_hol_list.get( float(coupon_id_dict['USABLE_DATE_BEFORE_HOLIDAY']),0 )+1\n else:\n usable_date_mon_list[3.0] = usable_date_mon_list.get( 3.0,0 ) + 1\n usable_date_tue_list[3.0] = usable_date_tue_list.get( 3.0,0 ) + 1\n usable_date_wed_list[3.0] = usable_date_wed_list.get( 3.0,0 ) + 1\n usable_date_thu_list[3.0] = usable_date_thu_list.get( 3.0,0 ) + 1\n usable_date_fri_list[3.0] = usable_date_fri_list.get( 3.0,0 ) + 1\n usable_date_sat_list[3.0] = usable_date_sat_list.get( 3.0,0 ) + 1\n usable_date_sun_list[3.0] = usable_date_sun_list.get( 3.0,0 ) + 1\n usable_date_hol_list[3.0] = usable_date_hol_list.get( 3.0,0 ) + 1\n usable_date_before_hol_list[3.0] = usable_date_before_hol_list.get( 3.0,0 ) + 1\n\n feat_list.extend([days_since_last_purchase, last_week_purchase, last_fifteendays_purchase, last_thirtydays_purchase, last_sixtydays_purchase, last_nintydays_purchase, last_oneeightydays_purchase, days_since_prev_purchase, prev_week_purchase, prev_fifteendays_purchase, prev_thirtydays_purchase, prev_sixtydays_purchase, prev_nintydays_purchase, prev_oneeightydays_purchase])\n return feat_list, feat_header, [purchase_small_area_name_dict, capsule_text_dict, genre_name_dict, coupon_large_area_name_dict, coupon_small_area_name_dict, coupon_ken_name_dict, price_rate_list, catalog_price_list, discount_price_list, dispperiod_list, valid_period_list, usable_date_mon_list, usable_date_tue_list, usable_date_wed_list, usable_date_thu_list, usable_date_fri_list, usable_date_sat_list, usable_date_sun_list, usable_date_hol_list, usable_date_before_hol_list]",
"def sell():\n return apology(\"TODO\")",
"def get_hikedetails_by_feature(feature):\n\n if (feature == \"dog\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%dogs-leash%')).all()\n\n if (feature == \"kid\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%kids%') | Hike.features.like('%strollers%')).all()\n \n if (feature == \"water\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%river%') | Hike.features.like('%beach%')).all()\n\n \n return npbyfeature",
"def getCouponFeatures(coupon_dict, master_list):\n purchase_small_area_name_dict, capsule_text_dict, genre_name_dict, coupon_large_area_name_dict, coupon_small_area_name_dict, coupon_ken_name_dict, price_rate_list, catalog_price_list, discount_price_list, dispperiod_list, valid_period_list, usable_date_mon_list, usable_date_tue_list, usable_date_wed_list, usable_date_thu_list, usable_date_fri_list, usable_date_sat_list, usable_date_sun_list, usable_date_hol_list, usable_date_before_hol_list = master_list\n\n feat_header = [\"CouponCapsuleText\", \"CouponGenreName\", \"CouponPriceRate\", \"CouponCatalogPrice\", \"CouponDiscountPrice\", \"CouponDispPeriod\", \"CouponValidPeriod\", \"CouponUsableMon\", \"CouponUsableTue\", \"CouponUsableWed\", \"CouponUsableThu\", \"CouponUsableFri\", \"CouponUsableSat\", \"CouponUsableSun\", \"CouponUsableHol\", \"CouponUsableBeforeHol\", \"CouponLargeAreaName\", \"CouponKenName\", \"CouponSmallAreaName\", \"CapsuleTextCount\", \"CapsuleTextCountNorm\", \"GenreNameCount\", \"GenreNameCountNorm\", \"LargeAreaNameCount\", \"LargeAreaNameCountNorm\", \"SmallAreaNameCount\", \"SmallAreaNameCountNorm\", \"KenNameCount\",\"KenNameCountNorm\", \"PriceRateMin\", \"PriceRateMax\", \"PriceRateMean\", \"PriceRateBetMeanMax\", \"PriceRateBetMinMean\", \"PriceRateGrtMax\", \"PriceRateLessMin\", \"CatalogPriceMin\", \"CatalogPriceMax\", \"CatalogPriceMean\", \"CatalogPriceBetMeanMax\", \"CatalogPriceBetMinMean\", \"CatalogPriceGrtMax\", \"CatalogPriceLessMin\", \"DiscountPriceMin\", \"DiscountPriceMax\", \"DiscountPriceMean\", \"DiscountPriceBetMeanMax\", \"DiscountPriceBetMinMean\", \"DiscountPriceGrtMax\", \"DiscountPriceLessMin\", \"DispPeriodMin\", \"DispPeriodMax\", \"DispPeriodMean\", \"DispPeriodBetMeanMax\", \"DispPeriodBetMinMean\", \"DispPeriodGrtMax\", \"DispPeriodLessMin\", \"MondayCount\", \"MondayCountNorm\", \"TuesdayCount\", \"TuesdayCountNorm\", \"WednesdayCount\", \"WednesdayCountNorm\", \"ThursdayCount\", \"ThursdayCountNorm\", \"FridayCount\", \"FridayCountNorm\", \"SaturdayCount\", \"SaturdayCountNorm\", \"SundayCount\", \"SundayCountNorm\", \"HolidayCount\", \"HolidayCountNorm\", \"BeforeHolidayCount\", \"BeforeHolidayCountNorm\"]\n\n if coupon_dict['USABLE_DATE_MON'] in ('','NA'):\n coupon_dict[\"USABLE_DATE_MON\"] = 3.0\n coupon_dict[\"USABLE_DATE_TUE\"] = 3.0\n coupon_dict[\"USABLE_DATE_WED\"] = 3.0\n coupon_dict[\"USABLE_DATE_THU\"] = 3.0\n coupon_dict[\"USABLE_DATE_FRI\"] = 3.0\n coupon_dict[\"USABLE_DATE_SAT\"] = 3.0\n coupon_dict[\"USABLE_DATE_SUN\"] = 3.0\n coupon_dict[\"USABLE_DATE_HOLIDAY\"] = 3.0\n coupon_dict[\"USABLE_DATE_BEFORE_HOLIDAY\"] = 3.0\n\n feat_list = [coupon_dict[\"CAPSULE_TEXT\"], coupon_dict[\"GENRE_NAME\"], coupon_dict[\"PRICE_RATE\"], coupon_dict[\"CATALOG_PRICE\"], coupon_dict[\"DISCOUNT_PRICE\"], coupon_dict[\"DISPPERIOD\"], coupon_dict[\"VALIDPERIOD\"], coupon_dict[\"USABLE_DATE_MON\"], coupon_dict[\"USABLE_DATE_TUE\"], coupon_dict[\"USABLE_DATE_WED\"], coupon_dict[\"USABLE_DATE_THU\"], coupon_dict[\"USABLE_DATE_FRI\"], coupon_dict[\"USABLE_DATE_SAT\"], coupon_dict[\"USABLE_DATE_SUN\"], coupon_dict[\"USABLE_DATE_HOLIDAY\"], coupon_dict[\"USABLE_DATE_BEFORE_HOLIDAY\"], coupon_dict[\"large_area_name\"], coupon_dict[\"ken_name\"], coupon_dict[\"small_area_name\"]]\n\n capsule_text_count, capsule_text_count_norm = getCountFromDict(coupon_dict[\"CAPSULE_TEXT\"], capsule_text_dict)\n feat_list.extend([capsule_text_count, capsule_text_count_norm])\n\n genre_name_count, genre_name_count_norm = 
getCountFromDict(coupon_dict[\"GENRE_NAME\"], genre_name_dict)\n feat_list.extend([genre_name_count, genre_name_count_norm])\n\n large_area_name_count, large_area_name_count_norm = getCountFromDict(coupon_dict[\"large_area_name\"], coupon_large_area_name_dict)\n feat_list.extend([large_area_name_count, large_area_name_count_norm])\n\n small_area_name_count, small_area_name_count_norm = getCountFromDict(coupon_dict[\"small_area_name\"], coupon_small_area_name_dict)\n feat_list.extend([small_area_name_count, small_area_name_count_norm])\n\n ken_name_count, ken_name_count_norm = getCountFromDict(coupon_dict[\"ken_name\"], coupon_ken_name_dict)\n feat_list.extend([ken_name_count, ken_name_count_norm])\n\n price_rate_out_list = getMinMaxMeanFromList(coupon_dict[\"PRICE_RATE\"], price_rate_list)\n feat_list.extend( price_rate_out_list )\n\n catalog_price_out_list = getMinMaxMeanFromList(coupon_dict[\"CATALOG_PRICE\"], catalog_price_list)\n feat_list.extend( catalog_price_out_list )\n\n discount_price_out_list = getMinMaxMeanFromList(coupon_dict[\"DISCOUNT_PRICE\"], discount_price_list)\n feat_list.extend( discount_price_out_list )\n\n dispperiod_out_list = getMinMaxMeanFromList(coupon_dict[\"DISPPERIOD\"], dispperiod_list)\n feat_list.extend( dispperiod_out_list )\n\n monday_count, monday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_MON\"], usable_date_mon_list)\n feat_list.extend([monday_count, monday_count_norm])\n\n tuesday_count, tuesday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_TUE\"], usable_date_tue_list)\n feat_list.extend([tuesday_count, tuesday_count_norm])\n\n wednesday_count, wednesday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_WED\"], usable_date_wed_list)\n feat_list.extend([wednesday_count, wednesday_count_norm])\n\n thursday_count, thursday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_THU\"], usable_date_thu_list)\n feat_list.extend([thursday_count, thursday_count_norm])\n\n friday_count, friday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_FRI\"], usable_date_fri_list)\n feat_list.extend([friday_count, friday_count_norm])\n\n saturday_count, saturday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_SAT\"], usable_date_sat_list)\n feat_list.extend([saturday_count, saturday_count_norm])\n\n sunday_count, sunday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_SUN\"], usable_date_sun_list)\n feat_list.extend([sunday_count, sunday_count_norm])\n\n holiday_count, holiday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_HOLIDAY\"], usable_date_hol_list)\n feat_list.extend([holiday_count, holiday_count_norm])\n\n before_holiday_count, before_holiday_count_norm = getCountFromDict(coupon_dict[\"USABLE_DATE_BEFORE_HOLIDAY\"], usable_date_before_hol_list)\n feat_list.extend([before_holiday_count, before_holiday_count_norm])\n\n\tvar_list, var_header = getDummyVars(capsule_text_dict, unique_capsule_text, prefix=\"CapsuleText_\")\n\tfeat_header.extend(var_header)\n\tfeat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVars(genre_name_dict, unique_genre_name, prefix=\"GenreName_\")\n\tfeat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVars(coupon_large_area_name_dict, unique_large_area_name, prefix=\"LargeAreaName_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVars(coupon_small_area_name_dict, unique_small_area_name, prefix=\"SmallAreaName_\")\n feat_header.extend(var_header)\n 
feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVars(coupon_ken_name_dict, unique_ken_name, prefix=\"KenName_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_mon_list, prefix=\"UsableMonday_\")\n\tfeat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_tue_list, prefix=\"UsableTuesday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_wed_list, prefix=\"UsableWednesday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_thu_list, prefix=\"UsableThursday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_fri_list, prefix=\"UsableFriday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_sat_list, prefix=\"UsableSaturday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_sun_list, prefix=\"UsableSunday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_hol_list, prefix=\"UsableHoliday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\n\tvar_list, var_header = getDummyVarsDays(usable_date_before_hol_list, prefix=\"UsableBeforeHoliday_\")\n feat_header.extend(var_header)\n feat_list.extend(var_list)\n\t\n return feat_list, feat_header",
"def get_feature_set_SA(tweet):\n features= {}\n return features",
"def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample",
"def __getitem__(self, feature_name):\n return self.get_feature_by_name(feature_name)",
"def product(self):\n if self._data:\n return self._get_info(\"PRODUCT\")\n\n\n product_id = int(self.tracking_number[2:4])\n\n if product_id in range(10, 68):\n return \"Business-Parcel\"\n elif product_id == 71:\n return \"Cash-Service (+DAC)\"\n elif product_id == 72:\n return \"Cash-Service+Exchange-Service\"\n elif product_id == 74:\n return \"DeliveryAtWork-Service\"\n elif product_id == 75:\n return \"Guaranteed 24-Service\"\n elif product_id == 76:\n return \"ShopReturn-Service\"\n elif product_id == 78:\n return \"Intercompany-Service\"\n elif product_id == 85:\n return \"Express-Parcel\"\n elif product_id == 87:\n return \"Exchange-Service Hintransport\"\n elif product_id == 89:\n return \"Pick&Return/Ship\"\n else:\n # Not explicitly mentiond in the docs, but apparently just a regular parcel\n return \"Business-Parcel\"",
"def extra_products(self, target):\r\n return []",
"def extract_info(cls, receipt, reader):\n extracted_data = ShoppingTrip()\n return extracted_data",
"def price_data(sp_available=False, sp_traded=False, ex_best_offers=False, ex_all_offers=False, ex_traded=False):\n args = locals()\n return [\n k.upper() for k, v in args.items() if v is True\n ]",
"def get_one_meal():",
"def _GetITunesProductInfo(cls, verify_response):\n product_id = verify_response.GetProductId()\n base_product, billing_cycle = product_id.rsplit('_', 1)\n assert billing_cycle in ('month', 'year'), billing_cycle\n return Subscription._ITUNES_PRODUCTS[base_product]",
"def project(self, feature):\n return feature",
"def get(self, x):\n key = self.feature_def.key_func(x)\n return self.cache.get(key)",
"def topdia(x):\r\n return Feature(x, \"TopDia\")",
"def featureByName(self, name):\n for feature in self.features:\n if feature.name == name:\n return feature\n return None"
] |
[
"0.5571388",
"0.54234684",
"0.5408792",
"0.5228844",
"0.517373",
"0.5148059",
"0.51403916",
"0.51266134",
"0.50367516",
"0.5029217",
"0.5013141",
"0.49834538",
"0.49649334",
"0.49609634",
"0.49595377",
"0.4956056",
"0.4929668",
"0.4919164",
"0.49121955",
"0.49118873",
"0.4909091",
"0.48839837",
"0.48422676",
"0.48009187",
"0.47974896",
"0.47896942",
"0.47742394",
"0.4772318",
"0.47619292",
"0.4731083"
] |
0.6703345
|
0
|
Function to label riders by their total number of trips
|
def _label_rider_by_trip_frequency(self, rider):
if rider['total_num_trips'] <= 5*self.duration:
label = 0
elif rider['total_num_trips'] <= 20*self.duration:
label = 1
elif rider['total_num_trips'] > 20*self.duration:
label = 2
else:
label = -1
return label
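
For reference, a minimal standalone sketch of the labelling rule above; `duration` is assumed to stand in for the class attribute `self.duration` (the length of the observation window), and the sample inputs are illustrative only. Note that the final else branch is only reached for NaN-like inputs, since the three comparisons are otherwise exhaustive.

import math

# Standalone version of the labelling rule above; `duration` is assumed to be
# the length of the observation window (an assumption, not shown in the row).
def label_rider_by_trip_frequency(total_num_trips, duration):
    if total_num_trips <= 5 * duration:
        return 0    # infrequent rider
    elif total_num_trips <= 20 * duration:
        return 1    # moderate rider
    elif total_num_trips > 20 * duration:
        return 2    # frequent rider
    else:
        return -1   # reached only for NaN-like inputs

print([label_rider_by_trip_frequency(t, 1) for t in (3, 12, 50, math.nan)])
# -> [0, 1, 2, -1]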
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def num_labels(self) -> int:\n raise NotImplementedError",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def getLabels(self):\n return self.numToLabel",
"def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])",
"def labelingLVQ(self):\n numLabels = len(np.unique(self.y))\n for i, x in enumerate(self.x):\n w = self.find_closest(x)[0]\n for nl in range(numLabels):\n if self.y[i] == nl:\n self.labels[nl, w[0], w[1]] += 1\n return self.labels",
"def count_votes(self, neighbours=()):\n labels = []\n data = neighbours\n # create the list made up of labels.\n for x in range(len(data)):\n labels.append(data[x][-1])\n\n # count the appearance of labels.\n count = [[x, labels.count(x)] for x in set(labels)]\n # Sort the labels in descending order by using their frequency\n vote = sorted(count, key=itemgetter(-1), reverse=True)\n # return the prediction\n # print(\"[{}]\".format(vote[0][0]))\n return vote[0][0]",
"def nb_triples(self) -> int:\n return 0",
"def count_labels(self, add_no_ne_label=False):\n return sum([count[1] for count in self.get_label_counts(add_no_ne_label=add_no_ne_label)])",
"def triples():",
"def count_nodes(self, term=None, labels: istr = None):",
"def countTriplets(arr, r):\n c_2, c_3 = Counter(), Counter()\n n_triplets = 0\n for e in arr:\n # print(f'arr: {arr}, e: {e}, c_3: {c_3}, c_2: {c_2}, n_triplets: {n_triplets}')\n if e in c_3:\n n_triplets += c_3[e]\n if e in c_2:\n c_3[e*r] += c_2[e]\n c_2[e*r] += 1\n return n_triplets",
"def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret",
"def count_nodes(self, term=None, labels: istr = None) -> int:",
"def get_num_labels(self):\n return self.num_labels",
"def count_ner_labels(self, y_true, y_pred):\n return Counter(y_true), Counter(y_pred)",
"def createLabels(edge):\n k = removeLabel(edge)\n return k + \"_L\", k + \"_R\"",
"def _make_label_ranking(n_samples, n_classes, random_state):\n rankings = np.zeros((n_samples, n_classes), dtype=np.int64)\n\n for sample in range(n_samples):\n rankings[sample] = random_state.permutation(n_classes) + 1\n\n return rankings",
"def rank_labels(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n vals = self.clf.decision_function(vec)\n # print vals.shape\n # print len(self.labelmap)\n labelvals = {}\n for idx in range(len(self.labelmap)):\n labelvals[self.labelmap[idx]] = vals[0,idx]\n sortedlabels = sorted(labelvals.items(), key=itemgetter(1),\n reverse=True)\n labels = [item[0] for item in sortedlabels]\n return labels",
"def tarnon_2_scoreslabels(tar,non):\n scores = np.concatenate((tar,non))\n labels = np.zeros_like(scores,dtype=int)\n labels[:len(tar)] = 1.0\n return scores, labels",
"def digit_indices_to_labels(digits_run1, digits_run2):\n labels_run1, labels_run2 = np.zeros(shape=256), np.zeros(shape=256)\n for finger_i in range(1, 6):\n labels_run1[digits_run1[finger_i - 1]] = finger_i\n labels_run2[digits_run2[finger_i - 1]] = finger_i\n return labels_run1, labels_run2",
"def countTriplets1(arr, r):\n from collections import Counter\n arr_dict = Counter()\n ratio_range = []\n triplets = 0\n\n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n\n # Build a list for easier iteration\n for key, value in arr_dict.items():\n ratio_range.append(tuple([key,value]))\n ratio_range.sort()\n \n for y in range(len(ratio_range)-2):\n firstvalue = ratio_range[y][1]\n secondvalue = ratio_range[y+1][1]\n thirdvalue = ratio_range[y+2][1]\n print(ratio_range, firstvalue, secondvalue,thirdvalue)\n\n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n print(summedvalue, triplet_count)\n triplets += triplet_count\n\n return triplets, arr_dict, ratio_range",
"def ridgelen(id,ir,jr,fr):\n\n\n\tlr=ridgelenloop(id,ir,jr,fr)\n\n\treturn lr",
"def getLabel(labels):\r\n elems = {}\r\n for l in labels:\r\n if l not in elems.keys():\r\n elems[l] = 1\r\n else:\r\n elems[l] += 1\r\n counts = sorted(elems.values(), reverse=True)\r\n if len(counts) > 1 and counts[0] == counts[1]:\r\n return choice(list(elems.keys()))\r\n return sorted(elems, key=elems.get, reverse=True)[0]",
"def extract_labels(tweets):\n result = extract_retweet_counts(tweets)\n return result",
"def nr_labels(self):\n return self.model.nr_labels",
"def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }",
"def getClassCounts(b):\n c = {k:0 for k in labels.keys()}\n for r in b:\n c[r[0]] += 1\n return c",
"def count_labels(labels, num_classes):\n return np.array([\n np.bincount(segment_labels, minlength=num_classes) for _, segment_labels in labels\n ])",
"def _tally_votes(self, labels, distances):\n votes = collections.defaultdict(int)\n for i, index in enumerate(distances.order(ascending=True).index):\n if i < self.k:\n votes[labels[index]] += 1\n else:\n break\n return votes",
"def totalTrips(analyzer):\n return model.totalTrips(analyzer)"
] |
[
"0.62115324",
"0.62078524",
"0.6168908",
"0.59179735",
"0.59134716",
"0.5711058",
"0.56915367",
"0.56866425",
"0.56840545",
"0.5661594",
"0.5646231",
"0.5631034",
"0.5630065",
"0.5607377",
"0.5585696",
"0.55522656",
"0.55182344",
"0.55077565",
"0.54725116",
"0.5469419",
"0.54656714",
"0.5460074",
"0.54221",
"0.5416525",
"0.5414436",
"0.54139256",
"0.5409198",
"0.5403266",
"0.5394348",
"0.5388661"
] |
0.7017241
|
0
|
Function to label riders as either commuter rail riders or others
|
def _label_commuter_rail_rider(self, rider):
if (rider['servicebrand_Commuter Rail'] > 0) and (rider['zonecr_1a'] == 0):
label = 'CR except zone 1A'
else:
label = 'others'
return label
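
For reference, a minimal standalone sketch of the rule above applied row-wise with pandas; the column names follow the document and the example rows are fabricated for illustration only.

import pandas as pd

# Standalone version of the rule above; labels a rider as a commuter rail
# rider unless their only CR usage is in zone 1A.
def label_commuter_rail_rider(rider):
    if (rider['servicebrand_Commuter Rail'] > 0) and (rider['zonecr_1a'] == 0):
        return 'CR except zone 1A'
    return 'others'

# Fabricated example rows for illustration.
riders = pd.DataFrame({'servicebrand_Commuter Rail': [3, 0, 2],
                       'zonecr_1a': [0, 0, 1]})
riders['label'] = riders.apply(label_commuter_rail_rider, axis=1)
print(riders['label'].tolist())  # -> ['CR except zone 1A', 'others', 'others']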
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _label_rider_by_trip_frequency(self, rider):\n if rider['total_num_trips'] <= 5*self.duration:\n label = 0\n elif rider['total_num_trips'] <= 20*self.duration:\n label = 1\n elif rider['total_num_trips'] > 20*self.duration:\n label = 2\n else:\n label = -1\n return label",
"def getRaceLabel(x,binary=False):\r\n ID = x.split(\"/\")[-1].split(\"_\")[0]\r\n label = truth[truth.DummyID == int(ID)]['Medview_Race'].values[0]\r\n\r\n if label == 'African American':\r\n return 0\r\n elif label == \"White\":\r\n return 1\r\n else:\r\n return 2",
"def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }",
"def lyft_labels():\n\n return {\n 0: 'None',\n 7: 'Roads',\n 10: 'Vehicles'\n }",
"def get_labels(self):\n return [\"contradiction\", \"entailment\", \"neutral\"]",
"def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}",
"def createLabels(edge):\n k = removeLabel(edge)\n return k + \"_L\", k + \"_R\"",
"def get_reaction_label(rmg_reaction):\n reactants = rmg_reaction.reactants\n products = rmg_reaction.products\n if len(reactants) > 1:\n reactants_string = '+'.join([reactant.molecule[0].toSMILES() for reactant in reactants])\n else:\n reactants_string = reactants[0].molecule[0].toSMILES()\n if len(products) > 1:\n products_string = '+'.join([product.molecule[0].toSMILES() for product in products])\n else:\n products_string = products[0].molecule[0].toSMILES()\n reaction_label = '_'.join([reactants_string, products_string])\n return reaction_label",
"def readLabels(instrProgram, instrName, is_drum):\n if (is_drum):\n return 'Percussion'\n elif ((instrProgram in range(32,40) or \"bass\" in instrName.lower())):\n return 'Bass'\n elif((\"vocal\" in instrName.lower()) or (\"voice\" in instrName.lower())):\n return 'Vocals'\n elif(\"chord\" in instrName.lower()):\n return 'Chords'\n else:\n return None",
"def get_label(urs):\n return assign_term(urs)[1]",
"def _rectified_relabel(infr, cc_subgraphs):\n # Determine which names can be reused\n from wbia.scripts import name_recitifer\n\n infr.print('grouping names for rectification', 3)\n grouped_oldnames_ = [\n list(nx.get_node_attributes(subgraph, 'name_label').values())\n for count, subgraph in enumerate(cc_subgraphs)\n ]\n # Make sure negatives dont get priority\n grouped_oldnames = [\n [n for n in group if len(group) == 1 or n > 0] for group in grouped_oldnames_\n ]\n infr.print(\n 'begin rectification of %d grouped old names' % (len(grouped_oldnames)), 2\n )\n new_labels = name_recitifer.find_consistent_labeling(\n grouped_oldnames, verbose=infr.verbose >= 3\n )\n infr.print('done rectifying new names', 2)\n new_flags = [\n not isinstance(n, int) and n.startswith('_extra_name') for n in new_labels\n ]\n\n for idx in ut.where(new_flags):\n new_labels[idx] = infr._next_nid()\n\n for idx, label in enumerate(new_labels):\n if label < 0 and len(grouped_oldnames[idx]) > 1:\n # Remove negative ids for grouped items\n new_labels[idx] = infr._next_nid()\n return new_labels",
"def relation_label(self, rid):\n relations = self._load_relations()\n return relations[rid][\"label\"]",
"def test_rlabel(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n label = \"R\"\n ax.set_rlabel(label)\n assert ax.get_rlabel() == label",
"def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]",
"def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels",
"def assign_colour_label_data(catl):\n\n logmstar_arr = catl.logmstar.values\n u_r_arr = catl.modelu_rcorr.values\n\n colour_label_arr = np.empty(len(catl), dtype='str')\n for idx, value in enumerate(logmstar_arr):\n\n # Divisions taken from Moffett et al. 2015 equation 1\n if value <= 9.1:\n if u_r_arr[idx] > 1.457:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value > 9.1 and value < 10.1:\n divider = 0.24 * value - 0.7\n if u_r_arr[idx] > divider:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value >= 10.1:\n if u_r_arr[idx] > 1.7:\n colour_label = 'R'\n else:\n colour_label = 'B'\n \n colour_label_arr[idx] = colour_label\n \n catl['colour_label'] = colour_label_arr\n\n return catl",
"def classify(self, features):\n node = self.tree\n answer = node.right_label + node.left_label\n while len(answer)>1:\n if node.model.classify(features)==+1:\n answer=node.left_label\n node=node.left\n else:\n answer=node.right_label\n node=node.right \n return answer[0]",
"def one_v_all(description):\n label = [0, 0, 0]\n for raw in description.split(\"|\")[4:]:\n line = raw.strip()\n if \"midbrain\" in line:\n label[0] = 1\n if \"forebrain\" in line:\n label[1] = 1\n if \"hindbrain\" in line:\n label[2] = 1\n return label",
"def test_labels(ruler: SpaczzRuler) -> None:\n assert all(\n [label in ruler.labels for label in [\"GPE\", \"STREET\", \"DRUG\", \"NAME\", \"BAND\"]]\n )\n assert len(ruler.labels) == 5",
"def propagateLabel(self, l1, l2):\n\n if l1 != l2:\n winner = min(l1, l2)\n loser = max(l1, l2)\n loserN = 0\n superiorN = 0\n for i,l in enumerate(self.labels):\n if l == loser:\n loserN += 1\n self.labels[i] = winner\n if l > loser:\n superiorN += 1\n self.labels[i] = l - 1\n\n # print('Loser Label is ' + str(loser) + ' . With ' + str(loserN) + ' associated cells. Winner label is ' + str(winner))",
"def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]",
"def classify_comm(bf, t1, t2):\n # Per Special --> all 3 targets OpI\n if is_opi(bf, t1) and is_opi(t1, t2) and is_opi(t2, bf):\n return \"Per Special\"\n # Cyclic Shift --> all 3 targets coplanar and AnI\n if (not (is_inter(bf, t1) or is_inter(t1, t2) or is_inter(t2, bf))) and \\\n is_coplanar(bf, t1 ,t2):\n return \"Cyclic Shift\"\n # Orthogonal --> all 3 targets non-interchangeable, 2 targets opposite to buffer\n if (not (is_inter(bf, t1) or is_inter(t1, t2) or is_inter(t2, bf))) and \\\n is_oni(bf, t1) and is_oni(bf, t2) and is_oni(t1, t2):\n return \"Orthogonal\"\n # Columns --> 2 targets OpI, 3rd not interchangeable to first 2\n if has_column(bf, t1, t2):\n return \"Columns\"\n # Else, is Pure/A9\n return \"Pure/A9\"",
"def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name",
"def convert_tcia_labels(mask, keep_all_label=False):\n \n mask[np.isin(mask, [14])] = 0 # Remove duodenum\n label = [1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1] # no right kidney\n\n if keep_all_label:\n label += [0,0]\n\n return mask, label",
"def get_pattern_labels(self, pipeline: str) -> Set[str]:",
"def get_labels(self):\n if self.option == \"term\":\n return ['platform characteristics', 'atmospheric winds', 'radio wave','weather events', 'geomagnetism', 'atmospheric electricity','microwave', 'atmospheric temperature', 'atmospheric water vapor','atmospheric pressure', 'aerosols', 'atmospheric radiation','atmospheric chemistry', 'precipitation', 'sensor characteristics','radar', 'infrared wavelengths', 'visible wavelengths','weather/climate advisories', 'clouds', 'lidar', 'ocean optics','ultraviolet wavelengths', 'cryospheric indicators','land use/land cover', 'topography', 'surface thermal properties','spectral/engineering', 'soils', 'snow/ice', 'geothermal dynamics','natural hazards', 'surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics']\n elif self.option == \"mostdepth\":\n return ['flight data logs', 'turbulence', 'radio wave flux', 'lightning', 'magnetic field', 'atmospheric conductivity', 'electric field', 'data synchronization time', 'brightness temperature', 'vertical profiles', 'water vapor profiles', 'air temperature', 'upper level winds', 'atmospheric pressure measurements', 'upper air temperature', 'humidity', 'dew point temperature', 'aerosol particle properties', 'emissivity', 'trace gases/trace species', 'liquid precipitation', 'cloud liquid water/ice', 'microwave radiance', 'sensor counts', 'total pressure', 'airspeed/ground speed', 'total temperature', 'static pressure', 'wind speed', 'wind direction', 'radar reflectivity', 'doppler velocity', 'infrared imagery', 'visible imagery', 'water vapor', 'vertical wind velocity/speed', 'aerosol backscatter', 'weather forecast', 'tropical cyclones', 'visible radiance', 'infrared radiance', 'total precipitable water', 'boundary layer temperature', 'atmospheric temperature indices', 'cloud height', 'flight level winds', 'cloud droplet distribution', 'cloud droplet concentration/size', 'cloud condensation nuclei', 'cloud microphysics', 'hydrometeors', 'ozone', 'wind profiles', 'cloud base temperature', 'cloud base height', 'liquid water 
equivalent', 'solar radiation', 'planetary boundary layer height', 'surface winds', 'precipitation amount', 'precipitation rate', 'surface pressure', 'rain', 'cloud optical depth/thickness', 'aerosol extinction', 'aerosol optical depth/thickness', 'cirrus cloud systems', 'lidar depolarization ratio', 'radar backscatter', 'radar cross-section', 'return power', 'mean radial velocity', 'radiance', 'air quality', 'climate advisories', 'atmospheric emitted radiation', 'optical depth/thickness', 'surface temperature', 'ultraviolet flux', 'spectrum width', 'microwave imagery', 'lidar backscatter', 'relative humidity', 'u/v wind components', 'wind speed/wind direction', 'radar imagery', 'snow depth', 'land use/land cover classification', 'digital elevation/terrain model (dem)', 'snow', 'droplet size', 'droplet concentration/size', 'drizzle', 'precipitation anomalies', 'snow water equivalent', 'solid precipitation', 'total surface precipitation rate', 'particle size distribution', 'skin temperature', 'attitude characteristics', 'land surface temperature', 'hail', 'reflectance', 'soil moisture/water content', 'soil temperature', 'soil bulk density', 'surface roughness', 'present weather', 'snow density', 'ambient temperature', 'aerosol forward scatter', 'floods', 'snow cover', 'sigma naught', 'precipitable water', 'stage height', 'rivers/streams', 'shortwave radiation', 'photosynthetically active radiation', 'longwave radiation', 'net radiation', 'hourly precipitation amount', '24 hour precipitation amount', 'soil moisture', 'satellite orbits/revolution', 'sea surface temperature', 'heat flux', 'latent heat flux', 'cloud fraction', '3 and 6 hour precipitation amount', 'geopotential height', 'particulate matter', 'particle images', 'water vapor indices', 'horizontal wind velocity/speed', 'electrical conductivity', 'dissolved carbon dioxide', 'hurricanes', 'tropical cyclone track', 'convective clouds/systems (observed/analyzed)', 'cloud top height', 'viewing geometry', 'temperature profiles', 'vertical wind shear', 'wind shear', 'carbon monoxide', 'sea level pressure', 'water vapor tendency', 'potential temperature', 'angstrom exponent', 'ultraviolet radiation', 'solar irradiance', 'scattering', 'absorption', 'water vapor mixing ratio profiles', 'sea surface temperature indices', 'extreme eastern tropical pacific sst', 'sedimentation', 'erosion', 'sediment transport', 'sediments', 'tropopause', 'ocean chemistry', 'ocean optics', 'ocean temperature', 'salinity/density', 'pigments', 'ocean color', 'attenuation/transmission', 'inorganic carbon', 'organic carbon', 'photosynthetically available radiation', 'chlorophyll', 'optical depth', 'fluorescence', 'vegetation index', 'gelbstoff', 'phytoplankton', 'vegetation index2', 'cloud precipitable water', 'landscape ecology', 'ultraviolet radiance', 'cloud ceiling', 'aerosol radiance', 'carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles', 'organic particles', 'sulfate particles', 'radiative flux', 'transmittance', 'atmospheric stability', 'cloud asymmetry', 'cloud frequency', 'cloud top pressure', 'cloud top temperature', 'cloud vertical distribution', 'cloud emissivity', 'cloud radiative forcing', 'cloud reflectance', 'rain storms', 'reflected infrared', 'thermal infrared', 'incoming solar radiation', 'clouds', 'cloud properties', 'cloud types', 'orbital characteristics', 'sensor characteristics', 'maximum/minimum temperature', 'condensation', 'platform characteristics', 'geolocation', 'geodetics', 'coordinate reference system', 'aerosols', 
'topographical relief maps', 'terrain elevation', 'normalized difference vegetation index (ndvi)', 'infrared flux', 'visible flux', 'albedo', 'land use/land cover', 'topography', 'lidar', 'lidar waveform', 'plant phenology', 'vegetation cover', 'crop/plant yields', 'land use classes', 'landscape patterns', 'forest harvesting and engineering', 'forest management', 'total surface water', 'agricultural plant science', 'photosynthesis', 'primary production', 'leaf characteristics', 'evapotranspiration', 'fire occurrence', 'surface thermal properties', 'canopy characteristics', 'evergreen vegetation', 'crown', 'deciduous vegetation', 'anisotropy', 'fire ecology', 'biomass burning', 'wildfires', 'topographical relief', 'burned area', 'surface radiative properties', 'environmental sustainability', 'boundaries', 'anthropogenic/human influenced ecosystems', 'emissions', 'sulfur dioxide', 'population', 'infrastructure', 'environmental assessments', 'public health', 'conservation', 'agriculture production', 'administrative divisions', 'economic resources', 'socioeconomics', 'lake/pond', 'rivers/stream', 'political divisions', 'environmental vulnerability index (evi)', 'ecosystems', 'urban areas', 'sustainability', 'treaty agreements/results', 'human settlements', 'population estimates', 'nitrogen dioxide', 'cropland', 'pasture', 'particulates', 'cyclones', 'mortality', 'environmental impacts', 'droughts', 'earthquakes', 'population distribution', 'fertilizers', 'animal manure and waste', 'urbanization/urban sprawl', 'landslides', 'avalanche', 'urban lands', 'mangroves', 'volcanic eruptions', 'pesticides', 'population size', 'population density', 'lakes/reservoirs', 'surface water', 'rural areas', 'infant mortality rates', 'amphibians', 'mammals', 'carbon', 'sulfur oxides', 'methane', 'non-methane hydrocarbons/volatile organic compounds', 'nitrogen oxides', 'natural gas', 'coal', 'coastal elevation', 'biodiversity functions', 'nuclear radiation exposure', 'radiation exposure', 'poverty levels', 'malnutrition', 'wetlands', 'sea level rise', 'vulnerability levels/index', 'ground water', 'snow/ice', 'electricity', 'energy production/use', 'sustainable development', 'deforestation', 'household income', 'discharge/flow', 'hydropattern', 'nitrogen', 'phosphorus', 'carbon dioxide', 'alpine/tundra', 'forests', 'vegetation', 'permafrost', 'nutrients', 'plant characteristics', 'leaf area index (lai)', 'soil gas/air', 'ammonia', 'nitrous oxide', 'ecosystem functions', 'litter characteristics', 'soil chemistry', 'soil respiration', 'active layer', 'soil depth', 'cation exchange capacity', 'organic matter', 'soil porosity', 'soil texture', 'permafrost melt', 'land subsidence', 'freeze/thaw', 'surface water features', 'chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride', 'molecular hydrogen', 'sulfur compounds', 'fire models', 'biomass', 'dominant species', 'vegetation species', 'sulfur', 'tree rings', 'soil classification', 'heat index', 'sea ice concentration', 'ocean heat budget', 'reforestation', 'even-toed ungulates', 'species recruitment', 'population dynamics', 'range changes', 'topographic effects', 'land resources', 'river ice depth/extent', 'snow melt', 'river ice', 'animal commodities', 'animal ecology and behavior', 'phenological changes', 'water depth', 'inundation', 'forest fire science', 'biogeochemical cycles', 'radiative forcing', 'soil heat budget', 'drainage', 'respiration rate', 'river/lake ice breakup', 'river/lake ice freeze', 'reclamation/revegetation/restoration', 'permafrost 
temperature', 'indigenous/native species', 'fire dynamics', 'lichens', 'plants', 'plant succession', 'carbon flux', 'coastal', 'salt marsh', 'degradation', 'altitude', 'carbon and hydrocarbon compounds', 'halocarbons and halogens', 'forest composition/vegetation structure', 'water vapor indicators', 'barometric altitude', 'atmospheric water vapor', 'terrestrial ecosystems', 'volatile organic compounds', 'boundary layer winds', 'forest fire danger index', 'periglacial processes', 'landscape processes', 'evaporation', 'soil horizons/profile', 'shrubland/scrub', 'soil ph', 'soils', 'soil water holding capacity', 'community structure', 'pingo', 'soil color', 'virtual temperature', 'formaldehyde', 'hydroxyl', 'photolysis rates', 'cloud dynamics', 'nitric oxide', 'molecular oxygen', 'smog', 'peroxyacyl nitrate', 'hydrogen compounds', 'nitrogen compounds', 'oxygen compounds', 'stable isotopes', 'chemical composition', 'actinic flux', 'tropospheric ozone', 'fossil fuel burning', 'industrial emissions', 'denitrification rate', 'sunshine', 'runoff', 'soil structure', 'mosses/hornworts/liverworts', 'peatlands', 'hydraulic conductivity', 'snow/ice temperature', 'vegetation water content', 'discharge', 'chlorophyll concentrations', 'outgoing longwave radiation', 'geomorphic landforms/processes', 'soil compaction', 'soil impedance', 'canopy transmittance', 'water table', 'decomposition', 'water temperature', 'dissolved gases', 'total dissolved solids', 'agricultural expansion', 'forest science', 'pressure tendency', 'visibility', 'biomass dynamics', 'agricultural lands', 'grasslands', 'savannas', 'grazing dynamics/plant herbivory', 'herbivory', 'paleoclimate reconstructions', 'drought indices', 'fire weather index', 'animal yields', 'multivariate enso index', 'dissolved solids', 'ocean currents', 'salinity', 'coastal processes', 'atmospheric pressure', 'afforestation/reforestation', 'fresh water river discharge', 'surface water chemistry', 'drainage basins', 'resource development site', 'dunes', 'flood plain', 'endangered species', 'precipitation indices', 'temperature indices', 'forest yields', 'stratigraphic sequence', 'freeze/frost', 'frost', 'hydrogen cyanide', 'land management', 'nutrient cycling', 'industrialization', 'suspended solids', 'deserts', 'weathering', 'gas flaring', 'atmospheric temperature', 'ice extent', 'fraction of absorbed photosynthetically active radiation (fapar)', 'marshes', 'swamps', 'lake ice', 'atmospheric winds', 'watershed characteristics', 'transportation', 'soil rooting depth', 'isotopes', 'cultural features', 'consumer behavior', 'boundary surveys', 'aquifers', 'land productivity', 'water quality/water chemistry', 'sediment composition', 'dissolved oxygen', 'surface water processes/measurements', 'turbidity', 'conductivity', 'ph', 'calcium', 'magnesium', 'potassium', 'micronutrients/trace elements', 'social behavior', 'sulfate', 'sediment chemistry', 'biogeochemical processes', 'water ion concentrations', 'cropping systems', 'percolation', 'groundwater chemistry', 'reforestation/revegetation', 'species/population interactions', 'soil infiltration', 'alkalinity', 'soil fertility', 'phosphorous compounds', 'radioisotopes', 'cooling degree days', 'angiosperms (flowering plants)', 'glacial landforms', 'glacial processes', 'contour maps', 'estuaries', 'methane production/use', 'natural gas production/use', 'petroleum production/use', 'visualization/image processing', 'subsetting/supersetting', 'transformation/conversion', 'forest mensuration', 'acid deposition', 
'differential pressure', 'precipitation', 'marine ecosystems', 'consumption rates', 'radio wave', 'soil organic carbon (soc)', 'soil erosion', 'halocarbons', 'trace elements/trace metals', 'biomass energy production/use', 'riparian wetlands', 'soil consistence', 'snow stratigraphy', 'thermal conductivity', 'estuary', 'tidal height', 'plant diseases/disorders/pests', 'layered precipitable water', 'atmospheric chemistry', 'water vapor concentration profiles', 'specific humidity', 'total runoff', 'pressure thickness', 'wind stress', 'atmospheric heating', 'conduction', 'hydrogen chloride', 'nitric acid', 'radar', 'land surface/agriculture indicators', 'satellite soil moisture index', 'chlorine nitrate', 'chlorofluorocarbons', 'dinitrogen pentoxide', 'antenna temperature', 'glaciers', 'ice sheets', 'dimethyl sulfide', 'potential vorticity', 'ice fraction', 'atmospheric radiation', 'runoff rate', 'temperature tendency', 'wind dynamics', 'wind direction tendency', 'base flow', 'bromine monoxide', 'chlorine monoxide', 'methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy', 'cloud base pressure', 'temperature anomalies', 'nitrate', 'ocean mixed layer', 'precipitation trends', 'temperature trends', 'convection', 'ground ice', 'oxygen', 'phosphate', 'solar induced fluorescence', 'chlorine dioxide', 'sun-earth interactions', 'uv aerosol index', 'volcanic activity', 'potential evapotranspiration', 'ultraviolet wavelengths', 'ice temperature', 'sea surface skin temperature', 'sea surface height', 'sublimation', 'convective surface precipitation rate', 'hydrogen fluoride', 'airglow', 'energy deposition', 'x-ray flux', 'electron flux', 'proton flux', 'magnetic fields/magnetic currents']\n else:\n return ['platform characteristics', 'atmospheric winds','radio wave', 'weather events', 'geomagnetism','atmospheric electricity', 'microwave', 'atmospheric temperature','atmospheric water vapor', 'atmospheric pressure', 'aerosols','atmospheric radiation', 'atmospheric chemistry', 'precipitation','sensor characteristics', 'radar', 'infrared wavelengths','visible wavelengths', 'weather/climate advisories', 'clouds','lidar', 'ocean optics', 'ultraviolet wavelengths','cryospheric indicators', 'land use/land cover', 'topography','surface thermal properties', 'spectral/engineering', 'soils','snow/ice', 'geothermal dynamics', 'natural hazards','surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic 
landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics','flight data logs','wind dynamics', 'radio wave flux', 'lightning', 'magnetic field','atmospheric conductivity', 'electric field','data synchronization time', 'brightness temperature','upper air temperature', 'water vapor profiles','surface temperature', 'upper level winds','atmospheric pressure measurements', 'water vapor indicators','aerosol particle properties', 'emissivity','trace gases/trace species', 'liquid precipitation','cloud microphysics', 'microwave radiance', 'sensor counts','total pressure', 'airspeed/ground speed', 'total temperature','static pressure', 'humidity', 'radar reflectivity','doppler velocity', 'infrared imagery', 'visible imagery','aerosol backscatter', 'weather forecast', 'tropical cyclones','visible radiance', 'infrared radiance','atmospheric temperature indices', 'cloud droplet distribution','cloud condensation nuclei', 'hydrometeors', 'oxygen compounds','wind profiles', 'liquid water equivalent', 'solar radiation','planetary boundary layer height', 'surface winds','precipitation amount', 'precipitation rate', 'surface pressure','aerosol extinction', 'aerosol optical depth/thickness','tropospheric/high-level clouds (observed/analyzed)','lidar depolarization ratio', 'radar backscatter','radar cross-section', 'return power', 'radial velocity','radiance', 'climate advisories', 'atmospheric emitted radiation','optical depth/thickness', 'ultraviolet flux', 'spectrum width','microwave imagery', 'lidar backscatter', 'radar imagery','snow depth', 'land use/land cover classification','terrain elevation', 'solid precipitation', 'droplet size','droplet concentration/size', 'precipitation anomalies','snow water equivalent', 'total surface precipitation rate','skin temperature', 'water vapor', 'attitude characteristics','land surface temperature', 'reflectance','soil moisture/water content', 'soil temperature','soil bulk density', 'surface roughness', 'present weather','snow density', 'geothermal temperature','aerosol forward scatter', 'floods', 'snow cover', 'sigma naught','precipitable water', 'surface water processes/measurements','surface water features', 'shortwave radiation','photosynthetically active radiation', 'longwave radiation','net radiation', 'flight level winds', 'soil moisture','satellite orbits/revolution', 'heat flux','precipitation profiles', 'geopotential height','particulate matter', 'particle images', 'water vapor indices','electrical conductivity', 'gases', 'sea surface temperature','convective clouds/systems (observed/analyzed)','viewing geometry', 'wind shear','carbon and hydrocarbon compounds', 'sea level pressure','water vapor processes', 'ultraviolet radiation','solar irradiance', 'scattering', 'absorption','sea surface temperature indices', 'sedimentation', 'erosion','sediment transport', 'sediments', 'tropopause', 'nan', 'pigments','attenuation/transmission', 'inorganic carbon', 'organic carbon','photosynthetically available radiation', 'chlorophyll','optical depth', 'fluorescence', 'vegetation index', 'gelbstoff','plankton', 'vegetation index2', 'landscape ecology','ultraviolet radiance', 'aerosol radiance','carbonaceous aerosols', 
'dust/ash/smoke', 'nitrate particles','organic particles', 'sulfate particles', 'radiative flux','transmittance', 'atmospheric stability','cloud radiative transfer', 'rain storms', 'reflected infrared','thermal infrared', 'incoming solar radiation', 'cloud types','orbital characteristics', 'geolocation','coordinate reference system', 'infrared flux', 'visible flux','albedo', 'lidar waveform', 'plant phenology', 'vegetation cover','crop/plant yields', 'land use classes', 'landscape patterns','forest harvesting and engineering', 'forest management','ecosystem functions', 'leaf characteristics', 'fire ecology','total surface water', 'primary production', 'photosynthesis','canopy characteristics', 'evergreen vegetation', 'crown','deciduous vegetation', 'anisotropy', 'biomass burning','wildfires', 'topographical relief','environmental sustainability','anthropogenic/human influenced ecosystems', 'emissions','sulfur compounds', 'environmental assessments', 'conservation','agriculture production', 'administrative divisions','freshwater ecosystems', 'political divisions', 'urban areas','treaty agreements/results', 'population estimates','nitrogen compounds', 'particulates', 'mortality', 'droughts','earthquakes', 'population distribution', 'fertilizers','animal manure and waste', 'urbanization/urban sprawl','landslides', 'avalanche', 'mangroves', 'volcanic eruptions','pesticides', 'population size', 'population density','rural areas', 'amphibians', 'mammals', 'carbon', 'sulfur oxides','land management', 'natural gas', 'sedimentary rocks','coastal elevation', 'community dynamics','nuclear radiation exposure', 'radiation exposure','poverty levels', 'malnutrition', 'sea level rise','vulnerability levels/index', 'electricity','energy production/use', 'sustainable development','deforestation', 'household income', 'nitrogen', 'phosphorus','terrestrial ecosystems', 'permafrost', 'nutrients','plant characteristics', 'soil gas/air', 'litter characteristics','soil chemistry', 'soil respiration', 'active layer', 'soil depth','cation exchange capacity', 'organic matter', 'soil porosity','soil texture', 'permafrost melt','ground water processes/measurements', 'freeze/thaw','halocarbons and halogens', 'hydrogen compounds', 'biomass','dominant species', 'vegetation species', 'sulfur', 'tree rings','soil classification', 'sea ice concentration', 'reforestation','species/population interactions', 'range changes','topographic effects', 'land resources', 'river ice depth/extent','snow melt', 'river ice', 'animal ecology and behavior','phenological changes', 'forest fire science', 'radiative forcing','soil heat budget', 'river/lake ice breakup','river/lake ice freeze', 'reclamation/revegetation/restoration','lichens', 'marine ecosystems', 'coastal landforms', 'degradation','forest composition/vegetation structure', 'barometric altitude','volatile organic compounds', 'forest fire danger index','periglacial processes', 'landscape processes','soil horizons/profile', 'soil ph', 'soil water holding capacity','fluvial landforms', 'soil color', 'glacial processes','photochemistry', 'cloud dynamics', 'nitrogen oxides', 'smog','chemical composition', 'actinic flux', 'tropospheric ozone','fossil fuel burning', 'industrial emissions','denitrification rate', 'sunshine', 'soil structure','mosses/hornworts/liverworts', 'hydraulic conductivity','snow/ice temperature', 'water characteristics','outgoing longwave radiation', 'soil compaction', 'soil impedance','canopy transmittance', 'ground water features', 'solids','agricultural 
expansion', 'pressure tendency', 'visibility','herbivory', 'paleoclimate reconstructions', 'drought indices','fire weather index', 'animal yields', 'teleconnections','carbon dioxide', 'dissolved solids', 'ocean currents', 'salinity','afforestation/reforestation', 'fresh water river discharge','surface water chemistry', 'aeolian landforms','precipitation indices', 'temperature indices', 'forest yields','stratigraphic sequence', 'freeze/frost', 'frost','industrialization', 'ice core records', 'suspended solids','weathering', 'gas flaring', 'ice extent', 'biogeochemical cycles','lake ice', 'isotopes', 'watershed characteristics','transportation', 'soil rooting depth', 'geochemical properties','carbon monoxide', 'cultural features', 'consumer behavior','boundary surveys', 'land productivity', 'sediment composition','calcium', 'magnesium', 'potassium','micronutrients/trace elements', 'sediment chemistry','biogeochemical processes', 'cropping systems','groundwater chemistry', 'reforestation/revegetation','soil infiltration', 'soil fertility','angiosperms (flowering plants)', 'glacial landforms','forest mensuration', 'acid deposition', 'differential pressure','soil erosion', 'trace elements/trace metals', 'soil consistence','snow stratigraphy', 'thermal conductivity', 'estuaries','tidal height', 'plant diseases/disorders/pests','pressure thickness', 'atmospheric heating', 'conduction','evaporation', 'turbulence', 'wind stress','satellite soil moisture index', 'antenna temperature', 'glaciers','ice sheets', 'nitrate', 'ocean mixed layer','precipitation indicators', 'temperature indicators', 'ground ice','alkalinity', 'dissolved gases', 'oxygen', 'ph', 'phosphate','solar induced fluorescence', 'volcanic activity','ice temperature', 'sea surface height', 'airglow','energy deposition', 'x-ray flux', 'electron flux', 'proton flux','magnetic fields/magnetic currents', 'vertical profiles','air temperature', 'dew point temperature','cloud liquid water/ice', 'wind speed', 'wind direction','vertical wind velocity/speed', 'total precipitable water','boundary layer temperature', 'cloud height','cloud droplet concentration/size', 'ozone','cloud base temperature', 'cloud base height', 'rain','cloud optical depth/thickness', 'cirrus/systems','mean radial velocity', 'relative humidity', 'u/v wind components','wind speed/wind direction','digital elevation/terrain model (dem)', 'snow', 'drizzle','particle size distribution', 'hail', 'ambient temperature','stage height', 'rivers/streams', 'hourly precipitation amount','24 hour precipitation amount', 'latent heat flux','cloud fraction', '3 and 6 hour precipitation amount','horizontal wind velocity/speed', 'dissolved carbon dioxide','hurricanes', 'tropical cyclone track', 'cloud top height','temperature profiles', 'vertical wind shear','water vapor tendency', 'potential temperature','angstrom exponent', 'water vapor mixing ratio profiles','extreme eastern tropical pacific sst', 'phytoplankton','cloud precipitable water', 'cloud asymmetry', 'cloud ceiling','cloud frequency', 'cloud top pressure', 'cloud top temperature','cloud vertical distribution', 'cloud emissivity','cloud radiative forcing', 'cloud reflectance','maximum/minimum temperature', 'condensation','topographical relief maps', 'evapotranspiration','fire occurrence', 'burned area', 'sulfur dioxide', 'lake/pond','rivers/stream', 'nitrogen dioxide', 'agricultural lands','cyclones', 'urban lands', 'lakes/reservoirs','infant mortality rates', 'methane','non-methane hydrocarbons/volatile organic compounds', 
'coal','biodiversity functions', 'wetlands', 'discharge/flow','hydropattern', 'alpine/tundra', 'forests','leaf area index (lai)', 'ammonia', 'nitrous oxide','land subsidence', 'normalized difference vegetation index (ndvi)','chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride','molecular hydrogen', 'fire models', 'heat index','even-toed ungulates', 'species recruitment','population dynamics', 'water depth', 'inundation', 'drainage','respiration rate', 'permafrost temperature','indigenous/native species', 'fire dynamics', 'plant succession','coastal', 'salt marsh', 'boundary layer winds', 'shrubland/scrub','community structure', 'pingo', 'virtual temperature','formaldehyde', 'hydroxyl', 'photolysis rates', 'nitric oxide','molecular oxygen', 'peroxyacyl nitrate', 'stable isotopes','runoff', 'vegetation water content', 'discharge','chlorophyll concentrations', 'water table', 'decomposition','water temperature', 'total dissolved solids', 'biomass dynamics','grasslands', 'savannas', 'grazing dynamics/plant herbivory','multivariate enso index', 'drainage basins','resource development site', 'dunes', 'flood plain','endangered species', 'hydrogen cyanide', 'nutrient cycling','deserts','fraction of absorbed photosynthetically active radiation (fapar)','aquifers', 'dissolved oxygen', 'turbidity', 'conductivity','sulfate', 'water ion concentrations', 'percolation','phosphorous compounds', 'radioisotopes', 'cooling degree days','contour maps', 'methane production/use','natural gas production/use', 'petroleum production/use','consumption rates', 'soil organic carbon (soc)', 'halocarbons','biomass energy production/use', 'estuary','layered precipitable water', 'water vapor concentration profiles','hydrogen chloride', 'nitric acid', 'chlorine nitrate','chlorofluorocarbons', 'dinitrogen pentoxide', 'dimethyl sulfide','vorticity', 'ice fraction', 'temperature tendency','wind direction tendency', 'bromine monoxide', 'chlorine monoxide','methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy','cloud base pressure', 'temperature anomalies','precipitation trends', 'temperature trends', 'convection','chlorine dioxide', 'uv aerosol index','sea surface skin temperature', 'sublimation','convective surface precipitation rate', 'hydrogen fluoride']",
"def rendering_of_graph_node(self, nodeName):\n rendingList = [item for sublist in self.node[nodeName].values() for item in sublist] # due to flattening [['unrend'],['rend','unrend']] etc.\n if all(items == 'unrend' for items in rendingList): return 'unrend'\n elif all(items == 'rend' for items in rendingList): return 'rend'\n else: return 'mixed'",
"def label_joints():\n side_dict = {'C': 0,\n 'L': 1,\n 'R': 2}\n for jnt in mc.ls(type='joint'):\n mc.setAttr('{}.side'.format(jnt), side_dict[jnt.split('_')[0]])\n mc.setAttr('{}.type'.format(jnt), 18)\n mc.setAttr('{}.otherType'.format(jnt), jnt.split('_')[1], type=\"string\")",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def get_labels(self):\r\n raise NotImplementedError()"
] |
[
"0.61879206",
"0.6081442",
"0.6048964",
"0.59177494",
"0.58201796",
"0.57484347",
"0.5720458",
"0.56168586",
"0.55854166",
"0.5579104",
"0.557112",
"0.55050945",
"0.5465426",
"0.545311",
"0.5416637",
"0.53969747",
"0.53866935",
"0.53793013",
"0.5374424",
"0.53614503",
"0.5354584",
"0.5341291",
"0.53089094",
"0.52661306",
"0.526398",
"0.5254443",
"0.52524203",
"0.5251063",
"0.5212175",
"0.52056223"
] |
0.7601737
|
0
|
Fit an estimator on the training data and then score it on the testing data
|
def fit_score(estimator, train_data, test_data):
estimator.fit(*train_data)
return estimator.score(*test_data)
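A minimal usage sketch (not part of the dataset row; scikit-learn and the names below are assumed for illustration): fit_score unpacks (X, y) tuples into fit() and score(), so the held-out accuracy of a classifier comes back in one call.

# Hypothetical usage sketch: assumes scikit-learn is installed.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
# fit on (X_tr, y_tr), then return the mean accuracy on (X_te, y_te)
acc = fit_score(LogisticRegression(max_iter=1000), (X_tr, y_tr), (X_te, y_te))
print(acc)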
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _fit_and_score(estimator, X, modality, y, scorer, train, test, verbose,\n parameters, return_train_score=False,\n return_parameters=False, return_n_test_samples=False,\n return_times=False):\n\n X = X[modality]\n\n # Adjust length of sample weights\n # fit_params = fit_params if fit_params is not None else {}\n # fit_params = dict([(k, _index_param_value(X, v, train))\n # for k, v in fit_params.items()])\n\n train_scores = {}\n if parameters is not None:\n estimator.set_params(**parameters)\n\n start_time = time.time()\n\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, y_test = _safe_split(estimator, X, y, test, train)\n\n valid_train = [i for i, x in enumerate(X_train) if ~np.any(np.isnan(x))]\n X_train = [x for i, x in enumerate(X_train) if i in valid_train]\n y_train = [y_ for i, y_ in enumerate(y_train) if i in valid_train]\n valid_test = [i for i, x in enumerate(X_test) if ~np.any(np.isnan(x))]\n X_test = [x for i, x in enumerate(X_test) if i in valid_test]\n y_test = [y_ for i, y_ in enumerate(y_test) if i in valid_test]\n\n is_multimetric = not callable(scorer)\n\n if y_train is None:\n # estimator.fit(X_train, **fit_params)\n estimator.fit(X_train)\n else:\n # estimator.fit(X_train, y_train, **fit_params)\n estimator.fit(X_train, y_train)\n\n fit_time = time.time() - start_time\n # _score will return dict if is_multimetric is True\n if y_test:\n test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)\n else:\n test_scores = dict(score=np.nan)\n\n score_time = time.time() - start_time - fit_time\n if return_train_score:\n train_scores = _score(estimator, X_train, y_train, scorer,\n is_multimetric)\n\n ret = [train_scores, test_scores] if return_train_score else [test_scores]\n\n if return_n_test_samples:\n ret.append(_num_samples(X_test))\n if return_times:\n ret.extend([fit_time, score_time])\n if return_parameters:\n ret.append(parameters)\n return ret",
"def fit(self,\n X,\n y,\n estimator=None,\n ):\n\n self._load_data(X, y)\n\n self.outer_testscores = []\n self.outer_trainscores = []\n self.outer_pipelines = []\n\n n_outer = 0\n for train_id, test_id in self.outer_cv.split(self.Xtrain, self.ytrain):\n Xtrain, Xtest = \\\n self.Xtrain.iloc[train_id], self.Xtrain.iloc[test_id]\n ytrain, ytest = self.ytrain[train_id], self.ytrain[test_id]\n\n self.clf_pipeline.fit(Xtrain, ytrain)\n\n self.outer_pipelines.append(pickle.dumps(self.clf_pipeline))\n\n # measure the model performance (chosen by inner cv) on outer cv\n Xtest_score = self.clf_pipeline.score(Xtest, ytest)\n Xtrain_score = self.clf_pipeline.score(Xtrain, ytrain)\n\n self.outer_testscores.append(Xtest_score)\n self.outer_trainscores.append(Xtrain_score)\n\n print('n_outer =', n_outer)\n print('train score =', Xtrain_score)\n print('test score =', Xtest_score)\n\n n_outer += 1\n\n print('Scores of {n} models: {mean} +/- {std}'\n .format(n=n_outer,\n mean=np.mean(self.outer_testscores),\n std=np.std(self.outer_testscores)\n )\n )\n\n if estimator == 'best':\n chosen_split = np.argmax(self.outer_testscores)\n\n elif estimator == 'random':\n chosen_split = np.random.choice(n_outer)\n\n elif estimator == 'average':\n chosen_split = helpers.argmedian(self.outer_testscores)\n\n try:\n print('chosen_split', chosen_split)\n train_id, test_id = next(islice(self.outer_cv.split(self.Xtrain,\n self.ytrain),\n chosen_split,\n None\n )\n )\n self.Xtest, self.ytest = \\\n self.Xtrain.iloc[test_id], self.ytrain[test_id]\n\n self.Xtrain, self.ytrain = \\\n self.Xtrain.iloc[train_id], self.ytrain[train_id]\n\n # chose the model, pickle to avoid refit\n self.clf_pipeline = pickle.loads(\n self.outer_pipelines[chosen_split])\n\n except NameError:\n self.Xtrain, self.ytrain = Xtrain, ytrain\n self.Xtest, self.ytest = Xtest, ytest\n\n return self",
"def fit_test(self):",
"def model(classifier, data):\n print(\"Beggining to test model\")\n train, test = cross_validation.train_test_split(data, test_size=.30)\n f,c = train[:,1:], train[:,0]\n classifier.fit(f,c,False)\n print(\"Score: \" + classifier.score(f,c))\n print(\"Finished testing model\")",
"def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)",
"def _fit_and_score_keras(estimator, X, y, scorer, train, test, verbose,\n parameters, fit_params, return_train_score=False,\n return_parameters=False, return_n_test_samples=False,\n return_times=False, error_score='raise',session=None):\n if verbose > 1:\n if parameters is None:\n msg = ''\n else:\n msg = '%s' % (', '.join('%s=%s' % (k, v)\n for k, v in parameters.items()))\n print(\"[CV] %s %s\" % (msg, (64 - len(msg)) * '.'))\n\n # Adjust length of sample weights\n fit_params = fit_params if fit_params is not None else {}\n fit_params = dict([(k, _index_param_value(X, v, train))\n for k, v in fit_params.items()])\n\n test_scores = {}\n train_scores = {}\n if parameters is not None:\n estimator.set_params(**parameters)\n\n start_time = time.time()\n\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, y_test = _safe_split(estimator, X, y, test, train)\n\n is_multimetric = not callable(scorer)\n n_scorers = len(scorer.keys()) if is_multimetric else 1\n\n try:\n if y_train is None:\n estimator.fit(X_train, **fit_params)\n else:\n estimator.fit(X_train, y_train, **fit_params)\n\n except Exception as e:\n # Note fit time as time until error\n fit_time = time.time() - start_time\n score_time = 0.0\n if error_score == 'raise':\n raise\n elif isinstance(error_score, numbers.Number):\n if is_multimetric:\n test_scores = dict(zip(scorer.keys(),\n [error_score, ] * n_scorers))\n if return_train_score:\n train_scores = dict(zip(scorer.keys(),\n [error_score, ] * n_scorers))\n else:\n test_scores = error_score\n if return_train_score:\n train_scores = error_score\n warnings.warn(\"Classifier fit failed. The score on this train-test\"\n \" partition for these parameters will be set to %f. \"\n \"Details: \\n%r\" % (error_score, e), FitFailedWarning)\n else:\n raise ValueError(\"error_score must be the string 'raise' or a\"\n \" numeric value. (Hint: if using 'raise', please\"\n \" make sure that it has been spelled correctly.)\")\n\n else:\n fit_time = time.time() - start_time\n # _score will return dict if is_multimetric is True\n test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)\n score_time = time.time() - start_time - fit_time\n if return_train_score:\n train_scores = _score(estimator, X_train, y_train, scorer,\n is_multimetric)\n\n if verbose > 2:\n if is_multimetric:\n for scorer_name, score in test_scores.items():\n msg += \", %s=%s\" % (scorer_name, score)\n else:\n msg += \", score=%s\" % test_scores\n if verbose > 1:\n total_time = score_time + fit_time\n end_msg = \"%s, total=%s\" % (msg, logger.short_format_time(total_time))\n print(\"[CV] %s %s\" % ((64 - len(end_msg)) * '.', end_msg))\n\n ret = [train_scores, test_scores] if return_train_score else [test_scores]\n\n if return_n_test_samples:\n ret.append(_num_samples(X_test))\n if return_times:\n ret.extend([fit_time, score_time])\n if return_parameters:\n ret.append(parameters)\n # The estimator is erased\n del estimator\n # We assign the keras backend\n K = session\n # Clean the session\n K.clear_session()\n # The garbage collector is called in order to ensure that the estimator is erased from memory\n for i in range(15): gc.collect()\n return ret",
"def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)",
"def _train_and_evaluate(estimator, output_dir):\n \n \"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)",
"def fit_and_test(X, y) -> None:\r\n models = {\r\n \"tree2\": RandomForestClassifier(n_estimators=1, n_jobs=-1, class_weight=\"balanced\", random_state=0),\r\n \"tree1\": RandomForestClassifier(n_estimators=1, n_jobs=-1, random_state=0, criterion=\"entropy\"),\r\n \"random_forest_10\": RandomForestClassifier(\r\n n_estimators=10, n_jobs=-1, class_weight=\"balanced\", criterion=\"gini\"\r\n ),\r\n \"random_forest_100\": RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion=\"entropy\"),\r\n \"knn_1\": KNeighborsClassifier(n_neighbors=1, n_jobs=-1, metric=\"hamming\"),\r\n \"knn_5\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1, metric=\"hamming\"),\r\n \"knn_15\": KNeighborsClassifier(n_neighbors=15, n_jobs=-1, metric=\"hamming\"),\r\n \"cnb\": ComplementNB(),\r\n }\r\n\r\n for model_name in models.keys():\r\n cross_validate(estimator=models[model_name], X=X, y=y, num_splits=5, save_name=model_name)",
"def fit(train_data, train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))",
"def fit(model, data, test_ids, exp_name, datasets):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n \n trained_model = train(model, train_ids, data, scaler, datasets)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments.append({'name':exp_name,'model':trained_model, 'results':results, 'scaler':scaler})\n return results",
"def fit(self, train_features, train_actuals):\n for name in self.models.keys():\n print('-'*shutil.get_terminal_size().columns)\n print(\"evaluating {}\".format(name).center(columns))\n print('-'*shutil.get_terminal_size().columns)\n estimator = self.models[name]\n est_params = self.params[name]\n gscv = GridSearchCV(estimator, est_params, cv=5, scoring=self.scoring_metric)\n gscv.fit(train_features, train_actuals)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n self.single_classifier_best[name] = gscv",
"def fit(model, data, test_ids, exp_name, train_ids=None):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n if train_ids == None:\n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n \n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n scaling_data = data[0][train_ids]\n desc_scaler.fit(scaling_data)\n data[0] = desc_scaler.transform(data[0])\n else:\n scaling_data = None\n \n trained_model = train(model, train_ids, data, scaler)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments[exp_name] = {'model':trained_model, 'results':results, 'scaler':scaler, 'desc scaling data':scaling_data}\n return results",
"def fit_with_testing(self, X_train, y_train, X_test, y_test):\n self.init(X_train)\n self.opt.print_accuracy = self.print_accuracy\n self.opt.fit(X_train, y_train, self.n_epochs, self.batch_size,\n validation_data=(X_test, y_test), program=self.program,\n print_accuracy=self.print_accuracy, print_loss=True)",
"def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, 
scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to 
the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), 
np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def fit_and_get_test_predictions(self, trace, tuning=True):\n pass",
"def train_and_evaluate_classifier(X, yt, estimator, grid):\n \n # Cross validation\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n classifier = GridSearchCV(estimator=estimator, cv=cv, param_grid=grid, error_score=0.0, n_jobs = -1, verbose = 5, scoring='f1')\n \n # Train the model over and tune the parameters\n print(\"Training model\")\n classifier.fit(X, yt)\n\n # CV-score\n print(\"CV-scores for each grid configuration\")\n means = classifier.cv_results_['mean_test_score']\n stds = classifier.cv_results_['std_test_score']\n for mean, std, params in sorted(zip(means, stds, classifier.cv_results_['params']), key=lambda x: -x[0]):\n print(\"Accuracy: %0.3f (+/-%0.03f) for params: %r\" % (mean, std * 2, params))\n print()\n\n return classifier",
"def train_and_evaluate_classifier(X, yt, estimator, grid):\n \n # Cross validation\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n classifier = GridSearchCV(estimator=estimator, cv=cv, param_grid=grid, error_score=0.0, n_jobs = -1, verbose = 5, scoring='f1')\n \n # Train the model over and tune the parameters\n print(\"Training model\")\n classifier.fit(X, yt)\n\n # CV-score\n print(\"CV-scores for each grid configuration\")\n means = classifier.cv_results_['mean_test_score']\n stds = classifier.cv_results_['std_test_score']\n for mean, std, params in sorted(zip(means, stds, classifier.cv_results_['params']), key=lambda x: -x[0]):\n print(\"Accuracy: %0.3f (+/-%0.03f) for params: %r\" % (mean, std * 2, params))\n print()\n\n return classifier",
"def _fit_and_score_multimodal(estimator, X, modality, y, scorer, train, test, verbose,\n parameters, return_train_score=False,\n return_parameters=False, return_n_test_samples=False,\n return_times=False, default_parameters=None):\n if verbose > 1 and modality != 'all':\n if parameters is None:\n msg = ''\n else:\n msg = '%s' % (', '.join('%s=%s' % (k, v)\n for k, v in parameters.items()))\n print(\"[CV] %s %s\" % (msg, (64 - len(msg)) * '.'))\n\n y = np.array(y)\n\n # Adjust length of sample weights\n # fit_params = fit_params if fit_params is not None else {}\n # fit_params = dict([(k, _index_param_value(X, v, train))\n # for k, v in fit_params.items()])\n\n train_scores = {}\n\n clfind = [v[0] for v in estimator.steps].index('clf')\n if modality == 'all':\n for k in estimator.steps[clfind][1].base_estimators.keys():\n estimator.steps[clfind][1].base_estimators[k].set_params(**parameters[k])\n elif parameters is not None:\n for k in estimator.steps[clfind][1].base_estimators.keys():\n if k == modality:\n estimator.steps[clfind][1].base_estimators[k].set_params(**parameters)\n else:\n estimator.steps[clfind][1].base_estimators[k].set_params(**default_parameters)\n\n\n start_time = time.time()\n\n y_train = y[train]\n y_test = y[test]\n\n X_train = OrderedDict({k: np.array(x)[train] for k, x in X.items()})\n X_test = OrderedDict({k: np.array(x)[test] for k, x in X.items()})\n # X_train, X_test = dict(), dict()\n # for k, X_ in X.items():\n # x_train, x_test = X_[train], X_[test]\n # valid_train = [i for i, x in enumerate(x_train) if ~np.any(np.isnan(x))]\n # X_train[k] = [x for i, x in enumerate(x_train) if i in valid_train]\n # valid_test = [i for i, x in enumerate(x_test) if ~np.any(np.isnan(x))]\n # X_test[k] = [x for i, x in enumerate(x_test) if i in valid_test]\n\n is_multimetric = not callable(scorer)\n\n if y_train is None:\n # estimator.fit(X_train, **fit_params)\n estimator.fit(X_train)\n else:\n # estimator.fit(X_train, y_train, **fit_params)\n estimator.fit(X_train, y_train)\n\n fit_time = time.time() - start_time\n # _score will return dict if is_multimetric is True\n test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)\n score_time = time.time() - start_time - fit_time\n if return_train_score:\n train_scores = _score(estimator, X_train, y_train, scorer,\n is_multimetric)\n\n if verbose > 2:\n if is_multimetric:\n for scorer_name, score in test_scores.items():\n msg += \", %s=%s\" % (scorer_name, score)\n else:\n msg += \", score=%s\" % test_scores\n if verbose > 1:\n total_time = score_time + fit_time\n end_msg = \"%s, total=%s\" % (msg, logger.short_format_time(total_time))\n print(\"[CV] %s %s\" % ((64 - len(end_msg)) * '.', end_msg))\n\n ret = [train_scores, test_scores] if return_train_score else [test_scores]\n\n if return_n_test_samples:\n ret.append(_num_samples(X_test))\n if return_times:\n ret.extend([fit_time, score_time])\n if return_parameters:\n ret.append(parameters)\n return ret",
"def train_test_scores(estimator_scores):\n\n # Converting the dictionary of scores from cross_validate to a dataframe\n # and dropping unnecessary rows\n scores_df = (pd\n .DataFrame\n .from_dict(estimator_scores)\n .drop(['fit_time', 'score_time'], axis=1))\n # Getting mean scores and standard deviations from repeated cv\n scores_mean = np.abs(scores_df.mean() * 100)\n scores_std = np.abs(scores_df.std() * 100)\n # Returning results as pandas dataframe\n results = pd.DataFrame({'Accuracy': scores_mean,\n 'Standard Deviation': scores_std})\n # Sub-setting train and test results into their own dataframes\n train_results = np.round(results.iloc[list(range(1, 19, 2))], decimals=4)\n test_results = np.round(results.iloc[list(range(0, 18, 2))], decimals=4)\n # Returning Brier scores back to a value between 0 and 1\n train_results.iloc[8] = (train_results.iloc[8]/100)\n test_results.iloc[8] = (test_results.iloc[8]/100)\n\n return train_results, test_results, scores_df",
"def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break",
"def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results",
"def score_model(X, y, estimator, encode=True, **kwargs):\n y = LabelEncoder().fit_transform(y)\n \n if encode:\n model = Pipeline([('one_hot_encoder', OneHotEncoder()),\n ('estimator', estimator)])\n else:\n model = Pipeline([('estimator', estimator)])\n \n # Instantiate the classification model and visualizer\n model.fit(X, y, **kwargs)\n\n expected = y\n predicted = model.predict(X)\n\n # return model name, (P, R, Fscore, Support):\n return estimator.__class__.__name__, PRFS(expected, predicted)",
"def fit_model(X, y):\n\n # Create cross-validation sets from the training data\n # sklearn version 0.18: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)\n # sklearn versiin 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)\n cv_sets = ShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth': np.arange(1, 11)}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'\n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search cv object --> GridSearchCV()\n # Make sure to include the right parameters in the object:\n # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.\n grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_",
"def score_dataset(X_train, X_valid, y_train, y_valid):\r\n model = RandomForestRegressor(n_estimators=100, random_state=0)\r\n model.fit(X_train, y_train)\r\n preds = model.predict(X_valid)\r\n score = mean_absolute_error(y_valid, preds)\r\n return score",
"def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score",
"def train(models, X_train, y_train, X_test, y_test):\n \n # Train and test each model in a for lop\n accuracies = []\n \n for model in models:\n clf = model.fit(X_train, y_train) # Train\n score = clf.score(X_test, y_test) # Test\n accuracies.append(score)\n\n return accuracies",
"def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss.",
"def evaluate(clf, dataset, feature_list, features, labels, num_iter, params):\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42)\n\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n print clf\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n print '*****************************'\n print clf.best_estimator_\n print clf.best_params_\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)",
"def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)"
] |
[
"0.76107645",
"0.7543218",
"0.75375515",
"0.7517675",
"0.74909353",
"0.7400457",
"0.7224176",
"0.7108566",
"0.70855176",
"0.70284665",
"0.69677943",
"0.69219077",
"0.6902353",
"0.69022137",
"0.6899991",
"0.68701804",
"0.6868485",
"0.6868485",
"0.68650854",
"0.6844149",
"0.68327534",
"0.68268424",
"0.6817783",
"0.68034405",
"0.6803225",
"0.67960614",
"0.6789264",
"0.6777127",
"0.67744327",
"0.67741525"
] |
0.8566178
|
0
|
Index each array in a tuple of arrays. If the arrays tuple contains a ``None``, the entire tuple will be returned as is.
|
def select(arrays, index):
if arrays is None or any(i is None for i in arrays):
return arrays
return tuple(i.ravel()[index] for i in arrays)
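A minimal usage sketch (illustration only, not part of the dataset row): each array is flattened with ravel() before indexing, and a tuple containing None, or None itself, passes through unchanged.

# Hypothetical usage sketch: assumes NumPy arrays as inputs.
import numpy as np

a = np.arange(6).reshape(2, 3)
b = np.arange(6, 12).reshape(2, 3)
print(select((a, b), 4))     # (4, 10): element 4 of each flattened array
print(select((a, None), 4))  # (a, None) returned unchanged because of the None
print(select(None, 4))       # None returned unchanged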
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def aind(x):\n\treturn tuple(x.T)",
"def index(self, arr, idx, temp = True, name = None):\n \n temp = temp or name is not None\n \n arr_t = arr.type\n\n if isinstance(arr_t, ScalarT):\n # even though it's not correct externally, it's\n # often more convenient to treat indexing\n # into scalars as the identity function.\n # Just be sure to catch this as an error in\n # the user's code earlier in the pipeline.\n return arr\n if isinstance(arr_t, TupleT):\n if isinstance(idx, Const):\n idx = idx.value\n\n assert isinstance(idx, int), \\\n \"Index into tuple must be an integer, got %s\" % idx\n if isinstance(idx, Const):\n idx = idx.value\n proj = self.tuple_proj(arr, idx)\n if temp:\n return self.assign_temp(proj, \"tuple_elt%d\" % idx if name is None else name)\n else:\n return proj\n\n if self.is_tuple(idx):\n indices = self.tuple_elts(idx)\n elif hasattr(idx, '__iter__'):\n indices = tuple(map(wrap_if_constant,idx))\n else:\n indices = (wrap_if_constant(idx),)\n\n n_required = arr_t.rank\n n_indices = len(indices)\n if n_indices < n_required:\n # all unspecified dimensions are considered fully sliced\n extra = (syntax_helpers.slice_none,) * (n_required - n_indices)\n indices = indices + extra\n\n if len(indices) > 1:\n idx = self.tuple(indices, \"index_tuple\" if name is None else name)\n else:\n idx = indices[0]\n\n t = arr_t.index_type(idx.type)\n idx_expr = Index(arr, idx, type=t)\n if temp:\n return self.assign_temp(idx_expr, \"array_elt\" if name is None else name)\n else:\n return idx_expr",
"def __getitem__(\n self, idx: int\n ) -> Union[\n Tuple[ndarray, ndarray, ndarray, ndarray, int, int, bool, bool],\n Tuple[ndarray, ndarray, ndarray, ndarray, int, int, bool, bool, ndarray, List[int]],\n ]:\n if not self.use_audio:\n return (\n np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n np.array(self.all_input_mask[idx], dtype=np.float32),\n np.array(self.all_subtokens_mask[idx]),\n self.all_quantities_of_preceding_words[idx],\n self.all_query_ids[idx],\n self.all_is_first[idx],\n self.all_is_last[idx],\n )\n return (\n np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n np.array(self.all_input_mask[idx], dtype=np.float32),\n np.array(self.all_subtokens_mask[idx]),\n self.all_quantities_of_preceding_words[idx],\n self.all_query_ids[idx],\n self.all_is_first[idx],\n self.all_is_last[idx],\n np.array(self.all_audio_queries[idx], dtype=np.float),\n self.all_audio_lengths[idx],\n )",
"def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])",
"def __getitem__(self, index_tuple):\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n return array_1d[col]",
"def _multi_index(indexes, shape):\n indexes = indexes if isinstance(indexes, typing.Sequence) else (indexes,)\n if any(isinstance(i, type(Ellipsis)) for i in indexes):\n raise IndexError('Ellipsis index currently is not supported.')\n # Fill the right-most elements.\n indexes = indexes + (slice(0, None, None),) * (len(shape) - len(indexes))\n # Convert to positive index.\n positive_indexes = []\n for i, index in enumerate(indexes):\n if isinstance(index, slice):\n index = slice(\n index.start or 0, index.stop or shape[i], index.step or 1\n )\n positive_indexes.append(\n slice(\n index.start + shape[i] if index.start < 0 else index.start,\n index.stop + shape[i] if index.stop < 0 else index.stop,\n # Negative step means index backward, no need to convert to\n # positive interger.\n index.step,\n )\n )\n elif isinstance(index, int):\n positive_indexes.append(index + shape[i] if index < 0 else index)\n else:\n raise TypeError(f'Not supported index type {index}.')\n return tuple(positive_indexes)",
"async def infer_shape_array_map(track, fn, *arrays):\n fn_t = await fn['shape']\n vrefs = [TransformedReference(track.engine, getelement, a)\n for a in arrays]\n elem_shp = await fn_t(*vrefs)\n assert elem_shp is NOSHAPE\n\n shapes = [await a['shape'] for a in arrays]\n shape0, *rest = shapes\n if any(len(s) != len(shape0) for s in rest):\n raise MyiaShapeError(\"Expect same shapes for array_map\")\n rshape = []\n for entries in zip(*shapes):\n entries = set(entries)\n entries.add(ANYTHING)\n if len(entries) == 1:\n rshape.append(ANYTHING)\n elif len(entries) == 2:\n entries.remove(ANYTHING)\n entry, = entries\n rshape.append(entry)\n else:\n raise MyiaShapeError(\"Expect same shapes for array_map\")\n return tuple(rshape)",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def enumerate(self) -> '_[Tuple[int, T]]':\n return _(enumerate(self.array))",
"def iflatten(self):\n return _((e for es in self.array for e in es))",
"def _atleastnd(array, n):\n return array[tuple((n - array.ndim) * [None] + [...])]",
"def array_map(fn, arrs, n):\n # we shouldn't need a special case for n == 0, but NumPy complains about indexing into a zero-dimensional\n # array a using a[(Ellipsis,)].\n if n == 0:\n return fn(*arrs)\n \n full_shape = tuple(np.array([a.shape[:n] for a in arrs]).max(0))\n result = None\n for full_idx in itertools.product(*map(range, full_shape)):\n inputs = [a[broadcast(full_idx, a.shape[:n]) + (Ellipsis,)] for a in arrs]\n curr = fn(*inputs)\n \n if result is None:\n if type(curr) == tuple:\n result = tuple(np.zeros(full_shape + np.asarray(c).shape) for c in curr)\n else:\n result = np.zeros(full_shape + np.asarray(curr).shape)\n\n if type(curr) == tuple:\n for i, c in enumerate(curr):\n result[i][full_idx + (Ellipsis,)] = c\n else:\n result[full_idx + (Ellipsis,)] = curr\n return result",
"def __getitem__(self, inds):\n i, j = inds\n return self.array[i][j]",
"def axes2indices(\n self,\n axes:'Union['\n 'Tuple[Union[ConvertableAxisClass, ellipsis], ...], '\n 'Mapping[NamedIndex, ConvertableAxisClass],'\n ']',\n )->'Any':\n\n if isinstance(axes, dict):\n indices = []\n for dim, axes_ in self.items():\n axis = axes.get(dim)\n if axis is None and dim in axes:\n warnings.warn(f'it does not make sense using None(at dim {dim!r}) '\n 'in a named index, it whould be translated into '\n 'slice(None)(i.e. :)')\n index = slice(None) if axis is None else axis2index(axes_, axis)\n indices.append(index)\n return tuple(indices)\n\n axes = axes if isinstance(axes, tuple) else (axes, )\n idx_elps = naxis = len(axes)\n for idx_axis, axis in enumerate(axes):\n if isinstance(axis, type(Ellipsis)):\n assert idx_elps == naxis, 'more than one ellipsis is not allowed'\n\n idx_elps = idx_axis\n\n indices = []\n idx_axis = idx_dim = 0\n while idx_axis < idx_elps:\n axis = axes[idx_axis]\n index = None if axis is None else axis2index(self._dim_axes[idx_dim], axis)\n indices.append(index)\n idx_axis += 1\n idx_dim += index is not None\n\n if idx_elps < naxis:\n indices.append(axes[idx_elps])\n remainder = idx_elps + 1 - naxis\n indices_ = []\n idx_axis = idx_dim = -1\n while idx_axis >= remainder:\n axis = axes[idx_axis]\n index = None if axis is None else axis2index(self._dim_axes[idx_dim], axis)\n indices_.append(index)\n idx_axis -= 1\n idx_dim -= index is not None\n indices_.reverse()\n indices.extend(indices_)\n\n return tuple(indices)",
"def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)",
"def array_form(self):\n return tuple(self)",
"def arr_to_tup(a):\n return tuple(a.reshape(1, -1)[0])",
"def __getitem__(self, index):\n self.__init_from_composite()\n res = []\n if type(index) == VTKCompositeDataArray:\n for a, idx in zip(self._Arrays, index.Arrays):\n if a is not NoneArray:\n res.append(a.__getitem__(idx))\n else:\n res.append(NoneArray)\n else:\n for a in self._Arrays:\n if a is not NoneArray:\n res.append(a.__getitem__(index))\n else:\n res.append(NoneArray)\n return VTKCompositeDataArray(res, dataset=self.DataSet)",
"def totuple(self, arr):\n\n\t\ttry:\n\t\t return tuple(self.totuple(i) for i in arr)\n\t\texcept TypeError:\n\t\t return arr",
"def indexes(self, fields):\r\n\r\n indexes = [self.index(field) for field in fields]\r\n\r\n return tuple(indexes)",
"def broadcast_arrays(*args):\n from .dataarray import DataArray\n\n all_indexes = _get_all_indexes(args)\n for k, v in all_indexes.items():\n if not all(v[0].equals(vi) for vi in v[1:]):\n raise ValueError('cannot broadcast arrays: the %s index is not '\n 'aligned (use xray.align first)' % k)\n\n vars = broadcast_variables(*[a.variable for a in args])\n indexes = dict((k, all_indexes[k][0]) for k in vars[0].dims)\n\n arrays = []\n for a, v in zip(args, vars):\n arr = DataArray(v.values, indexes, v.dims, a.name, a.attrs, a.encoding)\n for k, v in a.coords.items():\n arr.coords[k] = v\n arrays.append(arr)\n\n return tuple(arrays)",
"def assert_schema(arrays, zero_indexed=False, bounded=False,\n same_attributes=False, same_dimension=False):\n\n ds0 = arrays[0].datashape\n if same_dimension:\n if not all(a.ndim == ds0.ndim for a in arrays):\n raise ValueError(\"Input arrays must all have same dimension\")\n\n for a in arrays:\n ds = a.datashape\n if zero_indexed and not all(dl == 0 for dl in ds.dim_low):\n raise ValueError(\"Input arrays must start at 0 \"\n \"along all dimensions\")\n if bounded and not all(dh is not None for dh in ds.dim_high):\n raise ValueError(\"Input arrays must be bound along \"\n \"all dimensions\")\n if same_attributes and ds.sdbtype.full_rep != ds0.sdbtype.full_rep:\n raise ValueError(\"Input arrays must have the same attributes\")\n\n return tuple(arrays)",
"def test_integer_split_2D_default(self):\n a = array([arange(10),arange(10)])\n res = array_split(a,3)\n desired = [array([arange(10)]),array([arange(10)]),array([])]\n compare_results(res,desired)",
"def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v",
"def enumerate(x) -> List[Tuple[int, any]]:\n pass",
"def broadcast_shapes(*args):\n x = list(np.atleast_1d(args[0])) if args else ()\n for arg in args[1:]:\n y = list(np.atleast_1d(arg))\n if len(x) < len(y):\n x, y = y, x\n x[-len(y):] = [j if i == 1 else i if j == 1 else i if i == j else 0\n for i, j in zip(x[-len(y):], y)]\n if not all(x):\n return None\n return tuple(x)",
"def _get_slice(index, axis, num_axes):\n idx = [slice(None)] * num_axes\n idx[axis] = index\n return tuple(idx)",
"def get_aqi_and_combine(array):\n if array.ndim not in (1, 2):\n raise TypeError('array必须是一维或二维数据...')\n if array.ndim == 1:\n return np.append((ceil(_get_one_record_aqi(array)), array))\n x, y = array.shape\n y += 1\n aqis = np.array([])\n for vec in array:\n aqis = np.append(aqis, _get_one_record_aqi(vec))\n aqis = aqis.reshape((len(aqis), 1))\n aqis = np.ceil(aqis)\n return np.column_stack((aqis, array))",
"def __getitem__(self, index):\n x = self._input_data[index]\n if self._output_data is None:\n return x, x\n else:\n y = self._output_data[index]\n return x, y",
"def pick_image(images, idx):\r\n if type(images) == list:\r\n return [pick_image(r, idx) for r in images]\r\n if idx is None:\r\n return images[:, 0]\r\n elif type(idx) == int:\r\n return images[:, idx]\r\n \r\n idx = idx.astype('long').numpy()\r\n images = L.stack([images[i][int(idx[i])] for i in range(images.shape[0])])\r\n return images"
] |
[
"0.5842294",
"0.55760646",
"0.55092984",
"0.5497843",
"0.54363334",
"0.52967614",
"0.5283771",
"0.5263889",
"0.5225666",
"0.5220214",
"0.5194468",
"0.51706874",
"0.51583457",
"0.5133251",
"0.51113605",
"0.5105309",
"0.5093947",
"0.50427014",
"0.50376666",
"0.5035451",
"0.4927082",
"0.49227425",
"0.49112517",
"0.4903504",
"0.49033338",
"0.48857385",
"0.4872255",
"0.47853774",
"0.47833535",
"0.47801995"
] |
0.7136496
|
0
|
This function opens the bot_admin file, then returns 1 if the id is in the file (i.e., is an admin) and 0 otherwise
|
def is_admin(id):
with open('bot_admin', 'r') as myfile:
if id in myfile.read():
return 1
return 0
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def admin():\n commands = os.listdir(adminDir)\n if len(commands) > 0:\n return True\n else:\n return False",
"def check_if_admin(bot, update, *args, **kwargs):\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user:\n self.request_access(bot, user_id)\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n elif user[\"loot_admin\"] or user[\"admin\"]:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n else:\n update.message.reply_text(\"Non sei abilitato ad usare questo comando\")\n return",
"def check_if_admin(bot, update, *args, **kwargs):\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user:\n self.request_access(bot, user_id)\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n elif user[\"admin\"]:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n else:\n update.message.reply_text(\"Non sei abilitato ad usare questo comando\")\n return",
"def check_if_admin(bot, update, *args, **kwargs):\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user:\n self.request_access(bot, user_id)\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n elif user[\"tester\"]:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n else:\n update.message.reply_text(\"Non sei abilitato ad usare questo comando\")\n return",
"def is_loot_admin(self, id):\n users=self.execute(TABELLE['id_users']['select']['from_id'],(id,))\n if not users: return False\n if not isinstance(users, list): users=[users]\n\n for elem in users:\n if elem['id']==id:\n if elem['admin'] or elem['loot_admin']: return True\n else: break\n\n return False",
"def check_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n\n return md5((str(id) + \"admin\").encode()).hexdigest() in self.__admins",
"async def is_admin(ctx):\n member = ctx.message.author\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n if aRole in member.roles or member.id == 715048392408956950: return True",
"def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n self.assertEquals(a,True)",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def perm_adm(user_id):\n\tif user_id == config.adm_id_1 or user_id == config.adm_id_2:\n\t\treturn 1\n\telse \n\t\treturn 0",
"def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator",
"def scan_admin_url():\r\n target_admin_url=provided_url+\"/administrator/index.php\"\r\n if verbose_flag: print \"\\t[.] Trying to access admin login page...\", #+ target_admin_url\r\n try:\r\n response = urllib2.urlopen(target_admin_url)\r\n except HTTPError, e:\r\n admin_flag=0\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Failed\"\r\n return admin_flag\r\n else:\r\n admin_flag=1\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Success\"\r\n return admin_flag",
"async def id(ctx, user: discord.Member = None):\n user = user or ctx.message.author\n with open('users.json') as f:\n data = json.load(f)\n\n if data.get(user.id) is not None:\n await bot.say('`User id is {}`'.format(user.id))\n else:\n await bot.say(f'I can not seem to grab your id')",
"def method_isloggedid(self, chat_id):\n with open('./package_login/logged.json') as f:\n data = json.load(f)\n\n find_it = False\n find_user = \"\"\n id_c = sha256(str(chat_id).rstrip().encode()).hexdigest()\n for x in data:\n if x['chat_id'] == id_c:\n find_it = True\n find_user = x\n break\n\n if find_it:\n if find_user['password'] != self.password:\n return 1\n else:\n return 0\n else:\n return 2",
"def can_manage(self, filename):\n return False",
"def has_admin(cipher):\n counter = Crypto.gen_aes_stream_counter_mt19973(3453243);\n text = Crypto.decrypt_aes(cipher, key, AES.MODE_CTR, counter=counter)\n return text.find(';admin=true;') != -1",
"async def check_can_edit_user(\n authorization_client: AuthorizationClient, req_user_id: str, user_id: str\n):\n admin_tuple, req_admin_tuple = await asyncio.gather(\n authorization_client.get_administrator(user_id),\n authorization_client.get_administrator(req_user_id),\n )\n\n if admin_tuple[1] is None:\n return True\n\n if req_admin_tuple[1] == AdministratorRole.FULL:\n return True\n\n return False",
"def check_admin(self, update, context):\n\n user = self.User(update)\n output = self.data_base.check_admin(user)\n user.send_message(output)\n self.data_base.log(user, update.message.text, str(output))",
"def __reloadAdmins(self, admin_id):\n for admin_username in admin_main.getLoader().getAllUsernames():\n try:\n admin_obj=admin_main.getLoader().getAdminByName(admin_username)\n if admin_obj.creator_id == admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n else:\n for lock_obj in admin_obj.getLocks():\n if lock_obj.getLockerID()==admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n break\n except:\n logException(LOG_DEBUG)",
"def _getAdminChannelIndex(self):\n c = self.getChannelByName(\"admin\")\n if c:\n return c.index\n else:\n return 0",
"def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1",
"def check_file(self, path, approve_if_no_dbhash=False):\r\n if self.mod.filehash:\r\n h = create_filehash(path)\r\n return h == self.mod.filehash\r\n return approve_if_no_dbhash",
"def check_id_existence(self, id:str):\n\n oc_prefix = id[:(id.index(':')+1)]\n\n if oc_prefix == 'doi:':\n vldt = doi.DOIManager() # you can use removeprefix(oc_prefix) from Python 3.9+\n return vldt.exists(id.replace(oc_prefix, '', 1)) # todo: use id.replace(oc_prefix, '', 1) for Python < v.3.9\n if oc_prefix == 'isbn:':\n vldt = isbn.ISBNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'issn:':\n vldt = issn.ISSNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'orcid:':\n vldt = orcid.ORCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmcid:':\n vldt = pmcid.PMCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmid:':\n vldt = pmid.PMIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'ror:':\n vldt = ror.RORManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'url:':\n vldt = url.URLManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'viaf:':\n vldt = viaf.ViafManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikidata:':\n vldt = wikidata.WikidataManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikipedia:':\n vldt = wikipedia.WikipediaManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))",
"def read(self):\n found = False\n if os.path.exists(self.user_file):\n if os.path.getsize(self.user_file) > 0:\n f = open(self.user_file, \"rb\")\n data = f.read()\n self.screen_name, self.access_key, self.access_secret = data.split() # split the line by space token\n f.close()\n found = True\n return found",
"def get_file_id(file_name, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"files/\").format(workspace, model)\n response = requests.get(uri, headers = header)\n response_json = json.loads(response.text.encode(\"utf-8\"))\n for file in response_json:\n if file[u\"name\"] == unicode(file_name):\n return file[u\"id\"]",
"def identify_file(self, file):",
"def __check_in_autonotes_dir():\n if not os.path.isfile('master.tex'):\n cli.log.error(f'I can\\'t find a {emph(\"master.tex\")} file, '\n 'are you inside an autonotes directory?')\n exit(3)",
"def check_if_admin(connection,username):\r\n with connection:\r\n c = connection.execute(SELECT_USER_BY_ADMIN_PREVILAGES,(username,))\r\n return c.fetchone()",
"def check_is_admin(cookie):\n return ';admin=true;' in cookie",
"def user_is_admin(user):\n return user in admins"
] |
[
"0.5988642",
"0.5855251",
"0.58323574",
"0.573519",
"0.57336074",
"0.563821",
"0.552184",
"0.5445745",
"0.54403436",
"0.5414914",
"0.5295082",
"0.51754415",
"0.5171889",
"0.5087034",
"0.50759846",
"0.5023059",
"0.50092083",
"0.49878058",
"0.4981291",
"0.49573648",
"0.49339414",
"0.49117798",
"0.48761016",
"0.48751995",
"0.4831579",
"0.4830074",
"0.4810748",
"0.47919473",
"0.4790991",
"0.4785949"
] |
0.8447236
|
0
|
This function opens the tracked users file and checks whether the given user is in it; returns 1 on success, 0 if the user is not found
|
def check_user_del(user):
with open('tracked_users', 'r') as myfile:
userfile = myfile.read()
if user.lower() in userfile.lower():
return 1
return 0
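
A quick usage sketch of the function above (the user name is hypothetical):

if check_user_del('SomeUser') == 1:
    print('SomeUser is being tracked')
else:
    print('SomeUser is not in the tracked_users file')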
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_find(file):\n username1 = input(\"Enter your username: \")\n username = username1.lower()\n for row in file:\n if row[0] == username:\n print(\"\\n username found \" + username +\"\\n\")\n user_found = [row[0],row[1]]\n pass_check(user_found)\n gen.create_username_file(username)\n return True\n else:\n continue",
"def read(self):\n found = False\n if os.path.exists(self.user_file):\n if os.path.getsize(self.user_file) > 0:\n f = open(self.user_file, \"rb\")\n data = f.read()\n self.screen_name, self.access_key, self.access_secret = data.split() # split the line by space token\n f.close()\n found = True\n return found",
"def is_registered(username):\n with open(PASSFILE, \"r\") as passfile:\n for record in passfile:\n try:\n r_username, r_salt_hash = record.split()\n # The below is just for the linter\n r_salt_hash = r_salt_hash + \"nothing\"\n if username == r_username:\n return True\n # this is to handle the initial blank file\n except ValueError:\n pass\n return False",
"def del_user(user):\n\ttry:\n\t\tmyfile = open('tracked_users', 'r')\n\t\tlines = myfile.readlines()\n\t\tmyfile.close()\n\t\tmyfile = open('tracked_users', 'w')\n\t\tfor line in lines:\n\t\t\tif line.lower() != user.lower()+'\\n':\n\t\t\t\tmyfile.write(line.lower())\n\t\tmyfile.close()\n\t\tos.remove('data/'+user.lower())\n\t\treturn 1\n\texcept Exception as e:\n\t\tfd = open('tracked_users', 'r')\n\t\tprint(fd.read())\n\t\tfd.close()\n\t\tprint(e)\n\t\treturn -1",
"def is_registered(user_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return str(user_id) in inventories",
"def check_registration_details(username, password):\n contents = read_file()\n while ((username + '\\n') in contents):\n print(\"Sorry! This username is taken..\")\n username = get_username()\n password = get_password()\n \n add_details(username, password)",
"def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False",
"def chk_info(user_data):\n print(\"Info...\\n\\n\")\n print(str(user_data) + \"\\n\\n\")\n\n folder_name = user_data.folder_name\n print(build_status(os.path.isdir(folder_name),\n \"Localizando el directorio del examen\"))\n\n print(build_status(os.path.isfile(DataFile),\n \"Localizando info del usuario\"))\n\n print(build_status(os.path.isfile(os.path.join(folder_name, InfoFile)),\n \"Localizando el archivo de datos del usuario\"))\n\n print(build_status(os.path.isfile(os.path.join(folder_name, MarksFile)),\n \"Localizando el archivo de notas\"))",
"def ask_server_if_user_exists(self, sn):\n\n ### <------ Called from show_prompts\n print(\"//asking server to look up user...\")\n\n ### -------> Outbound to Server\n response = ServerOperations().is_user_here(sn)\n\n if response == True:\n print(f\"-=- Waiting for {sn} to accept file. Press A to abort.\")\n return True\n\n else:\n print(f\"{sn} not found. Try again.\")\n return False",
"def lookfor_user(used_name, used_password):\n user_exits = UserData.user_login(used_name, used_password)\n\n return user_exits",
"def _checkDB(self, userID, key):\r\n # TODO: Why not return True directly instead all lines will be read\r\n # TODO: Should this be deferred to a separate thread due to flock,\r\n # which is a blocking call?\r\n found = False\r\n with open(self._dbFile, 'r') as bridgefile:\r\n fcntl.flock(bridgefile.fileno(), fcntl.LOCK_EX)\r\n lines = bridgefile.readlines()\r\n for line in lines:\r\n g = line.split(':')\r\n if g[0] == userID and str(g[1].rstrip()) == str(key):\r\n found = True\r\n return found",
"def login(username, password):\n try:\n user_file = open('/etc/users.txt')\n user_buf = user_file.read()\n users = [line.split(\"|\") for line in user_buf.split(\"\\n\")]\n return [username, password] in users\n except IOError:\n print(\"I can't authentication you.\")\n return False",
"def _user_in_subid(self, sub_file, wanted_user):\n subid_list = []\n if self.passwd_file:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_file(wanted_user)\n else:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_host(wanted_user)\n try:\n insub = open(sub_file)\n except (IOError, OSError):\n return []\n else:\n for line in insub:\n try:\n (subuser, subid, count) = line.strip().split(':')\n except ValueError:\n continue\n if subuser == user:\n subid_list.extend([(subid, count), ])\n insub.close()\n return subid_list",
"def exists(cls, name: Name, lang: Lang, mode: Mode, time: Time) -> bool:\n users_files = os.listdir('users')\n filename = \"_\".join([name, lang, mode, time]) + '.json'\n\n return filename in users_files",
"def _checkUserExists(username,self):\r\n \r\n exists = False\r\n \r\n if _findUser(username) is not None:\r\n exists = True\r\n \r\n return exists",
"def UserExist(self, username):\n return self.com.CheckUserexists(username)",
"def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")",
"def test_contains_user(self):\n print('(' + self.test_contains_user.__name__+')',\n self.test_contains_user.__doc__)\n self.assertTrue(self.connection.contains_user(PATIENT_USERNAME))\n self.assertTrue(self.connection.contains_user(DOCTOR_USERNAME))",
"def __contains__(self, username):\n if self.db == None:\n raise AssertionError(\"DB not open\")\n\n self.lock.acquire()\n try:\n return username in self.db\n finally:\n self.lock.release()",
"def FindFile(self, fd):\n hashes = self._HashFile(fd)\n if not hashes:\n return False\n\n hash_urn = self.PATH.Add(str(hashes.sha1))\n\n for data in aff4.FACTORY.Stat([hash_urn], token=self.token):\n return data[\"urn\"]\n\n return False",
"def check_lock(username):\n try:\n users = file_manager.read_from_file('users_data.json')\n user = users[username]\n except KeyError:\n return 2\n if user[\"status\"]:\n return 1\n\n else:\n lock_time = datetime.strptime(user[\"lock_time\"], \"%Y-%m-%d %H:%M:%S\")\n\n if lock_time + timedelta(seconds=60 * 2) < datetime.now():\n user[\"status\"] = True\n file_manager.write_to_file('users_data.json', user, username)\n return 1\n return 0",
"def check_username(search_username):\n for find_username in USERS_LIST:\n if find_username[\"username\"] == search_username:\n return True\n return False",
"def userCheck(name):\r\n \r\n from logger.gamelogger import logger\r\n \r\n sql = \"\"\"SELECT count(*) FROM players where name = '{0}' COLLATE NOCASE;\"\"\".format(name)\r\n \r\n try:\r\n conn = sqlite3.connect(os.path.join(\"data\", \"players.db\"))\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n \r\n results = cursor.fetchall()\r\n\r\n except sqlite3.Error, e:\r\n logger.log.critical(\"Error using utils.gameutils.userCheck(): {0}\".format(e.args[0]))\r\n return False\r\n \r\n for row in results:\r\n if row[0] is 1:\r\n return True\r\n elif row[0] > 1:\r\n logger.log.warn(\"Duplicate username exists in player database: {0}\".format(name))\r\n \r\n return False",
"def Load(self, filename):\n logging.info(\"Reading users file at %s\", filename)\n try:\n try:\n contents = utils.ReadFile(filename)\n except EnvironmentError as err:\n self._users = None\n if err.errno == errno.ENOENT:\n logging.warning(\"No users file at %s\", filename)\n else:\n logging.warning(\"Error while reading %s: %s\", filename, err)\n return False\n\n users = http.auth.ParsePasswordFile(contents)\n\n except Exception as err: # pylint: disable=W0703\n # We don't care about the type of exception\n logging.error(\"Error while parsing %s: %s\", filename, err)\n return False\n\n self._users = users\n\n return True",
"def is_user_present(self, username): # WORKS\n done = self.cur.execute(\"SELECT username FROM users WHERE username = \\\"{}\\\"\".format(username))\n if done == 1:\n return True\n else:\n return False",
"def open_user(self, username):\n if self.check_if_user_exists(username):\n self.click((\"xpath\", self.username_link_xpath.format(username)))\n self.wait_unit_el_present(self.user_edit_save_btn)\n else:\n Log.info(\"User \" + username + \" is not in the user list.\")",
"def has_user(self, username):\n\t\treturn username in self.users",
"def get_users():\n with open(user_path, \"r\") as infile:\n return json.load(infile)",
"def UserRecords(self, username):\n return not self.com.CheckUid(username) is None",
"def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True"
] |
[
"0.72001135",
"0.6392704",
"0.6380288",
"0.60906875",
"0.59361774",
"0.58851326",
"0.5845398",
"0.58390796",
"0.5828219",
"0.5755387",
"0.5733727",
"0.5731585",
"0.5705902",
"0.56972355",
"0.5690812",
"0.5672853",
"0.56608915",
"0.5633513",
"0.56234914",
"0.5618595",
"0.56107485",
"0.5608318",
"0.56029147",
"0.5586394",
"0.55574083",
"0.5514612",
"0.5512059",
"0.5506167",
"0.5500219",
"0.54915655"
] |
0.7034657
|
1
|
This function opens the tracked users file to delete a given user and removes that user's data; returns 1 on success, or prints the file contents (for recovery purposes) and returns -1 on failure
|
def del_user(user):
try:
myfile = open('tracked_users', 'r')
lines = myfile.readlines()
myfile.close()
myfile = open('tracked_users', 'w')
for line in lines:
if line.lower() != user.lower()+'\n':
myfile.write(line.lower())
myfile.close()
os.remove('data/'+user.lower())
return 1
except Exception as e:
fd = open('tracked_users', 'r')
print(fd.read())
fd.close()
print(e)
return -1
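
A minimal usage sketch (the user name is hypothetical; a tracked_users file and a data/ directory with per-user files are assumed to exist):

# Hypothetical usage: remove "SomeUser" from tracking and report the outcome.
if del_user('SomeUser') == 1:
    print('user removed')
else:
    print('removal failed; file contents were printed above for recovery')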
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_users(self, filename):\n f_id = self.face.FACES.files.find_one({ \"filename\" : filename }, { \"_id\" : 1 })\n self.face_fs.delete(f_id['_id'])",
"def check_user_del(user):\n\twith open('tracked_users', 'r') as myfile:\n\t\tuserfile = myfile.read()\n\t\tif user.lower() in userfile.lower():\n\t\t\treturn 1\n\treturn 0",
"async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):",
"def delete_user():",
"def removeUser(self, username):\r\n try:\r\n self.getUser(username)\r\n for line in fileinput.input(self.filename, inplace=1):\r\n if self.scanner.match(line).groups()[0] != username:\r\n print(line[:-1])\r\n except KeyError:\r\n raise CredentialError('No such user')",
"def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)",
"def delete_user():\n #TODO user delete\n pass",
"def delete(self, host, file):",
"def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")",
"def do_deluser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.del_contact(line)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Wrong syntax! Type 'help delete'\")\n\t\telse:\n\t\t\tprint(\"To delete contacts you need to open or create a book.\")",
"def DeleteUser(self, delusercount, deluser):\n for i in range(delusercount):\n login = string.replace(deluser[i]['Login'], ' ', '')\n action = 'userman -D ' + login\n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET ToDo = 0 WHERE Login = '%s'\" % (login)]",
"def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass",
"def delete_user(username, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n command = \"DELETE FROM {table} WHERE username =?\".format(table=safe(table))\n c.execute(command, (username,))\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"Error when trying to delete user \" + username + \\\n \" from table \" + table + \" in file \" + db_file)\n print(e)\n return False\n else:\n return True",
"def delete_file(filename):\n\tprint client.file_delete(filename)",
"def delete_user(id):\n pass",
"def delete_users(user_id):\n my_users = storage.get(\"User\", user_id)\n if my_users:\n storage.delete(my_users)\n storage.save()\n storage.close()\n return jsonify({}), 200\n else:\n abort(404)",
"def test_080_user_delete(self):\n\n testflow.step(RMV_USR_MSG, TEST_GROUP_DELETE)\n assert USER_CLI.run('delete', TEST_USER_DELETE)[0]",
"def delete_user():\r\n raise NotImplementedError()",
"def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")",
"def delete_user(self, username):\n Log.info(\"Start to delete user.\")\n if self.check_if_user_exists(username):\n self.click((\"xpath\", self.user_checkbox_xpath.format(username)))\n self.click(self.user_del_btn)\n self.click(self.dialog_del_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"Use is deleted.\")\n else:\n Log.info(\"User \" + username + \" is not in the user list, not delete.\")",
"def delete_data_file(path):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n os.remove(path)\n except FileNotFoundError:\n pass",
"def delete(self, filename):\n pass",
"def delete_user(self, userId):\n\n try:\n query = \"delete from user where userId = {}\".format(userId)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n\n logger.info(\"Deleted\")\n except Exception as e:\n logger.error(\"Error occured at data deletion..\", e)",
"def del_user(UserName):\r\n\r\n try:\r\n conn = sql.connect('database.db')\r\n cur = conn.cursor()\r\n delete_query=f\"Delete from users1 where UserName='{UserName}'\"\r\n update = cur.execute(delete_query)\r\n conn.commit()\r\n msg=\"Deleted UserName from database based on UserName.\"\r\n except:\r\n msg=\"UserName deletion problem exists\"\r\n finally:\r\n conn.close()\r\n return msg",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def del_user(self, username):\n pass",
"def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])",
"def delete_file(client, file_id):\n\n try:\n client.file(file_id=file_id).delete()\n print(f\"File with {file_id} has been deleted\")\n return True\n\n except Exception as e:\n print(f\"an error has occurred: {e}\")\n return False",
"def test_users_username_delete(self):\n pass",
"async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")"
] |
[
"0.72505856",
"0.6862722",
"0.6805278",
"0.6782408",
"0.6610334",
"0.6485585",
"0.6411243",
"0.63377917",
"0.6297156",
"0.6207489",
"0.6204286",
"0.61876947",
"0.6148491",
"0.61326313",
"0.6066154",
"0.6051134",
"0.60115385",
"0.5980973",
"0.59755516",
"0.59708357",
"0.5968604",
"0.5952245",
"0.5942745",
"0.5927305",
"0.59056",
"0.59014577",
"0.5891784",
"0.58862084",
"0.58673036",
"0.58501047"
] |
0.8196673
|
0
|
This function processes an image and returns its most frequent pixel color as a hex int; on failure it returns a default color
|
def av_color(file):
try:
image = Image.open(file)
w, h = image.size
pixels = image.getcolors(w * h)
most_frequent_pixel = pixels[0]
for count, colour in pixels:
if count > most_frequent_pixel[0]:
most_frequent_pixel = (count, colour)
dbg = int('0x%02x%02x%02x' % most_frequent_pixel[1], 16)
print(dbg)
return dbg
except Exception as e:
print('[!Error!] in AV COLOR')
print(e)
return 0xB46BCF
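
A brief usage sketch with a hypothetical image path; the return value is a plain int such as 0xB46BCF that can be formatted back to hex:

dominant = av_color('data/tmp/avatar.png')  # hypothetical path
print(hex(dominant))  # e.g. 0xb46bcf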
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_reddest_pixel(img):\n # HINTS/ADVICE-------------\n # Use a nested for loop here.\n #\n # BE CAREFUL DOING ARITHMETIC WITH UNSIGNED INTEGERS: \n # >>> a = np.array([2], dtype='uint8')\n # >>> b = np.array([3], dtype='uint8')\n # >>> a - b\n # array([255], dtype=uint8)\n #\n # Reminder:\n # numpy arrays have a \"shape\" attribute that stores the layout:\n # img.shape[0] - rows\n # img.shape[1] - columns\n # img.shape[2] - color channels\n\n max_redness = 0\n max_x = 0\n max_y = 0\n \n img = np.array(img, dtype = 'int32')\n for r in range(img.shape[0]):\n for c in range(img.shape[1]):\n red = img[r, c, 2]\n green = img[r, c, 1]\n blue = img[r, c, 0] \n redness = (red - green) + (red - blue)\n\n if redness > max_redness:\n max_redness = redness\n max_x = c\n max_y = r\n \n return (max_x, max_y)",
"def get_color_max(image, color):\n boundaries = find_color_boundaries(image, color)\n if boundaries:\n return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])\n else:\n return 1, None",
"def detect_colour(field):\n # create list of BGR tuples and count them\n pixels = Counter(map(tuple, np.reshape(field, (-1, 3)).tolist()))\n # filter out the colours which just have a few occurrences\n pixels = dict(filter(lambda pixel: pixel[1] > 100, dict(pixels).items()))\n # and merge the nearby colours\n pixels = merge_colours(pixels)\n\n # the background color should be the one with the most pixels present\n return Counter(pixels).most_common(1)[0][0]",
"def main_color_background(img):\r\n\r\n dico = {}; max_value = 0; color = []\r\n\r\n #Convert picture to array\r\n im = Image.fromarray(img)\r\n #data from array recup value pixels \r\n for value in im.getdata():\r\n if value in dico.keys():\r\n dico[value] += 1\r\n else:\r\n dico[value] = 1\r\n\r\n #recup main pixel presence\r\n #except for green pixels (use for our contours)\r\n for key, value in dico.items():\r\n if value > max_value and key != (0, 255, 0):\r\n max_value = value; color = key;\r\n\r\n return color",
"def find_reddest_pixel_fast(img): \n img = np.array(img, dtype = 'int32')\n location = cv2.minMaxLoc((img[:, :, 2] - img[:, :, 1]) + (img[:, :, 2] - img[:, :, 0]))[3]\n return location",
"def get_initial_color():\n if os.path.isfile(DATA_FILE):\n with open(DATA_FILE, 'r') as f:\n data = f.readline()\n print data\n return int(data, base=16)\n else:\n return INITIAL_COLOR",
"def check_image_color(image):\n\n def check_color(i, j, k):\n \"\"\" Function used only for DEBUGGING\"\"\"\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()\n\n if not os.path.isfile(image):\n return \"Image not found\"\n\n def calculate_bgr(data):\n average_color_per_row = numpy.average(data, axis=0)\n average_color = numpy.average(average_color_per_row, axis=0)\n return tuple(average_color)\n\n def calculate_y(r, g, b):\n alpha = 0.299\n betta = 0.587\n gamma = 0.114\n return alpha * r + betta * g + gamma * b\n\n # split the image for four squares calucate averate pixel for them and take higest value\n # blure image and save to /Library/Caches as com.apple.desktop.admin.png\n # in case using blur tool --> blur = cv2.blur(img,(5,5))\n try:\n img_cv_data = cv2.imread(image)\n B, G, R = calculate_bgr(img_cv_data)\n Y = calculate_y(B, G, R)\n height, width = img_cv_data.shape[:2]\n except Exception as err:\n print(f\"[ERROR] {err} with image: {image}\")\n return \"Error parsing image\"\n\n # image detection\n if Y < 72.0:\n _type = \"dark\"\n elif Y >= 73.0 and Y <= 108.0:\n _type = \"evening\"\n else:\n _type = \"light\"\n\n return _type",
"def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)",
"def count_from_top(img):\n pixel_count = 0\n for row in img:\n unique_pixel_vals = np.unique(row)\n if 255 not in unique_pixel_vals: # ignore shading (values between 0-255)\n pixel_count += 1\n else:\n return pixel_count",
"def get_dominant_color(image):\n image = image.convert('RGBA')\n # Shrink the image, so we don't spend too long analysing color\n # frequencies. We're not interpolating so should be quick.\n image.thumbnail((200, 200))\n max_score = 0.0\n dominant_color = None\n\n for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):\n # Skip 100% transparent pixels\n if a == 0:\n continue\n # Get color saturation, 0-1\n saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]\n # Calculate luminance - integer YUV conversion from\n # http://en.wikipedia.org/wiki/YUV\n y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)\n # Rescale luminance from 16-235 to 0-1\n y = (y - 16.0) / (235 - 16)\n # Ignore the brightest colors\n if y > 0.9:\n continue\n # Calculate the score, preferring highly saturated colors.\n # Add 0.1 to the saturation so we don't completely ignore grayscale\n # colors by multiplying the count by zero, but still give them a low\n # weight.\n score = (saturation + 0.1) * count\n if score > max_score:\n max_score = score\n dominant_color = [b, g, r]\n\n return dominant_color",
"def get_primary_color(source: str) -> list:\r\n img = Image.fromarray(source.copy()).convert(\"RGB\")\r\n img.resize((1, 1), resample=0)\r\n primary_color = img.getpixel((0, 0))\r\n return primary_color",
"def retrieveColor(image):\n w, h, dim = image.shape\n ret = np.zeros((w, h, dim), dtype=np.uint8)\n for i in range(w):\n for j in range(h):\n ret[i][j] = fakingColors(image[i][j])\n return np.clip(ret, 0, 255)",
"def get_primary_color(source_img, palette_size, show_img=False):\n\n # Scale down image to conserve resources\n img = source_img.copy()\n img.thumbnail((100, 100))\n\n # Reduce color palette (using k-means)\n img_reduced = img.convert('P', palette=Image.ADAPTIVE, colors=palette_size)\n if show_img:\n img_reduced.show()\n\n # Get list of colors in image\n palette = img_reduced.getpalette()\n\n # Find most common color\n color_counts = sorted(img_reduced.getcolors(), reverse=True)\n primary_index = color_counts[0][1]\n primary_color = palette[primary_index*3:primary_index*3+3]\n\n return primary_color",
"def color_detection(self, img):\n\n # red\n low_red = np.array([0, 0, 160])\n high_red = np.array([130, 130, 255])\n red_threshold = cv2.inRange(img, low_red, high_red)\n\n # green\n low_green = np.array([0, 120, 0])\n high_green = np.array([90, 255, 90])\n green_threshold = cv2.inRange(img, low_green, high_green)\n\n # yellow\n low_yellow = np.array([0, 140, 140])\n high_yellow = np.array([150, 255, 255])\n yellow_threshold = cv2.inRange(img, low_yellow, high_yellow)\n\n count = np.sum(np.nonzero(red_threshold))\n if count == 0:\n print(\"Not red\")\n else:\n print(\"red\")\n return \"red\"\n\n count = np.sum(np.nonzero(green_threshold))\n if count == 0:\n print(\"Not green\")\n else:\n print(\"green\")\n return \"green\"\n\n count = np.sum(np.nonzero(yellow_threshold))\n if count == 0:\n print(\"Not yellow\")\n else:\n print(\"yellow\")\n return \"yellow\"",
"def recognize_color(color, palette):\n min_distance = np.inf\n most_similar_color = None\n for cname, cvalue in palette.items():\n distance = np.sum(np.abs(np.array(color) - np.array(cvalue)))\n if distance < min_distance:\n min_distance = distance\n most_similar_color = cname\n return most_similar_color",
"def preprocess(image):\n return (image / 255) * 2 - 1",
"def read_img(img): #X\n im = plt.imread(img)\n im = im[:, :, :3]\n if im.max()>200:\n im = im/255.\n return rgb_to_hsv(im)-0.5",
"def _get_color(self, c, x, max_num):\n\n ratio = 5*(float(x)/max_num)\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio -= i\n r = (1 - ratio) * self._colors[i][c] + ratio*self._colors[j][c]\n return int(255*r)",
"def getPixelColor(self, n):\n self._logger.debug(\"getPixelColor\")",
"def get_pixel(image, i, j):\n if i >= image.shape[0] or j >= image.shape[1]:\n return 1, None\n return 0, image[i : i + 1, j : j + 1]",
"async def get_color(self, right=False) -> tuple:\n \n if not hasattr(self, \"image\"):\n await self.__get_image()\n\n load = self.image.load()\n if right:\n res = []\n for i in range(self.image.height):\n for j in range(self.image.width):\n res.append(load[self.image.width-1, i])\n else:\n arr_L, arr_R, arr_T, arr_B = [], [], [], []\n for i in range(self.image.height):\n for j in range(self.image.width):\n arr_L.append(load[0, i])\n arr_R.append(load[self.image.width-1, i])\n arr_T.append(load[j, 0])\n arr_B.append(load[j, self.image.height-1])\n res = arr_L + arr_R + arr_T + arr_B\n del arr_L, arr_R, arr_T, arr_B\n total = max(set(res), key=res.count)\n del res, load\n if not total:\n return (0, 0, 0)\n \n return total",
"def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb",
"def rgb_2_scalar_idx(r, g, b):\n return 256 ** 2 * r + 256 * g + b",
"def post_process_image(self, image):\n \n from numpy import max, min\n \n max_value, min_value = max(image), min(image)\n # print(min_value, max_value)\n # print(image)\n min_value = 0 if min_value < 0 else min_value \n _image = 255 * ( ( image - min_value )/ (max_value - min_value) )\n # print('next')\n # print(min(_image), max(_image))\n # print(_image)\n return 255 - _image.astype('uint8')",
"def get_classification(self, image):\n #TODO implement light color prediction\n colors = [TrafficLight.GREEN, TrafficLight.RED, TrafficLight.YELLOW, TrafficLight.UNKNOWN]\n img = np.reshape (image, (1, IMG_H, IMG_W, IMG_C))\n with self.graph.as_default():\n colorNum = np.argmax(self.model.predict(img))\n #print(str(colorNum))\n \n return colors[colorNum] # -1 is UNKNOWN",
"def whatsgreen2(image):\n green = image.hueDistance(color= Color('green'), minvalue=40).binarize()\n return green",
"def findYellow(im): #Use the fast version! (findYellowFast(im)) \n #im = Image.open(imageName)\n pix = im.load() #load in pixel array \n #define HSV value ranges for yellow \n #for now just base of Hue - refine for actual yellows seen in field? \n minHue = 50/360.\n maxHue = 61/360.\n width, height = im.size #find the size of the image \n count = 0 #initialize a counter for yellow pixels. \n for i in range(width): \n for j in range(height): \n (r,g,b) = pix[i,j] #pull out the current r,g,b values \n (h,s,v) = rgb_to_hsv(r/255.,g/255.,b/255.) \n if minHue<=h and h<maxHue: \n count += 1 #add one to the count \n totalPix = width*height \n portion = float(count)/totalPix\n #print(portion)\n return portion",
"def getShadeMap(img):\n return np.bincount(img.astype(int).flat, minlength=256)",
"def unique_colors(img):\n colors = {i[1] for i in img.getcolors(maxcolors=img.size[0]*img.size[1])}\n return colors",
"def findAllColors(img):\n hist = cv2.calcHist([img], [0, 1, 2], None, [256] * 3, [0, 256] * 3)\n allColors = np.argwhere(hist != 0)\n return allColors"
] |
[
"0.67914766",
"0.6712479",
"0.6589963",
"0.6425963",
"0.6399882",
"0.63659",
"0.6218228",
"0.6203629",
"0.6185864",
"0.6180609",
"0.617545",
"0.61731213",
"0.60577923",
"0.6015999",
"0.5913961",
"0.5890827",
"0.5828751",
"0.58275735",
"0.5809326",
"0.57891506",
"0.57731843",
"0.57720387",
"0.57714677",
"0.57607025",
"0.57514954",
"0.5730201",
"0.57209814",
"0.56952786",
"0.56797284",
"0.5675547"
] |
0.71025014
|
0
|
Used to download a picture. Returns 0 on success, -1 on fail
|
async def dl_image(url, filename):
try:
		async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
test = await resp.read()
with open('data/tmp/'+filename.lower(), "wb") as f:
f.write(test)
return 0
except Exception as e:
print('[!ERROR!] in Get image')
print(e)
return -1
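
Because dl_image is a coroutine it has to be awaited; a minimal sketch with a hypothetical URL and filename:

import asyncio

async def main():
    # 0 means the file was written under data/tmp/, -1 means the download failed
    status = await dl_image('https://example.com/picture.png', 'picture.png')
    print('saved' if status == 0 else 'download failed')

asyncio.run(main())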
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def download_picture(inPath):\n success = False\n consoleFeedback = exec_console_command(constants.copyFileToStatic.format(inPath))\n print(consoleFeedback)\n\n if \"SUCCESS\" in consoleFeedback:\n success = True\n else:\n raise IOError(constants.pictureNotFound)\n\n return success",
"def download_single(data):\n url = data[0]\n image_id = data[1]\n target_path = data[2]\n\n if os.path.exists(target_path):\n return\n\n try:\n response = requests.get(url, timeout=30)\n response.raise_for_status()\n except:\n LOGGER.warning('Failed to fetch url %s (id=%d)', url, image_id)\n return\n\n try:\n content = response.content\n image = Image.open(BytesIO(content))\n except:\n LOGGER.warning('Failed to capture image at url %s (id=%d)', url, image_id)\n return\n\n if not image.format == 'JPEG':\n try:\n image = image.convert('RGB')\n except:\n logging.warning('Failed to convert RGB, %s (id=%d)', url, image_id)\n return\n\n try:\n image.save(target_path, format='JPEG', quality=100)\n except:\n LOGGER.warning('Failed to save url %s (id=%d)', url, image_id)\n return\n\n return",
"def __download_image_file(self):\n if not file_utils.file_exists(self.image_file_path):\n logger.info('Downloading Image from - ' + self.image_url)\n return file_utils.download(self.image_url, self.download_path)",
"def downloadGrab(self, url, tag):\n self.log(\"downloading: \"+url)\n try:\n self.downloadTempGrab(url)\n fname, hsh, ext = self.renameTempGrab(url, tag)\n self.logStats(tag+\" \"+hsh[:10])\n if hsh in self.seenHashes:\n self.log(\"Downloaded image is old\")\n #self.logStats(tag+\" dup\")\n return \"DUP\"\n else:\n self.log(\"Downloaded image is new\")\n #self.logStats(tag+\" new\")\n self.thumbnail(fname, fname.replace(\".\"+ext, \".thumb.\"+ext))\n return \"OK\"\n except Exception as e:\n traceback.print_exc()\n fname = \"%s.%s.fail.fail\" % (tag, self.timeCode())\n with open(self.downloadFolder+fname, 'w') as f:\n f.write(\"fail\")\n self.log(\"Download failed\")\n self.logStats(tag+\" fail\")\n return \"FAIL (%s)\" % str(e)",
"def postimg(self,imgurl):\n if self.is_downloadable(imgurl) == True: \n pass\n else:\n return None\n\n \"\"\"\n Download the image from URL and put it in Downloads\n \"\"\"\n try:\n urllib.request.urlretrieve(imgurl,'%s/downloads/%s' % (os.getcwd(),self.filename(imgurl)))\n except Exception as err:\n print (err)\n return None\n\n imgread = open('%s/downloads/%s' % (os.getcwd(),self.filename(imgurl)), 'rb').read()\n \n header = {\n 'Content-Type': self.contenttype(imgurl),\n 'Authorization': 'Basic {basic_auth}'.format(basic_auth=self.basic_auth),\n 'Content-Disposition' : 'attachment; filename=%s' % self.filename(imgurl)\n }\n\n postimgreq = self.reqsesion.post(\n url=self.mediaurl,\n headers = header, \n data = imgread, \n auth=(config['wp_username'],config['wp_password'])\n )\n\n print(postimgreq.status_code) \n \n if postimgreq.status_code == 201:\n os.remove('%s/downloads/%s' % (os.getcwd(),self.filename(imgurl)))\n return json.loads(postimgreq.text)['id']\n else:\n return None",
"def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)",
"def get_tile(url):\n hash_name = hashlib.md5(url.encode(\"utf-16\")).hexdigest()\n fname = hash_name + \".jpeg\"\n print(\"Checking tile\" + fname)\n #if image is already downloaded, return it\n if os.path.isfile(fname):\n print(\"Downloaded!\")\n try:\n # image was fully downloaded, good to return\n return Image.open(fname) \n except Exception:\n print(\"Tile is corrupt :(\")\n # file is corrupted for some reason, so try to download it\n pass\n print(\"Downloading \" + fname)\n req.urlretrieve(url, fname) \n return Image.open(fname)",
"def download_picture(url, filename):\n logger = logging.getLogger(\"steam.query.download_picture\")\n logger.debug(\"Downloading picture ({0}) ...\".format(filename))\n\n try:\n urllib.urlretrieve(url, filename)\n return True\n except:\n logger.error(\"Could not download picture {0} from {1}!\".format(filename, url))\n return False",
"def test_download(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, imgurl,\n body=get_mock_image())\n imgfile = self.processor.download(imgurl, 'spei03.tif')\n self.assertTrue(os.path.exists(os.path.join(\n self.processor.tmp_dir, imgfile)))",
"def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)",
"def download_image(filename, url):\n if not url:\n return url\n refresh_needed = False\n if xbmcvfs.exists(filename) and filename == url:\n # only overwrite if new image is different\n return filename\n else:\n if xbmcvfs.exists(filename):\n xbmcvfs.delete(filename)\n refresh_needed = True\n if xbmcvfs.copy(url, filename):\n if refresh_needed:\n refresh_image(filename)\n return filename\n\n return url",
"def download_image(self, url):\r\n file_path = os.path.join(self.temp_dir, 'image.png')\r\n urlretrieve(url, file_path)\r\n return file_path",
"def download_and_save_image(imgurl, save_dir, num_retries=5, retry_interval=10):\n parse_result = urlparse(imgurl)\n img_name = os.path.basename(parse_result.path)\n img_id = img_name.split(\".\")[0]\n img_data = url_fetch(imgurl, attempt=0, num_retries=num_retries, retry_interval=retry_interval)\n save_name = os.path.join(save_dir, img_name)\n with open(save_name, \"wb\") as f:\n f.write(img_data)\n return {\"path\": save_name, \"img_id\": img_id}",
"def download_image(url, filename):\n r = requests.get(url)\n open(filename, 'wb').write(r.content)",
"def download_img(url,name):\n resp = download(url)\n if (resp!=None):\n image = np.asarray(bytearray(resp), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n cv2.imwrite(name,image)\n return",
"def download_image(full_image_url, image_name):\r\n\r\n logging.debug('download_image({}, {})'.format(full_image_url, image_name))\r\n\r\n if use_proxy:\r\n img_data = requests.get(full_image_url, proxies=proxies, timeout=15, verify=False).content\r\n else:\r\n img_data = requests.get(full_image_url).content\r\n dir_path = os.path.join(os.environ['TEMP'],'WarietyWallpaperImages')\r\n os.makedirs(dir_path, exist_ok=True)\r\n with open(os.path.join(dir_path, image_name), 'wb') as handler:\r\n handler.write(img_data)\r\n image_filesize = os.stat(os.path.join(dir_path, image_name)).st_size\r\n logging.debug('download_image - dir_path = {}'.format(dir_path))\r\n logging.debug('download_image - image_name = {}'.format(image_name))\r\n logging.debug('download_image - image_filesize = {}'.format(image_filesize))\r\n return os.path.join(dir_path, image_name)",
"def download(correlation_id, image_url, output_path=None):\n try:\n response = requests.get(image_url, timeout=15)\n if response.ok:\n if not output_path:\n output_path = os.path.join(TMP_FOLDER, '{}.png'.format(correlation_id))\n with open(output_path, 'wb') as f:\n f.write(response.content)\n except Exception as e:\n log.warn('Error downloading [{}]: [{}]'.format(image_url, e))\n output_path = None\n return output_path",
"def download_image(image_url, image_name, collection_id):\n try:\n response = requests.get(image_url)\n folder_path = imgs_directory + '/' + collection_id\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n image_path = folder_path + '/' + image_name\n # image_path = os.path.join(folder_path, image_name)\n with open(image_path, 'wb') as f:\n f.write(response.content)\n return image_path\n except Exception as e:\n print(f\"An error occurred while downloading image {image_name}. Error message: {e}\")\n return None",
"def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()",
"def save_image(filename: str, img_url: str) -> None:\n\n if not (os.path.isfile(filename)): # Check if the file already exists\n print('Downloading image {}...'.format(img_url))\n res = requests.get(img_url) # Download the image.\n res.raise_for_status()\n\n # Save the image\n image_file = open(filename, 'wb')\n for chunk in res.iter_content(100000):\n image_file.write(chunk)\n image_file.close()",
"def get_image(self, image_id):\n index = int(image_id)\n if index >= len(self._image_urls):\n message = \"Url index does not exist: '%i'\" % index\n return (None, message)\n url = self._image_urls[index]\n message = \"Successful URL found.\"\n return (url, message)",
"def download(url, out_folder):\n \n filename = \"2.png\"\n \n outpath = os.path.join(out_folder, filename)\n \n if url.lower().startswith(\"http\"):\n urlretrieve(url, outpath)\n else:\n urlretrieve(urlparse.urlunparse(parsed), outpath)",
"def dl_image(img_name, img_url):\n path = os.path.join(base_path, img_name)\n res = requests.get(img_url)\n with open(path, 'wb') as fout:\n fout.write(res.content)",
"def anon_download(url: str):\n if verify(url):\n location = download(url)\n return location\n return 6",
"def _download_image(image_info):\n starttime = time.time()\n image_location = _image_location(image_info)\n for attempt in range(CONF.image_download_connection_retries + 1):\n try:\n image_download = ImageDownload(image_info, time_obj=starttime)\n\n with open(image_location, 'wb') as f:\n try:\n for chunk in image_download:\n f.write(chunk)\n except Exception as e:\n msg = 'Unable to write image to {}. Error: {}'.format(\n image_location, str(e))\n raise errors.ImageDownloadError(image_info['id'], msg)\n except errors.ImageDownloadError as e:\n if attempt == CONF.image_download_connection_retries:\n raise\n else:\n LOG.warning('Image download failed, %(attempt)s of %(total)s: '\n '%(error)s',\n {'attempt': attempt,\n 'total': CONF.image_download_connection_retries,\n 'error': e})\n time.sleep(CONF.image_download_connection_retry_interval)\n else:\n break\n\n totaltime = time.time() - starttime\n LOG.info(\"Image downloaded from %(image_location)s \"\n \"in %(totaltime)s seconds. Transferred %(size)s bytes. \"\n \"Server originaly reported: %(reported)s.\",\n {'image_location': image_location,\n 'totaltime': totaltime,\n 'size': image_download.bytes_transferred,\n 'reported': image_download.content_length})\n image_download.verify_image(image_location)",
"def _download_with_proxy(image_info, url, image_id):\n no_proxy = image_info.get('no_proxy')\n if no_proxy:\n os.environ['no_proxy'] = no_proxy\n proxies = image_info.get('proxies', {})\n verify, cert = utils.get_ssl_client_options(CONF)\n resp = None\n for attempt in range(CONF.image_download_connection_retries + 1):\n try:\n # NOTE(TheJulia) The get request below does the following:\n # * Performs dns lookups, if necessary\n # * Opens the TCP socket to the remote host\n # * Negotiates TLS, if applicable\n # * Checks cert validity, if necessary, which may be\n # more tcp socket connections.\n # * Issues the get request and then returns back to the caller the\n # handler which is used to stream the data into the agent.\n # While this all may be at risk of transitory interrupts, most of\n # these socket will have timeouts applied to them, although not\n # exactly just as the timeout value exists. The risk in transitory\n # failure is more so once we've started the download and we are\n # processing the incoming data.\n resp = requests.get(url, stream=True, proxies=proxies,\n verify=verify, cert=cert,\n timeout=CONF.image_download_connection_timeout)\n if resp.status_code != 200:\n msg = ('Received status code {} from {}, expected 200. '\n 'Response body: {} Response headers: {}').format(\n resp.status_code, url, resp.text, resp.headers)\n raise errors.ImageDownloadError(image_id, msg)\n except (errors.ImageDownloadError, requests.RequestException) as e:\n if (attempt == CONF.image_download_connection_retries\n # NOTE(dtantsur): do not retry 4xx status codes\n or (resp and resp.status_code < 500)):\n raise\n else:\n LOG.warning('Unable to connect to %s, retrying. Error: %s',\n url, e)\n time.sleep(CONF.image_download_connection_retry_interval)\n else:\n break\n return resp",
"def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)",
"def work_on_the_picture(self) -> None:\n self.folder_create(self.folder_config)\n value_image_used = os.path.join(self.folder_config, entrance_bot_img_name)\n if os.path.exists(value_image_used) and os.path.isfile(value_image_used):\n return value_image_used\n a = TelegramManager()\n try:\n value_img = self.produce_request(entrance_bot_img_link)\n if value_img.status_code == 200:\n with open(value_image_used, 'wb') as new_picture:\n for chunk in value_img:\n new_picture.write(chunk)\n return value_image_used\n a.proceed_message_values('Unfortunatelly, your link to the image is not working.')\n except Exception as e:\n a.proceed_message_values(f'We faced problem with the getting requests. Mistake: {e}')\n return ''",
"def threaded_image(self, image_file : str, image_url : str) -> NoReturn:\n\n # Sets up retry configuration to prevent connection refusals from too many requests at once\n with requests.Session() as session:\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n response = session.get(\n image_url,\n headers={\"Connection\":\"close\"}\n )\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n self.update_completed(1)",
"def download_images(self, url_file, destination_dir, log_file):\n try:\n self._download_images(url_file, destination_dir, log_file)\n except IOError as error:\n sys.stderr.write(str(error))\n sys.exit(error.errno)\n except Exception as error:\n sys.stderr.write('[Unknown error] %s' % str(error))\n sys.exit(1)"
] |
[
"0.67260003",
"0.67189807",
"0.6644087",
"0.6582544",
"0.65628064",
"0.65265995",
"0.6522943",
"0.64578414",
"0.6457213",
"0.6372547",
"0.6365554",
"0.62701195",
"0.62566346",
"0.62408376",
"0.6234943",
"0.6145252",
"0.61320657",
"0.6111227",
"0.6094474",
"0.6086137",
"0.60846335",
"0.60833865",
"0.607145",
"0.60537386",
"0.60475844",
"0.60424876",
"0.60411054",
"0.60239744",
"0.6019278",
"0.5999387"
] |
0.6896206
|
0
|
Returns the Unicode flag symbol for a given ISO 3166-1 alpha-2 country code
|
def flag(code):
OFFSET = ord('🇦') - ord('A')
if not code:
return u''
points = list(map(lambda x: ord(x) + OFFSET, code.upper()))
try:
return chr(points[0]) + chr(points[1])
except ValueError:
        # str has no .decode() in Python 3: round-trip through bytes so the escape
        # sequences expand (fallback kept from narrow Python 2 builds)
        return ('\\U%08x\\U%08x' % tuple(points)).encode('ascii').decode('unicode-escape')
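
A short usage sketch of the regional-indicator conversion:

print(flag('FR'))  # two regional indicator symbols that render as the French flag
print(flag(''))    # an empty code returns an empty string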
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def flag(countrycode: str) -> str:\r\n\r\n code = [c for c in countrycode.lower() if c in ASCII_LOWER]\r\n if len(code) == 2:\r\n # Regional indicator symbols\r\n return flag_regional_indicator(code)\r\n if len(code) > 2 and len(code) < 7:\r\n # Tag sequence\r\n return flag_tag_sequence(code)\r\n found = ''.join(code)\r\n raise ValueError(\r\n 'invalid countrycode, found %d (%r) in %r.' %\r\n (len(found), found, countrycode))",
"def flag(countrycode: str) -> str:\r\n\r\n return flag(countrycode)",
"def flag_regional_indicator(code: List[str]) -> str:\r\n\r\n return \"\".join([chr(ord(c.upper()) + OFFSET) for c in code])",
"def uCSIsLetterlikeSymbols(code):\n ret = libxml2mod.xmlUCSIsLetterlikeSymbols(code)\n return ret",
"def uCSIsBasicLatin(code):\n ret = libxml2mod.xmlUCSIsBasicLatin(code)\n return ret",
"def flag_tag_sequence(code: List[str]) -> str:\r\n\r\n tags = \"\".join([chr(ord(c.lower()) + OFFSET_TAG) for c in code])\r\n return BLACKFLAG + tags + CANCELTAG",
"def uCSIsTaiXuanJingSymbols(code):\n ret = libxml2mod.xmlUCSIsTaiXuanJingSymbols(code)\n return ret",
"def iata(code):\r\n if len(code) == 3:\r\n return code.upper()\r\n else:\r\n raise argparse.ArgumentTypeError(\"%s is not valid IATA code\" % code)",
"def country(alpha_2_code: str) -> None:",
"def uCSIsLatinExtendedB(code):\n ret = libxml2mod.xmlUCSIsLatinExtendedB(code)\n return ret",
"def CODE(string):\n return ord(string[0])",
"def uCSIsLatinExtendedA(code):\n ret = libxml2mod.xmlUCSIsLatinExtendedA(code)\n return ret",
"def uCSIsLatinExtendedAdditional(code):\n ret = libxml2mod.xmlUCSIsLatinExtendedAdditional(code)\n return ret",
"def uCSIsMathematicalAlphanumericSymbols(code):\n ret = libxml2mod.xmlUCSIsMathematicalAlphanumericSymbols(code)\n return ret",
"def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)",
"def uCSIsHangulSyllables(code):\n ret = libxml2mod.xmlUCSIsHangulSyllables(code)\n return ret",
"def getChar(self,code):\r\n return chr(code)",
"def show_c(text):\n complement = text.replace(\"_\", \" \")\n return 'C %s' % complement",
"def uCSIsLatin1Supplement(code):\n ret = libxml2mod.xmlUCSIsLatin1Supplement(code)\n return ret",
"def to_symbol(text):\n text = text.upper()\n if text in (\"BGM\", \"BANGUMI\"):\n return \"bgm\"\n elif text in (\"MAL\", \"MYANIMELIST\"):\n return \"mal\"\n else:\n return None",
"def uCSIsKatakanaPhoneticExtensions(code):\n ret = libxml2mod.xmlUCSIsKatakanaPhoneticExtensions(code)\n return ret",
"def uCSIsGreekandCoptic(code):\n ret = libxml2mod.xmlUCSIsGreekandCoptic(code)\n return ret",
"def getCode1Letter(self):\n dataDict = self.__dict__\n cc = self.stdChemComp\n if cc is None:\n result = None\n else:\n result = cc.code1Letter\n return result",
"def _hexchar(c):\n if c == '1': return 1\n if c == '2': return 2\n if c == '3': return 3\n if c == '4': return 4\n if c == '5': return 5\n if c == '6': return 6\n if c == '7': return 7\n if c == '8': return 8\n if c == '9': return 9\n if c == 'A' or c == 'a': return 10\n if c == 'B' or c == 'b': return 11\n if c == 'C' or c == 'c': return 12\n if c == 'D' or c == 'd': return 13\n if c == 'E' or c == 'e': return 14\n if c == 'F' or c == 'f': return 15\n return 0",
"def FSMLetterSymbol(letter):\n return FSMEmptyWordSymbol if letter is None else repr(letter)",
"def uCSIsKatakana(code):\n ret = libxml2mod.xmlUCSIsKatakana(code)\n return ret",
"def non_secret_char(c):\n return c",
"def uCSIsGreekExtended(code):\n ret = libxml2mod.xmlUCSIsGreekExtended(code)\n return ret",
"def uCSIsUnifiedCanadianAboriginalSyllabics(code):\n ret = libxml2mod.xmlUCSIsUnifiedCanadianAboriginalSyllabics(code)\n return ret",
"def uCSIsYijingHexagramSymbols(code):\n ret = libxml2mod.xmlUCSIsYijingHexagramSymbols(code)\n return ret"
] |
[
"0.64730406",
"0.64475",
"0.63321203",
"0.62854385",
"0.6161302",
"0.60630107",
"0.6045155",
"0.6037729",
"0.60337454",
"0.59990394",
"0.5994314",
"0.5979302",
"0.592053",
"0.5904522",
"0.5877021",
"0.58428967",
"0.58397055",
"0.5797212",
"0.5775551",
"0.5729321",
"0.57204676",
"0.5706527",
"0.5695403",
"0.56900585",
"0.56680757",
"0.5653729",
"0.56266916",
"0.5623938",
"0.5607301",
"0.56033754"
] |
0.74334806
|
0
|
Load the Speech Enhancement STFT UNET masking deep learning model.
|
def deep_masking(model: str = 'resnet-unet', quantized: bool = False, **kwargs):
model = model.lower()
if model not in _masking_availability:
raise ValueError(
'model not supported, please check supported models from `malaya_speech.speech_enhancement.available_deep_masking()`.'
)
return unet.load_stft(
model=model,
module='speech-enhancement-mask',
instruments=['voice', 'noise'],
quantized=quantized,
**kwargs
)
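A hedged usage sketch. The import path follows the error message inside the snippet (malaya_speech.speech_enhancement); the exact package layout and model name availability are assumptions.
# Sketch only: assumes malaya_speech exposes this loader as
# malaya_speech.speech_enhancement.deep_masking and that 'resnet-unet' is a supported model.
import malaya_speech

model = malaya_speech.speech_enhancement.deep_masking(model='resnet-unet', quantized=False)
# The returned STFT UNET model masks the input into 'voice' and 'noise' stems.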
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def load_feature_extractor(model_spec, device):\n\n model_type = model_spec['name']\n model_weights_fp = model_spec['weights']\n\n if model_type == 'imagenet_swav':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_supervised':\n model = models.resnet50(pretrained=True)\n\n elif model_type == 'random':\n model = models.resnet50(pretrained=False)\n\n elif model_type == 'inat2018_supervised':\n model = models.resnet50(pretrained=False)\n # This model was actually trained with 10000 classes for the fc layer\n # but only 8142 (the number in inat2018) were actually updated\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in checkpoint['state_dict'].items()}\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n 
# remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_mini_swav' or model_type == 'inat2021_mini_swav_1k':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict['state_dict'].items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n else:\n raise ValueError(\"Unknown pytorch model: %s\" % model_type)\n\n\n # remove the final fully connected layer so the model only operates with post average pool features\n model = torch.nn.Sequential(*(list(model.children())[:-1]))\n model.to(device)\n model.eval()\n\n feature_extractor = PTResNet50FeatureExtractor(model, device)\n\n return feature_extractor",
"def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model",
"def load_trained_net(mal):\n model_root = os.path.join(os.getcwd(), 'data', 'models')\n model = load_model(os.path.join(model_root, 'model_' + mal + '.h5'))\n\n return model",
"def load_model():\n global obj\n obj = NutritionTableDetector()\n print(\"Weights Loaded!\")",
"def load_onnx_model(self):\n print(\"Loading Rescue Detection Model\")\n\n self.rescue_model = cv2.dnn.readNetFromONNX(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n rescue_cnn_model_path))\n\n self.rescue_model.setPreferableTarget(Rescue_PI.preferable_target)",
"def stft():\n trainer = {\n 'model': {\n 'encoder': {\n 'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.StftEncoder'\n },\n 'decoder': {\n 'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.IstftDecoder'\n },\n }\n }",
"def model_fn(model_dir):\n ctx = mx.cpu()\n net = unet.Unet()\n print (\"Loading\", model_dir)\n if path.exists(model_dir+\"/unet_RGB.params\"):\n print (\"Loading RGB Model\")\n net.load_params(model_dir+\"/unet_RGB.params\", ctx)\n print (\"RGB Model Loaded\")\n \n elif path.exists(model_dir+\"/unet_ALL_BANDS.params\"):\n print (\"Loading ALL_BANDS Model\")\n net.load_params(model_dir+\"/unet_ALL_BANDS.params\", ctx)\n print (\"ALL_BANDS Model Loaded\")\n \n else:\n print (\"Model Missing\")\n net=None\n return (net)",
"def esm1v_t33_650M_UR90S_5():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_5\")",
"def esm1v_t33_650M_UR90S_3():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_3\")",
"def bisenet_face_parsing():\r\n network = BiSeNet_keras()\r\n network.load_weights(MODEL_PATH)\r\n return network",
"def load_trained_model(unit):\n return load_model(DATA_FOLDER + \"{}_cdae_model.hd5\".format(UNITS[unit]))",
"def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return",
"def setupEmbeddings(self, path = \"awd_lm\"):\n try:\n data_lm = TextLMDataBunch.from_df(path, train_df=self.train, valid_df=self.valid,\\\n text_cols = \"text\", label_cols = \"label\")\n except:\n print(\"error creating LM\")\n return\n\n learn = language_model_learner(data_lm, arch=AWD_LSTM, drop_mult=.25)\n learn.fit_one_cycle(1, 1e-2)\n learn.save_encoder('ft_enc_1')\n\n learn.unfreeze()\n learn.fit_one_cycle(3, 1e-3)\n learn.save_encoder('ft_enc_1')\n\n learn.unfreeze()\n learn.fit_one_cycle(5, 5e-4)\n learn.save_encoder('ft_enc_1')\n\n print(\"feature encoding saved\")",
"def esm1v_t33_650M_UR90S_4():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_4\")",
"def esm1v_t33_650M_UR90S():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_1\")",
"def esm1v_t33_650M_UR90S_2():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_2\")",
"def __call__(self):\n custom_obj = {'tf': tf, 'relu6': tf.nn.relu6}\n wfile = self._get_model_weights()\n model = tf.keras.models.load_model(wfile, custom_objects=custom_obj)\n\n if not self._trainable:\n # freeze encoder layers up to\n # expanded_conv_16_project_BN\n for layer in model.layers[1:147]:\n layer.trainable = False\n\n return model",
"def esm1b_t33_650M_UR50S():\n return load_model_and_alphabet_hub(\"esm1b_t33_650M_UR50S\")",
"def esm1v_t33_650M_UR90S_1():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_1\")",
"def load_swin_model(model_path, cfg_path):\n # set up model config\n model = init_detector(cfg_path, model_path, device='cuda:0')\n return model",
"def model(flags):\n input_audio = tf.keras.layers.Input(\n shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),\n batch_size=flags.batch_size)\n net = input_audio\n\n if flags.preprocess == 'raw':\n # it is a self contained model, user need to feed raw audio only\n net = speech_features.SpeechFeatures(\n speech_features.SpeechFeatures.get_params(flags))(\n net)\n\n time_size, feature_size = net.shape[1:3]\n\n channels = parse(flags.channels)\n\n net = tf.keras.backend.expand_dims(net)\n\n if flags.debug_2d:\n conv_kernel = first_conv_kernel = (3, 3)\n else:\n net = tf.reshape(\n net, [-1, time_size, 1, feature_size]) # [batch, time, 1, feature]\n first_conv_kernel = (3, 1)\n conv_kernel = parse(flags.kernel_size)\n\n net = tf.keras.layers.Conv2D(\n filters=channels[0],\n kernel_size=first_conv_kernel,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n net = tf.keras.layers.Activation('relu')(net)\n\n if parse(flags.pool_size):\n net = tf.keras.layers.AveragePooling2D(\n pool_size=parse(flags.pool_size), strides=flags.pool_stride)(\n net)\n\n channels = channels[1:]\n\n # residual blocks\n for n in channels:\n if n != net.shape[-1]:\n stride = 2\n layer_in = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=1,\n strides=stride,\n padding='same',\n activation='linear')(\n net)\n layer_in = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n layer_in)\n layer_in = tf.keras.layers.Activation('relu')(layer_in)\n else:\n layer_in = net\n stride = 1\n\n net = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=conv_kernel,\n strides=stride,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n net = tf.keras.layers.Activation('relu')(net)\n\n net = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=conv_kernel,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n\n # residual connection\n net = tf.keras.layers.Add()([net, layer_in])\n net = tf.keras.layers.Activation('relu')(net)\n\n net = tf.keras.layers.AveragePooling2D(\n pool_size=net.shape[1:3], strides=1)(\n net)\n\n net = tf.keras.layers.Dropout(rate=flags.dropout)(net)\n\n # fully connected layer\n net = tf.keras.layers.Conv2D(\n filters=flags.label_count,\n kernel_size=1,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n\n net = tf.reshape(net, shape=(-1, net.shape[3]))\n return tf.keras.Model(input_audio, net)",
"def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models.alexnet(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading pretrained ResNet18 Model\")\n model_ft = models.resnet18(pretrained=True)\n\n for param in model_ft.parameters(): # Code for fixing the Conv Layer\n param.requires_grad = False # During Training Conv layer does not learn.\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet50\":\n print(\"Loading pretrained ResNet50 Model\")\n\n model_ft = models.resnet50(pretrained=True)\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"DenseNet\":\n print(\"Loading pretrained DenseNet161 Model\")\n model_ft = models.densenet161(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, 100)\n\n if cfg.load_model_true:\n model_ft.load_state_dict(torch.load(cfg.load_model_path))\n\n return model_ft",
"def esm1_t34_670M_UR100():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")",
"def __init__(self, in_classes=1, channelscale=64):\n super(DetectPatchAndSegm, self).__init__()\n self.unetvgg1 = UNetSimple(in_classes=in_classes, channelscale=64, out_classes=2)#UNetVgg()\n self.unetvgg2 = UNetSimple(in_classes=3, channelscale=128, out_classes=3)#in is 2 patches and original image\n self.sft = nn.Softmax2d()",
"def load_fasttext_en_pretrained():\r\n log.info(\"Load FT Model\")\r\n path = Path.join(package_path, 'augmentation', 'data', 'fasttext_en', 'cc.en.300.bin')\r\n\r\n if not Path.isfile(path):\r\n raise ValueError(\"Fast Text Pretrained Model is not available, please run: `from seaqube import download;download('fasttext-en-pretrained')`\")\r\n\r\n with open(path, 'rb') as fin:\r\n return PreTrainedFTRawEN(load(fin))",
"def load_caffe_model(self):\n print(\"Loading caffe model used for detecting a human_blob.\")\n\n # Use cv2.dnn function to read the caffe model used for detecting faces and set preferable target.\n # self.detector = cv2.dnn.readNetFromCaffe(os.path.join(\n # os.path.dirname(os.path.realpath(__file__)),\n # prototxt_path),\n # os.path.join(\n # os.path.dirname(os.path.realpath(__file__)),\n # human_model_path))\n self.detector = cv2.dnn.readNetFromCaffe(prototxt=\"human_detection_model/MobileNetSSD_deploy.prototxt.txt\",\n caffeModel=\"human_detection_model/MobileNetSSD_deploy.caffemodel\")\n self.detector.setPreferableTarget(Rescue_PI.preferable_target)",
"def esm1_t34_670M_UR50D():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50D\")",
"def modify_to_return_embeddings(net, model_name):\n if model_name in [\"vgg_face_dag\", \"vgg_m_face_bn_dag\"]:\n net.fc8 = torch.nn.Sequential()\n else:\n msg = \"{} not yet supported\".format(model_name)\n raise NotImplementedError(msg)\n return net"
] |
[
"0.59340066",
"0.5911736",
"0.5900582",
"0.57844377",
"0.5723234",
"0.56802684",
"0.5617355",
"0.55857116",
"0.55831295",
"0.5580721",
"0.55755264",
"0.5573632",
"0.5558571",
"0.5548964",
"0.5547449",
"0.55412763",
"0.54874104",
"0.548048",
"0.54715073",
"0.5469528",
"0.5462655",
"0.54531294",
"0.5419599",
"0.54157615",
"0.5414407",
"0.5392999",
"0.5390632",
"0.53652555",
"0.53600526",
"0.53515047"
] |
0.62793493
|
0
|
Adds a period to a line that is missing a period
|
def fix_missing_period(line):
if "@highlight" in line: return line
if line=="": return line
if line[-1] in END_TOKENS: return line
# print line[-1]
return line + " ."
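A small usage sketch; END_TOKENS is assumed to be defined in the same module with the usual sentence-final punctuation.
# Assumes END_TOKENS is defined alongside the function, e.g.:
# END_TOKENS = ['.', '!', '?', '...', "'", '`', '"', ')']
print(fix_missing_period("the cat sat on the mat"))   # -> "the cat sat on the mat ."
print(fix_missing_period("already ends here."))       # -> unchanged
print(fix_missing_period("@highlight"))               # -> unchanged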
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"",
"def test_trailing_period(self):\r\n self.assertEqual(4.0, calc.evaluator({}, {}, '4.'))",
"def period(self, value: int, /) -> None:",
"def append_period(text):\r\n\tif text[-1] == '\\\"':\r\n\t\treturn text[0:-1]+'.\\\"'\r\n\treturn text",
"def filter_period_period(self, string):\n newstring = string\n newer_string = newstring.replace(\"..\", \"--\")\n while newstring != newer_string:\n newer_string = newstring.replace(\"..\", \"--\")\n\n return newstring",
"def period():\n frequencyText.config(state = NORMAL)\n frequencyText.delete(\"1.0\", END)\n frequencyText.config(state = DISABLED)\n periodText.config(state = NORMAL)",
"def period(self) -> int:",
"def test_period(self):\r\n with self.assertRaises(ParseException):\r\n calc.evaluator({}, {}, '.')\r\n with self.assertRaises(ParseException):\r\n calc.evaluator({}, {}, '1+.')",
"def AddMissingPresentValue(self, point: str) -> None:\n self._valid = False\n self._missing_present_values.append(point)",
"def add_accumulated(self) -> None:\n if len(self.line_parts):\n for word in self.next_line.line_parts[0].words:\n self.line_parts[-1].add_word(word)\n self.next_line.line_parts = self.next_line.line_parts[1:]\n\n self.line_parts.extend(self.next_line.line_parts)\n last_part = self.line_parts[-1]\n last_part.add_word(' ')\n self.next_line.line_parts = [\n PDFTextLinePart(last_part.style, self.fonts, last_part.ids)\n ]",
"def end_of_line():\r\n set_point(point().end_of_line())",
"def shift2line(xs, ys, b, period=360):\r\n yline = xs + b\r\n ydist = ys - yline\r\n ys[ydist > period/2.] -= period\r\n ys[ydist < -period/2.] += period\r\n return ys",
"def periodCheck(data):",
"def create(self, vals):\n lines = super(KaHrPayrollTunjanganKhususPeriodLines, self).create(vals)\n if not 'name' in vals or not vals.get('name'):\n date_obj = datetime.strptime(lines.period_id.date_start, DATE_FORMAT)\n date_str = date_obj.strftime('%d-%m-%Y')\n lines.name = \"Detail {0}. Periode: {1}\".format(lines.combine_id.name, date_str)\n return lines",
"def add_line_piece(self, start, end):\r\n self.pieces.append(LineSegment(start, end))\r\n self.total_length += self.pieces[-1].get_length()",
"def addPeriod(self, value, track=0, color=None):\n begin, end, title = value\n period = TimelineDelta(begin, end, title=title, parent=self, top=Track.whichTop(track))\n self._tracks[period.track].periods.append(period)\n return period",
"def period(self, period):\n\n self._period = period",
"def period(self, period):\n\n self._period = period",
"def test_custom_period():\n lons = np.arange(-180, 190, 10)\n expected = np.concatenate(\n [np.arange(-180, 0, 10), np.arange(-180, 0, 10), np.array([-180])]\n )\n result = wrap(lons, period=180)\n np.testing.assert_array_equal(result, expected.astype(DTYPE))",
"def missing_reg_interval(keys, values, time_before, time_after, hour):\n if is_dropped(time_after, time_before):\n keys.append((time_before, time_after))\n values.append((time_after - time_before, hour))\n Config.ANALYSIS.write(f\"{(time_before, time_after)}: {(time_after - time_before, hour)}, \"\n f\"{round((time_after - time_before) / float(Config.BOUNDARY) - 0.5)} \"\n f\"possible missing regulars\\n\")",
"def extend(self,\n period_data: List[Candle]) -> None:\n self.point_1_moment = period_data[0].moment\n self.point_1_price = period_data[0].open\n self.point_2_moment = period_data[1].moment\n self.point_2_price = period_data[1].open",
"def test_cyclic_p_spline_custom_period():\n\n # define square wave\n X = np.linspace(0, 1, 5000)\n y = X > 0.5\n\n # when modeling the full period, we get close with a periodic basis\n gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0)).fit(X, y)\n assert np.allclose(gam.predict(X), y)\n assert np.allclose(gam.edge_knots_[0], [0, 1])\n\n # when modeling a non-periodic function, our periodic model fails\n gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0, edge_knots=[0, 0.5])).fit(X, y)\n assert np.allclose(gam.predict(X), 0.5)\n assert np.allclose(gam.edge_knots_[0], [0, 0.5])",
"def update_period(self):\n return 0.1",
"def add_line_element(self, movement: int, duration: int) -> None:\n if self.current_time_in_eighths == self.total_duration_in_eighths:\n raise RuntimeError(\"Attempt to add notes to a finished piece.\")\n if not self.check_validity(movement, duration):\n raise ValueError(\n \"The suggested continuation is not valid. \"\n \"It either breaks some rules or goes beyond ranges.\"\n )\n next_line_element = self.__find_next_element(movement, duration)\n self.counterpoint.append(next_line_element)\n self.__add_to_piano_roll(next_line_element)\n self.__update_runtime_variables(movement, duration)\n self.__finalize_if_needed()",
"def test_bad_period(self):\n self.period = 'bad'\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 104)",
"def _generate_single_student_report_line(self, student_record, periods,\n use_period_separator = True, separator = None,):\n line = [student_record.full_name_lastname_first()]\n for period in periods:\n # if requested insert a separator between each period but\n # not between the name and the first period\n if (use_period_separator and (len(line) > 1)):\n line.append(separator)\n school_days, days_present = \\\n student_record.attendance.get_summary(\n period[0], period[1])\n if not school_days:\n school_days = 1\n percent_present = round((100.0 *days_present / school_days), 1)\n days_absent = school_days - days_present\n percent_absent = 100.0 - percent_present\n line.extend((days_present, percent_present))\n #days_absent, percent_absent))\n return line",
"def add_time_period(df):\n\n # determine in which half hour period of the day the \n # predicted time of arrival falls\n\n interval = df.iloc[0].planned_arrival // 1800 \n\n # find string representation of period from dict. mapping (top)\n\n inverval_string = interval_map[interval]\n\n # add the feature\n\n df['TIME_PERIOD_ARRIVAL'] = inverval_string\n\n # set the dtype\n\n df.TIME_PERIOD_ARRIVAL = df.TIME_PERIOD_ARRIVAL.astype('category') \n\n return df",
"def __set_period(self, period):\n if not isinstance(period, int):\n raise TypeError('The period should be an integer')\n if period <= 0:\n raise ValueError('The period should be a natural number')\n self.__period = period",
"def add(self, line):\n self.cull()\n self.lines.append(line)",
"def add_optional(line):\n newline = line.replace(\":\", \"?:\", 1)\n return newline"
] |
[
"0.6506273",
"0.58323836",
"0.57100993",
"0.54586047",
"0.5438765",
"0.5400417",
"0.52904093",
"0.518897",
"0.5163461",
"0.51524025",
"0.50683826",
"0.49437892",
"0.4940815",
"0.49108043",
"0.49103516",
"0.48758814",
"0.48758078",
"0.48758078",
"0.4828564",
"0.48167768",
"0.4816369",
"0.48153085",
"0.47894156",
"0.47738",
"0.476811",
"0.4757895",
"0.47472635",
"0.474702",
"0.47247767",
"0.47231248"
] |
0.63101846
|
1
|
Reads the tokenized .story files corresponding to the URLs listed in the url_file and writes them to an out_file.
|
def write_to_bin(url_file, out_file_article, out_file_abstract, cnn_tokenized_stories_dir, dm_tokenized_stories_dir):
print("Making bin file for URLs listed in %s..." % url_file)
url_list = read_text_file(url_file)
url_hashes = get_url_hashes(url_list)
story_fnames = [s+".story" for s in url_hashes]
num_stories = len(story_fnames)
with open(out_file_article, 'w') as article_writer:
with open(out_file_abstract, 'w') as abstract_writer:
for idx,s in enumerate(story_fnames):
if idx % 1000 == 0:
print("Writing story %i of %i; %.2f percent done" % (idx, num_stories, float(idx)*100.0/float(num_stories)))
# Look in the tokenized story dirs to find the .story file corresponding to this url
if os.path.isfile(os.path.join(cnn_tokenized_stories_dir, s)):
story_file = os.path.join(cnn_tokenized_stories_dir, s)
elif os.path.isfile(os.path.join(dm_tokenized_stories_dir, s)):
story_file = os.path.join(dm_tokenized_stories_dir, s)
else:
print("Error: Couldn't find tokenized story file %s in either tokenized story directories %s and %s. Was there an error during tokenization?" % (s, cnn_tokenized_stories_dir, dm_tokenized_stories_dir))
# Check again if tokenized stories directories contain correct number of files
print("Checking that the tokenized stories directories %s and %s contain correct number of files..." % (cnn_tokenized_stories_dir, dm_tokenized_stories_dir))
check_num_stories(cnn_tokenized_stories_dir, num_expected_cnn_stories)
check_num_stories(dm_tokenized_stories_dir, num_expected_dm_stories)
raise Exception("Tokenized stories directories %s and %s contain correct number of files but story file %s found in neither." % (cnn_tokenized_stories_dir, dm_tokenized_stories_dir, s))
# Get the strings to write to file
article, abstract = get_art_abs(story_file)
article = ' '.join(article.split(' ')[:MAX_TOKENS])
abstract = ' '.join(abstract.split(' ')[:MAX_TOKENS])
eol = "\n"
if idx == num_stories - 1:
eol = ""
article_writer.write(article + eol)
abstract_writer.write(abstract + eol)
print("Finished writing file")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_urls_to_file(urls, file_name):\n with open(file_name, 'w') as file_handler:\n for url in urls:\n content = read_url(url)\n pretty_content = pretty_print_content(content)\n file_handler.write(pretty_content)",
"def write_url(url_list):\r\n with open(\"URLS.txt\", \"a+\") as url_file:\r\n for url in url_list:\r\n url_file.write(url+\"\\n\")",
"def write_to_bin(tok_files, out_file, makevocab=False):\n\n num_stories = len(tok_files)\n\n if makevocab:\n vocab_counter = collections.Counter()\n\n with open(out_file, 'wb') as writer:\n for idx,s in enumerate(tok_files):\n if idx % 1000 == 0:\n print(\"Writing story %i of %i; %.2f percent done\" % (idx, num_stories, float(idx)*100.0/float(num_stories)))\n\n path = os.path.join(tok_dir, s)\n src_path = \"%s.src.tok\" % path\n tgt_path = \"%s.tgt.tok\" % path\n for _ in [src_path, tgt_path]:\n if not os.path.isfile(_):\n raise Exception(\"Error: Couldn't find tokenized file %s\" % _)\n\n # Get the strings to write to .bin file\n article, abstract = [to_bytes(_) for _ in get_art_abs(src_path, tgt_path)]\n\n # Write to tf.Example\n tf_example = example_pb2.Example()\n tf_example.features.feature['article'].bytes_list.value.extend([article])\n tf_example.features.feature['abstract'].bytes_list.value.extend([abstract])\n tf_example_str = tf_example.SerializeToString()\n str_len = len(tf_example_str)\n writer.write(struct.pack('q', str_len))\n writer.write(struct.pack('%ds' % str_len, tf_example_str))\n\n # Write the vocab to file, if applicable\n if makevocab:\n art_tokens = article.split(b' ')\n abs_tokens = abstract.split(b' ')\n art_tokens = [t for t in art_tokens if t not in [to_bytes(SENTENCE_START), to_bytes(SENTENCE_END)]] # remove these tags from vocab\n abs_tokens = [t for t in abs_tokens if t not in [to_bytes(SENTENCE_START), to_bytes(SENTENCE_END)]] # remove these tags from vocab\n tokens = art_tokens + abs_tokens\n tokens = [t.strip() for t in tokens] # strip\n tokens = [t for t in tokens if t!=\"\"] # remove empty\n vocab_counter.update(tokens)\n\n print(\"Finished writing file %s\\n\" % out_file)\n\n # write vocab to file\n if makevocab:\n print(\"Writing vocab file...\")\n with open(os.path.join(finished_files_dir, \"vocab\"), 'wb') as writer:\n for word, count in vocab_counter.most_common(VOCAB_SIZE):\n writer.write(word + b' ' + to_bytes(str(count)) + b'\\n')\n print(\"Finished writing vocab file\")",
"def save_to_file(urls):\n try:\n with open('url.txt', 'w') as file:\n for url in urls:\n file.write(url + \"\\n\")\n except:\n print(\"ERROR SAVING FILE\")",
"def read_url_all(url):\n\n\t\treturn write_file(read_url(url))",
"def analyze_urls(filename, topic):\n # Initialize an empty list. Note that I store my urls and references\n # in a sort of strange way. Each element in result_list is a list of two\n # elements, the first element being the url, and the second element\n # being a list of all the references to the url\n result_list = []\n\n # Using the with...as construct to open the file in read mode\n with open(filename, \"r\", encoding=\"utf-8\") as files:\n # Iterate over each line (each is a url)\n for line in files:\n # Use the try ... except construct\n try:\n # Try to open each url\n with urllib.request.urlopen(line) as url_file:\n # Read the page\n page = url_file.read()\n # Decode the page\n decoded_page = page.decode(\"UTF-8\")\n # Regex expression to find the places which open\n # with a > then have some stuff, then the topic, then\n # close with a <\n pattern = fr\">[^<]*\\b{topic}\\b.*?<\"\n\n # Use the findall method from re to find all of the\n # occurrences of pattern in decoded_page as a list\n # The flags are IGNORECASE and DOTALL\n my_list = re.findall(pattern, decoded_page,\n re.IGNORECASE | re.DOTALL)\n\n # If my_list is not empty\n if my_list:\n # Slice off the the closing and opening angle\n # brackets using a list comprehension\n new_list = [word[1:-1] for word in my_list]\n # Append a new list of two elements to result_list,\n # where the first element of the list is the url,\n # and the second element of the list is the list of\n # references\n result_list.append([line, new_list])\n # One possible error is the urllib.error.URLError\n except urllib.error.URLError as url_err: # Catch the error\n # Print a message, url, and the error\n print(\"Error opening url:\", line, url_err)\n # Another possible error is the UnicodeDecodeError\n except UnicodeDecodeError as dec_err: # Catch the error\n # Print a message, and url\n print(\"Error decoding url:\", line)\n # Print the error\n print(dec_err)\n # Except all other errors\n except:\n pass\n # Return the result_list\n return result_list",
"def collect_html(args):\n url_list = args.url_list\n output_dir = args.output_dir\n\n print(url_list)\n\n # do some checks\n try: \n assert os.path.exists(url_list), 'url_list must exist'\n assert os.path.exists(output_dir), 'output_dir must exist'\n except AssertionError as err: \n logger.error('Failed check: {}'.format(err)) \n return \n\n urls = common.read_file(url_list)\n \n for url in urls: \n logger.debug(url) \n\n html = spy_tools.collect_html(url)\n out = url.split('/')\n output = os.path.join(output_dir, out[-1] + '.html')\n common.write_file(html, output)",
"def tokenize_stories(stories_dir, tokenized_stories_dir):\n print(\"Preparing to tokenize %s to %s...\" % (stories_dir, tokenized_stories_dir))\n stories = os.listdir(stories_dir)\n # make IO list file\n print(\"Making list of files to tokenize...\")\n with open(\"mapping.txt\", \"w\") as f:\n for s in stories:\n t = \"%s.tok\" % s\n f.write(\"%s \\t %s\\n\" % (os.path.join(stories_dir, s), os.path.join(tokenized_stories_dir, t)))\n command = ['java', 'edu.stanford.nlp.process.PTBTokenizer', '-ioFileList', '-preserveLines', 'mapping.txt']\n print(\"Tokenizing %i files in %s and saving in %s...\" % (len(stories), stories_dir, tokenized_stories_dir))\n subprocess.call(command)\n print(\"Stanford CoreNLP Tokenizer has finished.\")\n os.remove(\"mapping.txt\")\n\n # Check that the tokenized stories directory contains the same number of files as the original directory\n num_orig = len(os.listdir(stories_dir))\n num_tokenized = len(os.listdir(tokenized_stories_dir))\n if num_orig != num_tokenized:\n raise Exception(\"The tokenized stories directory %s contains %i files, but it should contain the same number as %s (which has %i files). Was there an error during tokenization?\" % (tokenized_stories_dir, num_tokenized, stories_dir, num_orig))\n print(\"Successfully finished tokenizing %s to %s.\\n\" % (stories_dir, tokenized_stories_dir))",
"def extract_tokenized_in_place(dirname, in_fname):\n\n in_path = dp.path_iteration.get_gigaword_path(dirname, in_fname)\n\n # Make the out_path.\n out_fname = in_fname[:-4]\n out_path = dp.path_iteration.get_tokenized_path(dirname, out_fname)\n\n # Ensure the output directory exists\n out_dir = os.path.dirname(out_path)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # Extract the tokens.\n extract_tokenized_from_file(in_path, out_path)",
"def getHTML():\n for url in urls: #Because there might be multipe URLs to scrape, iterate through the list \n r = requests.get(url)\n r.raise_for_status()\n webpage_html = str(bs4.BeautifulSoup(r.text, \"html.parser\"))\n filenumber = str(urls.index(url)) #Create a variable called filenumber using the index of the url in the list of urls\n filename = \"output_\" + filenumber + \".html\" #This and above line avoid the loop rewriting the file name of the previous file.\n with open(filename, 'w') as file_object: #open a new (or existing) file to be written (or overwritten)\n file_object.write(webpage_html) #write the scraped HTML into the file\n file_object.close #close the file",
"def process_from_file():\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\"",
"def fetch_words(url):\n\n with urlopen(url) as story:\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n\n return story_words",
"def fetch_words(url):\n # story = urlopen('http://sixty-north.com/c/t.txt')\n story = urlopen(url)\n story_words = []\n for line in story:\n # line_words = line.split()\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n\n story.close()\n\n # for word in story_words:\n # print(word)\n return story_words",
"def fetch_words(url):\n story = urlopen(url)\n\n story_words = []\n\n for line in story:\n line_words = line.decode('utf-8').split()\n\n for word in line_words:\n story_words.append(word)\n\n story.close()\n\n return story_words",
"def read_urls(filename):\n \n urls = []\n with open(filename, 'r') as f:\n for line in f:\n if 'puzzle' in line:\n match = re.search(r'GET\\s(.*)HTTP', line)\n url = match.group(1)\n urls.append(url.strip())\n sorted_urls = sorted(set(urls))\n for url in sorted_urls:\n print (url[-8:-4])\n return sorted_urls",
"def wikiextractor_outputs_to_file(\n extracted_dir: str,\n language: str,\n dataset_name: str,\n output_dir: str,\n num_output_files: int,\n max_num_files: int,\n min_sent_word_count: int,\n) -> None:\n # Get list of files in extracted directory\n list_of_files = get_all_filepaths_recursively(extracted_dir, \".bz2\")\n if max_num_files > -1:\n list_of_files = list_of_files[:max_num_files]\n\n # Prepare arguments for multiprocessing\n process_wiki_files_args = [\n (file, language, min_sent_word_count) for file in list_of_files\n ]\n\n # Check if we should default the amount of files the the number of CPUs.\n if num_output_files == -1:\n num_output_files = cpu_count()\n num_output_files_str_len = len(str(num_output_files))\n\n # Compute how many extracted files to have for each output file\n num_extracted_files_per_output_file = int(len(list_of_files) // num_output_files)\n\n # Process files using multiprocessing\n with Pool() as pool:\n for i, mp_args in zip(\n range(num_output_files),\n batch_list_gen(\n process_wiki_files_args, num_extracted_files_per_output_file\n ),\n ):\n output_filepath = join(\n output_dir,\n f\"{dataset_name}-{str(i + 1).zfill(num_output_files_str_len)}.txt\",\n )\n with open(output_filepath, \"w\", encoding=\"utf8\") as file:\n for j, result in enumerate(\n tqdm(\n pool.imap_unordered(\n process_wiki_file,\n mp_args,\n ),\n total=num_extracted_files_per_output_file,\n )\n ):\n if j > 0:\n file.write(\"\\n\")\n file.writelines(result)",
"def transform_document(output_file, url_entities, uris_urls, line_doc, mapping):\n doc_soup = BeautifulSoup(line_doc.decode(\"utf-8\").strip(), \"lxml\")\n doc_url = doc_soup.doc[\"url\"]\n doc_title = doc_soup.doc.find(\"h1\", {\"id\": \"title\"}).replaceWith(\n \" {}. \".format(TITLE_CHANGE_TOKEN))\n doc_links = doc_soup.doc.findAll(\n lambda tag: tag.name == \"a\" and \"href\" in tag.attrs)[:]\n for a in doc_soup.doc.findAll(\n lambda tag: tag.name == \"a\" and \"href\" in tag.attrs):\n a.replaceWith(\" {} \".format(LINK_CHANGE_TOKEN))\n document = doc_soup.doc.text\n sentences = sent_tokenize(document)\n\n for sentence in sentences:\n token_idx = 0\n sentence = sentence.replace(\"\\xa0\", \" \").replace(\n \"{}.\".format(TITLE_CHANGE_TOKEN), TITLE_CHANGE_TOKEN)\n for token in word_tokenize(sentence):\n if token == LINK_CHANGE_TOKEN:\n token_tag = doc_links.pop(0)\n\n try:\n entity = url_entities[uris_urls[token_tag[\"href\"]]]\n except KeyError:\n entity = None\n except BaseException as e:\n print(\"Document {} had unexpected exception for token {}: {}\".format(\n doc_url, token_idx, e), file=sys.stderr)\n entity = None\n\n token_idx = write_link_token(token_idx, entity, token_tag,\n output_file, mapping)\n elif token == TITLE_CHANGE_TOKEN:\n entity = None\n if doc_url in url_entities:\n entity = url_entities[doc_url]\n token_idx = write_title_token(token_idx, doc_title, entity,\n output_file, mapping)\n else:\n token_idx += 1\n print(\"{}\\t{}\\tO\\tO\\tO\".format(token_idx, token).encode(\"utf-8\"),\n file=output_file)\n\n print(\"\", file=output_file)",
"def extract_sentences_to_file(infile, outfname:str):\n out = open(outfname, 'x')\n\n linegen = extract_lines(infile)\n\n for line in linegen:\n out.write(line + \"\\n\")\n\n out.close()",
"def divide_url_all():\n\tf = open(\"url_all.txt\", \"r+\")\n\turl_amount = 0\n\tfile_num = 1\n\tline = f.readline()\n\tsub_f = open(\"url_\"+str(file_num)+\".txt\", \"w+\")\n\twhile(line != \"\"):\n\t\t#print (\"line : \" + line )\n\t\turl_amount += 1\n\t\tsub_f.write(line)\n\t\tif url_amount > 33999:\n\t\t\tsub_f.close()\n\t\t\turl_amount = 0\n\t\t\tfile_num += 1\n\t\t\tsub_f = open(\"url_\"+str(file_num)+\".txt\", \"w+\")\n\t\tline = f.readline()\n\tsub_f.close()\n\treturn file_num",
"def write_tok_to_file(self):\n dir_path = os.path.join(self.output_path, 'tokens')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n for dataset_name, dataset in self.amr_corpus.items():\n f = open(os.path.join(dir_path, dataset_name + '_tok.txt'), 'w')\n for doc_name, doc in dataset.items():\n for amr_id, amr_data in doc.items():\n amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']\n if not amr_strings:\n continue\n tok = ' '.join(self.amr_corpus[dataset_name][doc_name][amr_id]['tok'])\n f.write(tok + '\\n')\n f.close()",
"def fetch_words(url):\n\n with urlopen(url) as story:\n storyWords = []\n for line in story:\n words = line.split()\n for word in words:\n storyWords.append(word.decode('utf-8'))\n return storyWords",
"def read_urls(filename):\n # +++your code here+++\n\n res=utility(filename)\n for i in res:\n \tprint i",
"def fetch_words(filename):\n data = [] #empty list\n with urlopen(filename) as story:\n for line in story:\n words = line.decode('utf-8').split() #must decode into strings and then separate with spaces\n #print(lists)\n for word in words:\n data.append(word)\n return(data)",
"def record_to_file(url, filename):\n\tcontents = []\n\tif os.path.exists(filename):\n\t\tcontents = open(filename, \"r\").readlines()\n\ts = set(contents)\n\ts.add(\"%s\\n\" % url)\n\topen(filename, \"w\").writelines(s)",
"def index_file(self, file_name):\n self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()",
"def create_tokens_li():\n cnt=0\n for file in docs:\n file_name = open(\"./corpus/\"+ str(file) + \".txt\")\n print(cnt)\n cnt+=1\n words = file_name.read()\n tokens_doc = nltk.word_tokenize(words)\n tokens_doc = [w.lower() for w in tokens_doc]\n #tokens_doc = [snowball_stemmer.stem(token) for token in tokens_doc]\n tokens_doc = [token for token in tokens_doc if token not in nltk.corpus.stopwords.words('english')]\n tokens_li.append(tokens_doc)\n\n\n #storing in json file\n with open('savers/tokens.json', 'w') as fp:\n json.dump(tokens_li, fp)",
"def create_words_file(source_url, words_filename, start_pt = \"\", end_pt = \"\", do_refresh = False):\n # if it already exists, and no need to refresh it, then\n # just do nothing\n if(os.path.exists(words_filename)\n and os.path.getsize(words_filename) > 1000\n and do_refresh == False\n ):\n return\n # else, re-download, and re-make\n logger.info(\"Downloading text from %s\" % source_url)\n text = requests.get(source_url).text.upper()\n a = text.index(start_pt)\n b = text.index(end_pt)\n words = re.findall(\"[A-Z]+\", text[a:b])\n wordcounts = {}\n for word in words:\n if word not in wordcounts:\n wordcounts[word] = 1\n else:\n wordcounts[word] += 1\n\n with open(words_filename, \"w\") as f:\n for word, x in sorted(wordcounts.items()):\n f.write('%s,%s\\n' % (word, x))\n logger.info(\"%s is %s bytes\" % (words_filename, os.path.getsize(words_filename)))\n # return the path to the file\n return words_filename",
"def load_words(filename):\n url = codeskulptor.file2url(filename)\n word_file = urllib2.urlopen(url)\n \n all_words = []\n for line in word_file.readlines():\n all_words.append(line.strip())\n \n \n return all_words",
"def downloadSchemaFiles(self, outputFile, url):\n thingsFileFromUrl = urllib.request.urlopen(url)\n data = thingsFileFromUrl.read()\n with open(outputFile, 'w+') as output:\n output.write(data.decode('utf-8'))\n return outputFile",
"def tokenize_with_sentencepiece(in_file, out_file, sp_model_filename):\n sp = sentencepiece.SentencePieceProcessor()\n sp.load(sp_model_filename)\n packer = msgpack.Packer()\n for line in in_file:\n ids = sp.encode_as_ids(line.rstrip())\n out_file.write(packer.pack(ids))"
] |
[
"0.62645054",
"0.58563197",
"0.5821562",
"0.57902795",
"0.57028097",
"0.56542486",
"0.56383413",
"0.56162834",
"0.55391157",
"0.55385387",
"0.54912",
"0.5453684",
"0.54283243",
"0.53994155",
"0.5358935",
"0.53502476",
"0.53414875",
"0.5339555",
"0.5327423",
"0.5316142",
"0.53147036",
"0.53058076",
"0.52684015",
"0.5260783",
"0.52517736",
"0.52243763",
"0.51919425",
"0.5189383",
"0.5188894",
"0.51863885"
] |
0.65715915
|
0
|
The main program takes two arguments, input_file_path (a txt file) and output_file_path (a kml file), and converts the txt file to a kml file
|
def main():
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
gps_df = create_df(input_file_path) # creates a data frame
gps_df = clean_data(gps_df) # cleans the data
print('Cleaning done')
write_to_kml(gps_df, output_file_path) # writes to kml file
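A hedged command-line sketch; the script name is hypothetical, and create_df, clean_data, and write_to_kml are the helpers referenced above.
# Assumed invocation from the shell (script name is an assumption):
#   python txt_to_kml.py gps_points.txt gps_track.kml
if __name__ == '__main__':
    main()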
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main(input_filepath, output_model_filepath):\n logger = logging.getLogger(__name__)\n logger.info('training hotel cluster embeddings models')\n\n input_file = os.path.join(input_filepath, 'sentences.pkl')\n output_model_file = os.path.join(output_model_filepath, 'hotelcluster2vec.bin')\n\n train(input_file, output_model_file)",
"def tdump2kml(inputDir):\n # Check inputdir\n if not os.path.exists(inputDir):\n print(\"Entered directory is invalid.\")\n sys.exit()\n\n os.chdir(inputDir)\n\n # Main loop\n for run in os.walk('.').next()[1]:\n\n os.chdir(run)\n\n # Filter tdump files\n files = glob.glob(\"*.tdump\")\n\n # Conversion\n for entry in files:\n p = subprocess.Popen(\"C:\\\\hysplit4\\\\exec\\\\trajplot.exe -i%s -o%s.ps -a3 -v1 -l1\" % \\\n (entry, entry), shell=True, stdout=subprocess.PIPE)\n p.wait()\n os.remove(entry[:-6])\n #p_out = p.communicate()\n #print p_out[0], p_out[1]\n\n # Move all kmls into dir kmls\n #sys.stdout.flush()\n kmls = glob.glob(\"*.kml\")\n\n if not os.path.exists(\"kmls\"):\n os.makedirs(\"kmls\")\n\n for kml in kmls:\n os.rename(kml, \"kmls\\\\%s\" % kml)\n\n # Remove redundant ps files\n pss = glob.glob(\"*.ps\")\n\n for ps in pss:\n os.remove(ps)\n\n print \"DONE : %s %s\\kmls\" % (run, os.getcwd())\n os.chdir('../')",
"def open_airspace_format_2_kml(self, source_file_txt):\n # load template for kml file\n self.load_kml_template(self.full_path_kml_template)\n # load airspace source\n self.load_airspace_open_air_format(source_file_txt)\n\n self.kml_lines = self.kml_template['header']\n self.kml_lines.extend(self.kml_template['good_subdivided']['head'])\n # collect all A and B kml lines\n kml_A = []\n kml_B = []\n # transform airspaces and attach to A and B collect-lists\n for airspace in self.airspaces:\n airspace.make_kml_format(self.kml_template)\n if airspace.as_type == 'A':\n kml_A.extend(airspace.kml_lines)\n if airspace.as_type == 'B':\n kml_B.extend(airspace.kml_lines)\n\n self.kml_lines.extend(kml_A)\n self.kml_lines.extend(self.kml_template['good_subdivided']['tail'])\n # start B part\n self.kml_lines.extend(self.kml_template['bad_subdivided']['head'])\n self.kml_lines.extend(kml_B)\n self.kml_lines.extend(self.kml_template['bad_subdivided']['tail'])\n\n full_path_kml = source_file_txt[:-4] + '_converted.kml'\n # uisave dialog\n full_path_kml = filesavebox(default=full_path_kml, filetypes=\"*.kml\")\n if full_path_kml is None:\n print('Airspace conversion was aborted by the user')\n quit()\n\n # write to file\n f = open(full_path_kml, 'w')\n f.writelines(self.kml_lines)\n f.close()\n print('Resulting KML files was saved to: %s' % full_path_kml)",
"def write(file_path, kml_str):\n\n fa.text_writer(file_path, kml_str)",
"def keyholemarkup2x(file,output='df'):\n r = re.compile(r'(?<=\\.)km+[lz]?',re.I)\n try:\n extension = r.search(file).group(0) #(re.findall(r'(?<=\\.)[\\w]+',file))[-1]\n \n \n except IOError as e:\n logging.error(\"I/O error {0}\".format(e))\n if (extension.lower()=='kml') is True:\n buffer = file\n elif (extension.lower()=='kmz') is True:\n kmz = ZipFile(file, 'r')\n \n vmatch = np.vectorize(lambda x:bool(r.search(x)))\n A = np.array(kmz.namelist())\n sel = vmatch(A)\n buffer = kmz.open(A[sel][0],'r')\n \n else:\n raise ValueError('Incorrect file format entered. Please provide the '\n 'path to a valid KML or KMZ file.') \n \n \n parser = xml.sax.make_parser()\n handler = PlacemarkHandler()\n parser.setContentHandler(handler)\n parser.parse(buffer)\n \n try:\n kmz.close()\n except:\n pass\n \n df = pd.DataFrame(handler.mapping).T\n names = list(map(lambda x: x.lower(),df.columns))\n if 'description' in names:\n extradata = df.apply(PlacemarkHandler.htmlizer,axis=1)\n df = df.join(extradata)\n \n \n output = output.lower()\n \n if output=='df' or output=='dataframe' or output == None:\n result = df\n \n elif output=='csv':\n out_filename = file[:-3] + \"csv\"\n df.to_csv(out_filename,encoding='utf-8',sep=\"\\t\")\n result = (\"Successfully converted {0} to CSV and output to\"\n \" disk at {1}\".format(file,out_filename))\n \n elif output=='gpd' or output == 'gdf' or output=='geoframe' or output == 'geodataframe':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n result = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n \n \n elif output=='geojson' or output=='json':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n try:\n import geojson\n except ImportError as e:\n raise ImportError('This operation requires geojson. {0}'.format(e))\n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n out_filename = file[:-3] + \"geojson\"\n gdf.to_file(out_filename,driver='GeoJSON')\n validation = geojson.is_valid(geojson.load(open(out_filename)))['valid']\n if validation == 'yes':\n \n result = (\"Successfully converted {0} to GeoJSON and output to\"\n \" disk at {1}\".format(file,out_filename))\n else:\n raise ValueError('The geojson conversion did not create a '\n 'valid geojson object. Try to clean your '\n 'data or try another file.')\n \n elif output=='shapefile' or output=='shp' or output =='esri shapefile':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. 
{0}'.format(e))\n \n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n \n try:\n import shapefile\n except ImportError as e:\n raise ImportError('This operation requires pyshp. {0}'.format(e))\n \n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n out_filename = file[:-3] + \"shp\"\n gdf.to_file(out_filename,driver='ESRI Shapefile')\n sf = shapefile.Reader(out_filename)\n import shapefile\n sf = shapefile.Reader(out_filename)\n if len(sf.shapes())>0:\n validation = \"yes\"\n else:\n validation = \"no\"\n if validation == 'yes':\n \n result = (\"Successfully converted {0} to Shapefile and output to\"\n \" disk at {1}\".format(file,out_filename))\n else:\n raise ValueError('The Shapefile conversion did not create a '\n 'valid shapefile object. Try to clean your '\n 'data or try another file.') \n else:\n raise ValueError('The conversion returned no data; check if'\n ' you entered a correct output file type. '\n 'Valid output types are geojson, shapefile,'\n ' csv, geodataframe, and/or pandas dataframe.')\n \n return result",
"def make_input_data_kmls(rundata):\n \n import os\n from . import topotools, dtopotools\n\n regions2kml(rundata, combined=False)\n gauges2kml(rundata)\n\n topofiles = rundata.topo_data.topofiles\n for f in topofiles:\n topo_file_name = f[-1]\n topo_type = f[0]\n topo2kml(topo_file_name, topo_type)\n \n dtopofiles = rundata.dtopo_data.dtopofiles\n for f in dtopofiles:\n dtopo_file_name = f[-1]\n dtopo_type = f[0]\n dtopo2kml(dtopo_file_name, dtopo_type)",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def convert(input_filename, output_filename):\n c_file = pkg_resources.resource_filename('ShapelyChipDesigns', 'convert.rb')\n os.system('klayout -z -rd input='+input_filename+' -rd output='+output_filename+' -r '+c_file)",
"def main():\n args = utils.read_arguments(__doc__)\n documents = []\n filenames = list(traverse_directory(args[\"input_dirpath\"],'*clean*.txt'))\n labels_dirname = args[\"labels_dirpath\"]\n labels_from_json = get_all_labels_from_json(labels_dirname)\n for filename in tqdm(filenames):\n with AnnotatedIBMFactory(filename) as instance_extractor:\n filename_key = filename.split(\"/\")[-1]\n document = instance_extractor.build_document(\n labels_from_json[filename_key])\n documents.append(document)\n utils.pickle_to_file(documents, args['output_file'])",
"def main():\n filepath = input(\"Enter the Source File: \")\n with open(filepath, encoding=\"utf-8\") as f:\n sentences = f.readlines()\n sentences = \" \".join(sentences)\n\n summary = summarize_sentences(sentences)\n\n filepath_index = filepath.find(\".txt\")\n outputpath = filepath[:filepath_index] + \"_lexRank.txt\"\n\n with open(outputpath, \"w\") as w:\n for sentence in summary:\n w.write(str(sentence) + \"\\n\")",
"def main(input_filepath: str = \"./data\",\n output_filepath: str = \"./data\") -> None:\n logger = logging.getLogger(__name__)\n logger.info(\"making final data set from raw data\")\n\n raw_data_dir = path.abspath(input_filepath)\n if path.isdir(raw_data_dir):\n\n processed_data_dir = path.abspath(output_filepath)\n\n logger.info(\"start\")\n filenames = [\"train.txt\", \"valid.txt\", \"test.txt\"]\n create_index(filenames, raw_data_dir, processed_data_dir)\n prepare_datasets(filenames, raw_data_dir, processed_data_dir)\n\n else:\n logger.info(\"File or directory does not exist\")\n\n logger.info(\"finished\")",
"def run():\n assert os.path.exists(args.input_path), \"input_path doesn't exist\"\n assert os.path.exists(args.output_path), \"output_path doesn't exist\"\n\n # read all the paths to the input documents\n doc_files = []\n for root, dirs, files in os.walk(args.input_path):\n for file in files:\n if not file.endswith('gz') and not file.endswith('xml'):\n continue\n doc_files.append(os.path.join(root, file))\n print('{} medline files found from {}'\n ''.format(len(doc_files), args.input_path))\n\n print('converting...')\n pool = Pool(processes=args.num_workers)\n total_doc = 0\n total_batch = 0\n total_empty = 0\n for d, b, n in tqdm(pool.imap_unordered(partial(convert), doc_files),\n total=len(doc_files)):\n total_doc += d\n total_batch += b\n total_empty += n\n\n print('total docs: {}, total batches: {} created (empty doc {})'\n ''.format(total_doc, total_batch, total_empty))",
"def predictFromFile(repoClassifier, strFileInput):\n #Checks file exists and txt file\n if os.path.exists(strFileInput) & string_operation.validate_txtfile(strFileInput):\n file = open(strFileInput, 'r')\n\n strReadFileDirectory = os.path.dirname(strFileInput)\n strReadFileName = os.path.basename(strFileInput)\n\n print(strReadFileName + 'was read successfully')\n strFileClassified = \"classified_\" + strReadFileName\n\n writeClassifiedTxtFile(file, strReadFileDirectory, strFileClassified, repoClassifier)\n else:\n print(\"File could no be read. Make sure you have permission or entered correct File (txt)\")",
"def kmlWriter(output_data, output_dir, output_name):\n msg = 'Writing ' + output_name + ' KML output.'\n print '[+]', msg\n logging.info(msg)\n # Instantiate a Kml object and pass along the output filename\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if 'Latitude' in exif.keys() and 'Latitude Reference' in exif.keys() and 'Longitude Reference' in exif.keys() and 'Longitude' in exif.keys():\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'], description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output_dir, output_name))",
"def main(args):\n bad_words_file = codecs.open(args.language + \"/feature_files/bad_words\", \"r\", \"utf-8\").readlines()\n bad_words = read_known_words(bad_words_file)\n \n good_words_file = codecs.open(args.language + \"/feature_files/good_words\", \"r\", \"utf-8\").readlines()\n good_words = read_known_words(good_words_file)\n\n curse_words_file = codecs.open(args.language + \"/feature_files/curse_words\", \"r\", \"utf-8\").readlines()\n curse_words = read_known_words(curse_words_file)\n\n prepositions_file = codecs.open(args.language + \"/feature_files/prepositions\", \"r\", \"utf-8\").readlines()\n prepositions = read_known_words(prepositions_file)\n\n determiners_file = codecs.open(args.language + \"/feature_files/determiners\", \"r\", \"utf-8\").readlines()\n determiners = read_known_words(determiners_file)\n\n syllables_file = codecs.open(args.language + \"/feature_files/syllables\", \"r\", \"utf-8\").readlines()\n syllable_structure = read_syllables_file(syllables_file)\n\n other_feature_files = glob.glob(args.language + \"/feature_files/*.txt\")\n other_features = set_features_from_files(other_feature_files)\n \n ermaObj = ConllToErma(args, bad_words, good_words, curse_words, prepositions, \\\n determiners, syllable_structure, other_features)\n\n if not args.just_test:\n # Input training file.\n train_id = open(args.train, \"r\")\n train = train_id.readlines()\n train_id.close()\n sys.stdout.write(\"Reading training file...\\n\")\n (train_features, train_skip_chains) = ermaObj.read_conll_file(train)\n sys.stdout.write(\"Building model...\\n\")\n train_hash = ermaObj.make_nodes(train_features)\n # Freeze the known features based on what's seen in the training data\n ermaObj.cutoff_features()\n else:\n train_hash = {}\n train_skip_chains = {}\n # Input testing file.\n test_id = open(args.test, \"r\")\n test = test_id.readlines()\n test_id.close()\n sys.stdout.write(\"Reading test file...\\n\")\n (test_features, test_skip_chains) = ermaObj.read_conll_file(test)\n sys.stdout.write(\"Building model...\\n\")\n test_hash = ermaObj.make_nodes(test_features, test=True)\n ermaObj.write_out(train_hash, train_skip_chains, test_hash, test_skip_chains)",
"def main(argv):\n logging.basicConfig(level=logging.INFO)\n try:\n opts, args = getopt.getopt(argv, \"m:i:o:\", [\"model_path=\", \"input_file_path=\", \"output_file_path=\"])\n except getopt.GetoptError:\n logging.error('wrong command line param')\n sys.exit(2)\n\n model_path = ''\n input_file_path = ''\n output_file_path = ''\n for opt, arg in opts:\n if opt in (\"-m\", \"--model_path\"):\n logging.info(f\"Arg Supplied: {arg}\")\n model_path = arg\n elif opt in (\"-i\", \"--input_file_path\"):\n logging.info(f\"Arg Supplied: {arg}\")\n input_file_path = arg\n elif opt in (\"-o\", \"--output_file_path\"):\n logging.info(f\"Arg Supplied: {arg}\")\n output_file_path = arg\n else:\n logging.error('Wrong param.')\n sys.exit(2)\n\n logging.info(f\"Model path provided: {model_path}\")\n logging.info(f\"Input file: {input_file_path}\")\n logging.info(f\"Output file: {output_file_path}\")\n\n translator = ctranslate2.Translator(model_path)\n output = translator.translate_batch(\n source=get_input_sentence_list(input_file_path),\n beam_size=4,\n num_hypotheses=1,\n return_scores=True\n )\n output_sentences = []\n output_scores = []\n for ele in output:\n sentence = \" \".join(ele[0]['tokens'])\n # print(sentence)\n score = str(ele[0]['score'])\n # formatted_output = f\"{sentence}|{score}\"\n # print(formatted_output)\n output_sentences.append(sentence)\n output_scores.append(score)\n\n # write output (sentences and scores, index should be preserved).\n output_file = open(output_file_path, \"w\", encoding=\"utf-8\")\n output_file.write(\"\\n\".join(output_sentences))\n output_score_file = open(output_file_path.replace(\".txt\", \"_score_only.txt\"), \"w\", encoding=\"utf-8\")\n output_score_file.write(\"\\n\".join(output_scores))\n\n # be a good citizen and close file after write :)\n output_score_file.close()\n output_file.close()",
"def main(file):\n\n # Get the current working directory.\n here = os.getcwd()\n #Need the file_name to set globe, so that other functions can access to it.\n global file_name\n # Spite the Input into file_path and file_name.\n file_path = spilt_path(file)[0]\n file_name = spilt_path(file)[1]\n\n # Try to get into the file_path, if exist\n try:\n os.chdir(file_path)\n except IOError, e:\n print e\n\n # Now convert it\n convertFile(file_name)\n # going back to orgin folder\n os.chdir(here)\n return os.path.join(output_dir, file_name)",
"def main():\n\n #Initiate argument parser\n parser = argparse.ArgumentParser(\n description=\"LabelMe TensorFlow XML-to-CSV converter\"\n )\n parser.add_argument(\n \"-c\",\n \"--csvInput\",\n help=\"Path to the labels.csv file\",\n type=str,\n )\n\n parser.add_argument(\n \"-l\",\n \"--labelMap\",\n help=\"Path to the label_map.pbtxt file\",\n type=str,\n )\n\n parser.add_argument(\n \"-i\",\n \"--images\",\n help=\"Path to image folder\",\n type=str,\n )\n\n parser.add_argument(\n \"-o\",\n \"--outputFile\",\n help=\"Path to output TFRecord file\",\n type=str\n )\n\n args = parser.parse_args()\n\n #If no input args are given use current working directory\n if args.csvInput is None:\n args.csvInput = os.getcwd() + \"/labels.csv\"\n if args.labelMap is None:\n args.labelMap = os.getcwd() + \"/label_map.pbtxt\"\n if args.images is None:\n args.images = os.getcwd()\n if args.outputFile is None:\n args.outputFile = os.getcwd() + \"/train.record\"\n\n #check if input paths exists\n assert os.path.isdir(args.images)\n assert os.path.isfile(args.csvInput)\n assert os.path.isfile(args.labelMap)\n\n #Initiate TFRecordWriter\n writer = tf.io.TFRecordWriter(args.outputFile)\n \n #Read labels from .csv into pd dataframe\n labels = pd.read_csv(args.csvInput)\n\n #Load the `label_map` from pbtxt file.\n label_map = label_map_util.load_labelmap(args.labelMap)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=90, use_display_name=True\n )\n category_index = label_map_util.create_category_index(categories)\n label_map = {} #Dict resolving class name to class id\n for k, v in category_index.items():\n label_map[v.get(\"name\")] = v.get(\"id\")\n\n #Group labels dataframe by filename\n grouped = split(labels, \"filename\")\n\n #for each filename\n for group in grouped:\n #create a tf_example for each image including all labels\n tf_example = create_tf_example(group, args.images, label_map)\n writer.write(tf_example.SerializeToString())\n\n #Close TFRecordWriter and save to file\n writer.close()\n output_path = os.path.join(os.getcwd(), args.outputFile)\n print(\"Successfully created the TFRecords: {}\".format(args.outputFile))",
"def transform_file(self, input_fn, overwrite=False):\r\n out_fn = input_fn + '.dk'\r\n if not os.path.exists(out_fn) or \\\r\n os.stat(out_fn).st_size == 0 or overwrite:\r\n\r\n with open(out_fn, 'w') as fout:\r\n for line in open(input_fn):\r\n LL = line.split('\\t')\r\n if len(LL) == 3:\r\n entry0 = self.transform(LL[0])\r\n entry1 = self.transform(LL[1])\r\n fout.write(entry0 + '\\t' + entry1 + '\\t' + LL[2])\r\n return out_fn",
"def process(text, output_dir, file_name, json_output):\n\t\n\t# Process HTML\n\tprocessed_text_html = process_html(text)\n\t# Write processed HTML output \n\t#pre_proc.create_text_file(output_dir + \"/html_\" + file_name + \"_pre.html\", processed_text_html)\n\n\t# Convert HMTL to MD\n\ttext_md = pre_proc.extract_text_md(processed_text_html)\n\n\t# Process MD\n\tprocessed_text_md = process_md(text_md)\n\t\n\tif(json_output):\n\t\t# Convert MD to JSON\n\t\tprocessed_json = pre_proc.convert_md_to_json(processed_text_md, file_name)\n\t\t# Write processed JSON output \n\t\tpre_proc.create_binary_file(output_dir + \"/\" + file_name + \".json\", processed_json)\n\telse:\n\t\t# Write processed MD output \n\t\tpre_proc.create_text_file(output_dir + \"/\" + file_name + \".md\", processed_text_md)",
"def main(nlp, file_path, final_file_path, from_line=0, to_line=None):\n with open(final_file_path, \"w\") as parsed_file:\n with open(file_path) as cnn_dm:\n line = cnn_dm.readline().strip()\n article_idx = 0\n while article_idx < from_line:\n line = cnn_dm.readline().strip()\n article_idx += 1\n if to_line is None:\n while line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{} articles processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()\n else:\n while article_idx < to_line and line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{}th article processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()",
"def process(self):\n # Opening and preprocessing of the input file\n if self.options.mbtiles_fromdisk or self.options.mbtiles_todisk:\n if self.options.mbtiles_fromdisk:\n i_parm=10\n if self.options.mbtiles_todisk:\n i_parm=11\n if self.options.verbose:\n print \"GDAL2MbTiles :mbtiles from/to disk [\",i_parm,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"]\"\n self.mbtiles_setup(i_parm)\n return\n else:\n if self.options.verbose:\n print \"GDAL2MbTiles :tile creation mbtiles[\",self.options.mbtiles,\"]\"\n self.open_input()\n # Generation of main metadata files and HTML viewers\n self.generate_metadata()\n # Generation of the lowest tiles\n self.generate_base_tiles()\n # Generation of the overview tiles (higher in the pyramid)\n self.generate_overview_tiles()\n # Generating of KML\n self.generate_kml()",
"def main(input_filepath, output_filepath):\n\n logging.info(\"reading %s\", input_filepath)\n train_test = pd.read_hdf(input_filepath, 'train_test')\n meta = pd.read_hdf(input_filepath, 'meta')\n meta_org = pd.read_hdf(input_filepath, 'meta_org')\n\n sel_series = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\\\n ['series_id'].unique()\n train_series, validate_series = train_test_split(sel_series, random_state=1)\n\n logging.info(\"calc train_test\")\n train_test = calc_final_features(train_test, meta, meta_org=meta_org, verbose=True)\n\n sel = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\n train = sel[sel.series_id.isin(train_series)]\n validate = sel[sel.series_id.isin(validate_series)]\n test = train_test[train_test.entry_type.isin(['test'])]\n\n logging.info(\"writing %s\", output_filepath)\n train.to_hdf(output_filepath, \"train\", mode=\"w\")\n validate.to_hdf(output_filepath, \"validate\", mode=\"a\")\n test.to_hdf(output_filepath, \"test\", mode=\"a\")\n for k in ['meta', 'submission']:\n df = pd.read_hdf(input_filepath, k)\n df.to_hdf(output_filepath, k, mode=\"a\")",
"def importKML(filepath):\n\tf = open(filepath, 'r')\n\tstr = f.read()\n\treturn etree.fromstring(str)",
"def main(training_file_name):\n attribute, inverse = build_classifier(training_file_name)\n trained_file = open(TRAINED_FILE_NAME, mode='w')\n prolog(trained_file)\n write_body(trained_file, attribute, inverse)\n epilog(trained_file)",
"def convert_to_kbest_format(infname,outfname,k_str):\n k=int(k_str)\n with codecs.open(outfname,'w','utf-8') as outfile:\n for sent_no, parsed_lines in iterate_nbest_list(infname): \n for i in xrange(0,k): \n outfile.write( u'{} ||| {} ||| {} ||| {}\\n'.format( *parsed_lines[i] ) )",
"def main():\n\tfl = '/home/rupesh20/ProjectFinal/IITB/conv.txt'\n\tPconv = '/home/rupesh20/ProjectFinal/IITB/prevconv.txt'\n filename = '/home/rupesh20/ProjectFinal/IITB/NEw.txt'\n outfile = '/home/rupesh20/ProjectFinal/IITB/Demo.txt'\n outputfile ='/home/rupesh20/ProjectFinal/IITB/Test.txt'\n\n\t\"\"\" \n\t\tOther comments are for debugging.\n\t\"\"\"\n #filename = '/home/rupesh20/ProjectFinal/final/en_US/en_US.blogs.txt'\n #newfilename = '/home/rupesh20/ProjectFinal/IITB/NEw.txt'\n #file = open(filename,'r')\n #newfile = open(newfilename,'w')\n\n #text = file.read()\n #file.close()\n #words = text.split()\n\t\n #new = []\n #new=lowercase(words,0,new)\n #new=url(new,0)\n #new=unwanted(new)\n #new=DictLook(new)\n\t\t\n\twith open(Pconv) as f:\n \t\tlines = f.readlines()\n\t\"\"\" \n\t\tlines : contains list of strings in english for conversion \n\t\"\"\"\n #Writer(new,newfile)\n #newfile.close()\n\n\t\"\"\" \n\t\tObjects for files are opened\n\t\"\"\"\n\ttxt = open(fl,'w')\n Tobj1=open(outputfile,'w')\n #Ttext1=Tobj1.read()\n #Tobj1.close()1\n Nobj=open(filename,'r')\n Ntext=Nobj.read()\n Nobj.close()\n Nobj1=open(outfile,'r')\n Ntext1=Nobj1.read()\n Nobj1.close()\n\t\"\"\" \n\t\tSentences are Tokenized \n\t\"\"\"\n sentences=sent_tokenize(Ntext)\n testSentences=sent_tokenize(Ntext1)\n \n Csize=0\n\t\"\"\" \n\t\tAfter Tokenize the text in sentences \n\t\tWe have calculated the Ngram \n\t\t1,2 3,4,5 grams\n\t\tGram1 list contains uni-gram\n\t\t||ly Gram2,3,4,5 contains further grams\n\t\t\n\t\"\"\"\n for sent in sentences:\n for j in xrange(1,6):\n Csize= Ngram(sent,j,Csize)\n print Csize \n FreqCount(Gram)\n for sent in testSentences:\n TestNgram(sent,5)\n #print Gram5\n #print Sliced(x)\n \n\t\"\"\"\" \n\t\tCalcScore : function to ecaluate the score using the \n\t\t\t stupid backoff algo\n\t\toutput file : generated after the calcscore module , sent : Score\n\t\t\t\n\t\"\"\"\n\tCalcScore(Csize,Tobj1)\n\n\t\"\"\" \n\t\tProcesscontent : module is for Alignment, S-O-V \n\t\t\t\tdetermization.\n\t\n\t\"\"\"\n\n\t\"\"\" \n\t\tbelow code translate, del list[:]( delete whole list )\n\t\n\t\"\"\"\n\tfor line in lines:\n\t\ttemp = []\n\t\ttemp.append(line)\t\n\t\tprocessContent(temp)\n\t\tfor i in Subj:\n\t\t\tBilingDict(txt,i)\n\t\tfor i in obj:\n\t\t\tBilingDict(txt,i)\n\t\tfor i in verb:\n\t\t\tBilingDict(txt,i)\t\n\t\tdel Subj[:]\n\t\tdel verb[:]\n\t\tdel obj[:]\n\t\t\n\t\ttxt.write('\\n')\n\n\t#print Subj[0] \n\t#print verb\n\t#print obj\n\t\"\"\" \n\t\tBilingdict : module contain JSON lookup table.\n\t\t\t\tgenrates a file with HINDI text.\n\n\t\"\"\"\n\t\t\t \n\t#print StupidBackoff(x,Sliced(x),Csize)\n #print CountGram1\n #print CountGram2\n #print CountGram3\n #print CountGram4\n #freq.plot(50)",
"def process(kml_file, kmz=False):\n\ttry:\n\t\tif kmz:\n\t\t\tzipped = zipfile.ZipFile(kml_file)\n\t\t\tkml = Kml(zipped.open('doc.kml'))\n\t\telse: \n\t\t\tkml = Kml(open(kml_file))\n\texcept Exception as e:\n\t\tprint('Failed for %s: %s' % (kml_file, e))\n\telse:\n\t\tprint('FILE NAME: %s' % kml_file)\n\t\tif not is_duplicate(kml.as_dict(), collection): \n\t\t\t# try to update database AND\n\t\t\t# extract files to right place; if one\n\t\t\t# fails, undo the other:\t\n\t\t\ttry:\n\t\t\t\tcollection.insert_one(kml.as_dict())\n\t\t\texcept Exception as e:\n\t\t\t\tprint('Failed to update database with %s: %s' % (kml, e))\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tdest = 'static/kml/%s' % kml.uid\n\t\t\t\t\tif kmz:\n\t\t\t\t\t\tzipped.extractall(dest)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not os.path.exists(os.path.dirname(dest)): os.makedirs(os.path.dirname(dest))\n\t\t\t\t\t\tshutil.copy(kml_file, '%s/doc.kml' % dest)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint('Failed to extract files: %s\\n\\tTrying to remove record from database...' % e)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcollection.remove(kml.as_json())\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint('Failed to remove item from database -- db is no longer consistent w/ file system: %s' % e)\n\tfinally:\n\t\tif kmz:\n\t\t\tzipped.close()\n\t\telse:\n\t\t\tkml.close()",
"def main():\n grammar_file = sys.argv[1]\n cfg_grammar = nltk.data.load(grammar_file)\n\n sentence_file = sys.argv[2]\n sentences = open(sentence_file, \"r\")\n sentences = sentences.readlines()\n\n output_file = sys.argv[3]\n with open(output_file, \"w\") as f:\n\n parser = PCKY(cfg_grammar)\n\n for sentence in sentences:\n tree = parser.parse(sentence)\n print(tree, file=f)"
] |
[
"0.6632069",
"0.6444081",
"0.6348496",
"0.6317812",
"0.6302665",
"0.6273071",
"0.6177123",
"0.6177123",
"0.61510634",
"0.6150506",
"0.6132754",
"0.6043515",
"0.58791876",
"0.5868927",
"0.58523566",
"0.5841822",
"0.5827696",
"0.5798412",
"0.57972443",
"0.5789804",
"0.5754245",
"0.572676",
"0.57136804",
"0.56919074",
"0.5687537",
"0.5661376",
"0.56288326",
"0.5613655",
"0.5591287",
"0.55858165"
] |
0.6799259
|
0
|
Reads in a txt file and creates a pandas DataFrame with columns for Latitude and its direction, Longitude and its direction, Quality and Dilution.
|
import numpy as np
import pandas as pd


def create_df(file_path):
columns = ['Lat', 'Lat_dir', 'Long', 'Long_dir', 'Quality', 'Dilution']
speeds = []
validity = []
rows = []
with open(file_path, encoding='utf-8', errors='ignore') as f:
for line in f.readlines()[5:]:
words = line.strip().split(",")
if len(words) == 0 or len(words) > 15:
continue
elif words[0] == "$GPGGA": # if line starts with GPGGA store lat, long, quality and dilution of precision
if len(rows) == 0:
row = [words[2], words[3], words[4], words[5], words[6], words[8]]
speeds.append(np.nan)
validity.append(np.nan)
rows.append(row)
else:
if rows[len(rows) - 1][0] is np.nan:
row = [words[2], words[3], words[4], words[5], words[6], words[8]]
rows[len(rows) - 1] = row
else:
row = [words[2], words[3], words[4], words[5], words[6], words[8]]
speeds.append(np.nan)
validity.append(np.nan)
rows.append(row)
elif words[0] == "$GPRMC": # if lines start with GPRMC store speed and validity
if len(rows) == 0:
row = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
speeds.append(float(words[7]) * 1.15078)
validity.append(words[2])
rows.append(row)
else:
if speeds[len(speeds) - 1] is np.nan:
speeds[len(speeds) - 1] = float(words[7]) * 1.15078
validity[len(speeds) - 1] = words[2]
else:
row = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
speeds.append(float(words[7]) * 1.15078)
validity.append(words[2])
rows.append(row)
else:
continue
gps_df = pd.DataFrame(rows, columns=columns)
gps_df['Speed'] = speeds # combine both speed and validity back to dataframe
gps_df['Validity'] = validity
return gps_df
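
A brief, hypothetical usage sketch (the log file name is illustrative; any NMEA text log whose first five lines are headers and whose remaining lines contain $GPGGA / $GPRMC sentences would do):

# Hypothetical usage: 'gps_log.txt' stands in for a real NMEA log file.
gps_df = create_df("gps_log.txt")
print(gps_df[["Lat", "Long", "Speed", "Validity"]].head())
print(gps_df["Quality"].value_counts())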
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data",
"def read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df",
"def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df",
"def load_vo_txt_raw(*, fname, sampling='1M'):\n # Set the day of month for time series depending on the MF sampling rate\n if sampling == '1M':\n day = 15\n elif sampling == '4M':\n day = 1\n\n # Positions given in degrees - co-latitude (0 to 180), longitude (\n df = pd.read_csv(fname, sep=\"\\s+\", header=14,\n names=[\"theta\", \"phi\", \"Year\", \"Month\", \"Time\", \"r\",\n \"Br\", \"Btheta\", \"Y\", \"sigma_r\", \"sigma_theta\",\n \"sigma_phi\", \"N_data\"], usecols=range(13))\n\n df[\"mjd2000\"] = mjd2000(df[\"Year\"], df[\"Month\"], day)\n df[\"dyear\"] = mjd_to_dyear(df[\"mjd2000\"], leap_year=True)\n df[\"X\"] = -df[\"Btheta\"] # -theta component\n df[\"Z\"] = -df[\"Br\"] # -radial component\n df.drop(columns=[\"Btheta\", \"Br\"], inplace=True)\n # To 00:00 on 1st or 15th each month\n # Multiplication by 10000 and 100 are needed to convert to datetime\n # (see documentation for pandas.datetime)\n df[\"date\"] = pd.to_datetime(df[\"Year\"]*10000+df[\"Month\"]*100+day,\n format=\"%Y%m%d\")\n\n return df",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def load_data(txt_path: str = RAW_TXT) -> pd.DataFrame:\n df = pd.read_csv(txt_path)[INDICES]\n return df",
"def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame",
"def create_df(filename):\n data = pd.read_csv(filename)\n data = data.dropna(axis='index')\n data['inc_angle'] = np.radians(data['inc_angle'])\n data = data.astype('float64')\n data = data[data['inc_angle'] <= np.deg2rad(80)]\n return data",
"def _get_position_data(file):\n return pd.read_csv(file)",
"def _read_txt(self, expected_col_names):\n\n try:\n # Read data\n data = pd.read_csv(self.source)\n\n # Check number of columns\n if data.shape[1] != len(expected_col_names):\n raise ValueError(\n \"Unexpected number of columns. Expected {}.\".format(\n len(expected_col_names)))\n # Check column names\n for item in data.columns:\n if item not in expected_col_names:\n raise ValueError(\"Unexpected column name. Expected:{}\"\\\n .format(expected_col_names))\n\n # Convert data\n for column in data.columns:\n data[column] = pd.to_numeric(data[column])\n\n # Generate output\n if self.coordinate_system == CoordinateSystem.GEOGRAPHIC:\n def generate_utm(row):\n return UtmCoordinate.create_from_geographic(\n row['latitude'],\n row['longitude'],\n row['elevation'])\n data['UTM'] = data.apply(generate_utm, axis=1)\n data['easting'] = data.apply(lambda row: row['UTM'].easting,\n axis=1)\n data['northing'] = data.apply(lambda row: row['UTM'].northing,\n axis=1)\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.UTM:\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.CARTESIAN:\n data['elevation'] = data['z'] # keeping return values consitent\n data['z'] = data['elevation'] - data['elevation'].min()\n\n else:\n raise ValueError('Unknown coordinate system.')\n\n selection = ['x', 'y', 'z', 'elevation']\n return data[selection]\n except Exception as exception:\n raise exception",
"def uadb_ascii_to_dataframe(file=''): \n \n if debug:\n print(\"Running uadb_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True) # TODO\n \n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n\n nmiss = 0\n search_h = False \n read_data = []\n \n usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n\n #usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd, iday, ident, numlev= 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n obs_id = 0\n stations_id = [] \n \n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n \n ident = int(line[15:21].replace(' ',''))# WMO\n if ident not in stations_id:\n stations_id.append(ident)\n \n #if len(ident) == 4:\n # ident = '0' + ident \n #idflag = int(line[22:24]) # id flag\n #d_src = int(line[25:28]) # source dataset\n #version = float(line[29:34]) # version\n #dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%02d\" % int(line[46:48])\n hour = line[49:53]\n #locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n #ele = float(line[79:85])\n #stype = int(line[86:88])\n numlev = int(line[89:93])\n #pvers = line[94:102]\n\n if '99' in hour:\n hour = hour.replace('99', '00')\n \n if '99' in day:\n search_h = True\n continue\n \n minutes = int(hour) % 100 \n hour = \"%02d\" % (int(hour) // 100)\n if minutes > 60 or minutes < 0:\n minutes = 0\n minutes = \"%02d\" % minutes\n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day)\n #pday = int(day)\n search_h = False\n\n except Exception as e:\n #print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n search_h = True\n #iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n #ltyp = int(line[0:4])\n p = float(line[5:13])\n \n if p != -99999.0 and p != 9999.9: \n press = float(line[5:13])*100 # converting to Pa, since P is given in mb (1 mb = 1 hPa) \n else:\n press = np.nan \n \n gph = float(line[14:22]) # gph [m]\n \n if gph == -999.0 or gph == -99999.00 or gph >= 99999.0:\n gph = np.nan\n \n temp = float(line[23:29])\n if temp == -999.0:\n temp = np.nan \n else:\n temp = temp + 273.15\n \n rh = float(line[30:36]) # %\n if rh == -999.0:\n rh = np.nan\n else:\n rh = rh / 100. 
# convert to absolute ratio TODO\n\n wdir = float(line[37:43]) \n if wdir == -999.0 or wdir == -999 :\n wdir = np.nan\n \n wspd = float(line[44:50]) # [m/s], module of the velocity\n if wspd <0 :\n wspd = np.nan \n \n try:\n \n for value,var in zip([ gph, temp, wspd, wdir, rh], [ 'gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity'] ):\n obs_id = obs_id +1\n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n else:\n z_type = -2147483648 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n \n except:\n 0\n \n \n \n #column_names = ['source_file', 'product_code', 'report_id', 'observation_id', 'report_timestamp' , 'iday', 'station_id', 'lat@hdr', 'lon@hdr', 'vertco_reference_1@body', 'obsvalue@body', 'varno@body' , 'units', 'number_of_pressure_levels' ]\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n #df['observations_id'] =numpy.char.zfill(numpy.arange(ivar.shape[0]).astype('S10'), 10)\n \n df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n #df['report_id'] = numpy.int64 (df['report_id'] ) \n #df['observation_id'] = numpy.int64 (df['observation_id'] ) \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n print('Done reading DF')\n return df , stations_id",
"def Read_Points_From_File(self, fileName):\n\n try:\n fp = open(fileName, 'r')\n\n origSys = pyproj.Proj(init=\"epsg:4326\")\n newSys = pyproj.Proj(init=\"epsg:2436\")\n\n for line, content in enumerate(fp):\n if line > 5:\n lineField = content.replace('\\n', '').split(',')\n lat = float(lineField[0])\n lon = float(lineField[1])\n #DEBUG ONLY\n #print 'lat: %f; lon: %f' % (lat, lon)\n\n x, y = pyproj.transform(origSys, newSys, lon, lat)\n # DEBUG ONLY\n #print 'x: %f; y: %f' % (x, y)\n alt = float(lineField[3])\n date = lineField[5]\n time = lineField[6]\n temp = (x, y, alt, date, time)\n self.__traectory_list.append(temp)\n\n print 'Complete Reading Trajectories.'\n\n fp.close()\n # Catch the error if the Input/Output related error found\n except IOError:\n print 'The file could not be read!'\n self.__traectory_list = []",
"def parse_data(self, path_to_file):\n\n line_dict, rel_dict = self.create_dicts(path_to_file)\n \n line_df = self.create_dataframe(line_dict, ['line'])\n rel_df = self.create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def read_from(self, filename):\n\n lon, lat, field, weight = [], [], [], []\n\n if os.path.exists(filename):\n logger.info(\"Reading data from file {0}\".format(filename))\n with open(filename, 'r') as f:\n line = f.readline()\n ncols = len(line.split())\n while ncols >= 3:\n lon.append(float(line.split()[0]))\n lat.append(float(line.split()[1]))\n field.append(float(line.split()[2]))\n if ncols >= 4:\n weight.append(float(line.split()[3]))\n else:\n weight.append(1.)\n line = f.readline()\n ncols = len(line.split())\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n self.field = np.array(field)\n self.weight = np.array(weight)\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')",
"def read_to_df(path):\n return pd.DataFrame.from_records(map(lambda x: typed_line(x, parse_normalized), read_lines(path, header=False)),\n columns=['user', 'item', 'rating'])",
"def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels",
"def read_file(file, visible_only=False) -> pd.DataFrame:\n\n # Get current working directory\n cd = os.getcwd()\n # Read in the vulture data\n df = None\n if file is None:\n df = pd.read_csv(cd+'/data/turkey_vultures.csv', low_memory=False)\n else:\n df = pd.read_csv(cd+'/'+file, low_memory=False)\n\n usefulDf = df[[\"visible\", # \"timestamp,\"\n \"location-long\", \"location-lat\",\n \"individual-local-identifier\"]].copy()\n\n if visible_only:\n return usefulDf[usefulDf[\"visible\"] is True]\n\n usefulDf = usefulDf.dropna(subset=[\"location-long\",\n \"location-lat\"], axis=0)\n return usefulDf",
"def Load_PositionFile(position_filename):\n positions = pd.read_table(position_filename, delimiter=',', header=None)\n positions.columns = ['x','y']\n return positions",
"def __init__(self, file_path: str):\n self._data: pd.DataFrame = self.read_input_and_split_tuples(file_path)",
"def load_data(file):\n if file == \"test\":\n file_path = '../data/day-4-test.txt'\n elif file == \"full\":\n file_path = '../data/day-4.txt'\n else:\n raise Exception('load_data() must take the input argument \"test\" or \"full\"')\n\n # read file\n with open(file_path) as f:\n lines = f.read().split(\"\\n\\n\")\n\n # turn into a dictionary, then a data frame\n f = lambda x: pd.DataFrame(list_to_dict(x.split()), index = [0])\n x = [f(x) for x in lines]\n return pd.concat(x, ignore_index=True, sort=True)",
"def read_data(filename):\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n \"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n \"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T",
"def from_multiscale_file(cls, file_path):\n topo_df = pd.read_csv(file_path, skiprows=5, header=None, delim_whitespace=True, names=['x','y','z'])\n Z_array = topo_df.pivot('y','x','z')\n # import pdb; pdb.set_trace()\n\n return cls(np.unique(topo_df['x']), np.unique(topo_df['y']), Z_array)\n # F1 = np.fft.fft2(Z_array.values)\n # F2 = np.fft.fftshift(F1)\n #\n # # Calculate 2D power spectrum\n # psd2D = np.abs(F2)**2\n #\n # # Calculate azimuthly averaged 1D power spectrum\n # psd1D = radialprofile.azimuthalAverage(psd2D)",
"def read_lexicon_into_df(lex_txt_file):\n data = []\n with open(lex_txt_file) as txtf:\n lines = txtf.readlines()\n for line in lines:\n root = re.search(r\"root='(.*?)'\", line).group(1)\n if root.startswith('0'):\n num_radicals = 3\n else:\n num_radicals = 4\n verb_class = re.search(r\"class='(.*?)'\", line).group(1)\n verb_type = re.search(r\"type='(.*?)'\", line).group(1)\n infinitive = re.search(r\"inf='(.*?)'\", line).group(1)\n languages = re.search(r\"lang='(.*?)'\", line).group(1)\n gloss = re.search(r\"gloss='(.*?)'\", line).group(1)\n\n data.append([root, num_radicals, verb_class, verb_type, infinitive, languages, gloss])\n\n lexicon_df = pd.DataFrame(data, columns=['root', 'num_radicals', 'class', 'type', 'infinitive', 'languages', 'gloss'])\n\n lexicon_df['root'] = lexicon_df['root'].str.replace(\"0\", \"\")\n lexicon_df = utify_chars(lexicon_df)\n lexicon_df.to_csv('babylex.csv')\n return lexicon_df",
"def read_txt(self, widths=[3, 21, 4, 6, 4, 6, 12, 12]):\n cols = ['ID', 'SSSSSSSS.mmmuuun', 'AMP', 'THR', 'A-FRQ', 'R-FRQ', 'SIG STRNGTH', 'ABS-ENERGY']\n\n widths = widths\n self.data = pd.read_fwf(self.data_file, widths=widths, header=None, skiprows=self.skip_rows)\n self.data.columns = cols\n\n self.data = self.data.loc[self.data['ID'] == 1]\n self.skip_rows += len(self.data)",
"def load_gps(filepath):\n gps = []\n\n with open(filepath) as gps_file:\n contents = gps_file.read()\n\n for sample in contents.splitlines():\n if sample.startswith(\"$GPRMC\"):\n gps_sample = parse_gps_sentence(sample)\n gps_dict = sentence_to_dict(gps_sample)\n gps.append(gps_dict)\n\n gps = pd.DataFrame(gps)\n\n sample_times = combine_date_and_time(gps.datestamp, gps.timestamp)\n sample_times = pd.DatetimeIndex(sample_times, tz=\"UTC\")\n gps[\"time\"] = sample_times\n gps.sort_values(by=\"time\", inplace=True)\n\n gps[\"lon\"] = gps.lon.apply(float)\n gps[\"lat\"] = gps.lat.apply(float)\n\n gps[\"lon\"] = gps.lon.apply(degree_minute_to_decimal)\n gps[\"lat\"] = gps.lat.apply(degree_minute_to_decimal)\n\n longitudes = []\n for _, (lon, lon_dir) in gps[[\"lon\", \"lon_dir\"]].iterrows():\n if lon_dir == \"E\":\n longitudes.append(lon)\n else:\n longitudes.append(-lon)\n\n latitudes = []\n for _, (lat, lat_dir) in gps[[\"lat\", \"lat_dir\"]].iterrows():\n if lat_dir == \"N\":\n latitudes.append(lat)\n else:\n latitudes.append(-lat)\n\n gps[\"lon\"] = longitudes\n gps[\"lat\"] = latitudes\n\n return gps",
"def parse_data(path_to_file):\n\n line_dict, rel_dict = create_dicts(path_to_file)\n \n line_df = create_dataframe(line_dict, ['line'])\n rel_df = create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def load_loss_GDF(filename, lon, lat):\n df = pd.read_csv(filename)\n x, y = np.meshgrid(lon, lat)\n coords = [Point(xval, yval) for xval, yval in zip(x.ravel(), y.ravel())]\n \n df['geometry'] = coords\n df = gpd.GeoDataFrame(df)\n df.crs = {'init': 'epsg:4326'}\n return df",
"def read_features(path):\n columns = [\"Feature\", \"Correlation\"]\n regex_str = '([^\\s]+)(?:\\s+corr=\\s+)([^\\n]+)'\n with open(path, 'r') as f:\n text = f.read()\n feature_tuples = re.compile(regex_str).findall(text)\n index = np.arange(len(feature_tuples)) + 1\n # features and models are 1-indexed\n feature_df = pd.DataFrame(index=index, \n data=feature_tuples, \n columns=columns)\n feature_df = feature_df.dropna()\n feature_df['Correlation'] = pd.to_numeric(feature_df['Correlation'])\n return feature_df",
"def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n \"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df",
"def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res"
] |
[
"0.6931486",
"0.6567923",
"0.60308295",
"0.60107195",
"0.58846587",
"0.58273214",
"0.5817731",
"0.5799361",
"0.57866096",
"0.5752707",
"0.57501096",
"0.56600416",
"0.5650866",
"0.56462586",
"0.5641389",
"0.5616411",
"0.5613479",
"0.5582875",
"0.5572268",
"0.5540163",
"0.55275834",
"0.55271155",
"0.55200964",
"0.55142295",
"0.5491657",
"0.54849756",
"0.5483183",
"0.5470603",
"0.5462363",
"0.54567707"
] |
0.6993391
|
0
|
Find the stack frame of the caller so that we can note the source file name, line number and function name.
|
def findCaller(cls):
    # Mirrors logging.Logger.findCaller: `currentframe` and `_srcfile` are the
    # logging module's own helpers (a sys._getframe wrapper and the normalized
    # path of the logging source file) in the original context, and `os` must
    # already be imported.
    f = currentframe()
    # On some versions of IronPython, currentframe() returns None if
    # IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back
    rv = "(unknown file)", 0, "(unknown function)"
    # Walk outward through the stack until the first frame that does not live
    # in the logging source file; that frame is the real call site.
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        if filename == _srcfile:
            f = f.f_back
            continue
        rv = (co.co_filename, f.f_lineno, co.co_name)
        break
    return rv
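
A minimal sketch of the same frame-walking idea using only the inspect and os modules; the helper names below (`caller_location`, `some_function`) are illustrative, not part of the snippet above:

import inspect
import os

def caller_location():
    # Report the file, line and function name of whoever called this helper,
    # analogous to the (filename, lineno, funcName) tuple returned above.
    frame = inspect.currentframe().f_back
    code = frame.f_code
    return os.path.normcase(code.co_filename), frame.f_lineno, code.co_name

def some_function():
    return caller_location()

print(some_function())  # -> (<this file>, <line of the call in some_function>, 'some_function')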
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _findCaller(stack_info=False):\n f = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == logging._srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv",
"def findCallerPatch():\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (filename, f.f_lineno, co.co_name)\n break\n return rv",
"def findCaller(self, stack_info=False, stacklevel=2):\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv",
"def findCaller(self, stack_info=False):\n \n _frame_object = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X: Frames.\n if (_frame_object is not None):\n _frame_object = _frame_object.f_back\n \n rv = (\"(unknown file)\", 0, \"(unknown function)\", None)\n while hasattr(_frame_object, 'f_code'):\n _code_object = _frame_object.f_code\n filename = os.path.normcase(_code_object.co_filename)\n \n _next = _frame_object.f_back\n # noinspection PyProtectedMember,PyUnresolvedReferences\n if (filename == logging._srcfile):\n _frame_object = _next\n continue\n \n if (_next and hasattr(_next, 'f_code')):\n _parent_code = _next.f_code\n if (_parent_code.co_name == LOGGING_WRAPPER_NAME):\n _frame_object = _next.f_back\n continue\n \n _stack_info = None\n if (stack_info):\n _str_io = StringIO()\n _str_io.write('Stack (most recent call last):\\n')\n traceback.print_stack(_frame_object, file=_str_io)\n _stack_info = _str_io.getvalue()\n if (_stack_info[-1] == '\\n'):\n _stack_info = _stack_info[:-1]\n _str_io.close()\n \n rv = (_code_object.co_filename, _frame_object.f_lineno, _code_object.co_name, _stack_info)\n break\n return rv",
"def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n return caller",
"def caller_info(self):\n\n frames = traceback.extract_stack()\n frames.reverse()\n try:\n (_, mod_name) = __name__.rsplit('.', 1)\n except ValueError:\n mod_name = __name__\n for (fpath, lnum, _, _) in frames:\n (fname, _) = os.path.basename(fpath).rsplit('.', 1)\n if fname != mod_name:\n break\n\n return (fname, lnum)",
"def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)",
"def get_caller_frame() -> FrameType:\n return cast(FrameType, cast(FrameType, inspect.currentframe()).f_back)",
"def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil",
"def find_actual_caller(self):\n\n # Gleaned from code in the logging module itself...\n try:\n f = sys._getframe(1)\n ##f = inspect.currentframe(1)\n except Exception:\n f = None\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown module)\", \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n mod = inspect.getmodule(f)\n\n if mod is None:\n modname = '__main__'\n else:\n modname = mod.__name__\n\n if modname == __name__:\n # Crawl back until the first frame outside of this module\n f = f.f_back\n continue\n\n rv = (modname, filename, f.f_lineno, co.co_name)\n break\n return rv",
"def _find_the_caller(i=0):\n import inspect\n\n # the first 2 elements in the stack are the current line and the line\n # of caller of `_find_the_caller`\n i = i + 2\n caller = inspect.stack()[i]\n return caller[1], caller[2], caller[4][0].rstrip(\"\\n\").strip()",
"def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s",
"def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])",
"def get_cur_info():\n try:\n raise Exception\n except:\n f = sys.exc_info()[2].tb_frame.f_back\n # return (f.f_code.co_name, f.f_lineno)\n return f.f_code.co_name",
"def callersName():\r\n import sys\r\n return sys._getframe(2).f_code.co_name",
"def GetCallerName(num_frame=1):\n frame = sys._getframe(num_frame + 1) # pylint: disable=protected-access\n return inspect.getframeinfo(frame, 1)[2]",
"def getStackPosition(self):\r\n return self.callstack.getStack()",
"def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0] \n \n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)",
"def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry",
"def get_caller(delta=0):\n if delta < 0:\n raise RuntimeError(\"Delta must be positive!\")\n for i, frame in enumerate(inspect.stack()):\n if i == 2 + delta:\n return os.path.abspath(frame.filename)",
"def __get_caller_name(caller_frame):\n\n caller_name = caller_frame.f_code.co_name\n if 'self' in caller_frame.f_locals:\n caller_name = \"%s.%s\" % (\n caller_frame.f_locals['self'].__class__.__name__, caller_name\n )\n module = inspect.getmodule(caller_frame)\n if module:\n caller_name = \"%s.%s\" % (module.__name__, caller_name)\n return caller_name",
"def findCaller(self):\n frames = inspect.stack()\n thisfile = os.path.normcase(frames[0][1])\n for frame in frames:\n filename = os.path.normcase(frame[1])\n if filename != thisfile and filename != logging._srcfile:\n major, minor, micro, _, _ = sys.version_info\n if (major, minor, micro) >= (2, 4, 2):\n return filename, frame[2], frame[3]\n else:\n return filename, frame[2]",
"def debug_caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)",
"def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)",
"def who_is_calling():\n return sys._getframe(2).f_code.co_name",
"def getCallerParams(self,frameLevel=1):\n # frameLevel=0 is always getCallerParams. Caller should be level 1, but sometimes level 1 is still in Debug. This causes many dirty hacks.\n levelsToAdd=frameLevel-1\n #debugDir=dir(self)\n #debugDir.remove('__init__') # without removing __init__ was debug unusable in any __init__. Following line is temporary unslashed only\n debugDir=['allowed', 'allowedLevels', 'caller', 'callerLocals', 'callerName', 'dprint', 'getCallerName', 'getCallerParams', 'printHeader', 'restricted', 'settings']\n while sys._getframe(frameLevel).f_code.co_name in debugDir: # restrict returning functions from Debug instance -- dirty hack\n # but causes trouble for init which is in every class. property debugDir hacks this issue.\n if frameLevel>1: print '%i: %s'%(frameLevel,sys._getframe(frameLevel).f_code.co_name)\n frameLevel+=1\n frameLevel+=levelsToAdd # another hack to get another frame\n self.caller=sys._getframe(frameLevel)\n self.callerLocals=self.caller.f_locals\n try:\n if self.callerLocals.has_key('self'):\n #debug.dprint(print str(self.callerLocals['self'].__class__).split(' ')[1],4)\n self.callerName=(\n str(self.callerLocals['self']).split(' ')[0].replace('<__main__.','')+\n '.'+self.caller.f_code.co_name)\n # 026 #if self.callerLocals.has_key('self'): del self.callerLocals['self'] # 025 Fix - caused errors in multithreadng.\n else: self.callerName=self.caller.f_code.co_name\n except KeyError, errorInstance:\n #026 #self.headerLogger.error(\"Caught KeyError. Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.exception(\"Caught KeyError. Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.debug(\"callerLocals is %s\"%(str(self.callerLocals)))\n return (self.callerName,self.callerLocals)",
"def print_callsite_location():\n fi = inspect.getouterframes( inspect.currentframe() )[2]\n print(\"{path}:{line} {fname}\".format(\n line=fi.lineno, path=fi.filename, fname=fi.function))",
"def caller_name(self, skip=6):\r\n stack = inspect.stack()\r\n start = 0 + skip\r\n if len(stack) < start + 1:\r\n return ''\r\n parentframe = stack[start][0] \r\n\r\n name = []\r\n module = inspect.getmodule(parentframe)\r\n # `modname` can be None when frame is executed directly in console\r\n # TODO(techtonik): consider using __main__\r\n if module:\r\n name.append(module.__name__)\r\n # detect classname\r\n if 'self' in parentframe.f_locals:\r\n # I don't know any way to detect call from the object method\r\n # XXX: there seems to be no way to detect static method call - it will\r\n # be just a function call\r\n name.append(parentframe.f_locals['self'].__class__.__name__)\r\n codename = parentframe.f_code.co_name\r\n if codename != '<module>': # top level usually\r\n name.append( codename ) # function or a method\r\n\r\n ## Avoid circular refs and frame leaks\r\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\r\n del parentframe, stack\r\n\r\n return \".\".join(name)",
"def GetFunctionName():\n return traceback.extract_stack(None, 2)[0][2]",
"def _get_vispy_caller():\n records = inspect.stack()\n # first few records are vispy-based logging calls\n for record in records[5:]:\n module = record[0].f_globals['__name__']\n if module.startswith('vispy'):\n line = str(record[0].f_lineno)\n func = record[3]\n cls = record[0].f_locals.get('self', None)\n clsname = \"\" if cls is None else cls.__class__.__name__ + '.'\n caller = \"{0}:{1}{2}({3}): \".format(module, clsname, func, line)\n return caller\n return 'unknown'"
] |
[
"0.7930418",
"0.7792423",
"0.7783527",
"0.7717714",
"0.76556414",
"0.7586453",
"0.75716466",
"0.74985564",
"0.74963945",
"0.7413992",
"0.73594606",
"0.73560166",
"0.72826964",
"0.7263826",
"0.7234214",
"0.71918005",
"0.71639085",
"0.71572584",
"0.71543473",
"0.7149359",
"0.71343",
"0.70934725",
"0.70868695",
"0.70721513",
"0.7069717",
"0.7061928",
"0.70604753",
"0.70071495",
"0.70060575",
"0.6927584"
] |
0.7862673
|
1
|
Returns the log-likelihood for data xs, assumed to be multivariate Gaussian with the given means and covariance.
|
def correlated_gaussian_loglikelihood(xs, means, cov):
lu,piv=sl.lu_factor(cov)
lambdas=np.diag(lu)
ndim=xs.shape[0]
ds=(xs-means)*sl.lu_solve((lu,piv), xs-means)/2.0
return -np.log(2.0*np.pi)*(ndim/2.0)-0.5*np.sum(np.log(lambdas))-np.sum(ds)
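A minimal cross-check sketch (assuming NumPy/SciPy and a positive-definite covariance; not part of the original record): the same Gaussian log-density evaluated through a Cholesky factor, compared against scipy.stats.multivariate_normal.logpdf. A Cholesky factor is used here because the diagonal of an LU factorization can pick up negative entries when partial pivoting permutes rows, which would make the np.log of those diagonal entries ill-defined for some positive-definite covariances.

import numpy as np
import scipy.linalg as sl
import scipy.stats as ss

def gaussian_loglikelihood_chol(xs, means, cov):
    # Cholesky-based evaluation of the multivariate Gaussian log-density:
    # log p(x) = -0.5*k*log(2*pi) - 0.5*log|cov| - 0.5*(x-mu)^T cov^{-1} (x-mu)
    L = sl.cholesky(cov, lower=True)                  # cov = L @ L.T, diag(L) > 0
    ys = sl.solve_triangular(L, xs - means, lower=True)
    ndim = xs.shape[0]
    return (-0.5 * ndim * np.log(2.0 * np.pi)
            - np.sum(np.log(np.diag(L)))              # equals -0.5*log|cov|
            - 0.5 * np.dot(ys, ys))                   # quadratic form

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 3))
cov = A @ A.T + 3.0 * np.eye(3)                       # positive definite by construction
means, xs = rng.normal(size=3), rng.normal(size=3)

print(gaussian_loglikelihood_chol(xs, means, cov))
print(ss.multivariate_normal(mean=means, cov=cov).logpdf(xs))   # should agree closely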
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def likelihood_bivariate_normal(X, mu, cov):\n \n dist = multivariate_normal(mu, cov)\n P = dist.pdf(X)\n return P",
"def multivariate_gaussian(pos, mu, Sigma):\r\n\r\n n = mu.shape[0]\r\n Sigma_det = np.linalg.det(Sigma)\r\n Sigma_inv = np.linalg.inv(Sigma)\r\n N = np.sqrt((2*np.pi)**n * Sigma_det)\r\n # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized\r\n # way across all the input variables.\r\n fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)\r\n\r\n return np.exp(-fac / 2) / N",
"def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma",
"def multivariate_gauss_prob(observed, mean, covariance):\n\n return None",
"def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def likelihood(alphas, sigmas, mus, x):\n if len(alphas.shape) == 0:\n alphas = np.expand_dims(alphas, 1)\n sigmas = np.expand_dims(sigmas, 1)\n k = alphas.shape[0]\n t_dim = int(mus.shape[0] / k)\n\n likelihood_ = 0.0\n\n for i in range(k):\n likelihood_t = gaussian_np(x, mus[i*t_dim:(i+1)*t_dim], sigmas[i])\n likelihood_ += alphas[i] * likelihood_t\n\n return likelihood_",
"def multivariateGaussian(X, mu, sigma2):\n\tk = len(mu)\n\n\tif sigma2.ndim == 1:\n\t\t# convert sigma2 to a diagonal matrix\n\t\tsigma2 = np.diag(sigma2)\n\n\t# vectorized version of Multivariate Gaussian Distribution\n\tX = X - mu\n\t# p is a vector contains all probabilities of each examples\n\tp = (2 * np.pi) ** (- k / 2.0) * np.linalg.det(sigma2) ** (-0.5) * \\\n\t np.exp(-0.5 * np.sum(X.dot(np.linalg.pinv(sigma2)) * X, axis=1))\n\n\treturn p",
"def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2",
"def gaussian_likelihood(x, mu, log_std):\n prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))\n return tf.reduce_sum(prob, axis=1)",
"def multivariate_normal(x, d, mean, covariance):\n x_m = x - mean\n return (1. / (np.sqrt((2 * np.pi)**d * np.linalg.det(covariance))) * \n np.exp(-(np.linalg.solve(covariance, x_m).T.dot(x_m)) / 2))",
"def multivariateGaussian(X, mu, Sigma2):\n k = mu.shape[0]\n\n if Sigma2.shape[1] == 1 or Sigma2.shape[0] == 1:\n Sigma2 = np.diag(Sigma2[:, 0])\n\n X = (X-mu.T).copy()\n p = (2*np.pi)**(-k/2)*np.linalg.det(Sigma2)**-0.5\n p = p*np.exp(-0.5*(X.dot(np.linalg.pinv(Sigma2))*X).sum(1, keepdims=True))\n return p",
"def multivariate_normal_logpf(\n x: np.ndarray,\n mean_vec: t.Optional[np.ndarray] = None,\n cov_mat: t.Optional[np.ndarray] = None,\n cov_mat_cholesky: t.Optional[np.ndarray] = None) -> float:\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n\n if mean_vec is None:\n mean_vec = np.zeros(x.size, dtype=np.float64)\n\n if cov_mat_cholesky is None:\n if cov_mat is None:\n cov_mat = np.diag(np.ones(mean_vec.size, dtype=np.float64))\n\n cov_mat_cholesky = scipy.linalg.cholesky(cov_mat, lower=True)\n\n num_dim = mean_vec.size\n\n _cov_shape = cov_mat_cholesky.shape\n if not x.size == num_dim == _cov_shape[0] == _cov_shape[1]:\n raise ValueError(\"Dimensions of given 'x', mean vector and covariance \"\n \"matrix does not match! (Got length {} mean vector \"\n \"and covariance matrix with {} shape and {} length 'x'.\"\n .format(mean_vec.size, _cov_shape, x.size))\n\n log_det = 2.0 * np.sum(np.log(np.diag(cov_mat_cholesky)))\n\n z_vector = scipy.linalg.solve_triangular(\n cov_mat_cholesky, x - mean_vec, lower=True)\n aux = np.dot(z_vector, z_vector)\n\n return -0.5 * (num_dim * np.log(2.0 * np.pi) + log_det + aux)",
"def like(x, sigma, mu):\n L = np.ones(len(mu))\n for x_i,sigma_i in zip(x, sigma):\n L *= (1.0/np.sqrt(2.0*np.pi*sigma_i**2))*np.exp(-0.5*(x_i-mu)**2/(sigma_i**2))\n return L",
"def likelihood_function(X, taus, mus, sigmas):\n N = X.shape[0] # number of data points\n get_component_prob = lambda x: component_pdfs(x, mus, sigmas)\n T = np.apply_along_axis(arr=X, func1d=get_component_prob, axis=1) # gaussian component probabilities in row format (NxK)\n taus_rep = np.tile(taus, reps=(N, 1)) # repeat tau along N-axis so elementwise product can work\n\n return np.sum(T*taus_rep, axis=1)",
"def log_gaussian_likelihood(x, mu, log_std):\n log_gaussian_prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 - log_std - 0.5 * np.log(2 * np.pi))\n return tf.reduce_sum(log_gaussian_prob, axis=1)",
"def _log_multivariate_normal_density_full(X, means, covars, min_covar=1):\n # n_samples = 1 #\n n_samples, n_dim = X.shape\n nmix = len(means)\n log_prob = np.empty((n_samples, nmix))\n chmatr = []\n chsol = []\n for c, (mu, cv) in enumerate(zip(means, covars)):\n try:\n cv_chol = scipy.linalg.cholesky(cv, lower=True)\n except scipy.linalg.LinAlgError:\n # The model is most probably stuck in a component with too\n # few observations, we need to reinitialize this components\n try:\n #cv_chol = scipy.linalg.cholesky(cv + min_covar * np.eye(n_dim),\n # lower=True)\n cv_chol = scipy.linalg.cholesky(positive_def(cv), lower=True)\n except scipy.linalg.LinAlgError:\n np.savetxt(\"errorch\", cv + min_covar * np.eye(n_dim))\n raise ValueError(\"'covars' must be symmetric, \"\n \"positive-definite\")\n\n cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))\n cv_sol = scipy.linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T\n chmatr.append(cv_chol)\n chsol.append(cv_sol)\n log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +\n n_dim * np.log(2 * np.pi) + cv_log_det)\n\n return log_prob, chmatr, chsol",
"def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p",
"def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2",
"def gaussian_likelihood(x, mu, log_std):\n\n # this expression just calculates the log of the pdf of the gaussian for a single\n # vector index, as described in the function docstring.\n # note that since we are taking the *log* of the pdf, we add terms together\n # which are multiplied together in the pdf\n # also note that rather than dividing by the std_dev, like we do in the regular pdf,\n # we divide by (std_dev + EPS), where EPS (epsilon) is a tiny number we include\n # to ensure that we don't divide by zero if std_dev = 0.\n pre_sum = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS))**2 + 2*log_std + np.log(2 * np.pi))\n \n # return the sum of the items in the pre_sum vector\n # since each item is the log of the pdf for a specific index,\n # when we sum these, we get the log of the product of each\n # individual pdf -- ie. the log of the pdf evaluated\n # at this vector as a whole\n return tf.reduce_sum(pre_sum, axis=1)",
"def multivariate_normal_density(mean, cov, X):\n \n evals, evecs = np.linalg.eigh(cov)\n cov_half_inv = evecs.dot(np.diag(evals**(-0.5))).dot(evecs.T)\n # print(evals)\n half_evals = np.dot(X-mean, cov_half_inv)\n full_evals = np.sum(half_evals**2, 1)\n unden = np.exp(-0.5*full_evals)\n \n Z = np.sqrt(np.linalg.det(2.0*np.pi*cov))\n den = unden/Z\n assert len(den) == X.shape[0]\n return den",
"def gaussiannd(mu, cov, x):\n #2012-05-08 11:36 IJMC: Created\n \n x = np.array(x, dtype=float, copy=False)\n mu = np.array(mu, dtype=float, copy=False)\n cov = np.array(cov, dtype=float, copy=True)\n \n if x.ndim==1:\n nx = x.size\n niter = 0\n elif x.ndim==2:\n niter, nx = x.shape\n \n if cov.size==1:\n cov = cov.reshape((1,1))\n\n # Test if cov is square:\n\n # invert\n invcov = np.linalg.inv(cov)\n\n # Compute mean vector:\n if niter==0:\n xmu = (x - mu).reshape(nx, 1)\n term1 = ((2*np.pi)**(nx/2.) * np.sqrt(np.linalg.det(cov)))\n term2 = np.exp(-0.5 * np.dot(xmu.transpose(), np.dot(invcov, xmu)))\n ret = term2 / term1\n else:\n for ii in range(niter):\n xmu = (x[ii] - mu).reshape(nx, 1)\n term1 = ((2*np.pi)**(nx/2.) * np.sqrt(np.linalg.det(cov)))\n term2 = np.exp(-0.5 * np.dot(xmu.transpose(), np.dot(invcov, xmu)))\n ret[ii] = term2 / term1\n \n\n ret = np.zeros(nx, dtype=float)\n\n\n return term2 / term1",
"def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var",
"def gaussian_likelihood(x, mu, log_std):\n std = tf.exp(log_std)\n pre_sum = tf.square((x - mu)/std) + 2*log_std + np.log(2*np.pi)\n return -0.5 * tf.reduce_sum(pre_sum, axis=1)",
"def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)",
"def discretized_gaussian_log_likelihood(x, *, means, log_scales):\n assert x.shape == means.shape == log_scales.shape\n centered_x = x - means\n inv_stdv = torch.exp(-log_scales)\n plus_in = inv_stdv * (centered_x + 1.0 / 255.0)\n cdf_plus = approx_standard_normal_cdf(plus_in)\n min_in = inv_stdv * (centered_x - 1.0 / 255.0)\n cdf_min = approx_standard_normal_cdf(min_in)\n log_cdf_plus = torch.log(cdf_plus.clamp(min=1e-12))\n log_one_minus_cdf_min = torch.log((1.0 - cdf_min).clamp(min=1e-12))\n cdf_delta = cdf_plus - cdf_min\n log_probs = torch.where(x < -0.999, log_cdf_plus, torch.where(x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12))))\n assert log_probs.shape == x.shape\n return log_probs",
"def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)",
"def mm_gaussian(nsample, means, covars, weights):\n assert len(means) == len(covars), \"Number of means or covariance matrices inconsistant with the number of gaussians\"\n ngaussian = len(means)\n nd = means[0].size(0)\n weights.div_(weights.sum())\n # weights = torch.tensor([0.5, 0.5])\n # means = torch.tensor([[-3, 0], [3, 0]], dtype=torch.float)\n samples = torch.zeros(ngaussian, nsample, nd)\n for i, (mean, covar) in enumerate(zip(means, covars)):\n # covar = I\n # covar.div_(covar.max())\n # corr = 0.01 * (R.t() + R) + 3*I # cross correletion matrix\n # covar = corr - torch.mm(mean.unsqueeze(1), mean.unsqueeze(1).t())\n multi_normal = MultivariateNormal(loc=mean, covariance_matrix=covar)\n samples[i] = multi_normal.sample((nsample,))\n indices = np.random.permutation(nsample) # the total range of indices\n range_idx = (0, 0)\n mm_sample = samples[0] # the mixture model for the gaussian\n for i in range(ngaussian):\n n = int(0.5 + weights[i] * nsample) # the number of samples belonging to this\n range_idx = range_idx[1], min(n+range_idx[1], nsample)\n idx = indices[range_idx[0]:range_idx[1]]\n mm_sample[idx] = samples[i, idx]\n return mm_sample.unsqueeze(2).unsqueeze(3)",
"def log_likelihood(X, mu, sigma, phi):\n ll = None\n\n #######################################################################\n # TODO: #\n # Compute the log-likelihood of the data under the current model. #\n # This is used to check for convergnence of the algorithm. #\n #######################################################################\n\n ll = np.zeros((X.shape[0], 1))\n k = mu.shape[0]\n\n for i in range(k):\n ll += multivariate_normal(mu[i, :], sigma[i]).pdf(X)[:, np.newaxis]*phi[i]\n\n ll = sum(np.log(ll))\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return ll",
"def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))"
] |
[
"0.7061533",
"0.69797194",
"0.69187695",
"0.6848097",
"0.6845057",
"0.67813957",
"0.6697071",
"0.6683684",
"0.667868",
"0.6657556",
"0.6601283",
"0.6573809",
"0.65730196",
"0.656916",
"0.6557741",
"0.6553713",
"0.6550848",
"0.6544307",
"0.6529757",
"0.65168965",
"0.6497047",
"0.64800835",
"0.6463418",
"0.6439936",
"0.6410685",
"0.63411534",
"0.63399434",
"0.63260525",
"0.6319916",
"0.6316398"
] |
0.7507182
|
0
|
Returns an array of quantiles, one per component of xs, under the correlated Gaussian distribution with the given means and covariance.
|
def correlated_gaussian_quantiles(xs, means, cov):
L = sl.cholesky(cov, lower=True)
ys = sl.solve(L, xs-means)
return ss.norm.cdf(ys)
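A small self-check sketch (assuming NumPy/SciPy; not part of the original record): for draws taken from the same correlated Gaussian, whitening with the Cholesky factor and applying the standard-normal CDF, as the record above does, should yield componentwise Uniform(0, 1) quantiles, so their empirical mean should sit near 0.5.

import numpy as np
import scipy.linalg as sl
import scipy.stats as ss

def correlated_gaussian_quantiles(xs, means, cov):
    # Same construction as the record above: whiten with L, then map through Phi.
    L = sl.cholesky(cov, lower=True)
    ys = sl.solve(L, xs - means)
    return ss.norm.cdf(ys)

rng = np.random.default_rng(1)
cov = np.array([[2.0, 0.8], [0.8, 1.0]])
means = np.array([0.5, -1.0])

draws = rng.multivariate_normal(means, cov, size=5000)
qs = np.array([correlated_gaussian_quantiles(x, means, cov) for x in draws])
print(qs.mean(axis=0))    # each component should be close to 0.5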
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma",
"def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)",
"def sigmaPoints(mean, covariance):\n N = len(mean)\n mean = np.reshape(mean, (N,1))\n assert covariance.shape == (N,N)\n\n sigmaPoints = [mean] * (2*N + 1)\n w0 = 1/3 # based on assumption of Gaussian distributions.\n\n cholesky = linalg.cholesky((N/(1-w0)) * covariance)\n # cholesky returns A s.t. A*A.T = P so we use the columns of A\n columns = np.hsplit(cholesky, N)\n for i, column in enumerate(columns):\n sigmaPoints[i+1] = mean + column\n sigmaPoints[i+1+N] = mean - column\n weights = [w0] + [(1-w0)/(2*N)] * (2 * N)\n return sigmaPoints, weights",
"def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])",
"def peak_comb_cdf(x, mean, sigma, alpha, dt, n_samples = 6):\n res = np.zeros_like(x)\n for i in range(n_samples):\n z = np.clip((x - mean - i*dt)/sigma, -10, 10)\n res += skewnorm.cdf(z, alpha)\n return 1.0/n_samples * res",
"def gaussiannd(mu, cov, x):\n #2012-05-08 11:36 IJMC: Created\n \n x = np.array(x, dtype=float, copy=False)\n mu = np.array(mu, dtype=float, copy=False)\n cov = np.array(cov, dtype=float, copy=True)\n \n if x.ndim==1:\n nx = x.size\n niter = 0\n elif x.ndim==2:\n niter, nx = x.shape\n \n if cov.size==1:\n cov = cov.reshape((1,1))\n\n # Test if cov is square:\n\n # invert\n invcov = np.linalg.inv(cov)\n\n # Compute mean vector:\n if niter==0:\n xmu = (x - mu).reshape(nx, 1)\n term1 = ((2*np.pi)**(nx/2.) * np.sqrt(np.linalg.det(cov)))\n term2 = np.exp(-0.5 * np.dot(xmu.transpose(), np.dot(invcov, xmu)))\n ret = term2 / term1\n else:\n for ii in range(niter):\n xmu = (x[ii] - mu).reshape(nx, 1)\n term1 = ((2*np.pi)**(nx/2.) * np.sqrt(np.linalg.det(cov)))\n term2 = np.exp(-0.5 * np.dot(xmu.transpose(), np.dot(invcov, xmu)))\n ret[ii] = term2 / term1\n \n\n ret = np.zeros(nx, dtype=float)\n\n\n return term2 / term1",
"def _eval_sumgaussians(x,xamp,xmean,xcovar):\n ndata= x.shape[0]\n da= x.shape[1]\n out= numpy.zeros(ndata)\n ngauss= len(xamp)\n loglike= numpy.zeros(ngauss)\n for ii in range(ndata):\n for kk in range(ngauss):\n if xamp[kk] == 0.:\n loglike[kk]= numpy.finfo(numpy.dtype(numpy.float64)).min\n continue\n tinv= linalg.inv(xcovar[kk,:,:])\n delta= x[ii,:]-xmean[kk,:]\n loglike[kk]= numpy.log(xamp[kk])+0.5*numpy.log(linalg.det(tinv))\\\n -0.5*numpy.dot(delta,numpy.dot(tinv,delta))+\\\n da*_SQRTTWOPI\n out[ii]= maxentropy.logsumexp(loglike)\n return out",
"def multivariate_gauss_prob(observed, mean, covariance):\n\n return None",
"def multivariate_normal_density(mean, cov, X):\n \n evals, evecs = np.linalg.eigh(cov)\n cov_half_inv = evecs.dot(np.diag(evals**(-0.5))).dot(evecs.T)\n # print(evals)\n half_evals = np.dot(X-mean, cov_half_inv)\n full_evals = np.sum(half_evals**2, 1)\n unden = np.exp(-0.5*full_evals)\n \n Z = np.sqrt(np.linalg.det(2.0*np.pi*cov))\n den = unden/Z\n assert len(den) == X.shape[0]\n return den",
"def correlated_gaussian_loglikelihood(xs, means, cov):\n lu,piv=sl.lu_factor(cov)\n\n lambdas=np.diag(lu)\n\n ndim=xs.shape[0]\n \n ds=(xs-means)*sl.lu_solve((lu,piv), xs-means)/2.0\n\n return -np.log(2.0*np.pi)*(ndim/2.0)-0.5*np.sum(np.log(lambdas))-np.sum(ds)",
"def mm_gaussian(nsample, means, covars, weights):\n assert len(means) == len(covars), \"Number of means or covariance matrices inconsistant with the number of gaussians\"\n ngaussian = len(means)\n nd = means[0].size(0)\n weights.div_(weights.sum())\n # weights = torch.tensor([0.5, 0.5])\n # means = torch.tensor([[-3, 0], [3, 0]], dtype=torch.float)\n samples = torch.zeros(ngaussian, nsample, nd)\n for i, (mean, covar) in enumerate(zip(means, covars)):\n # covar = I\n # covar.div_(covar.max())\n # corr = 0.01 * (R.t() + R) + 3*I # cross correletion matrix\n # covar = corr - torch.mm(mean.unsqueeze(1), mean.unsqueeze(1).t())\n multi_normal = MultivariateNormal(loc=mean, covariance_matrix=covar)\n samples[i] = multi_normal.sample((nsample,))\n indices = np.random.permutation(nsample) # the total range of indices\n range_idx = (0, 0)\n mm_sample = samples[0] # the mixture model for the gaussian\n for i in range(ngaussian):\n n = int(0.5 + weights[i] * nsample) # the number of samples belonging to this\n range_idx = range_idx[1], min(n+range_idx[1], nsample)\n idx = indices[range_idx[0]:range_idx[1]]\n mm_sample[idx] = samples[i, idx]\n return mm_sample.unsqueeze(2).unsqueeze(3)",
"def gauss_sample(mean, covariance):\n\n return None",
"def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2",
"def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))",
"def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix",
"def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2",
"def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p",
"def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2",
"def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)",
"def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p",
"def generate_samples(mu1,cov,number_of_samples):\n samples = np.random.multivariate_normal(mu1, cov,number_of_samples)\n return samples",
"def multivariate_gaussian(pos, mu, Sigma):\r\n\r\n n = mu.shape[0]\r\n Sigma_det = np.linalg.det(Sigma)\r\n Sigma_inv = np.linalg.inv(Sigma)\r\n N = np.sqrt((2*np.pi)**n * Sigma_det)\r\n # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized\r\n # way across all the input variables.\r\n fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)\r\n\r\n return np.exp(-fac / 2) / N",
"def norm_cdf(mu, sigma, x):\n return 0.5 * (1 + math.erf((x - mu) / (sigma * math.sqrt(2.0))))",
"def create_gaussian_data(self, mean, std, nPoints, nClusters, nDimension):\n dataset = np.zeros((nClusters, nPoints, nDimension), dtype=float)\n for i in range(nClusters):\n cov = std[i] ** 2\n dataset[i, :, :] = np.random.multivariate_normal(mean[i], cov, nPoints)\n\n return dataset",
"def get_square_gauss(x_mat):\n sq_mat = np.zeros(x_mat['mu'].shape)\n\n for i in range(x_mat['mu'].shape[1]):\n sq_mat[:, i] = x_mat['mu'][:, i] ** 2.0\n sq_mat[:, i] += np.diag(x_mat['sigma'][i, :, :])\n\n return sq_mat",
"def get_gaussian_axis_sample(a, b, N, dtype):\n assert a < b, \"condition a < b violated!\"\n assert isinstance(N, int), \"condition N of type int violated!\"\n\n data = []\n for n in range(N):\n x = a + get_norm_cdf(N)[n]*(b-a)\n if dtype is int:\n data.append(int(x))\n elif dtype is float:\n data.append(x)\n else:\n raise AssertionError(\"dtype {} not supported for uniform sampling!\".format(dtype))\n return data",
"def gauss_convolve(array, sigma):\r\n\t##remove singleton dimesions and make sure values are floats\r\n\tarray = array.squeeze().astype(float)\r\n\t##allocate memory for result\r\n\tresult = np.zeros(array.shape)\r\n\t##if the array is 2-D, handle each trial separately\r\n\ttry:\r\n\t\tfor trial in range(array.shape[1]):\r\n\t\t\tresult[:,trial] = gaussian_filter(array[:, trial], sigma = sigma, order = 0, mode = \"constant\", cval = 0.0)\r\n\t##if it's 1-D:\r\n\texcept IndexError:\r\n\t\tif array.shape[0] == array.size:\r\n\t\t\tresult = gaussian_filter(array, sigma = sigma, order = 0, mode = \"constant\", cval = 0.0)\r\n\t\telse:\r\n\t\t\tprint \"Check your array dimenszions!\"\r\n\treturn result",
"def npp_ify(values, input_mean_variance=None):\n\n distribution = distributions.normal_distribution()\n values = flex.sorted(values)\n if input_mean_variance:\n mean, variance = input_mean_variance\n else:\n mean, variance = mean_variance(values)\n\n scaled = (values - mean) / math.sqrt(variance)\n expected = distribution.quantiles(values.size())\n\n return expected, scaled",
"def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)",
"def test_cumulative_distribution_fit_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()"
] |
[
"0.62254775",
"0.6103563",
"0.6049607",
"0.598576",
"0.59617925",
"0.59495264",
"0.5938824",
"0.5892896",
"0.5885811",
"0.5877937",
"0.5797762",
"0.57833624",
"0.57510304",
"0.5727639",
"0.5709559",
"0.5681378",
"0.566666",
"0.5659615",
"0.5648411",
"0.5610717",
"0.55851966",
"0.55630666",
"0.5543167",
"0.553854",
"0.55242586",
"0.5518672",
"0.5506694",
"0.5470584",
"0.54686373",
"0.54564863"
] |
0.79821897
|
0
|
Return the residuals for the rv model with parameters ``p`` and the observations of radial velocities ``rv`` at times ``t``
|
def residuals(self, ts, rvs, p):
if p.npl == 0:
return rvs
else:
rvmodel = np.sum(rv.rv_model(ts,p), axis=0)
return rvs - rvmodel
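The record above depends on an external ``rv.rv_model`` helper and a parameter object ``p`` that are not shown in this dump, so it cannot be run as-is. The sketch below (assuming NumPy) swaps in a hypothetical single-planet circular-orbit model purely to illustrate the data-minus-model residual computation; the model, its parameter names, and the noise level are all illustrative, not the author's.

import numpy as np

def circular_rv_model(ts, K, P, phi):
    # Hypothetical stand-in for rv.rv_model: one circular orbit,
    # v(t) = K * sin(2*pi*t/P + phi).
    return K * np.sin(2.0 * np.pi * ts / P + phi)

def rv_residuals(ts, rvs, K, P, phi):
    # Residuals = observed radial velocities minus the model prediction.
    return rvs - circular_rv_model(ts, K, P, phi)

ts = np.linspace(0.0, 30.0, 100)                                # observation times
truth = circular_rv_model(ts, K=5.0, P=10.0, phi=0.3)
rvs = truth + np.random.default_rng(2).normal(scale=0.5, size=ts.size)

res = rv_residuals(ts, rvs, K=5.0, P=10.0, phi=0.3)
print(res.std())    # should sit near the injected noise level of 0.5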
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def residuals_linear(params, t, data):\n \n #Get an ordered dictionary of parameter values\n v = params.valuesdict()\n \n #Cubic model\n model = v['a']*t**3 + v['b']*t**2 + v['c']*t + v['d']\n\n return model - data #Return residuals",
"def residual(t, x, xdot, result):\n result[0] = x[2]-xdot[0]\n result[1] = x[3]-xdot[1]\n result[2] = -xdot[2]+x[4]*x[0]/m\n result[3] = -xdot[3]+x[4]*x[1]/m-g\n result[4] = x[2]**2 + x[3]**2 \\\n + (x[0]**2 + x[1]**2)/m*x[4] - x[1] * g\n print(result)",
"def residuals(p, r, theta):\n return r - f(theta, p)",
"def residuals(self, p, data, X):\n err = data - self.fitfunc(X,p)\n return err",
"def rv_from_r0v0(mu, R0, V0, t):\n #...Magnitudes of R0 and V0:\n r0 = norm(R0)\n v0 = norm(V0)\n #...Initial radial velocity:\n vr0 = np.dot(R0, V0)/r0\n #...Reciprocal of the semimajor axis (from the energy equation):\n alpha = 2/r0 - pow(v0,2)/mu\n #...Compute the universal anomaly:\n x = kepler_U(mu, t, r0, vr0, alpha)\n #...Compute the f and g functions:\n f, g = calc_f_g(mu, x, t, r0, alpha)\n #...Compute the final position vector:\n R = f*R0 + g*V0\n #...Compute the magnitude of R:\n r = norm(R)\n #...Compute the derivatives of f and g:\n fdot, gdot = calc_fdot_gdot(mu, x, r, r0, alpha)\n #...Compute the final velocity:\n V = fdot*R0 + gdot*V0\n return R, V",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k )\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\t\t\t#interp_model[interp_model == 0] = np.nan\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()",
"def residual2(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n Tm = parvals['Tm']\n T0 = parvals['T0']\n model = B0*x*(x-T0)*((Tm-x)**0.5)\n return data - model",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\t\t\treturn Resid.flatten()",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model[interp_model == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\n\t\t\treturn Resid.flatten()",
"def residuals(self) -> npt.NDArray[np.float64]:\n return self.data - self.theory",
"def phy_residual(self, x, t, u, v):\n \n u_t = torch.autograd.grad(\n u, t, \n grad_outputs=torch.ones_like(u),\n retain_graph=True,\n create_graph=True\n )[0]\n \n u_x = torch.autograd.grad(\n u, x, \n grad_outputs=torch.ones_like(u),\n retain_graph=True,\n create_graph=True\n )[0]\n \n u_xx = torch.autograd.grad(\n u_x, x, \n grad_outputs=torch.ones_like(u_x),\n retain_graph=True,\n create_graph=True\n )[0]\n \n v_t = torch.autograd.grad(\n v, t, \n grad_outputs=torch.ones_like(v),\n retain_graph=True,\n create_graph=True\n )[0]\n \n v_x = torch.autograd.grad(\n v, x, \n grad_outputs=torch.ones_like(v),\n retain_graph=True,\n create_graph=True\n )[0]\n \n v_xx = torch.autograd.grad(\n v_x, x, \n grad_outputs=torch.ones_like(v_x),\n retain_graph=True,\n create_graph=True\n )[0]\n \n f_u = u_t + 0.5*v_xx + (u**2 + v**2)*v\n f_v = v_t - 0.5*u_xx - (u**2 + v**2)*u \n return f_u, f_v",
"def compute_residuals(self):\n\n r = self.rsdl()\n adapt_tol = self.opt['RelStopTol']\n\n if self.opt['AutoStop', 'Enabled']:\n adapt_tol = self.tau0 / (1. + self.k)\n\n return r, adapt_tol",
"def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R",
"def residuals(data: DataVector, theory: TheoryVector) -> npt.NDArray[np.float64]:\n assert isinstance(data, DataVector)\n assert isinstance(theory, TheoryVector)\n return (data - theory).view(np.ndarray)",
"def simulating_verlet(n,N,D,t,Rv,sigma,epsilon,dt,m,T,dim,kb,V,steps_r):\n Ekinv = np.zeros((n,1))\n Epotv = np.zeros((n,1))\n Ev = np.zeros((n,1))\n Gpc = np.zeros((steps_r,n))\n for k in range(len(t)):\n F = particle_forceV(Rv[-1], N, sigma, epsilon, D)\n Rv.append(particle_positionV(copy.deepcopy(Rv[-1]), V, dt, F, D)) \n V = particle_velocityV(V, F, dt, Rv, sigma, epsilon, D, N)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n \n #Calibration\n if (int(k%(10)) == int(0) & int(k)<int(len(t)/2)):\n V = calibration(N, kb,T,Ekinv[k],V)\n Ekinv[k] = np.sum(1/(2*m)*(V**2))\n if int(k)> int(len(t)-50):\n Gpc[:,k], dist, dr = pair_correlation(N,Rv[-1],D,steps_r)\n Uv = particle_LJV(Rv[-1], N, D) \n Epotv[k] = abs(Uv)/2 \n Ev[k] = Ekinv[k]+Epotv[k]\n return Rv, Ekinv, Epotv, Ev, Gpc",
"def residuals_PL(self, p, data, x):\n err = data - self.PowerLaw(x,p)\n return err",
"def _residual_lattice(self, params):\n model = np.sqrt(self.calc_q_square())\n data = np.absolute(self.q)\n return (model[self.mask] -data[self.mask])",
"def calculate_residuals(*, vo_data, model_data):\n # Drop the dates and then add back after residuals calculation\n dates = vo_data['date']\n vo_data.drop(vo_data.columns[[0]], axis=1, inplace=True)\n model_data.drop(model_data.columns[[0]], axis=1, inplace=True)\n # Calculate residuals as data minus model values\n residuals = pd.DataFrame(\n vo_data.values - model_data.values,\n columns=vo_data.columns)\n model_data.insert(0, 'date', dates)\n return residuals",
"def residual1(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n a = parvals['a']\n b = parvals['b']\n c = parvals['c']\n d = parvals['d']\n model = a + b*x + c*x**2 + d*x**3\n return data - model",
"def residuals(self):\r\n return self.__residuals",
"def residuals_Arr(self, p, data, x):\n err = data - self.Arr(x,p)\n return err",
"def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ",
"def _get_residuals(self, model: Model) -> np.ndarray:\n try:\n # pyre-fixme[16]: `Model` has no attribute `model`.\n return model.model.resid.values\n except Exception:\n fcst = model.predict(steps=1, freq=\"D\", include_history=True)\n # pyre-fixme[16]: `None` has no attribute `merge`.\n # pyre-fixme[16]: `Optional` has no attribute `to_dataframe`.\n merge = fcst.merge(model.data.to_dataframe(), on=\"time\")\n for col in merge.columns:\n if col != \"time\" and (\"fcst\" not in col):\n return merge[col].values - merge[\"fcst\"].values\n raise ValueError(\"Couldn't find residual or forecast values in model\")",
"def getVs(self, Vp, residual, beta):\n return Vp + beta*residual",
"def calcResiduals(self, params)->np.ndarray:\r\n if self._selectedIdxs is None:\r\n self._updateSelectedIdxs()\r\n dataArr = ModelFitterCore.runSimulationNumpy(parameters=params,\r\n modelSpecification=self.roadrunnerModel,\r\n startTime=self.observedTS.start,\r\n endTime=self.endTime,\r\n numPoint=self.numPoint,\r\n selectedColumns=self.selectedColumns,\r\n _logger=self.logger,\r\n _loggerPrefix=self._loggerPrefix)\r\n if dataArr is None:\r\n residualsArr = np.repeat(LARGE_RESIDUAL, len(self._observedArr))\r\n else:\r\n truncatedArr = dataArr[self._selectedIdxs, 1:]\r\n truncatedArr = truncatedArr.flatten()\r\n residualsArr = self._observedArr - truncatedArr\r\n if self._isObservedNan:\r\n residualsArr = np.nan_to_num(residualsArr)\r\n return residualsArr",
"def residuals_(self):\n return self._residuals",
"def verlet(self,t,dt,r0,r1):\r\n r2 = np.zeros([2,self.particles.size])\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n r2 = (2*r1 - r0 + np.transpose(fv(r1[0,:],r1[1,:],dx,dy,r2,t/self.dt,True,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2)) * (dt**2))\r\n #The transpose is necessary because I messed up the shapes when I did the fv function.\r\n\r\n #JV: this needs to change if we want to include particles with mass diferent than 1 (in reduced units),\r\n # in other words, diferent particles than the Argon gas\r\n\r\n return r2[0,:],r2[1,:]",
"def _raw_residuals(self):\n if self.model.assortativity == 'positive':\n traj = self._solution[::-1]\n else:\n traj = self._solution\n\n # compute the residuals\n xi = np.linspace(traj[0, 0], traj[-1, 0], 10 * traj.shape[0])\n resids_arr = self.ivp.compute_residual(traj[:, :3], xi, k=5, ext=2)\n\n # convert to a data frame\n resids_arr[:, 0] = xi\n col_names = ['x', r'$\\hat{\\mu}(x)$', r'$\\hat{\\theta}(x)$']\n df = pd.DataFrame(resids_arr, columns=col_names)\n\n return df.set_index('x')",
"def residuals_Sigm(self, p, data, x):\n err = data - self.Sigm(x,p)\n return err"
] |
[
"0.6360294",
"0.62171745",
"0.6073643",
"0.600488",
"0.595743",
"0.58258915",
"0.5814308",
"0.5789692",
"0.5789692",
"0.5789692",
"0.5775249",
"0.57112926",
"0.5624764",
"0.55967015",
"0.55801696",
"0.55782723",
"0.5524036",
"0.5520603",
"0.55015445",
"0.5492828",
"0.5464734",
"0.5414925",
"0.54123557",
"0.54002696",
"0.53686345",
"0.53503144",
"0.53399533",
"0.53313076",
"0.5328311",
"0.52908474"
] |
0.77084005
|
0
|