query stringlengths 9-9.05k | document stringlengths 10-222k | metadata dict | negatives sequencelengths 30 | negative_scores sequencelengths 30 | document_score stringlengths 4-10 | document_rank stringclasses 2 values |
---|---|---|---|---|---|---|
Checks that the GsmModem in PDU mode does not send message if error, when the text is within ASCII chars 22 126. | def testSendSmsPduModeError(self):
# setup expectation to raise a non-timeout error with prompt
when(self.mockDevice).read_lines().thenRaise(Exception("something other than timeout"))
self.gsm.send_sms("1234", "Test Message")
# must see command with size
verify(self.mockDevice, times=1).write("AT+CMGS=21\r")
# must see command to break out of command prompt
verify(self.mockDevice, times=1).write("\x1b")
# must NOT see command with text and terminating char
verify(self.mockDevice, times=0).write("00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\x1a")
# allow any number of reads
verify(self.mockDevice, atleast=1).read_lines()
verifyNoMoreInteractions(self.mockDevice) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True",
"def testSendSmsPduMode(self):\n \n # setup expectation to raise a timeout error with prompt\n err = errors.GsmReadTimeoutError(\">\")\n when(self.mockDevice).read_lines().thenRaise(err).thenReturn(self.oklines)\n self.gsm.send_sms(\"1234\", \"Test Message\")\n \n # must see command with size\n verify(self.mockDevice, times=1).write(\"AT+CMGS=21\\r\")\n # must see command with text and terminating char\n verify(self.mockDevice, times=1).write(\"00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\\x1a\")\n # allow any number of reads\n verify(self.mockDevice, atleast=1).read_lines()\n verifyNoMoreInteractions(self.mockDevice)",
"def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True",
"def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)",
"def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True",
"def check_message(self, msg):\n pass",
"def func_ehlo(self, data):\n data_list = bytes(data).decode().encode('ascii', 'ignore').decode().split(' ')\n if data_list[0].lower().rstrip() == 'ehlo':\n message = '250-' + self.hostname + '\\r\\n250-PIPELINING\\r\\n' \\\n + '250-8BITMIME\\r\\n250-SIZE ' \\\n + str(self.data_recv_size) \\\n + '\\r\\n' + '250 AUTH LOGIN PLAIN'\n self.func_sender(message)\n return True",
"def is_valid_msg(msg):\n for char in msg:\n if char not in string.ascii_letters and char not in string.punctuation and char != ' ':\n return False\n return True",
"def send_sms_via_modem(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n # Add '+' before country code\n mobile = \"+\" + mobile\n\n try:\n self.modem.send_sms(mobile, text)\n return True\n except:\n return False",
"def testInvalidCommand(self):\n self.mgr.sendGoProCommand(140, (1, 0, 0, 0))\n self.assertFalse(self.v.message_factory.gopro_set_request_encode.called)",
"def message(self, text):\n lines = str(text).split('\\n') # Split at newline(s)\n for i, line in enumerate(lines): # For each substring...\n if i > 0: # If newline(s),\n self.write_lcd(self.LCD_DATA_E1, 0xC0) # set DDRAM address to 2nd line\n self.write_lcd(self.LCD_DATA_E1, line, True) # Issue substring",
"def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)",
"def find_message(text: str) -> str:\n if not all(ch in string.printable for ch in text):\n return\n elif not text:\n return \"\"\n if len(text) > 1000:\n return\n decode_message = \"\"\n # check case sensitive for a simple character in string\n for correct_char in [char for char in text\\\n if char.upper() == char and char.lower()\\\n in 'abcdefghijklmnopqrstuwxyz']:\n decode_message += correct_char\n return decode_message",
"def send_text(msg, up):\n try:\n client = TwilioRestClient(account=TWILIO_ACCOUNT_SID,\n token=TWILIO_AUTH_TOKEN)\n c = client.sms.messages.create(to=up.phone,\n from_=WATTTIME_PHONE,\n body=msg.msg)\n TwilioSMSEvent(user=up.user,\n msg_type=msg.msg_type,\n to_number=up.phone,\n from_number=WATTTIME_PHONE,\n body=msg.msg).save()\n\n debug(\"texted '{}' to {}\".format(msg, str(up.name)))\n return True\n except:\n print (\"Faild message\", up.phone, WATTTIME_PHONE, msg.msg)\n debug(\"failed to text '{}' to {}\".format(msg, str(up.name)))\n return False",
"def _handle_ok_ack(string):\n if string.strip() == Parser.OK_MSG:\n return True\n return False",
"def _text(self, fromwhom, number, text):\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(self._smsaddress, self._smspw)\n server.sendmail(str(fromwhom), '{}@vtext.com'.format(number),\n str(text))\n server.quit()",
"def verify_text(self, text):\n pass",
"def degsm(self):\n self.success = False",
"def send_message(self, msg):\n if msg is not None:\n try:\n self.node.write(msg.encode(encoding='UTF-8'))\n time.sleep(self.delay)\n except serial.serialutil.SerialTimeoutException:\n self.handle_congestion()\n self.send_message(msg)\n except serial.SerialException:\n self.handle_disconnection()\n self.send_message(msg)\n except:\n print(\"\\n!!!Unexpected error occurred in send_message()!!!\\n\")\n finally:\n return False\n return True",
"def test_recipient_not_str_error(\n config,\n):\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n with pytest.raises(ValueError):\n sms.send(176264916361239, \"test\")",
"def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)",
"def exit_with_message(error_text: str) -> NoReturn:\n raise PealSpeedParseError(peal_speed, error_text)",
"def test_unsupported_chars_error(\n config,\n):\n with requests_mock.Mocker() as m:\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._login_url,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": sms._kontomanager},\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._kontomanager,\n status_code=200,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._sms_form_url,\n status_code=200,\n text=TEST_FORM_TOKEN_SAMPLE,\n )\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._send_sms_url,\n status_code=200,\n text=_UNSUPPORTED_CHARS_STRING,\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._logout_url,\n status_code=200,\n )\n with pytest.raises(sms.UnsupportedCharsError):\n sms.send(YESSS_TO, \"test\")",
"def isvalidport(txt):\n return txt.isdigit() and int(txt) <= 65535 and int(txt) >= 0",
"def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False",
"def sendErrorMessage(msg): #@NoSelf",
"def simulate_unsupported_chars_error(valid_connection):\n path = \"YesssSMS.api.YesssSMS.send\"\n with mock.patch(path, side_effect=YesssSMS.YesssSMS.UnsupportedCharsError()):\n yield",
"def parseable(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n acceptable = range(97, 123) + range(65, 91) + range(48, 58) + range(33, 43) + range(44, 48) + [58, 63, 64, 94]\n return any(ord(c) not in acceptable for c in message_data['message'].replace(' ', ''))",
"def check_ack_or_nak(message):\n value = message.body[-1]\n\n if value == 0x06:\n return\n elif value == 0x15:\n raise CommandFailure(command_code=message.command_code)\n else:\n raise RuntimeError(\"Unexpected ACK/NAK value (0x%02x)\" % value)",
"def validate_ping(result):\n if '0 packets received' in str(result) or 'no answer from' in str(result) or '0 received' in str(result):\n print 'Conectividade - DOWN'\n return False\n print 'Conectividade - OK'\n return True"
] | [
"0.68313634",
"0.67669386",
"0.61789453",
"0.60250825",
"0.579996",
"0.57878447",
"0.5629621",
"0.5615489",
"0.55831856",
"0.5521425",
"0.54795057",
"0.5427085",
"0.54028183",
"0.53781205",
"0.5342483",
"0.53372145",
"0.533206",
"0.5329173",
"0.5322715",
"0.53093356",
"0.52896047",
"0.52851856",
"0.5264613",
"0.52627045",
"0.52588075",
"0.5254264",
"0.52520216",
"0.52446926",
"0.5242512",
"0.52265406"
] | 0.71887165 | 0 |
Returns True if the content type is valid. | def is_valid_content_type(cls, content_type: str) -> bool:
return content_type in cls.CONTENT_TYPES.value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)",
"def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )",
"def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True",
"def is_valid(self):\n try:\n self.validate()\n return True\n except (TypeError, ValueError) as e:\n return False",
"def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES",
"def is_readable(self, content_type):\n return False",
"def _validate_content_type(\n content_type: str, content_name: str, performative: str\n) -> Tuple[bool, str]:\n if not _is_valid_content_type_format(content_type):\n return (\n False,\n \"Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Type of content '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )",
"def is_valid(self):\n\n return True",
"def _is_compositional_type(content_type: str) -> bool:\n for valid_compositional_type in (\n SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES\n ):\n if content_type.startswith(valid_compositional_type):\n return True\n return False",
"def is_valid(self):\n return _drafter.check_blueprint(self.content)",
"def valid(self) -> bool:\n return True",
"def _is_valid_set(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:set\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)",
"def is_valid(self):\n return (self.time is not None\n and self.author is not None\n and self.content is not None)",
"def is_content_malformed(self):\n return self._tag == 'content_malformed'",
"def is_content_malformed(self):\n return self._tag == 'content_malformed'",
"def type_valid(self):\n return contain_in_list_equal(self._type_or_ref, PARAM_RES_TYPES)",
"def is_valid(self): # -> bool:\n ...",
"def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types",
"def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)",
"def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False",
"def valid(self) -> bool:\n pass",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def validate(self):\n self._check_type()",
"def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True",
"def validate(self,value):\r\n return type(value) is self.datatype",
"def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)",
"def validate_content_type(uri: str) -> None:\n try:\n response = requests.head(uri)\n response.raise_for_status()\n except RequestException as e:\n raise ValidationError(f\"groundtruth content type ({uri}) validation failed\") from e\n\n content_type = response.headers.get(\"Content-Type\", \"\")\n if content_type not in SUPPORTED_CONTENT_TYPES:\n raise ValidationError(f\"groundtruth entry has unsupported type {content_type}\")",
"def IsValid(self):\n return False",
"def check_content_type():\n return request.content_type == \"application/json\"",
"def valid_xss_content_type(http_res):\n # When no content-type is returned, browsers try to display the HTML\n if \"content-type\" not in http_res.headers:\n return True\n\n # else only text/html will allow javascript (maybe text/plain will work for IE...)\n if \"text/html\" in http_res.headers[\"content-type\"]:\n return True\n return False"
] | [
"0.7517771",
"0.7417243",
"0.70675975",
"0.6998834",
"0.68573606",
"0.6832993",
"0.67981493",
"0.6714344",
"0.6592863",
"0.6578096",
"0.6575198",
"0.6568513",
"0.6541694",
"0.65321887",
"0.65321887",
"0.6491889",
"0.6491021",
"0.6470095",
"0.6450657",
"0.63938487",
"0.63921374",
"0.63828534",
"0.63796335",
"0.6365822",
"0.63636667",
"0.63577056",
"0.6356282",
"0.63446987",
"0.6342855",
"0.63200486"
] | 0.8173388 | 0 |
Constructor for facebook sdk | def init_fb(self, **kwargs):
try:
self.graph = facebook.GraphAPI(access_token=fb_token, version='2.4')
except Exception as e:
sys.exit(str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, access_token, endpoint='/me',\r\n version='2.5'):\r\n self.access_token = access_token\r\n self.endpoint = endpoint",
"def initialize_facebook():\n session = FacebookSession(APP_ID, APP_SECRET, ACCESS_TOKEN)\n return FacebookAdsApi(session)",
"def __init__(self, access_token):\n self.access_token = access_token",
"def facebook(self, facebook):\n\n self._facebook = facebook",
"def __init__(__self__, *,\n app_id: pulumi.Input[str],\n is_enabled: pulumi.Input[bool],\n app_secret: Optional[pulumi.Input[str]] = None,\n pages: Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]] = None):\n pulumi.set(__self__, \"app_id\", app_id)\n pulumi.set(__self__, \"is_enabled\", is_enabled)\n if app_secret is not None:\n pulumi.set(__self__, \"app_secret\", app_secret)\n if pages is not None:\n pulumi.set(__self__, \"pages\", pages)",
"def __init__(self, access_token=None):\n self.access_token = access_token",
"def __init__(self, user_id, token):\n\n self.user_id = user_id\n self.buttons = {}\n self.token = token\n self.quick_reply_uri = \"https://graph.facebook.com/v10.0/me/messages?access_token=\"+self.token\n self.url_button_uri = \"https://graph.facebook.com/v2.6/me/messages?access_token=\"+self.token\n self.text_uri = 'https://graph.facebook.com/v9.0/me/messages?access_token='+self.token\n self.template_uri = 'https://graph.facebook.com/v9.0/me/messages?access_token='+self.token\n self.button_template_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token\n self.typing_on_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token\n self.mark_seen_uri = 'https://graph.facebook.com/v2.6/me/messages?access_token='+self.token",
"def __init__(self, oauth_consumer_token=None, oauth_access_token=None):\n self.consumer_token = oauth_consumer_token\n self.access_token = oauth_access_token",
"def __init__(self, access_token):\n self._access_token = access_token",
"def __init__(self, callback_url):\n # Credientials\n self.URI_SCHEME = \"https\"\n self.API_ENDPOINT = \"rightsignature.com\"\n self.REQUEST_TOKEN_URL = \"/oauth/request_token\"\n self.ACCESS_TOKEN_URL = \"/oauth/access_token\"\n self.REDIRECT_URL = \"/oauth/authorize\"\n self.version = \"1.0\"\n self.signature_method = \"HMAC-SHA1\" # as I said\n self.BASE_URL = \"%s://%s\" % (self.URI_SCHEME, self.API_ENDPOINT)\n\n self.API_KEY = \"\"\n self.API_SECRET = \"\"\n self.CALLBACK_URL = callback_url\n self.request_token = None # that comes later\n self.access_token = None # that comes later and later\n\n self.request_token_secret = None\n self.access_token_secret = None\n\n self.verifier = None\n self.error = None\n\n self.request_oauth_nonce = None\n self.request_oauth_timestamp = None\n self.access_oauth_nonce = None\n self.access_oauth_timestamp = None\n self.request_oauth_error = None\n self.access_oauth_error = None",
"def __init__(self, client_id, token, scope=[\"activity\", \"heartrate\", \"location\", \"nutrition\", \"profile\", \"settings\", \"sleep\", \"social\", \"weight\"]):\n\n\t\tif token['access_token'] == \"\":\n\t\t\t# We need to fetch a token for the user.\n\t\t\tprint(\"Note: looks like we don't have an access token yet. Let's fetch one.\")\n\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope)\n\n\t\t\tauthorization_base_url = \"https://www.fitbit.com/oauth2/authorize\"\n\n\t\t\tauthorization_url, state = self.fitbit.authorization_url(authorization_base_url)\n\n\t\t\tprint(\"Please go to the following authorization URL: {}\".format(authorization_url))\n\n\t\t\traw_callback_url = input(\"Paste callback URL you get back here: \")\n\n\t\t\tself.fitbit.token_from_fragment(raw_callback_url)\n\t\t\tself.token = self.fitbit.token['access_token']\n\n\t\t\tprint(self.fitbit.token)\n\n\t\telse:\n\t\t\t# We've got an access token, and we'll use it.\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope, token=token)\n\t\t\tself.token = token['access_token']",
"def __init__(self, client=\"ANDROID_EMBED\"):\n self.context = self._DEFAULT_CLIENTS[client][\"context\"]\n self.api_key = self._DEFAULT_CLIENTS[client][\"api_key\"]",
"def __init__(self, oauth=None, client_id=None):\n\t\tself.oauth = oauth\n\t\tself.client_id = client_id or self.default_client_id",
"def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret",
"def facebook(self):\n try:\n from facebook import Facebook\n except ImportError:\n log.warning(\"PyFacebook is not installed!\")\n else:\n if self.user and self.user.profile.uses_facebook_connect:\n # This implies, that the correct cookies must be set. We don't\n # double check for that.\n api_key = get_app().cfg['facebook/api_key']\n secret_key = get_app().cfg['facebook/secret_key']\n facebook = Facebook(api_key, secret_key)\n # Setting the cookie values\n # It's so cool to have no private attributes. (;\n facebook.uid = self.session['fb_user_id']\n facebook.session_key = self.session['fb_session_id']\n return facebook",
"def __init__(self, consumer_key, consumer_secret, access_token,\n access_token_secret, **kwargs):\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n super().__init__(**kwargs)",
"def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object",
"def __init__(self, access_token, db_path, id_list):\n self.access_token = access_token\n self.db_path = db_path\n self.id_list = id_list\n\n g = facebook.GraphAPI(self.access_token, version='2.3')\n self.g = g\n\n # connect to database\n con = lite.connect(self.db_path)\n self.con = con\n\n with con:\n # create cursor to the database\n cur = con.cursor()\n self.cur = cur\n # create tables for posts, comments, post likes and people if not exists\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Posts(post_id TEXT PRIMARY KEY, status_id TEXT, content TEXT, \"\n \"person_hash_id TEXT, published_date TEXT, last_comment_date TEXT, post_type TEXT, status_type TEXT, \"\n \"post_link TEXT, link TEXT, video_link TEXT, picture_link TEXT, link_name TEXT, link_caption TEXT, \"\n \"link_description TEXT, comment_count INTEGER, share_count INTEGER, like_count INTEGER, \"\n \"love_count INTEGER, wow_count INTEGER, haha_count INTEGER, sad_count INTEGER, angry_count INTEGER, \"\n \"mentions_count INTEGER, mentions TEXT, location TEXT, date_inserted TEXT)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Comments(comment_id TEXT PRIMARY KEY, person_hash_id TEXT, post_id TEXT, \"\n \"comment_content TEXT, comment_date TEXT, like_count INTEGER)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Post_likes(like_id TEXT PRIMARY KEY, person_hash_id TEXT, post_id TEXT)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS People(person_hash_id TEXT PRIMARY KEY, person_id TEXT, person_name TEXT)\")",
"def __init__(self, access_key, secret_key, **kwargs):\r\n pass",
"def __init__(self):\n self.dsq = fb_forecast_q.FbForecastApi(\"gfs\")",
"def __init__(self, options={}):\n # Throw an error if app_id is not present in options dict\n if 'app_id' not in options:\n raise KeyError('app_id must be supplied when making requests to the API. Get a free app_id by signing up here: https://www.opengraph.io/')\n\n self.app_id = options['app_id']\n\n # Assign options if present, or defaults if not\n # These can be overridden when making requests through get_site_info\n self.cache_ok = options['cache_ok'] if 'cache_ok' in options else True\n self.full_render = options['full_render'] if 'full_render' in options else False\n self.version = options['version'] if 'version' in options else '1.1'",
"def __init__(self, username, passwordresettoken, passwordresetexpires, password, email, phone, facebook, google, linkin, group_id):\n\n self.username = username\n self.passwordresettoken = passwordresettoken\n self.passwordresetexpires = passwordresetexpires\n self.password = password\n self.email = email\n self.phone = phone\n self.facebook = facebook\n self.google = google\n self.linkin = linkin\n self.createdate = datetime.now()\n self.updatedate = datetime.now()\n self.group_id = group_id",
"def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeError('__init__() must be passed at least one '\r\n 'of client_id, access_token')\r\n\r\n self.apiroot = 'https://api.instagram.com/v1'\r\n\r\n self.client_id = client_id\r\n self.access_token = access_token\r\n self.add_filter(self.add_authorization)",
"def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):\n self.api = self.getAPI(consumer_key, consumer_secret, access_token, access_token_secret)",
"def __init__(self, client_id: str, client_secret: str, access_token_publish_url: str, access_token: str = None):\n\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token_publish_url = access_token_publish_url\n self.api_base_url = 'https://api.ce-cotoha.com/api/dev/'\n\n if access_token is not None:\n self.access_token = access_token\n else:\n self.access_token = self.update_access_token()",
"def __init__(self,\n access_token=None,\n token_type=None,\n error=None):\n\n # Initialize members of the class\n self.access_token = access_token\n self.token_type = token_type\n self.error = error",
"def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n self.request_time = None\n self._initialized = False",
"def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret",
"def __init__(self, consumer_key,\n consumer_secret,\n request_token_url,\n access_token_url,\n authorize_url,\n callback_url='oob',\n version='1.0',\n token=None):\n self.__consumer_key = consumer_key\n self.__signature_method = 'HMAC-SHA1'\n self.__version = version\n self.__consumer_secret = consumer_secret\n self.__signing_key = None\n self.__signature_base_string = None\n self.__parameter_string = None\n self.__auth_headers = None\n self.__token = token\n self.__signature = None\n self.__access_token_url = access_token_url\n self.__request_token_url = request_token_url\n self.__authorize_url = authorize_url\n self.__callback_url = callback_url\n self.__response = None\n self.__request = None",
"def __init__(self):\n self.dsq = fb_stations_q.FbStationsApi(\"ghcn\")"
] | [
"0.70955795",
"0.70331687",
"0.65805393",
"0.6543465",
"0.6526601",
"0.64303684",
"0.63264066",
"0.625907",
"0.6256634",
"0.61950535",
"0.61830455",
"0.618123",
"0.61768824",
"0.61615527",
"0.6148158",
"0.6142532",
"0.6112654",
"0.6108066",
"0.6090049",
"0.6088559",
"0.6086943",
"0.6074998",
"0.60672325",
"0.605394",
"0.6045962",
"0.6040036",
"0.60149527",
"0.5993534",
"0.59856105",
"0.59599227"
] | 0.760726 | 0 |
Save event to database | def save_event(self, data):
rdb.table(self.rdb_table).insert(data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, event):\n self.saved_events.append(event)",
"def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)",
"def save(self, db):\n pass",
"def save_event(id):\n event = Event.query.get_or_404(id)\n if not current_user.has_saved(event):\n current_user.save(event)\n db.session.commit()\n return jsonify({\"message\": \"Event added to your saved events list.\"})\n else:\n return jsonify({\"message\": \"You have already saved this event.\"})",
"def save(self):\n self.db.commit()",
"def save_db(self) -> None:",
"def save(self):\n self.__db.commit()",
"def save(self):\n db.session.commit()",
"def save(self):\n self.session.commit()",
"def save(self):\n logging.debug(\"sychronizing db\")\n self._db.sync()",
"def writeEvent(self):\n\t\ttry:\n\t\t\tif self.dataFileHnd:\n\t\t\t\tself.dataFileHnd.writeRecord( (self.mdList())+[self.eventData] )\n\t\texcept sqlite3.OperationalError, err:\n\t\t\t# If the db is locked, wait 1 s and try again.\n\t\t\tprint err\n\t\t\ttime.sleep(1)\n\t\t\tself.writeEvent()\n\t\t# else:\n\t\t# \traise MissingMDIOError(\"Meta-data I/O object not initialized.\")",
"def save_data(self):\n db.session.add(self)\n db.session.commit( )",
"def save(self):\n # send data to be saved by another job\n save_callevent.delay(self.data)",
"def save():",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n file = Path(\"config/event_{0}.json\".format(self.name))\n try:\n file.write_text(self.toJSON())\n except Exception as err:\n raise(err)",
"def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()",
"def save(self, *args, **kwargs):\n pass",
"def writeToDB(self, eventDateTime, eventFileName, eventType, eventPath):\n conn = self.createConnection()\n c = conn.cursor()\n\n c.execute(\"INSERT INTO RansomedFiles (TIME, EventFileName, EventType, EventPath) VALUES (?,?,?,?)\", (eventDateTime, eventFileName, eventType, eventPath))\n conn.commit()\n conn.close()\n\n # print(\"[+]Wrote to the database successfully!\")",
"def create_event(data):\n event = EventModel(**data)\n db.session.add(event)\n db.session.commit()\n return event",
"def save(self):\n\n self.__session.commit()",
"def save(self):\n\n self.__session.commit()",
"def save_to_db(self):\n db.session.add(self)\n db.session.commit()"
] | [
"0.7487547",
"0.7325725",
"0.7049459",
"0.6976891",
"0.69406134",
"0.6901806",
"0.68859524",
"0.6789551",
"0.66951",
"0.66863483",
"0.66196334",
"0.65969324",
"0.65858656",
"0.6583082",
"0.6570057",
"0.6570057",
"0.6570057",
"0.6570057",
"0.6570057",
"0.6570057",
"0.6570057",
"0.6570057",
"0.6541968",
"0.6539257",
"0.65242314",
"0.6522762",
"0.651794",
"0.65169066",
"0.65169066",
"0.6513589"
] | 0.8077102 | 0 |
Iterate through all events pages | def get_events(self):
url = '/v2.4/'+self.page_id+'/events'
data = self.graph.request(url)
while 'next' in data['paging'].keys():
print data['paging']['next']
data = self.graph.request(url, args={
'limit' : 100,
'after' : data['paging']['cursors']['after']
})
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_event_list(self, response):\n for event in response.css(\".view-content .article-title a::attr(href)\"):\n event_url = event.extract()\n yield scrapy.Request(\n response.urljoin(event_url),\n callback=self.parse_event_page,\n dont_filter=True,\n )\n next_url = self._response_next_url(response)\n if next_url:\n yield scrapy.Request(\n response.urljoin(next_url),\n callback=self.parse_event_list,\n dont_filter=True,\n )",
"def parse(self, response):\n for link in response.css(\".event-entry .event-title a::attr(href)\").extract():\n yield scrapy.Request(\n response.urljoin(link), callback=self.parse_event_page, dont_filter=True\n )",
"def events(self) -> [redirect, HTMLBody]:\n\t\t# Get all events and split into 2 groups\n\t\teventsl, eventsr = prepare_events(get_events())\n\t\treturn render_template(\"events.jinja2\", eventsl=eventsl, eventsr=eventsr)",
"def searchForEvents(self, search_args, onProgress):\n print('[EventFinder]: Search For Events called. Checking how many pages to crawl...')\n pages = self.get_total_pages_to_search(search_args)\n urls = [self.assembleRequest(search_args, p) for p in range(1, pages + 1)]\n\n print('[EventFinder]: Crawling %d pages from the eventful api...' % pages)\n start_ms = time_ms()\n\n for u in urls:\n response = requests.get(u)\n events = self.parse_events(response)\n onProgress(events)\n\n print('[EventFinder]: Crawling took ' + str(time_ms() - start_ms) + ' ms')",
"def scrape_events(meta_url, collection):\r\n options = Options()\r\n options.add_argument('--headless')\r\n driver = webdriver.Firefox(options=options)\r\n driver.get(meta_url)\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n meta_dropdown = soup.find('select', {'name': 'meta'}) # get drop down selector for meta\r\n selected_meta = meta_dropdown.find('option', selected=True) # get current meta\r\n \r\n def get_next(d, class_name):\r\n \"\"\"Check if the next button is still valid\"\"\"\r\n try:\r\n button = d.find_elements_by_class_name('Nav_PN')[-1]\r\n return button if button.text == 'Next' else False\r\n except Exception as e:\r\n return False\r\n \r\n page = 1\r\n while True:\r\n print(f'\\nScraping event page {page}...')\r\n next_btn = get_next(driver, 'Nav_PN')\r\n soup = BeautifulSoup(driver.page_source, 'html.parser') # make some soup\r\n \r\n for event in soup.find_all(class_='Stable')[2].find_all(class_='hover_tr'): # 10 events list table\r\n \"\"\"\r\n This loop iterates through event table rows, pulling out an ID number,\r\n the star rating and the date of the event\r\n \"\"\"\r\n link = event.a # associated hyperlink\r\n eid = re.search(r\"e=(\\d+)&\", link['href']).group(1) # unique id number\r\n stars = event.find(class_='O16').find_all('img') # star rating / level\r\n collection.insert_one({\r\n 'id': eid,\r\n 'name': link.text,\r\n 'date': event.find(class_='S10').text,\r\n 'level': 4 if 'bigstar' in stars[0]['src'] else len(stars),\r\n 'link': mtgtop8_url.format(link['href']),\r\n 'meta': selected_meta.text\r\n })\r\n \r\n if next_btn:\r\n next_btn.click()\r\n page += 1\r\n sleep(1)\r\n else:\r\n print('\\n\\n')\r\n driver.close()\r\n break",
"def show_events_list():\r\n\tevents_list = Page.objects.filter(tags='events').order_by('-created')\r\n\treturn {'events_list': events_list}",
"async def events(self) -> Iterable[Event]:",
"def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()",
"def test_event_page(self):\n res = self.client.get('/events')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Upcoming Events' in data",
"def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break",
"def scrape_events(path, urls):\n seen_ids = set()\n result = []\n for url in urls:\n # Get all of the Network requests being sent out\n print(f'Processing {url}')\n driver.get(url)\n browser_log = driver.get_log('performance') \n events = [process_browser_log_entry(entry) for entry in browser_log]\n results = []\n # Find the Network request that sends a GET request to EventBrite API\n for event in events:\n if event['method'] == 'Network.responseReceived':\n # print(event)\n if 'event_ids' in event['params']['response']['url']:\n results.append(event)\n # Get the GET request URL\n get_url = \"\"\n # TODO: Sometimes returning 0 or more than 1... I'm not sure why :(\n if len(results) >= 1:\n get_url = results[0]['params']['response']['url']\n # Get the GET request response JSON\n json_response = get_request(get_url)\n event_list = json_response['events']\n # Find unique events in the response JSON \n unique_event_list = []\n for event in event_list:\n if event['id'] not in seen_ids:\n seen_ids.add(event['id'])\n unique_event_list.append(event)\n parsed_events = parse_event_page(unique_event_list)\n result.extend(parsed_events)\n else:\n print(results)\n print('yikes something went wrong')\n\n driver.close()\n return result\n # save_events(path, result)",
"def get_events_helper(Event):\n try:\n limit = int(request.args.get('limit'))\n page = int(request.args.get('page'))\n except:\n limit = 10\n page = 1\n user_input = \"get_all\"\n check_input_dict = {\n \"get_all\": lambda: Event.get_all_pages(limit, page)\n }\n events_page_object = check_input_dict.get(user_input, \"Something went wrong!!\")()\n status_code = 200\n result = {\"Events\": print_events(pagination(events_page_object)[0]),\n \"Current page\": pagination(events_page_object)[1],\n \"All pages\": pagination(events_page_object)[2]}\n return result, status_code",
"def slurp_events(self):\n while self.has_event():\n self.get_event()",
"def events(self):\n if \"events\" in self._prop_dict:\n return EventsCollectionPage(self._prop_dict[\"events\"])\n else:\n return None",
"def list_event(request):\n event_list = Event.objects.all()\n paginator = Paginator(event_list, 5)\n\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n # If page request (9999) is out of range, deliver last page of results.\n try:\n event_list = paginator.page(page)\n except (EmptyPage, InvalidPage):\n event_list = paginator.page(paginator.num_pages)\n\n context = {'event_list': event_list }\n return render_to_response('event_list.html',\n context,\n context_instance=RequestContext(request))",
"def test_response_is_paginated(self):\r\n user = ViewAfishaTests.mentor\r\n EventFactory.create_batch(50, city=user.profile.city)\r\n client = self.return_authorized_user_client(user)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n\r\n self.assertTrue(\"next\" in response_data)\r\n self.assertTrue(\"previous\" in response_data)\r\n self.assertTrue(\"results\" in response_data)",
"def parse_competition(self, response):\n # gather events from the competition page\n event_urls = response.css('div.navilevel1 p a::attr(href)').getall()\n event_titles = response.css('div.navilevel1 p a::text').getall()\n\n for event_url, event_title in zip(event_urls, event_titles):\n # assemble direct URL for this event\n full_event_url = response.urljoin(event_url)\n\n # pass along metadata for use in next steps\n event_details = parse_qs(urlsplit(full_event_url).query)\n event_title = regex_replace(event_title)\n\n if treatable_event(event_title):\n response.meta.update(dict(instance_of_event_in_competition=detect_event_multiple(event_title),\n event_title=clean_event_title(event_title),\n event_gender=event_details.get(\"gen\", [np.nan])[0]))\n\n # scrape the event page\n yield scrapy.Request(url=full_event_url,\n callback=self.parse_event,\n meta=response.meta)",
"def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()",
"def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print \"done.\"",
"def scrape_event(self, body):\n\n content = body.find('div', {'id': 'main-content'})\n\n title = self.scrape_title(body)\n description = self.scrape_description(content)\n location = self.scrape_location(content)\n location_details = self.scrape_location_details(content)\n admission = self.scrape_admission(content)\n admission_details = self.scrape_admission_details(content)\n # sponsor = self.scrape_sponsor(content)\n related_url = self.scrape_related_url(content)\n invited_audience = self.scrape_invited_audience(content)\n categories = self.scrape_categories(content)\n image = self.scrape_image(content)\n date_times = self.scrape_dates(content)\n\n cost = admission_details\n\n if admission_details == '\"\"':\n cost = admission\n\n event_list = []\n\n for date_time in date_times:\n date, start_time = self.date_time_to_tuple(date_time[0])\n end_time = ''\n\n # If the date_time tuple shows that it is an all day event\n if date_time[1]:\n start_time = '8:00'\n end_time = '20:00'\n event_dict = {\n 'Title': title,\n \"Description\": description,\n 'Date From': date,\n 'Start Time': start_time,\n 'End Time': end_time,\n 'Location': location,\n 'Cost': cost,\n 'Event Website': related_url,\n 'Photo URL': image,\n \"Invited Audience\": invited_audience,\n \"Event Types\": categories,\n \"Location Details\": location_details\n }\n event_list.append(event_dict)\n return event_list",
"def get(self):\r\n #\"SELECT * FROM DBEvent\"\r\n self.insertContent(\"<hr> Грядущие события:<br>\")\r\n event = self.event #db.GqlQuery(self.query) \r\n eventlist=''\r\n #self.checkSession(self.request.headers.get('Cookie'), False)\r\n found_events = False\r\n \r\n ec = DBEventCat()\r\n cats = ec.get_categories()\r\n \r\n for this_event in event:\r\n try:\r\n if not found_events: found_events = True\r\n if self.Session['access'] >= this_event.access or int(self.Session['userid']) == int(this_event.userid) or this_event.access <= 0:\r\n eventlist += '<a href=\"/event/'+str(this_event.key().id())+'\">'+this_event.name.encode(\"utf8\")+'</a>'\r\n users = db.GqlQuery(\"SELECT * FROM DBEventAnketa WHERE eventid = :eventid\",\r\n eventid = this_event.key().id())\r\n if self.Session['access'] >= 8 or int(self.Session['userid']) == int(this_event.userid): \r\n eventlist += ' [ <i><a href=\"/event/info/' + str(this_event.key().id()) + '\">Участников зарегистрировано: ' + str(users.count()) + '</i></a> ]<br>'\r\n elif self.Session['access'] >= this_event.access:\r\n eventlist += ' [ <i>Участников зарегистрировано: ' + str(users.count()) + '</i> ]<br>'\r\n except: continue\r\n if found_events:\r\n self.insertTemplate('tpl_event_add.html', { 'eventlist': eventlist, 'cats' : cats })\r\n else:\r\n self.insertContent(\" Пока мероприятий не запланировано!\")\r\n self.insertContent(\"<hr> Недавно прошедшие события:<br>\")\r\n \r\n eventlist = ''\r\n events = db.GqlQuery(\"SELECT * FROM DBEvent where date<:today order by date desc limit 10\", today = db.datetime.date.today())\r\n for this_event in events:\r\n if self.Session['access'] >= this_event.access or int(self.Session['userid']) == int(this_event.userid):\r\n eventlist += '<a href=\"/event/'+str(this_event.key().id())+'\">'+this_event.name.encode(\"utf8\")+'</a>'\r\n users = db.GqlQuery(\"SELECT * FROM DBEventAnketa WHERE eventid = :eventid\",\r\n eventid = this_event.key().id())\r\n if self.Session['access'] >= 8 or int(self.Session['userid']) == int(this_event.userid): \r\n eventlist += ' [ <i><a href=\"/event/info/' + str(this_event.key().id()) + '\">Участников зарегестрировано: ' + str(users.count()) + '</i></a> ]<br>'\r\n elif self.Session['access'] >= this_event.access:\r\n eventlist += ' [ <i>Участников зарегистрировано: ' + str(users.count()) + '</i> ]<br>'\r\n self.insertTemplate('tpl_event_add.html', { 'eventlist': eventlist })\r\n\r\n \r\n #self.drawPage()\r",
"def test_events(self):\n\n resp = self.client.get('/events?page=1&user_categories=113%2C105%2C104 ')\n self.assertTrue('next_events_url' in resp.context)\n self.assertTrue('previous_events_url' in resp.context)\n self.assertTrue('events_list' in resp.context)\n self.assertTrue('previous' in resp.context)\n self.assertTrue('next' in resp.context)\n self.assertEqual(resp.status_code, 200)",
"def _parse_events(self, html):\n print \"Parse events\"\n data = []\n soup = BeautifulSoup(html, \"html.parser\")\n events = soup.find_all(\"div\", {\"class\": \"program clearfix\"})\n \"\"\" Site's html is broken. We have to handle descriptions \n with a hack.\n \"\"\"\n descriptions = soup.find_all(\"div\", {\"class\": \"programpostingress\"})\n for index, event in enumerate(events):\n link_tag = event.find(\"a\")\n if link_tag:\n link = link_tag[\"href\"]\n else:\n link = None\n dates = self._parse_date(self._parse_text(event.find(\"span\", {\"class\": \"programpostdato\"})))\n row = {\n \"title\": self._parse_text(event.find(\"span\", {\"class\": \"programposttittel\"})),\n \"date_start\": dates[\"start\"],\n \"date_end\": dates[\"end\"],\n \"description\": self._parse_text(descriptions[index]),\n \"link\": link,\n \"country\": \"Norge\"\n }\n data.append(row)\n print \"Found %s events\" % len(data)\n return data",
"def events(bot, event, *args):\n yield from _printEventList(bot, event)",
"def events():\n # Compare cache against a new GET request\n temp_cache = EVENTS_CACHED\n # events_new = get_calendar_events_today(CALENDAR_URL)\n events_new = get_calendar_events_limit(CALENDAR_URL, sort=False)\n\n # If not change is detected, tell the browser to keep it's current content.\n if temp_cache is None or compare_events(temp_cache, events_new):\n return \"false\"\n\n # Else, render the partial events template to return to the client.\n return render_template('events_sorted.html', events=sort_events_days(events_new))",
"def iter_events(self, name):\n for event in self._get_events(name):\n yield event",
"def listings(request, category1, category2, category3, page = 1):\n \n # Creating URL for request\n base_url = \"https://www.eventbriteapi.com/v3/events/search/\"\n token_component = \"token=BKKRDKVUVRC5WG4HAVLT\" #I had this token in my mail link\n category_component = \"categories=\" + category1 + ',' + category2 + ',' + category3\n page_component = \"page=\" + str(page)\n url_without_page = base_url + \"?\" + token_component + \"&\" + category_component\n url_complete = url_without_page + \"&\" + page_component\n \n # GET events from Eventbrite\n f = urllib2.urlopen(url_complete) \n json_string = f.read() \n parsed_json = json.loads(json_string) \n\n # Parse through JSON\n events = parsed_json['events']\n eventsList = []\n \n for i in events:\n eventsList.append(event_container())\n \n # Parse further through JSON\n eventsList[-1].name = i['name']['text']\n eventsList[-1].id = i['id']\n eventsList[-1].url = i['url']\n try:\n eventsList[-1].description = i['description']['text']\n except:\n eventsList[-1].description = \"No description available\"\n eventsList[-1].resource_uri = i['resource_uri']\n \n \n listings_url_base = '/topthree/listings/'+ category1 + '/' + category2 + '/' + category3 + '/'\n \n # Pagination\n \n \"\"\"\n Performing manual pagination instead of Django pagination \n because GET request for events pulls in paginated data already\n \"\"\"\n \n next_page = int(page) + 1\n next_page_url = listings_url_base + str(next_page) \n \n if int(page)>1:\n prev_page = int(page) - 1\n prev_page_url = listings_url_base + str(prev_page) \n\n else:\n prev_page = 0\n prev_page_url = \"#\"\n \n \n # Sending values to template\n \n template = loader.get_template('listings.html')\n\n context = RequestContext(request, {\n 'eventsList': eventsList,\n 'prev_page_url':prev_page_url,\n 'next_page_url':next_page_url,\n 'prev_page':prev_page,\n 'page':page,\n 'category1':category1,\n 'category2':category2,\n 'category3':category3,\n })\n \n return HttpResponse(template.render(context))",
"def process_events(self, events):\n for game_event in events:\n if game_event:\n game_event = self._send_event(game_event)\n if game_event:\n yield game_event",
"def logevents(self, events, request = None):\n for event in events:\n self.logevent(event, request)",
"def parse(self, response):\n for nav_link in response.css(\".col-sm-7 a.btn\"):\n if \"?bd=\" in nav_link.attrib[\"href\"]:\n yield response.follow(\n nav_link.attrib[\"href\"], callback=self._parse_events_page\n )\n\n yield from self._parse_events_page(response)"
] | [
"0.7203284",
"0.6670896",
"0.66397905",
"0.65484786",
"0.65141577",
"0.64590824",
"0.64310527",
"0.6378027",
"0.6360895",
"0.63342434",
"0.6316285",
"0.6311675",
"0.6298735",
"0.6272081",
"0.6216796",
"0.61802506",
"0.61441034",
"0.6130424",
"0.6043177",
"0.60396385",
"0.6032347",
"0.6019611",
"0.6004777",
"0.5999012",
"0.599123",
"0.59719354",
"0.5967197",
"0.59616375",
"0.5939878",
"0.59361994"
] | 0.7202085 | 1 |
Setup the Binary Sensor platform for EnOcean. | def setup_platform(hass, config, add_devices, discovery_info=None):
dev_id = config.get(CONF_ID, None)
devname = config.get(CONF_NAME, "EnOcean binary sensor")
add_devices([EnOceanBinarySensor(dev_id, devname)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n binary_sensors = []\n for name in hass.data[DOMAIN]:\n if name in BINARY_SENSORS:\n binary_sensors.append(NextcloudBinarySensor(name))\n add_entities(binary_sensors, True)",
"def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access",
"def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n\n data = hass.data[LUPUSEC_DOMAIN]\n\n device_types = [CONST.TYPE_OPENING]\n\n devices = []\n for device in data.lupusec.get_devices(generic_type=device_types):\n devices.append(LupusecBinarySensor(data, device))\n\n add_entities(devices)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host = config[CONF_HOST]\n monitored_variables = config[CONF_MONITORED_VARIABLES]\n\n charger = openevsewifi.Charger(host)\n\n entities = [\n OpenEVSESensor(charger, description)\n for description in SENSOR_TYPES\n if description.key in monitored_variables\n ]\n\n add_entities(entities, True)",
"def setUp(self):\n self.sensor = Sensor('127.1.1.3', 9000)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n username = config[CONF_USERNAME]\n password = config[CONF_PASSWORD]\n\n customize = config[CONF_CUSTOMIZE]\n\n protocol = \"https\" if config[CONF_SSL] else \"http\"\n\n url = f\"{protocol}://{host}\"\n\n data = HikvisionData(hass, url, port, name, username, password)\n\n if data.sensors is None:\n _LOGGER.error(\"Hikvision event stream has no data, unable to set up\")\n return\n\n entities = []\n\n for sensor, channel_list in data.sensors.items():\n for channel in channel_list:\n # Build sensor name, then parse customize config.\n if data.type == \"NVR\":\n sensor_name = f\"{sensor.replace(' ', '_')}_{channel[1]}\"\n else:\n sensor_name = sensor.replace(\" \", \"_\")\n\n custom = customize.get(sensor_name.lower(), {})\n ignore = custom.get(CONF_IGNORED)\n delay = custom.get(CONF_DELAY)\n\n _LOGGER.debug(\n \"Entity: %s - %s, Options - Ignore: %s, Delay: %s\",\n data.name,\n sensor_name,\n ignore,\n delay,\n )\n if not ignore:\n entities.append(\n HikvisionBinarySensor(hass, sensor, channel[1], data, delay)\n )\n\n add_entities(entities)",
"def setup_sensors(self):\n super(EddRoach2ProductController, self).setup_sensors()\n self._firmware_server_sensor = Sensor.string(\n \"firmware-server\",\n description=\"The address of the firmware server started by this product\",\n default=\"\",\n initial_status=Sensor.UNKNOWN)\n self.add_sensor(self._firmware_server_sensor)\n self._parent.mass_inform(Message.inform('interface-changed'))",
"def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'rtutank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusRTU(devconfig['icsifaces'][0], points.values())\n self.server.start()",
"def configure(self):\n\n self.platform.configure()",
"def setup_platform(hass, config, add_entities, discovery_info=None):\n if discovery_info is None:\n return\n\n devices = []\n for vin, datastore in hass.data[DATA_LEAF].items():\n _LOGGER.debug(\"Adding binary_sensors for vin=%s\", vin)\n devices.append(LeafPluggedInSensor(datastore))\n devices.append(LeafChargingSensor(datastore))\n\n add_entities(devices, True)",
"def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_binary_sensor')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n hub.update()\n\n for vacbot in hub.vacbots:\n add_devices([DeebotMopAttachedBinarySensor(vacbot, \"mop_attached\")], True)",
"async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n binary_sensors = []\n invert_logic = config[CONF_INVERT_LOGIC]\n pin_mode = config[CONF_PIN_MODE]\n ports = config[CONF_PORTS]\n\n setup_mode(pin_mode)\n\n for port_num, port_name in ports.items():\n binary_sensors.append(\n OPiGPIOBinarySensor(hass, port_name, port_num, invert_logic)\n )\n async_add_entities(binary_sensors)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\r\n pull_mode = config[CONF_PULL_MODE]\r\n invert_logic = config[CONF_INVERT_LOGIC]\r\n\r\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\r\n\r\n binary_sensors = []\r\n pins = config[CONF_PINS]\r\n\r\n for pin_num, pin_name in pins.items():\r\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))\r\n add_devices(binary_sensors, True)",
"def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'tcptank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusTCP( devconfig['icsifaces'][0], points.values() )\n self.server.start()",
"def _init_hardware(self):\n return",
"def startup( self ):\n # ---- Setup UPNPC ----\n if self.config.neuron.use_upnpc:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<green>ON</green>')\n try:\n self.external_port = net.upnpc_create_port_map( port = self.axon.port )\n except net.UPNPCException as upnpc_exception:\n logger.critical('Failed to hole-punch with upnpc')\n raise RuntimeError('Failed to hole-punch with upnpc')\n else:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<red>OFF</red>')\n self.external_port = self.config.axon.port\n\n # ---- Get external ip ----\n try:\n self.external_ip = net.get_external_ip()\n bittensor.logging.success(prefix = 'External IP', sufix = '<blue>{}</blue>'.format(self.external_ip))\n except net.ExternalIPNotFound as external_port_exception:\n raise RuntimeError('Unable to attain your external ip. Check your internet connection. error:{}', external_port_exception)\n\n # ---- Setup tensorboard ----\n if self.config.neuron.use_tensorboard == True:\n self._tensorboard_program = program.TensorBoard()\n self._tensorboard_program.configure(argv=[None, '--logdir', self.config.neuron.full_path, '--load_fast=true'])\n self._tensorbaord_url = self._tensorboard_program.launch()\n bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<blue>http://localhost:6006/</blue>')\n else: bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<red>OFF</red>')\n\n # ---- Setup Wallet. ----\n if not self.wallet.has_coldkeypub:\n self.wallet.create_new_coldkey( n_words = 12, use_password = True )\n if not self.wallet.has_coldkeypub:\n raise RuntimeError('Miner must have access to a decrypted coldkeypub')\n if not self.wallet.has_hotkey:\n self.wallet.create_new_hotkey( n_words = 12, use_password = False )\n if not self.wallet.has_hotkey:\n raise RuntimeError('Miner must have access to a decrypted hotkey')\n\n # ---- Subscribe to chain ----\n subscribe_success = self.subtensor.subscribe(\n wallet = self.wallet,\n ip = self.external_ip,\n port = self.external_port,\n modality = bittensor.proto.Modality.TEXT,\n wait_for_finalization = True,\n timeout = 4 * bittensor.__blocktime__,\n )\n if not subscribe_success:\n raise RuntimeError('Failed to subscribe neuron.')\n\n # ---- Starting axon ----\n self.axon.start()",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host: str = config[CONF_HOST]\n port: int = config[CONF_PORT]\n name: str = config[CONF_NAME]\n url = f\"http://{host}:{port}/api/LiveData.xml\"\n\n gateway = Ted5000Gateway(url)\n\n # Get MUT information to create the sensors.\n gateway.update()\n\n entities = []\n for mtu in gateway.data:\n for description in SENSORS:\n entities.append(Ted5000Sensor(gateway, name, mtu, description))\n\n add_entities(entities)",
"def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)",
"def setUpEnv(self):\n \n robot = Robot('atrv')\n\n pose = Sensor('pose')\n robot.append(pose)\n pose.configure_mw('yarp')\n\n motion = Actuator('v_omega')\n robot.append(motion)\n motion.configure_mw('yarp')\n \n env = Environment('indoors-1/indoor-1')\n env.configure_service('socket')",
"def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)",
"def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()",
"def setup(hass, base_config):\n from pyhusmow import API as HUSMOW_API\n\n config = base_config.get(DOMAIN)\n\n if hass.data.get(DOMAIN) is None:\n hass.data[DOMAIN] = { 'devices': [] }\n\n api = HUSMOW_API()\n api.login(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))\n\n robots = api.list_robots()\n\n if not robots:\n return False\n\n for robot in robots:\n hass.data[DOMAIN]['devices'].append(AutomowerDevice(robot, api))\n\n for component in AUTOMOWER_COMPONENTS:\n discovery.load_platform(hass, component, DOMAIN, {}, base_config)\n\n return True",
"def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n entity = OppleLight(name, host)\n\n add_entities([entity])\n\n _LOGGER.debug(\"Init light %s %s\", host, entity.unique_id)",
"def initialize_electronics(self):\n\n self.electronics = ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()",
"def test_version_sensor(self):\n config = {\"sensor\": {\"platform\": \"version\"}}\n\n assert setup_component(self.opp, \"sensor\", config)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n # Assign configuration variables.\n # The configuration check takes care they are present.\n host = config[CONF_HOST]\n username = config[CONF_USERNAME]\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n hub = awesomelights.Hub(host, username, password)\n\n # Verify that passed in configuration works\n if not hub.is_valid_login():\n _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n return\n\n # Add devices\n add_entities(AwesomeLight(light) for light in hub.lights())"
] | [
"0.6923445",
"0.6406349",
"0.6367884",
"0.630438",
"0.6297725",
"0.62513864",
"0.61724937",
"0.61561346",
"0.61022043",
"0.60586834",
"0.60506696",
"0.60256183",
"0.59961325",
"0.59727365",
"0.59395474",
"0.5925092",
"0.5920433",
"0.5904643",
"0.58785045",
"0.5876473",
"0.58727616",
"0.58726966",
"0.58704424",
"0.58220845",
"0.5789167",
"0.5765442",
"0.5754448",
"0.5746082",
"0.57348514",
"0.571501"
] | 0.7185503 | 0 |
Load an ARFF file from a file. | def load(filename):
o = open(filename)
s = o.read()
a = ArffFile.parse(s)
o.close()
return a | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(self, filename=None):\n importer = aspecd.io.AdfImporter()\n importer.source = filename\n importer.import_into(self)",
"def loadFile(self, filename):\n #TODO: do a contents based detection\n if filename[-4:].lower() == '.txt':\n self.loadTIText(open(filename, \"r\"))\n elif filename[-4:].lower() in ('.a43', '.hex'):\n self.loadIHex(open(filename, \"r\"))\n else:\n self.loadELF(open(filename, \"rb\"))",
"def load(self, file):\n self._load(file.encode())",
"def loadFile(self, filename):\n #TODO: do a contents based detection\n if filename[-4:].lower() == '.txt':\n self.loadTIText(open(filename, \"rb\"))\n elif filename[-4:].lower() in ('.a43', '.hex'):\n self.loadIHex(open(filename, \"rb\"))\n else:\n self.loadELF(open(filename, \"rb\"))",
"def load(self, arffile=None):\n inputstream = _get_file_object(arffile)\n if inputstream is None:\n inputstream = self.inputstream\n if inputstream is None:\n return False\n\n arff_data = loadarff(inputstream)\n self.data = arff_data[0]\n self.attributes = arff_data[1]\n return True",
"def read_file(filepath: str) -> Adat:\n with open(filepath, 'r') as f:\n rfu_matrix, row_metadata, column_metadata, header_metadata = parse_file(f)\n\n return Adat.from_features(\n rfu_matrix=rfu_matrix,\n row_metadata=row_metadata,\n column_metadata=column_metadata,\n header_metadata=header_metadata\n )",
"def read_from_file(self, filename: str) -> None:",
"def loadDataFile(self, filename):\n \n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'emapa', 'ts', 'parent']\n )",
"def load_from_file(cls, filename):\n with open(filename, \"r\") as fd:\n return cls.load(fd)",
"def aer_load_from_file(filename, read_as_block=True):\n f, _ = read_aer_header(filename)\n \n if read_as_block:\n return read_block(f)\n else:\n return read_incrementally(f)",
"def load(self, filename):\n raise NotImplementedError",
"def load(self, filename):\n aead_f = open(filename, \"rb\")\n buf = aead_f.read(1024)\n if buf.startswith(YHSM_AEAD_CRLF_File_Marker):\n buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]\n if buf.startswith(YHSM_AEAD_File_Marker):\n if buf[len(YHSM_AEAD_File_Marker)] == chr(1):\n # version 1 format\n fmt = \"< I %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)\n self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)\n self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]\n else:\n raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')\n else:\n # version 0 format, just AEAD data\n self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]\n aead_f.close()",
"def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")",
"def loadDataFile(self, filename):\n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'start', 'end', 'parent' \n ])",
"def load(cls, from_file):\n raise NotImplementedError",
"def from_file(f, origin=None, rdclass=dns.rdataclass.IN,\n relativize=True, zone_factory=Zone, filename=None,\n allow_include=True, check_origin=True):\n\n with contextlib.ExitStack() as stack:\n if isinstance(f, str):\n if filename is None:\n filename = f\n f = stack.enter_context(open(f))\n return from_text(f, origin, rdclass, relativize, zone_factory,\n filename, allow_include, check_origin)",
"def readFastaFile(filename):",
"def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data",
"def loadFromFile(fileName):\n rel = Relation()\n\n with open(fileName, \"r\") as f:\n lines = f.readlines()\n\n try:\n relName = \"\"\n fieldNames = []\n fieldTypes = []\n dataPart = False\n datasets = []\n classColName = None\n skipCols = []\n skipCounter = 0\n for l in lines:\n l = l.strip()\n if \"\" == l or \"%\" == l[0]:\n continue\n\n if \"@\" == l[0]:\n if not dataPart:\n fields = re.split(\"\\s+\", l.strip())\n if \"@RELATION\" == fields[0].upper():\n relName = fields[1]\n elif \"@ATTRIBUTE\" == fields[0].upper():\n if \"NUMERIC\" == fields[2].upper() or \"REAL\" == fields[2].upper():\n fieldTypes.append(float)\n fieldNames.append(fields[1])\n else:\n classColName = fields[1]\n skipCols.append(skipCounter)\n skipCounter += 1\n elif \"@DATA\" == fields[0].upper():\n if len(fieldNames) != 0:\n if classColName is None:\n # class column is numeric, but we need a string\n classColName = fieldNames[-1]\n fieldTypes[-1] = str\n else:\n skipCols.pop() # last column is class column, don't skip it\n fieldNames.append(classColName)\n fieldTypes.append(str)\n dataPart = True\n rel.relName = relName\n rel.fieldNames = fieldNames\n elif dataPart:\n fieldsTmp = re.split(\",\", l.strip())\n fields = []\n for i, f_ in enumerate(fieldsTmp):\n if i not in skipCols:\n fields.append(f_)\n\n for i, t in enumerate(fieldTypes):\n fields[i] = t(fields[i])\n\n if len(fields) > 1:\n rel.allClasses.add(fields[-1])\n datasets.append(fields)\n rel.datasets = datasets\n rel.numDatasets = len(datasets)\n rel.activeClasses = set(rel.allClasses)\n except:\n raise Exception(\"ARFF parsing error!\")\n\n return rel",
"def _read_from_file(self, filename):\n ff = fits.open(filename)\n # Load the normalized intensity\n self.norm_int = ff[0].data\n # Load the other parameters\n self.lam = ff[1].data['lam']\n self.lam_unit = ff[1].columns['lam'].unit\n self.theta = ff[2].data['theta']\n self.taux = ff[3].data['taux']\n # Set halo type\n self.description = filename",
"def load(self):\r\n self.read(self.filename)",
"def from_file(cls, filename: str) -> \"OntoALAConfig\":\n with open(filename, \"r\") as config_file:\n config_dict = yaml.load(config_file, Loader=yaml.FullLoader)\n return OntoALAConfig(\n knowledge_file=config_dict[\"knowledge-file\"],\n )",
"def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()",
"def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()",
"def load(self, file_path):\n get_base().scene_parser.load(file_path)",
"def from_laspy_File(cls, f):\n return cls((f.x, f.y, f.z), header=f.header.copy())",
"def load(self, file_name):\n self.file_name = file_name\n self.frd = FRDFile(file_name)\n self._build_node_kon()\n self._build_step_idx()",
"def readArff(filename):\n \n data = []\n labels = []\n\n def parseLine(line): # csv.reader could not do this.\n isopen = False\n current = ''\n for c in line:\n if c == \"'\":\n if isopen:\n yield current\n current = ''\n isopen = not isopen\n elif isopen:\n current += c\n\n #with filename.open() as f:\n with bz2.open(str(filename)+'.bz2', 'r') as f:\n \n line = ''\n while line != '@data':\n line = f.readline().decode().strip()\n if line.startswith(\"@attribute 'classification'\"):\n line = line[line.find('{') + 1:line.find('}')]\n classes = {i:n for n,i in enumerate(parseLine(line))}\n\n for line in f.read().decode().splitlines():\n record = list(parseLine(line))\n labels.append(classes[record[-1]])\n data.append([int(x) for x in record[:-1]])\n return numpy.array(data, dtype=float), numpy.array(labels), classes",
"def from_file(self, path):\n data, sr = self.loader(path)\n return self.from_array(data, sr)",
"def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n return self.read_file_object(file_obj, file_format=file_format)"
] | [
"0.6952331",
"0.6750146",
"0.6709102",
"0.67043614",
"0.6499693",
"0.6472072",
"0.6260189",
"0.6143849",
"0.61354226",
"0.61135525",
"0.60728455",
"0.6060615",
"0.60431916",
"0.60410386",
"0.60397345",
"0.59697336",
"0.5964675",
"0.59411573",
"0.5907846",
"0.5897713",
"0.58816516",
"0.5856695",
"0.58550686",
"0.58550686",
"0.5852144",
"0.58499825",
"0.58466977",
"0.58404547",
"0.583764",
"0.5803708"
] | 0.8042971 | 0 |
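A minimal illustrative sketch of the load-then-parse pattern in the record above, rewritten with a context manager; it assumes an ArffFile class with a parse method is in scope, as in the original document field.

def load(filename):
    # 'with' closes the file handle even if ArffFile.parse raises
    with open(filename) as f:
        return ArffFile.parse(f.read())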
Define a new attribute. atype has to be one of 'numeric', 'string', and 'nominal'. For nominal attributes, pass the possible values as data. | def define_attribute(self, name, atype, data=None):
self.attributes.append(name)
self.attribute_types[name] = atype
self.attribute_data[name] = data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_attribute(self,\n identifier,\n idl_type,\n is_readonly=False,\n extended_attributes=None,\n node=None):\n if isinstance(idl_type, str):\n idl_type = self._create_type(idl_type)\n if isinstance(extended_attributes, dict):\n extended_attributes = self._create_extended_attributes(\n extended_attributes)\n debug_info = self._build_debug_info(node) if node else None\n\n return Attribute.IR(\n identifier,\n idl_type=idl_type,\n is_readonly=is_readonly,\n extended_attributes=extended_attributes,\n component=self._component,\n debug_info=debug_info)",
"def add_attribute(self, attr_type, name, components):\n self.attributes[attr_type] = {\"name\": name, \"components\": components}",
"def add_attribute(a_class, name, value):\n types = ['str', [], {}, (1, 1), 1.1, 1, None]\n for item in types:\n if type(a_class) == type(item):\n raise TypeError(\"can't add new attribute\")\n a_class.name = value",
"def addAttr(*args, attributeType: Union[AnyStr, bool]=\"\", binaryTag: Union[AnyStr, bool]=\"\",\n cachedInternally: bool=True, category: Union[AnyStr, List[AnyStr], bool]=\"\",\n dataType: Union[AnyStr, List[AnyStr], bool]=\"\", defaultValue: Union[float,\n bool]=0.0, disconnectBehaviour: Union[int, bool]=0, enumName: Union[AnyStr,\n bool]=\"\", exists: bool=True, fromPlugin: bool=True, hasMaxValue: bool=True,\n hasMinValue: bool=True, hasSoftMaxValue: bool=True, hasSoftMinValue: bool=True,\n hidden: bool=True, indexMatters: bool=True, internalSet: bool=True, keyable:\n bool=True, longName: Union[AnyStr, bool]=\"\", maxValue: Union[float, bool]=0.0,\n minValue: Union[float, bool]=0.0, multi: bool=True, niceName: Union[AnyStr,\n bool]=\"\", numberOfChildren: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n proxy: Union[AnyStr, bool]=\"\", readable: bool=True, shortName: Union[AnyStr,\n bool]=\"\", softMaxValue: Union[float, bool]=0.0, softMinValue: Union[float,\n bool]=0.0, storable: bool=True, usedAsColor: bool=True, usedAsFilename: bool=True,\n usedAsProxy: bool=True, writable: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")",
"def attr_type(self, attr_type):\n\n self._attr_type = attr_type",
"def createAttribute(nid, label, primary, list, x, y):\n attribute = Attribute(nid, label, primary, x, y)\n list.append(attribute)",
"def set_attr(self, aid, value, custom=False):\n if aid not in self.attributes and not custom:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, value)\n self.remember_custom_attribute(self.name, aid, value)\n self.attributes[aid] = {}\n else:\n # TODO: validate data_type\n pass\n self.attributes[aid]['nv'] = value\n # self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)",
"def set_attribute(self, node, attribute, value):\n name = '{}.{}'.format(node, attribute)\n try:\n attr_type = mc.getAttr(name, typ=True)\n if 'string' in attr_type:\n mc.setAttr(name, value, typ='string')\n elif 'float3' in attr_type:\n mc.setAttr(\n name, value[0][0], value[0][1], value[0][2], typ='float3'\n )\n else:\n mc.setAttr(name, value)\n except Exception:\n return False\n return True",
"def _handle_attr(self, attr, dev):\n attr_val = None\n list_flag = False\n\n if attr.name == \"os\":\n attr_val = self.OS_MAPPER[attr.val]\n elif attr.name == \"network\":\n attr_val = self._create_network(attr.val)\n list_flag = True\n elif attr.name == \"bluetooth\":\n attr_val = Bluetooth(version=attr.val.version)\n elif attr.name == \"cpu\":\n attr_val = CPU(cpu_family=attr.val.cpu_family,\n max_freq=float(attr.val.max_freq\n * self.FREQ_MULT[attr.val.unit]),\n fpu=attr.val.fpu)\n elif attr.name == \"memory\":\n attr_val = self._create_memory(attr.val)\n elif attr.name == \"type\":\n self._per_type = self.PER_MAPPER[attr.val]\n elif attr.name == \"pins\":\n list_flag = True\n attr_val = self._create_pins(attr.val)\n else:\n attr_val = attr.val\n\n # Set attribute\n if list_flag:\n getattr(dev, attr.name).extend(attr_val)\n elif attr_val:\n setattr(dev, attr.name, attr_val)",
"def create_attribute(owner_name, att_name, context=ast.Load(), line=0, column=0):\n attribute = ast.Attribute()\n attribute.attr = att_name\n attribute.ctx = context\n attribute.lineno = line\n attribute.col_offset = column\n\n if isinstance(owner_name, str):\n attribute_name = ast.Name()\n attribute_name.ctx = ast.Load()\n attribute_name.id = owner_name\n attribute_name.lineno = line\n attribute_name.col_offset = column\n\n attribute.value = attribute_name\n else:\n attribute.value = owner_name\n\n return attribute",
"def add_attribute(self, attr):\n self.attrs.add_attribute(attr)",
"def set_attribute(self, name, type_, value):\n if not self._linked:\n raise RuntimeError('Cannot set attribute when program has no code')\n # Get handle for the attribute, first try cache\n handle = self._handles.get(name, -1)\n if handle < 0:\n if name in self._known_invalid:\n return\n handle = gl.glGetAttribLocation(self._handle, name)\n self._unset_variables.discard(name) # Mark as set\n self._handles[name] = handle # Store in cache\n if handle < 0:\n self._known_invalid.add(name)\n if value[0] != 0 and value[2] > 0: # VBO with offset\n return # Probably an unused element in a structured VBO\n logger.info('Variable %s is not an active attribute' % name)\n return\n # Program needs to be active in order to set uniforms\n self.activate()\n # Triage depending on VBO or tuple data\n if value[0] == 0:\n # Look up function call\n funcname = self.ATYPEMAP[type_]\n func = getattr(gl, funcname)\n # Set data\n self._attributes[name] = 0, handle, func, value[1:]\n else:\n # Get meta data\n vbo_id, stride, offset = value\n size, gtype, dtype = self.ATYPEINFO[type_]\n # Get associated VBO\n vbo = self._parser.get_object(vbo_id)\n if vbo == JUST_DELETED:\n return\n if vbo is None:\n raise RuntimeError('Could not find VBO with id %i' % vbo_id)\n # Set data\n func = gl.glVertexAttribPointer\n args = size, gtype, gl.GL_FALSE, stride, offset\n self._attributes[name] = vbo.handle, handle, func, args",
"def add_attribute(self, attribute, key=None):\n if isinstance(attribute, TileType):\n key = attribute.name if key is None else key\n self.attributes[key] = attribute\n else:\n key = key if key is not None else str(id(attribute))\n self.attributes[key] = String(key, attribute)",
"def addattribute(self, uid, field, value):\n\n raise NotImplementedError",
"def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)",
"def set_value(node, attr, attr_data, verbose=False):\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)",
"def visit_AttributeDeclaration(self, node):\n attr_type = node.type or 'object'\n self.code_ops.extend([\n (SetLineno, node.lineno),\n (DUP_TOP, None), # cls._add_user_attribute(name, type, is_event)\n (LOAD_CONST, node.name),\n (LOAD_NAME, attr_type),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])",
"def set_attr(self, name: str, values: Union[list, tuple, object]):",
"def set_attribute(self, name, value):\n\n pass",
"def _Attribute(self,t):\n # Only a limited set of globals supported\n func_dict = None\n \n # pyflamegpu singleton\n if isinstance(t.value, ast.Name):\n if t.value.id == \"pyflamegpu\":\n if t.attr in self.fgpu_attrs:\n # proceed\n self.write(\"flamegpu::\")\n self.write(t.attr)\n else:\n self.RaiseError(t, f\"Attribute '{t.attr}' does not exist in pyflamegpu object\")\n # math functions (try them in raw function call format) or constants\n elif t.value.id == \"math\":\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n # numpy types\n elif t.value.id == \"numpy\" or t.value.id == \"np\":\n # not sure how a numpy attribute would be used without function call or type hint but translate anyway \n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else: \n self.RaiseError(t, f\"Unsupported numpy type {t.attr}\")\n else:\n self.RaiseError(t, f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, \"Unsupported attribute\")",
"def add(self, attr):\n self.validate_type(attr)\n self.values.add(attr.value)",
"def make_attribute( # pylint: disable=too-many-statements\n key: str,\n value: Any,\n doc_string: Optional[str] = None,\n attr_type: Optional[int] = None,\n) -> AttributeProto:\n attr = AttributeProto()\n attr.name = key\n if doc_string:\n attr.doc_string = doc_string\n\n # Singular cases\n if isinstance(value, numbers.Integral):\n attr.i = int(value)\n attr.type = AttributeProto.INT\n elif isinstance(value, numbers.Real):\n attr.f = float(value)\n attr.type = AttributeProto.FLOAT\n elif isinstance(value, (str, bytes)):\n # Encode strings into utf-8\n attr.s = _to_bytes(value)\n attr.type = AttributeProto.STRING\n elif isinstance(value, TensorProto):\n attr.t.CopyFrom(value)\n attr.type = AttributeProto.TENSOR\n elif isinstance(value, SparseTensorProto):\n attr.sparse_tensor.CopyFrom(value)\n attr.type = AttributeProto.SPARSE_TENSOR\n elif isinstance(value, GraphProto):\n attr.g.CopyFrom(value)\n attr.type = AttributeProto.GRAPH\n elif isinstance(value, TypeProto):\n attr.tp.CopyFrom(value)\n attr.type = AttributeProto.TYPE_PROTO\n # Iterable cases\n elif isinstance(value, collections.abc.Iterable):\n value = list(value)\n if len(value) == 0 and attr_type is None:\n raise ValueError(\n f\"Could not infer attribute `{key}` type from empty iterator\"\n )\n if attr_type is None:\n types = {type(v) for v in value}\n for exp_t, exp_enum in (\n (numbers.Integral, AttributeProto.INTS),\n (numbers.Real, AttributeProto.FLOATS),\n ((str, bytes), AttributeProto.STRINGS),\n (TensorProto, AttributeProto.TENSORS),\n (SparseTensorProto, AttributeProto.SPARSE_TENSORS),\n (GraphProto, AttributeProto.GRAPHS),\n (TypeProto, AttributeProto.TYPE_PROTOS),\n ):\n if all(issubclass(t, exp_t) for t in types): # type: ignore[arg-type]\n attr_type = exp_enum\n break\n if attr_type is None:\n raise ValueError(\n \"Could not infer the attribute type from the elements of the passed Iterable value.\"\n )\n\n if attr_type == AttributeProto.INTS:\n attr.ints.extend(value)\n attr.type = AttributeProto.INTS\n elif attr_type == AttributeProto.FLOATS:\n attr.floats.extend(value)\n attr.type = AttributeProto.FLOATS\n elif attr_type == AttributeProto.STRINGS:\n attr.strings.extend(_to_bytes(v) for v in value)\n attr.type = AttributeProto.STRINGS\n elif attr_type == AttributeProto.TENSORS:\n attr.tensors.extend(value)\n attr.type = AttributeProto.TENSORS\n elif attr_type == AttributeProto.SPARSE_TENSORS:\n attr.sparse_tensors.extend(value)\n attr.type = AttributeProto.SPARSE_TENSORS\n elif attr_type == AttributeProto.GRAPHS:\n attr.graphs.extend(value)\n attr.type = AttributeProto.GRAPHS\n elif attr_type == AttributeProto.TYPE_PROTOS:\n attr.type_protos.extend(value)\n attr.type = AttributeProto.TYPE_PROTOS\n else:\n raise AssertionError() # Should not reach since `ValueError` must be raised in attr_type checking\n else:\n raise TypeError(f\"'{value}' is not an accepted attribute value.\")\n\n if attr_type is not None and attr.type != attr_type:\n raise TypeError(\n f\"Inferred attribute type {attr.type} mismatched with specified type {attr_type}\"\n )\n return attr",
"def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)",
"def set(self, attribute, value):\n if not isinstance(attribute, str):\n raise TypeError(\"attributes must be designated by string label, recieved %s\" % attribute)\n self._defined[attribute] = value",
"def setAttributes(self, args):\n for atr in self.defaultAttributes:\n if args.has_key(atr):\n # convert atr to proper type\n objAttr = getattr(self, atr)\n myType = type(args[atr])\n if type(objAttr) == types.IntType and myType <> types.IntType:\n args[atr] = int(args[atr])\n elif type(objAttr) == types.StringType and myType <> types.StringType:\n args[atr] = str(args[atr])\n elif type(objAttr) == types.ListType and myType <> types.ListType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.DictType and myType <> types.DictType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.FloatType and myType <> types.FloatType:\n args[atr] = float(args[atr])\n setattr(self, atr, args[atr])",
"def add_user_attribute(self, attribute_name, attribute_type, nested_type):\n self.request_url = \"{0}/{1}/attributes\".format(self.API_URL, self.USER_ENDPOINT)\n payload = {\n 'name': 'traits.' + attribute_name,\n 'attributeType': attribute_type,\n 'nestedType': nested_type\n }\n return self.__create_request(payload, self.REQUEST_POST, version=\"v1\")",
"def add_attribute(self, attr):\n self.add(attr)",
"def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")",
"def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value"
] | [
"0.6832389",
"0.6797418",
"0.67368126",
"0.6671336",
"0.65930986",
"0.6554519",
"0.65519553",
"0.650852",
"0.6398499",
"0.6318576",
"0.6294345",
"0.6168906",
"0.6158302",
"0.6157011",
"0.61001563",
"0.605466",
"0.6050371",
"0.6018214",
"0.6016995",
"0.60158825",
"0.60112774",
"0.599875",
"0.59961176",
"0.59870607",
"0.5986194",
"0.59758335",
"0.5961091",
"0.59515965",
"0.5940589",
"0.59143305"
] | 0.8044042 | 0 |
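A minimal illustrative sketch of calling the define_attribute method from the record above; the instance name arff and the attribute names are assumptions for the example, not part of the dataset row.

arff.define_attribute("petal_length", "numeric")   # numeric attribute, no value list needed
arff.define_attribute("species", "nominal",
                      data=["setosa", "versicolor", "virginica"])  # nominal values passed via data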
Update the overall ignorance | def update_overall_ignorance(overall_ignorance, object_ignorance, rate=0.05):
return (1-rate)*overall_ignorance + rate*object_ignorance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n # default implementation is to do nothing.",
"def dummy_update( self ):\r\n pass",
"def _update(self):\n pass",
"def update(self):\n\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update( ):\r\n pass",
"def Update(self):\r\n\r\n # does nothing\r\n pass",
"def update():",
"def update():",
"def update(self):\r\n pass",
"def update(self) -> None:\n ...",
"def _update(self, count=True, forced=False):",
"def update(self):",
"def update(self):",
"def update(self):",
"def update(self) -> None:\n pass"
] | [
"0.60763353",
"0.60567635",
"0.60499495",
"0.59942096",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.59925723",
"0.5989389",
"0.5969066",
"0.59493464",
"0.59493464",
"0.5906367",
"0.58488685",
"0.58307916",
"0.5783855",
"0.5783855",
"0.5783855",
"0.5770172"
] | 0.62073374 | 0 |
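The update rule in the record above is an exponential moving average, so repeated updates decay the old value geometrically at speed rate. A small self-contained sketch of that behaviour:

def update_overall_ignorance(overall_ignorance, object_ignorance, rate=0.05):
    return (1 - rate) * overall_ignorance + rate * object_ignorance

ignorance = 1.0
for _ in range(10):
    # observing a fully known object (ignorance 0.0) ten times in a row
    ignorance = update_overall_ignorance(ignorance, 0.0)
print(round(ignorance, 3))  # 0.599, i.e. 0.95 ** 10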
Return focus image at target position | def check_target_position(environment, target_xy, fovea):
temp_fovea = Fovea(target_xy, fovea.size, [0, 0, 0], fovea.unit)
temp_image = temp_fovea.get_focus_image(environment)
return temp_image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_img(self, target_img):\n pos = imagesearch_loop(target_img, timesample=0.5)\n if pos[0] == -1:\n print(\"No image found\")\n else:\n self.click(pos)",
"def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels",
"def _identify_target(self):\n \n # change the cursor for the drawing area\n x_cursor = gtk.gdk.Cursor(gtk.gdk.X_CURSOR)\n self.drawing_area.window.set_cursor(x_cursor)\n \n # set the drawing area mode\n self.drawing_area_mode = \"IDENTIFY_TARGET\"\n \n #clear the screen\n if self.box_drawn == True:\n self.redraw_current_image()",
"def focus(self):\n self.image_window.focus_set()",
"def GetBitmapFocus(self):\n\n return self.bmpFocus",
"def focus(self):\n\n # Getting the microscope height\n current_z = self.microscope.position(2)\n\n # Tabs of maximum match value and their location during the process\n vals = []\n locs = []\n\n # Getting the maxvals and their locations\n for i in self.template:\n\n res, val, loc = templatematching(self.cam.frame, i)\n locs += [loc]\n\n if res:\n # Template has been detected\n vals += [val]\n else:\n # Template has not been detected, val set at 0\n vals += [0]\n\n # Search of the highest value, indicating which template image match the best the current image\n maxval = max(vals)\n\n if maxval != 0:\n # At least one template has been detected, setting the microscope at corresponding height\n index = vals.index(maxval)\n loc = locs[index]\n focus_height = current_z + len(self.template) // 2 - index\n self.microscope.absolute_move(focus_height, 2)\n self.microscope.wait_motor_stop(2)\n dep = len(self.template) // 2 - index\n else:\n # No template has been detected, focus can not be achieved\n raise ValueError('The template image has not been detected.')\n\n return maxval, dep, loc",
"def get_active_target(self, inp_hist):\n go = inp_hist[:, 0]\n curr_targ = inp_hist[:, 3:5]\n next_targ = inp_hist[:, 5:7]\n return curr_targ * (1 - go[:, None]) + next_targ * go[:, None]",
"def currently_focused(self) -> int:",
"def getCurrentTarget(self):\r\n\t\treturn self.currentTarget",
"def prep_robot_target(self):\n x = int(self.robot.target_x)\n y = int(self.robot.target_y)\n target_str = f\"Target (X,Y): {str(x)}, {str(y)}\"\n # Prepare the image and positions it on the screen\n self.target_image = self.font.render(target_str, True, self.text_color, self.bg_color)\n self.target_rect = self.target_image.get_rect()\n self.target_rect.left = self.location_rect.left\n self.target_rect.top = self.location_rect.bottom + self.line_gap",
"def click_b(self, event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.image_b_coordinates = (x, y)\n print(\"ImageB selected coordinates =\", self.image_b_coordinates)\n return x, y",
"def findTarget(self, initial_call):\n if self.vision.hasTarget():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.setOutput(self.SEARCH_SPEED, -self.SEARCH_SPEED)",
"def focus(self):\n raise NotImplementedError",
"def select_region_of_interest():\r\n image = np.array(ImageGrab.grab(bbox=None))\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n r = cv2.selectROI(windowName='grab roi', img=image, showCrosshair=True, fromCenter=False)\r\n cv2.destroyAllWindows()\r\n return r[0], r[1], r[0] + r[2], r[1] + r[3]",
"def fit_to_window(img, face, target_width=400, target_height=250):\n height, width, _ = img.shape # input image dimensions\n if width / height > target_width / target_height: # crop horizontally\n ratio = target_height / height # image sizes ratio\n height = int(ratio * height) # output image height\n width = int(ratio * width) # output image width\n face = (face * ratio).astype(int) # face coordinates in new image\n img = cv2.resize(img, (width, height)) # transformed image\n x_middle = int((face[0] + face[2]) / 2) # face center x coordinate\n if x_middle < target_width / 2: # returns the left part of the image\n return img[:, 0:target_width], face - np.array([0, 0, 0, 0])\n if x_middle > width - target_width / 2: # returns the right part of the image\n return img[:, width - target_width:width], face - np.array([width - target_width, 0, width - target_width, 0])\n # returns the part of the image around the face\n return img[:, x_middle - int(target_width / 2):x_middle + int(target_width / 2)], face - np.array([x_middle - int(target_width / 2), 0, x_middle - int(target_width / 2), 0])\n else: # crop vertically\n ratio = target_width / width # image sizes ratio\n height = int(height * ratio) # output image height\n width = int(width * ratio) # output image width\n face = (face * ratio).astype(int) # face coordinates in new image\n img = cv2.resize(img, (width, height)) # transformed image\n y_middle = int((face[1] + face[3]) / 2) # face center y coordinate\n if y_middle < target_height / 2: # returns the upper part of the image\n return img[0:target_height, :], face - np.array([0, 0, 0, 0])\n if y_middle > height - target_height / 2: # returns the lower part of the image\n return img[height - target_height:height, :], face - np.array([0, height - target_height, 0, height - target_height])\n # returns the part of the image around the face\n return img[y_middle - int(target_height / 2):y_middle + int(target_height / 2), :], face - np.array([0, y_middle - int(target_height / 2), 0, y_middle - int(target_height / 2)])",
"def get_focus(self):\n return self._get_at(self._current)",
"def getFocus(*args):",
"def getFocus(*args):",
"def getFocus(*args):",
"def getFocus(*args):",
"def anchor_and_clip(image):\n\n\t# Offsets for approximate in-game solitaire window size at 1600x900 game window size\n\tmax_x = 1074\n\tmax_y = 675\n\n\tcorner = cv2.imread(\"card_back/anchor/anchor.png\")\n\tresult = cv2.matchTemplate(image, corner, cv2.TM_SQDIFF)\n\tx, y = cv2.minMaxLoc(result)[2]\n\tx += 3\n\ty += 2\n\n\tcrop_image = image[y:y + max_y, x:x + max_x]\n\treturn x, y, crop_image",
"def targeted(self):\n\t\tpass",
"def get_element_coordinates(path_to_image):\n return pyautogui.center(pyautogui.locateOnScreen(path_to_image, confidence=0.9))",
"def get_target_info(target):\n corners = get_corner_points(target[0])\n return cv2.solvePnP(OBJECT_POINTS, np.array(corners, dtype=\"double\"),\n CAMERA_MATRIX, CAMERA_DIST_COEFFS, flags=cv2.SOLVEPNP_ITERATIVE)",
"def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)",
"def click_while_searching_for_img(self, num_of_clicks, target_img, region):\n img_found = self.click_randomly_in_area(num_of_clicks=num_of_clicks, region=region, img=target_img)\n return True if img_found else False",
"def detect(self, source, target):\n \n movementLocations = []\n # Generate work image by blurring.\n self.workImg = cv2.blur(source, self.kSize)\n # Generate moving average image if needed\n if self.movingAvgImg == None:\n self.movingAvgImg = numpy.float32(self.workImg)\n # Generate moving average image\n cv2.accumulateWeighted(self.workImg, self.movingAvgImg, self.alpha)\n self.diffImg = cv2.absdiff(self.workImg, cv2.convertScaleAbs(self.movingAvgImg))\n # Convert to grayscale\n self.grayImg = cv2.cvtColor(self.diffImg, cv2.COLOR_BGR2GRAY)\n # Convert to BW\n return_val, self.grayImg = cv2.threshold(self.grayImg, self.blackThreshold, 255, cv2.THRESH_BINARY)\n # Apply ignore mask\n if self.ignoreMask != None:\n self.grayImg = numpy.bitwise_and(self.grayImg, self.ignoreMask) \n # Total number of changed motion pixels\n self.motionPercent = 100.0 * cv2.countNonZero(self.grayImg) / self.totalPixels\n # Detect if camera is adjusting and reset reference if more than maxChange\n if self.motionPercent > self.maxChange:\n self.logger.debug(\"%3.1f%% motion detected, resetting reference image\" % self.motionPercent) \n self.movingAvgImg = numpy.float32(self.workImg)\n movementLocations = self.contours(self.grayImg)\n # Mark objects (make sure to copy target image if you want to keep original image intact)\n if self.markObjects == True:\n self.mark(source, target, movementLocations, self.widthMultiplier, self.heightMultiplier, self.boxColor)\n if self.ignoreAreas != None: \n self.mark(source, target, self.ignoreAreas, self.widthMultiplier, self.heightMultiplier, self.ignoreAreasBoxColor)\n # Return filtered results\n return movementLocations",
"def get_first_image_target(inputs, outputs, **ttarg_params):\n return {'first_image': inputs['images'][0]}",
"def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)",
"def goal(target, prediction):\n return closest_point_on_segment(prediction, target)"
] | [
"0.6471832",
"0.60620207",
"0.60257924",
"0.5962527",
"0.5960928",
"0.5843375",
"0.5577541",
"0.556946",
"0.5547605",
"0.55378973",
"0.55275726",
"0.54741293",
"0.5447037",
"0.543567",
"0.5431909",
"0.54052734",
"0.5387265",
"0.5387265",
"0.5387265",
"0.5387265",
"0.534272",
"0.5322626",
"0.53206104",
"0.53170913",
"0.5312307",
"0.5307752",
"0.5296244",
"0.5285922",
"0.52739275",
"0.5269642"
] | 0.64985406 | 0 |
Check if target area is free | def check_free_space(environment, target_xy, fovea):
temp_image = check_target_position(environment, target_xy, fovea)
if np.array_equal(temp_image, np.zeros(temp_image.shape)):
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_free(self) -> bool:\n return self.places < self.total",
"def is_free(self):\n return self._size > 0",
"def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True",
"def _space_has_degrees_of_freedom(self) -> bool:\n return True",
"def freePoint(self, X, Y):\n if X < 0 or Y < 0 or X > GSIZE or Y > GSIZE:\n return False\n if not self.allowSelfAvoidOnly:\n return True\n if self.segs == []:\n return True\n if self.segs[0].getStartPoint() == (X, Y):\n return False\n for seg in self.segs:\n if seg.getEndPoint() == (X, Y):\n return False\n return True",
"def is_free(self, pos: tuple):\n if self.within_map(pos):\n return self.map[round(pos[0]), round(pos[1])] == FREE\n else:\n return False",
"def isFree(point):\n global grid\n for i in point:\n if i < 0:\n return False\n try:\n value = grid[point[0]][point[1]][point[2]]\n # print value\n except:\n print \"point \", point, \"lies outside of grid\"\n value = False\n\n return value",
"def checkFree(self, x, y):\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n if not self.checkAvailable(new_x, new_y):\n return self.colors['busy']\n return self.colors['free']",
"def is_full(self):\n return self.remaining_space_in_hold() == 0",
"def is_full(self):\n return len(self.__occupied_slots__) >= self.__size__",
"def free_spot(self, start, distance, p1):\n free = False\n spot = 25 - start - distance\n #do we have a valid position to consider?\n if (spot > 0):\n #which player are we?\n if (p1):\n if (self.p2vec[spot] < 2):\n free = True\n else:\n if (self.p1vec[spot] < 2):\n free = True\n if (spot == 0):\n free = True\n return free",
"def checkAvailable(self, x, y):\n return 0 <= x < self.rows and 0 <= y < self.cols and not self.gridBusy[x][y]",
"def free(self,source):\n return self.near(source, self.free_radius)",
"def IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)",
"def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True",
"def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False",
"def is_all_free(self):\n return self.pool_size == self.pool.qsize()",
"def validarea(state, area):\n if area > len(state) - MEMORY:\n state[HEAD][STATUS] = OOB\n return False\n else:\n return True",
"def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk",
"def is_target_in(self, newtarget, buffer_safe_width=0.025):\n from ..utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n from ..utils.shape import Point\n\n centroid = self.get_centroid(system=\"xy\")\n radius_pixels = (0.6-buffer_safe_width)* self.units_to_pixels(\"deg\").value\n fov = Point(*centroid).buffer(radius_pixels)\n targetloc = Point(*self.coords_to_pixel(*newtarget.radec))\n return fov.contains(targetloc)",
"def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'",
"def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE",
"def goal_occupied(self, view):\n for line in view.obstacles:\n if linesegdist2(line.p1, line.p2, self.goal) < self.radius ** 2:\n return True\n\n for p in view.pedestrians:\n if p.velocity.length2() == 0.0:\n if p.position.distance_to2(self.goal) < p.radius:\n return True\n\n return False",
"def check_lanelet_free(self, req):\n lanelet_id = req.lanelet_id\n if lanelet_id != 0: \n lanelet = self.scenario.lanelet_network.find_lanelet_by_id(lanelet_id)\n if self.points is None: \n return False \n points = list(self.points)\n if len(points) == 0:\n return False \n transformed_lidar_poses = self.transform_lidar_into_map_coords(points) \n if lanelet is not None: \n filtered_poses = self.filter_lidar_poses(lanelet, transformed_lidar_poses) \n if len(filtered_poses) > 0: \n dist = self.calc_dist(filtered_poses) \n if dist > 0 and dist < self.max_dist_lidar:\n return False \n else:\n return True \n else:\n # if there are no points on lanelet, checks successor\n filtered_poses = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(lanelet.successor[0]), transformed_lidar_poses)\n if len(filtered_poses) > 0: \n dist = self.calc_dist(filtered_poses) \n if dist > 0 and dist < self.max_dist_lidar: \n return False \n else:\n return True\n else:\n # if there are no points on lanelet and lanelet.successor, checks predecessor \n filtered_poses = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(lanelet.predecessor[0]), transformed_lidar_poses)\n if len(filtered_poses) > 0: \n dist = self.calc_dist(filtered_poses) \n if dist > 0 and dist < self.max_dist_lidar: \n return False\n else:\n return True\n return True",
"def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True",
"def XCAFDoc_ShapeTool_IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)",
"def is_all_free(self):\n return self.pool_size == self.sem._value",
"def free(self):\n\n return not self.moving and not self.queue.get(0) and not self.anims.get(0)",
"def passable(self, point):\n return point not in self.obstacles",
"def is_free(self) -> tuple:\n if self.running_procs >= self.procs_no:\n return (False, None)\n if self.gpus:\n for gpu in self.gpus:\n if self.gpu_running_procs[gpu] < self.per_gpu[gpu]:\n return (True, gpu)\n return (False, None)\n return (True, None)"
] | [
"0.69473505",
"0.65693545",
"0.65516436",
"0.65503776",
"0.65440136",
"0.64887106",
"0.63822377",
"0.6373474",
"0.6314653",
"0.6287633",
"0.62136126",
"0.6197832",
"0.6167743",
"0.6130558",
"0.60996246",
"0.6068702",
"0.6056441",
"0.60131264",
"0.6010799",
"0.5976388",
"0.5974357",
"0.59689426",
"0.5948514",
"0.59328705",
"0.592306",
"0.5901548",
"0.58628696",
"0.58589864",
"0.5858412",
"0.58337796"
] | 0.7472877 | 0 |
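check_free_space in the record above treats a focus patch as free when every pixel is zero; np.array_equal against a zero array does that check, and not temp_image.any() would be an equivalent, cheaper test. A self-contained sketch with arrays standing in for the focus image (the patch shape and colour are assumptions for illustration):

import numpy as np

free_patch = np.zeros((3, 3, 3))            # empty region: all channels zero
occupied_patch = free_patch.copy()
occupied_patch[1, 1] = [1.0, 0.0, 0.0]      # one red object pixel inside the patch

print(np.array_equal(free_patch, np.zeros(free_patch.shape)))          # True  -> free
print(np.array_equal(occupied_patch, np.zeros(occupied_patch.shape)))  # False -> occupied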
Evaluate loss and gradient for the three-layer convolutional network. | def loss(self, X, y=None):
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3']
# pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}
# pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer convolutional net, #
# computing the class scores for X and storing them in the scores #
# variable. #
############################################################################
cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)
scores, scores_cache = affine_forward(hidden_out, W3, b3)
############################################################################
# END OF YOUR CODE #
############################################################################
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the three-layer convolutional net, #
# storing the loss and gradients in the loss and grads variables. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
############################################################################
# Compute loss and gradients
loss, dscores = softmax_loss(scores, y)
dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)
dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)
dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)
# Regularization
loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)
loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)
loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)
grads['W3'] = grads['W3'] + self.reg * self.params['W3']
grads['W2'] = grads['W2'] + self.reg * self.params['W2']
grads['W1'] = grads['W1'] + self.reg * self.params['W1']
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads",
"def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads",
"def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads",
"def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n N, C, H, W = X.shape;\n\n #print 'X shape = ' + str(X.shape);\n\n # Get conv layer output. Note that it is not 2-dimensional \n # conv - relu - 2x2 maxpool\n v1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param);\n\n #print 'v1 shape = ' + str(v1.shape);\n\n # Reshape to 2D\n v1shape = v1.shape; # Used to reshape back to original form in backward pass\n v1 = np.reshape(v1,(N,-1));\n #print 'v1 shape = ' + str(v1.shape);\n\n # Feed forward to hidden layer (affine-relu)\n v2, cache2 = affine_relu_forward(v1, W2, b2);\n #print 'v2 shape = ' + str(v2.shape);\n\n # Feed forward to final layer (affine only)\n v3, cache3 = affine_forward(v2, W3, b3)\n #print 'v3 shape = ' + str(v3.shape);\n\n # Compute scores\n scores = v3;\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n # Calculate softmax loss from layer 2 output\n # Loss gets regularized here\n # Each separate gradient must be regularized later when calculated\n loss, dv3 = softmax_loss(scores,y); # Softmax loss and gradient\n #print 'dv3 shape = ' + str(dv3.shape);\n reg = self.reg;\n loss += 0.5 * reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3)); # Regularize\n\n # Do backward pass through layer 2 affine\n dv2, dw3, db3 = affine_backward(dv3, cache3);\n dw3 += reg*W3; # Regularize\n #print 'dv2 shape = ' + str(dv2.shape);\n\n\n # Backward pass through hidden layer\n dv1, dw2, db2 = affine_relu_backward(dv2, cache2);\n dw2 += reg*W2; # Regularize\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Reshape dv1 to be compatible with convolutional layer\n dv1 = np.reshape(dv1,v1shape);\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Do backward pass through convolutional layer\n dx, dw1, db1 = conv_relu_pool_backward(dv1, cache1);\n dw1 += reg*W1; # Regularize\n\n # Store all weight and bias gradients in grads\n grads['W1'] = dw1; grads['b1'] = db1;\n grads['W2'] = dw2; grads['b2'] = db2;\n grads['W3'] = dw3; grads['b3'] = db3;\n\n\n\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads",
"def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. #\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads",
"def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)",
"def checkBatchGradient():\n\n from mynnet import InputLayer\n\n n,b,d,o = (1, 4, 3, 7) # sequence length, batch size, hidden size, output size\n input_size = 10\n \n lstm = create_cell(input_size, (n,b,d,o))\n\n X = np.random.randn(n,b,input_size)\n c0 = np.random.randn(b,d)\n \n print \"c0:\", c0\n\n # batch forward backward\n H, Ct = lstm.forward(X, c0)\n wrand = np.random.randn(*H.shape)\n loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think\n dH = wrand\n dX, dW, dV, dc0 = lstm.backward(dH)\n\n def fwd():\n h, _ = lstm.forward(X, c0)\n return np.sum(h * wrand)\n\n # now gradient check all\n delta = 1e-7\n rel_error_thr_warning = 1e-2\n rel_error_thr_error = 1\n tocheck = [X, lstm.W, lstm.V, c0]\n grads_analytic = [dX, dW, dV, dc0]\n names = ['X', 'W', 'V', 'c0']\n for j in xrange(len(tocheck)):\n mat = tocheck[j]\n dmat = grads_analytic[j]\n name = names[j]\n # gradcheck\n for i in xrange(mat.size):\n old_val = mat.flat[i]\n mat.flat[i] = old_val + delta\n loss0 = fwd()\n mat.flat[i] = old_val - delta\n loss1 = fwd()\n mat.flat[i] = old_val\n\n grad_analytic = dmat.flat[i]\n grad_numerical = (loss0 - loss1) / (2 * delta)\n\n if grad_numerical == 0 and grad_analytic == 0:\n rel_error = 0 # both are zero, OK.\n status = 'OK'\n elif abs(grad_numerical) < 1e-7 and abs(grad_analytic) < 1e-7:\n rel_error = 0 # not enough precision to check this\n status = 'VAL SMALL WARNING'\n else:\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n status = 'OK'\n if rel_error > rel_error_thr_warning: status = 'WARNING'\n if rel_error > rel_error_thr_error: status = '!!!!! NOTOK'\n\n # print stats\n print '%s checking param %s index %s (val = %+8f), analytic = %+8f, numerical = %+8f, relative error = %+8f' \\\n % (status, name, `np.unravel_index(i, mat.shape)`, old_val, grad_analytic, grad_numerical, rel_error)",
"def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta",
"def compute_grad(W, x, y, loss_c, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_grad\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_grad\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n dW, db = model_grad(loss_c, x, y)\n dW += config.reg_lambda * l2_grad(W)\n\n return dW, db",
"def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. #\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads",
"def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h",
"def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss",
"def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss",
"def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss",
"def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad",
"def compute_gradients_and_update(batch_y0, batch_yN):\n with tf.GradientTape() as g:\n pred_y = node_network(tb, batch_y0)\n loss = tf.reduce_mean(tf.abs(pred_y - batch_yN))\n grads = g.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n return loss",
"def cnn_pred(self):\n \n # Construct model\n pred = self.conv_net()\n \n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n return (pred, correct_pred, accuracy)",
"def _Conv3DGrad(op, grad):\n strides = op.get_attr('strides')\n padding = op.get_attr('padding')\n data_format = op.get_attr('data_format')\n shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n dx = nn_ops.conv3d_backprop_input_v2(\n shape_0,\n op.inputs[1],\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = nn_ops.conv3d_backprop_filter_v2(\n op.inputs[0],\n shape_1,\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = 0.5 * (dw + tf.transpose(dw, (0, 1, 2, 4, 3)))\n return dx, dw\n # # Pool grads across symmetric channels\n # dw_t = tf.transpose(\n # dw,\n # (3, 4, 0, 1, 2))\n # dw_symm_t = (0.5) * (dw_t + tf.transpose(\n # dw,\n # (4, 3, 0, 1, 2)))\n # dw_symm = tf.transpose(\n # dw_symm_t,\n # (2, 3, 4, 0, 1))\n # return dx, dw_symm",
"def compute_gradients(self):\n wlist = self._neural_net.weights()\n blist = self._neural_net.biases()\n\n nmatrices = len(wlist)\n weight_grad = []\n bias_grad = []\n\n cost_function = self._cost_function\n weight_der = WeightDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n biase_der = BiasDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n for layer in range(nmatrices):\n weight_grad.append(np.zeros(wlist[layer].shape))\n bias_grad.append(np.zeros(blist[layer].shape))\n\n rows, cols = wlist[layer].shape\n for i in range(rows):\n for j in range(cols):\n loc = ParameterLocation(layer=layer, row=i, column=j)\n weight_grad[layer][i][j] = weight_der.partial_derivative(loc)\n\n for row in range(rows):\n loc = ParameterLocation(layer=layer, row=row, column=0)\n bias_grad[layer][row] = biase_der.partial_derivative(loc)\n\n return weight_grad, bias_grad",
"def compute_loss(self):",
"def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)",
"def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n",
"def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad",
"def loss(self, X, y=None):\n W1 = self.params['W1']\n mode = 'test' if y is None else 'train'\n\n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n cache = {}\n\n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n input = X\n for l in xrange(1, self.conv_layers + 1):\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n input, cache['cache%d' % l] = conv_norm_relu_pool_forward(input, W, b, conv_param, pool_param, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n input, cache['cache%d' % l] = conv_relu_pool_forward(input, W, b, conv_param, pool_param)\n\n l = self.conv_layers + 1\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n h_out, h_cache = affine_norm_relu_forward(input, W, b, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n h_out, h_cache = affine_relu_forward(input, W, b)\n\n l = l + 1\n W, b = self.get_params_for_layer(l)\n scores, scores_cache = affine_forward(h_out, W, b)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n loss, loss_dx = softmax_loss(scores, y)\n\n for l in xrange(1, self.num_layers + 1):\n loss += 0.5 * self.reg * np.sum(self.params['W%d' % l] * self.params['W%d' % l])\n\n l = self.num_layers\n scores_dx, scores_dw, scores_db = affine_backward(loss_dx, scores_cache)\n self.set_grads(l, grads, scores_dw, scores_db)\n l = l - 1\n\n if self.use_batchnorm:\n a_dx, a_dw, a_db, a_dgamma, a_dbeta = affine_norm_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db, a_dgamma, a_dbeta)\n else:\n a_dx, a_dw, a_db = affine_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db)\n l = l - 1\n\n conv_layers = l\n next_input = a_dx\n for l in xrange(conv_layers, 0, -1):\n current_cache = cache['cache%d' % l]\n if self.use_batchnorm:\n c_dx, c_dw, c_db, c_dgamma, c_dbeta = conv_norm_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db, c_dgamma, c_dbeta)\n else:\n c_dx, c_dw, c_db = conv_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db)\n next_input = c_dx\n\n for l in xrange(1, self.conv_layers + 3):\n grads['W%d' % l] += self.reg * self.params['W%d' % l]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads",
"def evaluate(net, loader, criterion):\n total_loss = 0.0\n total_err = 0.0\n total_epoch = 0\n for i, data in enumerate(loader, 0):\n inputs, labels = data\n labels = normalize_label(labels) # Convert labels to 0/1\n outputs = net(inputs)\n loss = criterion(outputs, labels.float())\n corr = (outputs > 0.0).squeeze().long() != labels\n total_err += int(corr.sum())\n total_loss += loss.item()\n total_epoch += len(labels)\n err = float(total_err) / total_epoch\n loss = float(total_loss) / (i + 1)\n return err, loss",
"def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy",
"def test_network_fine_tuning_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n final_flow, previous_flows = self.pwc_net.get_forward(input_image_a, input_image_b)\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n dummy_flow = np.ones(shape=[batch_size, height, width, 2], dtype=np.float32)\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(tf.reduce_mean(final_flow), trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the losses.\n gt_placeholder = tf.placeholder(shape=[None, height, width, 2], dtype=tf.float32)\n training_loss = self.pwc_net.get_fine_tuning_loss(previous_flows, gt_placeholder)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n self.assertNotAlmostEqual(loss_value[0], 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + [input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))",
"def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads",
"def calc_gradients(\n test_file,\n model_name,\n output_file_dir,\n max_iter,\n learning_rate=0.001,\n targets=None,\n weight_loss2=1,\n data_spec=None,\n batch_size=1,\n seq_len=40,\n resolution_x=16,\n resolution_y=32,\n resolution_z=32,\n c_space=cv2.COLOR_BGR2LUV): \n spec = data_spec\n\n modifier = tf.Variable(0.01*np.ones((1, seq_len, spec.crop_size,spec.crop_size,spec.channels),dtype=np.float32))\n \n input_image = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n input_label = tf.placeholder(tf.int32, (batch_size))\n #input_image_cs = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n params_color = tf.Variable(np.empty_like(construct_identity_param(batch_size,resolution_x, resolution_y, resolution_z)).reshape(batch_size,-1,spec.channels))\n \n trans_color_img = function(input_image,params_color,batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels,resolution_x,resolution_y, resolution_z)\n #print(tf.shape(trans_color_img))\n #trans_input = np.array(trans_color_img,dtype=np.float32)\n #trans_color_img = cv2.cvtColor( trans_input, cv2.COLOR_LUV2RGB)\n # temporal mask, 1 indicates the selected frame\n indicator = [0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0]\n\n true_image = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+trans_color_img [0,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image = tf.expand_dims(true_image, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[0,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n else:\n mask_temp = input_image[0,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image = tf.concat([true_image, mask_temp],0)\n true_image = tf.expand_dims(true_image, 0)\n\n for kk in range(batch_size-1):\n true_image_temp = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+input_image[kk+1,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[kk+1,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n else:\n mask_temp = input_image[kk+1,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image_temp = tf.concat([true_image_temp, mask_temp],0)\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n\n true_image = tf.concat([true_image, true_image_temp],0)\n loss2 = tf.reduce_mean(1.0 - tf.image.ssim(true_image, input_image, max_val=255))\n \n #loss2 = tf.reduce_sum(tf.sqrt(tf.reduce_mean(tf.square(true_image-input_image), axis=[0, 2, 3, 4])))\n norm_frame = tf.reduce_mean(tf.abs(modifier), axis=[2,3,4])\n\n sess = tf.Session()\n probs, variable_set, pre_label,ince_output, pre_node = models.get_model(sess, true_image, model_name, False) \n true_label_prob = tf.reduce_sum(probs*tf.one_hot(input_label,101),[1])\n if targets is None:\n loss1 = -tf.log(1 - true_label_prob + 1e-6)\n else:\n loss1 = -tf.log(true_label_prob + 1e-6)\n loss1 = tf.reduce_mean(loss1)\n loss = loss1 + weight_loss2 * loss2\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n print('optimizer.minimize....')\n train = optimizer.minimize(loss, var_list=[modifier,params_color])\n # initiallize all uninitialized varibales\n init_varibale_list = 
set(tf.all_variables()) - variable_set\n sess.run(tf.initialize_variables(init_varibale_list))\n\n data = DataSet(test_list=test_file, seq_length=seq_len,image_shape=(spec.crop_size, spec.crop_size, spec.channels))\n all_names = []\n all_images = []\n all_labels = []\n \n def_len = 40\n for video in data.test_data:\n frames = data.get_frames_for_sample(video)\n if len(frames) < def_len:\n continue\n frames = data.rescale_list(frames, def_len)\n frames_data = data.build_image_sequence(frames)\n all_images.append(frames_data)\n label, hot_labels = data.get_class_one_hot(video[1])\n all_labels.append(label)\n all_names.append(frames)\n total = len(all_names)\n all_indices = range(total)\n num_batch = int(total/batch_size)\n print('process data length:', num_batch)\n\n correct_ori = 0\n correct_noi = 0\n tot_image = 0\n \n for ii in range(num_batch): \n images = all_images[ii*batch_size : (ii+1)*batch_size]\n names = all_names[ii*batch_size : (ii+1)*batch_size]\n labels = all_labels[ii*batch_size : (ii+1)*batch_size]\n indices = all_indices[ii*batch_size : (ii+1)*batch_size]\n print('------------------prediction for clean video-------------------')\n print('---video-level prediction---')\n for xx in range(len(indices)):\n print(names[xx][0],'label:', labels[xx], 'indice:',indices[xx], 'size:', len(images[xx]), len(images[xx][0]), len(images[xx][0][0]), len(images[xx][0][0][0]))\n sess.run(tf.initialize_variables(init_varibale_list))\n if targets is not None:\n labels = [targets[e] for e in names]\n \n feed_dict = {input_image: [images[0][0:seq_len]], input_label: labels}\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n \n correct_pre = correct_ori\n for xx in range(len(indices)):\n if labels[xx] == var_pre[xx]:\n correct_ori += 1\n\n tot_image += 1\n print('Start!')\n min_loss = var_loss\n last_min = -1\n print('---frame-wise prediction---')\n print('node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib', true_prob)\n # record numer of iteration\n tot_iter = 0\n\n if correct_pre == correct_ori:\n ii += 1\n continue\n \n print('------------------prediction for adversarial video-------------------')\n\n for cur_iter in range(max_iter):\n tot_iter += 1\n sess.run(train, feed_dict=feed_dict)\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n print('iter:', cur_iter, 'total loss:', var_loss, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n break_condition = False\n if var_loss < min_loss:\n if np.absolute(var_loss-min_loss) < 0.00001:\n break_condition = True\n print(last_min)\n min_loss = var_loss\n last_min = cur_iter\n\n if cur_iter + 1 == max_iter or break_condition:\n print('iter:', cur_iter, 'node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n var_diff, var_color,var_probs, noise_norm = sess.run((modifier, params_color,probs, norm_frame), feed_dict=feed_dict)\n for pp in range(seq_len):\n # print the map value for each frame\n print(noise_norm[0][pp])\n for i in range(len(indices)):\n top1 = var_probs[i].argmax()\n if labels[i] == top1:\n correct_noi += 1\n break\n print('saved modifier paramters.', ii)\n \n for ll in range(len(indices)):\n for kk in range(def_len):\n if kk < seq_len:\n attack_img = 
np.clip(images[ll][kk]*255.0+var_diff[0][kk]+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.clip(np.absolute(var_diff[0][kk])*255.0, data_spec.rescale[0],data_spec.rescale[1])\n else:\n attack_img = np.clip(images[ll][kk]*255.0+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.zeros((spec.crop_size,spec.crop_size,spec.channels))\n im_diff = scipy.misc.toimage(arr=diff, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n im = scipy.misc.toimage(arr=attack_img, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n new_name = names[ll][kk].split('/')\n \n adv_dir = output_file_dir+'/adversarial/'\n dif_dir = output_file_dir+'/noise/'\n if not os.path.exists(adv_dir):\n os.mkdir(adv_dir)\n os.mkdir(dif_dir)\n\n tmp_dir = adv_dir+new_name[-2]\n tmp1_dir = dif_dir+new_name[-2]\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n os.mkdir(tmp1_dir)\n \n new_name = new_name[-1] + '.png'\n im.save(tmp_dir + '/' +new_name)\n im_diff.save(tmp1_dir + '/' +new_name)\n print('saved adversarial frames.', ii)\n print('correct_ori:', correct_ori, 'correct_noi:', correct_noi)",
"def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):\n _, net_loss = net.compute_loss(\n inputdata=images,\n labels=labels,\n name='shadow_net',\n reuse=is_net_first_initialized\n )\n\n if optimizer is not None:\n grads = optimizer.compute_gradients(net_loss)\n else:\n grads = None\n\n return net_loss, grads"
] | [
"0.64657706",
"0.62264025",
"0.61885417",
"0.617772",
"0.61766106",
"0.6117092",
"0.609945",
"0.6043953",
"0.60374737",
"0.60298234",
"0.6029765",
"0.6028192",
"0.6012463",
"0.6012463",
"0.59997284",
"0.5982718",
"0.590393",
"0.58888876",
"0.5880226",
"0.5858676",
"0.5853266",
"0.5841568",
"0.5840397",
"0.58378834",
"0.5821999",
"0.5818778",
"0.58162767",
"0.58089375",
"0.5787712",
"0.57733965"
] | 0.6235286 | 1 |
Inplace applies a one mode gate G into the process matrix T in mode i | def _apply_one_mode_gate(G, T, i):
T[i] *= G
return T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _apply_two_mode_gate(G, T, i, j):\n (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])\n return T",
"def compile(self, seq, registers):\n\n # Check which modes are actually being used\n used_modes = []\n for operations in seq:\n modes = [modes_label.ind for modes_label in operations.reg]\n used_modes.append(modes)\n\n used_modes = list(set(item for sublist in used_modes for item in sublist))\n\n # dictionary mapping the used modes to consecutive non-negative integers\n dict_indices = {used_modes[i]: i for i in range(len(used_modes))}\n nmodes = len(used_modes)\n\n # We start with an identity then sequentially update with the gate transformations\n T = np.identity(nmodes, dtype=np.complex128)\n\n # Now we will go through each operation in the sequence `seq` and apply it to T\n for operations in seq:\n name = operations.op.__class__.__name__\n params = par_evaluate(operations.op.p)\n modes = [modes_label.ind for modes_label in operations.reg]\n if name == \"Rgate\":\n G = np.exp(1j * params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"LossChannel\":\n G = np.sqrt(params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"Interferometer\":\n U = params[0]\n if U.shape == (1, 1):\n T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]])\n elif U.shape == (2, 2):\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n U_expand = np.eye(nmodes, dtype=np.complex128)\n U_expand[np.ix_(modes, modes)] = U\n T = U_expand @ T\n elif name == \"PassiveChannel\":\n T0 = params[0]\n if T0.shape == (1, 1):\n T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]])\n elif T0.shape == (2, 2):\n T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n T0_expand = np.eye(nmodes, dtype=np.complex128)\n T0_expand[np.ix_(modes, modes)] = T0\n T = T0_expand @ T\n elif name == \"BSgate\":\n G = _beam_splitter_passive(params[0], params[1])\n T = _apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"MZgate\":\n v = np.exp(1j * params[0])\n u = np.exp(1j * params[1])\n U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], [1j * u * (1 + v), 1 - v]])\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"sMZgate\":\n exp_sigma = np.exp(1j * (params[0] + params[1]) / 2)\n delta = (params[0] - params[1]) / 2\n U = exp_sigma * np.array(\n [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]]\n )\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n\n ord_reg = [r for r in list(registers) if r.ind in used_modes]\n ord_reg = sorted(list(ord_reg), key=lambda x: x.ind)\n\n return [Command(ops.PassiveChannel(T), ord_reg)]",
"def T(self, *, inplace: bool = False) -> SelfAdjointUnitaryGate:\n if self.power == 1 and self.is_conjugated(\n ) and not self.is_transposed():\n return PowerMatrixGate.conj(self, inplace=inplace)\n else:\n return PowerMatrixGate.T(self, inplace=inplace)",
"def _assembler_baseV00(M2bass, Gi_, G_j, mode):\n Gi_ = Gi_.T\n G_j = G_j.T\n\n hmgeoiti_ = int(np.max(Gi_) + 1)\n hmgeoit_j = int(np.max(G_j) + 1)\n\n szGi_ = np.shape(Gi_)\n szG_j = np.shape(G_j)\n rowGi_ = szGi_[0]\n rowG_j = szG_j[0]\n num_elements = szG_j[1]\n\n # assembled = lil_matrix((hmgeoiti_, hmgeoit_j))\n assembled = np.zeros(shape=(hmgeoiti_, hmgeoit_j), order='F')\n\n if mode == 'add':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n elif mode == 'replace':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = E[a, b]\n\n elif mode == 'average':\n asstimes = np.zeros((hmgeoiti_, 1))\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n asstimes[i] = asstimes[i] + 1\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n for i in range(hmgeoiti_):\n if asstimes[i] > 1:\n assembled[i, :] = assembled[i, :] / asstimes[i]\n\n else:\n raise Exception('Mode wrong: add, replace or average......')\n\n return assembled",
"def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])",
"def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]",
"def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i",
"def change_basis(self, U_global):\n self.matrix = U_global @ self.matrix @ np.conj(U_global).T",
"def process(self, mat):",
"def gru_cell(self, Xt, h_t_minus_1):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,self.U_z) + self.b_z) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,self.U_r) + self.b_r) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h) +r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t",
"def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)",
"def inplace_elemwise_optimizer_op(OP):\r\n @gof.inplace_optimizer\r\n def inplace_elemwise_optimizer(fgraph):\r\n \"\"\"\r\n Usage: inplace_elemwise_optimizer.optimize(fgraph)\r\n\r\n Attempts to replace all Broadcast ops by versions of them\r\n that operate inplace. It operates greedily: for each Broadcast\r\n Op that is encountered, for each output, tries each input to\r\n see if it can operate inplace on that input. If so, makes the\r\n change and go to the next output or Broadcast Op.\r\n\r\n Examples:\r\n x + y + z -> x += y += z\r\n (x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)\r\n \"\"\"\r\n # We should not validate too often as this takes too much time to\r\n # execute!\r\n # It is the _dfs_toposort() fct in theano/gof/destroyhandler.py\r\n # that takes so much time.\r\n # Should we try to use another lib that does toposort?\r\n # igraph: http://igraph.sourceforge.net/\r\n # networkx: https://networkx.lanl.gov/\r\n # Should we try to use cython?\r\n # Compiling only that fct is not enough, should we try to add the\r\n # deque class too?\r\n # And init the deque and other list to an upper bound number of\r\n # elements?\r\n # Maybe Theano should do online toposort as in\r\n # http://code.google.com/p/acyclic\r\n #\r\n # The next longest optimizer is the canonizer phase.\r\n # Then I think it is the [io_?]toposort (need to validate) so check if\r\n # the solution is also applicable there.\r\n\r\n # We execute `validate` after this number of change.\r\n check_each_change = config.tensor.insert_inplace_optimizer_validate_nb\r\n if check_each_change == -1:\r\n if len(fgraph.apply_nodes) > 500:\r\n check_each_change = 10\r\n else:\r\n check_each_change = 1\r\n\r\n nb_change_no_validate = 0\r\n chk = fgraph.checkpoint()\r\n\r\n for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):\r\n op = node.op\r\n if not isinstance(op, OP):\r\n continue\r\n baseline = op.inplace_pattern\r\n protected_inputs = [\r\n f.protected for f in node.fgraph._features if\r\n isinstance(f, theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs, []) # flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n candidate_outputs = [i for i in xrange(len(node.outputs))\r\n if i not in baseline]\r\n # node inputs that are Constant, already destroyed,\r\n # fgraph protected inputs and fgraph outputs can't be used as inplace\r\n # target.\r\n # Remove here as faster.\r\n candidate_inputs = [i for i in xrange(len(node.inputs))\r\n if i not in baseline.values() \\\r\n and not isinstance(node.inputs[i],\r\n Constant)\\\r\n and not fgraph.destroyers(node.inputs[i])\\\r\n and node.inputs[i] not in protected_inputs]\r\n\r\n verbose = False\r\n\r\n raised_warning = not verbose\r\n\r\n for candidate_output in candidate_outputs:\r\n for candidate_input in candidate_inputs:\r\n #remove inputs that don't have the same dtype as the output\r\n if node.inputs[candidate_input].type != node.outputs[\r\n candidate_output].type:\r\n continue\r\n\r\n inplace_pattern = dict(baseline)\r\n inplace_pattern[candidate_output] = candidate_input\r\n try:\r\n if hasattr(op.scalar_op, \"make_new_inplace\"):\r\n new_scal = op.scalar_op.make_new_inplace(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n else:\r\n new_scal = op.scalar_op.__class__(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n new_outputs = OP(new_scal, inplace_pattern)(\r\n *node.inputs, 
**dict(return_list=True))\r\n new_node = new_outputs[0].owner\r\n\r\n for r, new_r in zip(node.outputs, new_outputs):\r\n fgraph.replace(r, new_r,\r\n reason=\"inplace_elemwise_optimizer\")\r\n nb_change_no_validate += 1\r\n if nb_change_no_validate >= check_each_change:\r\n fgraph.validate()\r\n chk = fgraph.checkpoint()\r\n nb_change_no_validate = 0\r\n except (ValueError, TypeError, InconsistencyError), e:\r\n if check_each_change != 1 and not raised_warning:\r\n print >> sys.stderr, (\r\n \"Some inplace optimization was not \"\r\n \"performed due to unexpected error:\")\r\n print >> sys.stderr, e\r\n raised_warning = True\r\n fgraph.revert(chk)\r\n continue\r\n candidate_inputs.remove(candidate_input)\r\n node = new_node\r\n baseline = inplace_pattern\r\n break\r\n\r\n if nb_change_no_validate > 0:\r\n try:\r\n fgraph.validate()\r\n except Exception:\r\n if not raised_warning:\r\n print >> sys.stderr, (\"Some inplace optimization was not \"\r\n \"performed due to unexpected error\")\r\n fgraph.revert(chk)\r\n return inplace_elemwise_optimizer",
"def problem_reduction_single(self, i, val):\n y_update = - val * self.A.getcol(i).toarray().flatten()\n self.y += y_update\n self.A = sparse.hstack([self.A[:, :i], self.A[:, i + 1:]], format='csr')\n z_index = self.mask.searchsorted(i)\n self.mask = np.insert(self.mask, z_index, i)\n self.z = np.insert(self.z, z_index, val)",
"def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise",
"def run(self, x):\n T = len(x)\n self.x = x\n self.i = np.zeros((T, self.hidden_size))\n self.f = np.zeros((T, self.hidden_size))\n self.o = np.zeros((T, self.hidden_size))\n self.g = np.zeros((T, self.hidden_size))\n self.h = np.zeros((T, self.hidden_size))\n self.c = np.zeros((T+1, self.hidden_size))\n self.s = np.zeros((T+1, self.hidden_size))\n for t in xrange(T):\n # input gate\n self.i[t] = self.gatefun.compute(np.dot(self.igate.u, x[t])\n + np.dot(self.igate.w, self.s[t-1])\n + np.dot(self.igate.v, self.c[t-1]) + self.igate.b)\n # forget gate\n self.f[t] = self.gatefun.compute(np.dot(self.fgate.u, x[t])\n + np.dot(self.fgate.w, self.s[t-1])\n + np.dot(self.fgate.v, self.c[t-1]) + self.fgate.b)\n # current hidden node state\n self.g[t] = self.acfun.compute(np.dot(self.nodes.u, x[t]) + \n np.dot(self.nodes.w, self.s[t-1]) + self.nodes.b)\n # internal memoery\n self.c[t] = self.f[t] * self.c[t-1] + self.i[t] * self.g[t]\n # output gate\n self.o[t] = self.gatefun.compute(np.dot(self.ogate.u, x[t])\n + np.dot(self.ogate.w, self.s[t-1])\n + np.dot(self.ogate.v, self.c[t]) + self.ogate.b)\n self.h[t] = self.acfun.compute(self.c[t])\n self.s[t] = np.clip(self.o[t] * self.h[t], -50, 50)\n return self.s[:-1]",
"def test_destroy_map4(self):\r\n Z = shared(self.rand(2, 2), name='Z')\r\n A = shared(self.rand(2, 2), name='A')\r\n one = T.constant(1.0).astype(Z.dtype)\r\n f = inplace_func([], gemm_inplace(Z, one, A, A, one))\r\n f()\r\n f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))\r\n f()",
"def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix",
"def calcT1(g2, g1):\n idop = FermiOp(g2.orbs, 3, 3)\n idop.data = np.eye(int(binom(g2.orbs, 3)))\n\n return p2N(g2, 3) - p2N(g1, 3) + idop",
"def forward(self,i,direction):\n \"\"\"the direction argument is used to dertermine the direcrtion of the forward function, designed for the equilibrium of the two classes of the datasets\"\"\"\n if(direction):\n self.mask_A = self.netG_Amask[self.orders[i]](self.real_A)\n self.A = self.netG_A[self.orders[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Bmask[self.orders[i]](self.fake_B)\n self.B = self.netG_B[self.orders[i]](self.fake_B)\n self.rec_A = self.B.mul(self.mask_B)+(1-self.mask_B).mul(self.fake_B) # G_B(G_A(A))\n else:\n self.mask_A = self.netG_Bmask[self.orders_rev[i]](self.real_A)\n self.A = self.netG_B[self.orders_rev[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Amask[self.orders_rev[i]](self.fake_B)\n self.B = self.netG_A[self.orders_rev[i]](self.fake_B)\n self.rec_A = self.B.mul(\n self.mask_B)+(self.mask_B).mul(1-self.fake_B) # G_B(G_A(A))",
"def calculate_transformation(self, p: np.ndarray, o: np.ndarray):\n self.set_inputs(p)\n self.set_outputs(o)\n self.reset_transformation_to_rest()\n self.reset_output_transformation_to_rest()\n # activation resets the hidden layer to rest (unless primed)\n self.activation(clamps = ['input', 'output'])\n return np.copy(self.t)[0]",
"def forward(self,state,action):\n action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE\n indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)\n indices = indices.tolist()\n action_[indices] = 1.\n x = torch.cat( (state,action_) ,dim=1)\n return self.forwardM(x)",
"def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)",
"def __opExpand1(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n axA = list(map(lambda x:A.v.index(x) if x in A.v else -1 ,vall))\n axB = list(map(lambda x:B.v.index(x) if x in B.v else -1 ,vall))\n if ( (not (out is None)) and (out.v == vall) ):\n f = out\n else:\n f = Factor(vall) # TODO: should also change \"out\" if specified!\n it = np.nditer([A.t, B.t, f.t], \n op_axes = [ axA, axB, None ], \n op_flags=[['readonly'], ['readonly'], ['writeonly']])\n for (i,j,k) in it:\n op(i,j,out=k)\n return f",
"def backward_G(self,i,direction):\n #lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n #lambda_B = self.opt.lambda_B\n lambda_reg = 0.01\n lambda_idt=1\n # Identity loss\n if(direction):\n #the idt loss \n self.loss_idt=0\n # if lambda_idt > 0:\n # # G_A should be identity if real_B is fed: ||G_A(B) - B|| 使用fakeB代替\n # self.idt_A = self.netG_A[self.orders[i]](self.fake_B)\n # self.loss_idt_A = self.criterionIdt(\n # self.idt_A, self.fake_B) * lambda_B * lambda_idt\n # # G_B should be identity if real_A is fed: ||G_B(A) - A||\n # self.idt_B = self.netG_B[self.orders[i]](self.real_A)\n # self.loss_idt_B = self.criterionIdt(\n # self.idt_B, self.real_A) * lambda_A * lambda_idt\n # else:\n # self.loss_idt_A = 0\n # self.loss_idt_B = 0\n\n self.loss_G_adv=self.criterionGAN_D(self.netDadv(self.fake_B),True)\n # GAN loss D_A(G_A(A))\n self.pred_fake = self.netD(self.fake_B)\n self.loss_G_A = self.criterionGAN_D(self.pred_fake,self.labels[i+1])\n # GAN loss D_B(G_B(B))\n \n self.loss_G_B = self.criterionGAN_D(self.netD(self.rec_A), self.labels[i])\n \n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg=torch.nn.MSELoss()\n #\n self.loss_reg = (self.criterionReg(self.mask_A, torch.ones_like(self.mask_A))+self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A + self.loss_G_B\n self.loss_G.backward()\n else:\n if lambda_idt > 0:\n self.idt_B = self.netG_A[self.orders_rev[i]](self.real_A)\n self.loss_idt = self.criterionIdt(\n self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt = 0\n\n self.loss_G_adv = self.criterionGAN_D(self.netDadv(self.fake_B), True)\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN_D(\n self.netD(self.fake_B), self.labels_rev[i])\n # GAN loss D_B(G_B(B))\n\n self.loss_G_B = self.criterionGAN_D(\n self.netD(self.rec_A), self.labels[0])\n\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(\n self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg = torch.nn.MSELoss()\n self.loss_reg = -(self.criterionReg(self.mask_A, torch.ones_like(self.mask_A)) +\n self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A +self.loss_G_B\n self.loss_G.backward()",
"def modelB(G,x=0,i0=0.1,alpha=-0.01,tf=5,Nt=1000):\r\n #set up graph atteributes\r\n N = G.number_of_nodes()\r\n degree_arr=np.asarray(G.degree(),dtype=int)[:,1]\r\n iarray = np.zeros((Nt+1,2*N))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #calucalte operaters and set intial conditions\r\n A=nx.adjacency_matrix(G)\r\n L=scipy.sparse.diags(degree_arr)-A\r\n L_alpha=L*alpha\r\n ones=np.ones(2*N)\r\n\r\n y0=np.zeros(2*N)\r\n y0[N+x]=i0\r\n #Add code here\r\n dy=np.zeros(N*2)\r\n def RHS2(y,t):\r\n \"\"\"Compute RHS of modelB at time t\r\n input: y should be a size N array\r\n output: dy, also a size N array corresponding to dy/dt\r\n\r\n Discussion: add discussion here\r\n \"\"\"\r\n dy[:N] =y[N:2*N]\r\n dy[N:2*N]=scipy.sparse.csr_matrix.__mul__(L_alpha,y[0:N])\r\n return dy\r\n\r\n iarray[:,:]=scipy.integrate.odeint(RHS2,y0,tarray)\r\n\r\n return iarray[:,N:],iarray[:,:N]",
"def test_1in_1out(self):\r\n gval = theano.tensor.matrix()\r\n\r\n class O(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.matrix()]\r\n outputs = [theano.tensor.matrix()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n return gval,\r\n a1 = O().make_node()\r\n g = grad_sources_inputs([(a1.outputs[0], one)], None)\r\n self.assertTrue(g[a1.inputs[0]] is gval)",
"def Controlled2(U):\n '''Generalized controlled unitary tensor construction\n Parameters:\n -----------\n U: input tensor which is assumed to be a square Matrix\n\n Returns:\n --------\n Controlled unitary\n\n '''\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1], 2, shp[2])",
"def forward_step(self, layer: int, hidden: AmbiguousHidden, input_: Tensor) -> AmbiguousHidden:\n hx, cx = hidden\n\n # Forget gate\n f_g = torch.sigmoid(self.gates[layer]['if'](input_) + self.gates[layer]['hf'](hx))\n\n # Input gate\n i_g = torch.sigmoid(self.gates[layer]['ii'](input_) + self.gates[layer]['hi'](hx))\n\n # Output gate\n o_g = torch.sigmoid(self.gates[layer]['io'](input_) + self.gates[layer]['ho'](hx))\n\n # Intermediate cell state\n c_tilde_g = torch.tanh(self.gates[layer]['ig'](input_) + self.gates[layer]['hg'](hx))\n\n # New cell state\n cx = f_g * cx + i_g * c_tilde_g\n\n # New hidden state\n hx = o_g * torch.tanh(cx)\n\n return hx, cx",
"def test_gemm_unrolled():\r\n batch_size = 100\r\n rep_size = 40\r\n rng = numpy.random.RandomState([1, 2, 3])\r\n\r\n for num_rounds in range(1, 10):\r\n W = sharedX(rng.randn(rep_size, rep_size), name='W')\r\n V = sharedX(numpy.zeros((batch_size, rep_size)), name='V')\r\n H = sharedX(numpy.zeros((batch_size, rep_size)), name='H')\r\n G = sharedX(numpy.zeros((batch_size, rep_size)), name='G')\r\n\r\n init_V = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_V')\r\n init_H = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_H')\r\n cur_V = V\r\n cur_H = H\r\n\r\n def update_V(cur_H):\r\n return T.nnet.sigmoid(T.dot(cur_H, W.T))\r\n\r\n def update_H(cur_V):\r\n return T.nnet.sigmoid(T.dot(cur_V, W) + T.dot(G, W.T))\r\n\r\n for i in xrange(num_rounds):\r\n cur_V = update_V(cur_H)\r\n cur_H = update_H(cur_V)\r\n\r\n unrolled_theano = theano.function([], updates=[(V, cur_V), (H, cur_H)],\r\n name='unrolled_theano')\r\n nb_dot = sum([1 for node in unrolled_theano.maker.fgraph.toposort()\r\n if isinstance(node.op, (theano.tensor.Dot,\r\n theano.tensor.blas.Dot22,\r\n theano.tensor.blas.Gemm))])\r\n # Each num_rounds add 3 dot, but one of them is always the same.\r\n # So the final graph should have 1 + 2* num_rounds dot varient op.\r\n assert nb_dot == num_rounds * 2 + 1, nb_dot\r\n\r\n unrolled_theano()",
"def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)"
] | [
"0.70139986",
"0.5631263",
"0.5538027",
"0.5311544",
"0.5232887",
"0.5197726",
"0.5164995",
"0.5135691",
"0.50920993",
"0.50480705",
"0.5039565",
"0.50272524",
"0.49950483",
"0.49901596",
"0.4963029",
"0.49488658",
"0.4937678",
"0.49336824",
"0.49291444",
"0.49188215",
"0.4876458",
"0.4861921",
"0.4845824",
"0.48403755",
"0.48325482",
"0.48276332",
"0.48009506",
"0.47994095",
"0.4798728",
"0.4796692"
] | 0.79795337 | 0 |
Inplace applies a two mode gate G into the process matrix T in modes i and j | def _apply_two_mode_gate(G, T, i, j):
(T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])
return T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _apply_one_mode_gate(G, T, i):\n\n T[i] *= G\n return T",
"def _assembler_baseV00(M2bass, Gi_, G_j, mode):\n Gi_ = Gi_.T\n G_j = G_j.T\n\n hmgeoiti_ = int(np.max(Gi_) + 1)\n hmgeoit_j = int(np.max(G_j) + 1)\n\n szGi_ = np.shape(Gi_)\n szG_j = np.shape(G_j)\n rowGi_ = szGi_[0]\n rowG_j = szG_j[0]\n num_elements = szG_j[1]\n\n # assembled = lil_matrix((hmgeoiti_, hmgeoit_j))\n assembled = np.zeros(shape=(hmgeoiti_, hmgeoit_j), order='F')\n\n if mode == 'add':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n elif mode == 'replace':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = E[a, b]\n\n elif mode == 'average':\n asstimes = np.zeros((hmgeoiti_, 1))\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n asstimes[i] = asstimes[i] + 1\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n for i in range(hmgeoiti_):\n if asstimes[i] > 1:\n assembled[i, :] = assembled[i, :] / asstimes[i]\n\n else:\n raise Exception('Mode wrong: add, replace or average......')\n\n return assembled",
"def compile(self, seq, registers):\n\n # Check which modes are actually being used\n used_modes = []\n for operations in seq:\n modes = [modes_label.ind for modes_label in operations.reg]\n used_modes.append(modes)\n\n used_modes = list(set(item for sublist in used_modes for item in sublist))\n\n # dictionary mapping the used modes to consecutive non-negative integers\n dict_indices = {used_modes[i]: i for i in range(len(used_modes))}\n nmodes = len(used_modes)\n\n # We start with an identity then sequentially update with the gate transformations\n T = np.identity(nmodes, dtype=np.complex128)\n\n # Now we will go through each operation in the sequence `seq` and apply it to T\n for operations in seq:\n name = operations.op.__class__.__name__\n params = par_evaluate(operations.op.p)\n modes = [modes_label.ind for modes_label in operations.reg]\n if name == \"Rgate\":\n G = np.exp(1j * params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"LossChannel\":\n G = np.sqrt(params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"Interferometer\":\n U = params[0]\n if U.shape == (1, 1):\n T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]])\n elif U.shape == (2, 2):\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n U_expand = np.eye(nmodes, dtype=np.complex128)\n U_expand[np.ix_(modes, modes)] = U\n T = U_expand @ T\n elif name == \"PassiveChannel\":\n T0 = params[0]\n if T0.shape == (1, 1):\n T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]])\n elif T0.shape == (2, 2):\n T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n T0_expand = np.eye(nmodes, dtype=np.complex128)\n T0_expand[np.ix_(modes, modes)] = T0\n T = T0_expand @ T\n elif name == \"BSgate\":\n G = _beam_splitter_passive(params[0], params[1])\n T = _apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"MZgate\":\n v = np.exp(1j * params[0])\n u = np.exp(1j * params[1])\n U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], [1j * u * (1 + v), 1 - v]])\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"sMZgate\":\n exp_sigma = np.exp(1j * (params[0] + params[1]) / 2)\n delta = (params[0] - params[1]) / 2\n U = exp_sigma * np.array(\n [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]]\n )\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n\n ord_reg = [r for r in list(registers) if r.ind in used_modes]\n ord_reg = sorted(list(ord_reg), key=lambda x: x.ind)\n\n return [Command(ops.PassiveChannel(T), ord_reg)]",
"def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])",
"def T(self, *, inplace: bool = False) -> SelfAdjointUnitaryGate:\n if self.power == 1 and self.is_conjugated(\n ) and not self.is_transposed():\n return PowerMatrixGate.conj(self, inplace=inplace)\n else:\n return PowerMatrixGate.T(self, inplace=inplace)",
"def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise",
"def inplace_elemwise_optimizer_op(OP):\r\n @gof.inplace_optimizer\r\n def inplace_elemwise_optimizer(fgraph):\r\n \"\"\"\r\n Usage: inplace_elemwise_optimizer.optimize(fgraph)\r\n\r\n Attempts to replace all Broadcast ops by versions of them\r\n that operate inplace. It operates greedily: for each Broadcast\r\n Op that is encountered, for each output, tries each input to\r\n see if it can operate inplace on that input. If so, makes the\r\n change and go to the next output or Broadcast Op.\r\n\r\n Examples:\r\n x + y + z -> x += y += z\r\n (x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)\r\n \"\"\"\r\n # We should not validate too often as this takes too much time to\r\n # execute!\r\n # It is the _dfs_toposort() fct in theano/gof/destroyhandler.py\r\n # that takes so much time.\r\n # Should we try to use another lib that does toposort?\r\n # igraph: http://igraph.sourceforge.net/\r\n # networkx: https://networkx.lanl.gov/\r\n # Should we try to use cython?\r\n # Compiling only that fct is not enough, should we try to add the\r\n # deque class too?\r\n # And init the deque and other list to an upper bound number of\r\n # elements?\r\n # Maybe Theano should do online toposort as in\r\n # http://code.google.com/p/acyclic\r\n #\r\n # The next longest optimizer is the canonizer phase.\r\n # Then I think it is the [io_?]toposort (need to validate) so check if\r\n # the solution is also applicable there.\r\n\r\n # We execute `validate` after this number of change.\r\n check_each_change = config.tensor.insert_inplace_optimizer_validate_nb\r\n if check_each_change == -1:\r\n if len(fgraph.apply_nodes) > 500:\r\n check_each_change = 10\r\n else:\r\n check_each_change = 1\r\n\r\n nb_change_no_validate = 0\r\n chk = fgraph.checkpoint()\r\n\r\n for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):\r\n op = node.op\r\n if not isinstance(op, OP):\r\n continue\r\n baseline = op.inplace_pattern\r\n protected_inputs = [\r\n f.protected for f in node.fgraph._features if\r\n isinstance(f, theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs, []) # flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n candidate_outputs = [i for i in xrange(len(node.outputs))\r\n if i not in baseline]\r\n # node inputs that are Constant, already destroyed,\r\n # fgraph protected inputs and fgraph outputs can't be used as inplace\r\n # target.\r\n # Remove here as faster.\r\n candidate_inputs = [i for i in xrange(len(node.inputs))\r\n if i not in baseline.values() \\\r\n and not isinstance(node.inputs[i],\r\n Constant)\\\r\n and not fgraph.destroyers(node.inputs[i])\\\r\n and node.inputs[i] not in protected_inputs]\r\n\r\n verbose = False\r\n\r\n raised_warning = not verbose\r\n\r\n for candidate_output in candidate_outputs:\r\n for candidate_input in candidate_inputs:\r\n #remove inputs that don't have the same dtype as the output\r\n if node.inputs[candidate_input].type != node.outputs[\r\n candidate_output].type:\r\n continue\r\n\r\n inplace_pattern = dict(baseline)\r\n inplace_pattern[candidate_output] = candidate_input\r\n try:\r\n if hasattr(op.scalar_op, \"make_new_inplace\"):\r\n new_scal = op.scalar_op.make_new_inplace(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n else:\r\n new_scal = op.scalar_op.__class__(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n new_outputs = OP(new_scal, inplace_pattern)(\r\n *node.inputs, 
**dict(return_list=True))\r\n new_node = new_outputs[0].owner\r\n\r\n for r, new_r in zip(node.outputs, new_outputs):\r\n fgraph.replace(r, new_r,\r\n reason=\"inplace_elemwise_optimizer\")\r\n nb_change_no_validate += 1\r\n if nb_change_no_validate >= check_each_change:\r\n fgraph.validate()\r\n chk = fgraph.checkpoint()\r\n nb_change_no_validate = 0\r\n except (ValueError, TypeError, InconsistencyError), e:\r\n if check_each_change != 1 and not raised_warning:\r\n print >> sys.stderr, (\r\n \"Some inplace optimization was not \"\r\n \"performed due to unexpected error:\")\r\n print >> sys.stderr, e\r\n raised_warning = True\r\n fgraph.revert(chk)\r\n continue\r\n candidate_inputs.remove(candidate_input)\r\n node = new_node\r\n baseline = inplace_pattern\r\n break\r\n\r\n if nb_change_no_validate > 0:\r\n try:\r\n fgraph.validate()\r\n except Exception:\r\n if not raised_warning:\r\n print >> sys.stderr, (\"Some inplace optimization was not \"\r\n \"performed due to unexpected error\")\r\n fgraph.revert(chk)\r\n return inplace_elemwise_optimizer",
"def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)",
"def move_multi_wire_gates(self, operator_grid):\n n = operator_grid.num_layers\n i = -1\n while i < n - 1:\n i += 1\n\n this_layer = operator_grid.layer(i)\n layer_ops = _remove_duplicates(this_layer)\n other_layer = [None] * operator_grid.num_wires\n\n for j in range(len(layer_ops)):\n op = layer_ops[j]\n\n if op is None:\n continue\n\n # translate wires to their indices on the device\n wire_indices = self.active_wires.indices(op.wires)\n\n if len(op.wires) > 1:\n\n sorted_wires = wire_indices.copy()\n sorted_wires.sort()\n\n blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))\n\n for k in range(j + 1, len(layer_ops)):\n other_op = layer_ops[k]\n\n if other_op is None:\n continue\n\n # translate wires to their indices on the device\n other_wire_indices = self.active_wires.indices(other_op.wires)\n other_sorted_wire_indices = other_wire_indices.copy()\n other_sorted_wire_indices.sort()\n other_blocked_wires = list(\n range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)\n )\n\n if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):\n op_indices = [\n idx for idx, layer_op in enumerate(this_layer) if layer_op == op\n ]\n\n for l in op_indices:\n other_layer[l] = op\n this_layer[l] = None\n\n break\n\n if not all([item is None for item in other_layer]):\n operator_grid.insert_layer(i + 1, other_layer)\n n += 1",
"def map(h_loc, a, state_shape, j, domain, g, ncap):\n params = _get_parameters(\n n=len(state_shape), j=j, domain=domain, g=g, ncap=ncap)\n dims_chain = [i[0] for i in state_shape]\n bs = [_get_annihilation_op(dim) for dim in dims_chain[1::]]\n b_daggers = [b.T for b in bs]\n return _get_singlesite_ops(h_loc, params, bs, b_daggers), \\\n _get_twosite_ops(a, params, bs, b_daggers)",
"def fn(i, j):\n grid2[i][j] = 0 # mark as visited \n ans = grid1[i][j]\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid2[ii][jj]: \n ans &= fn(ii, jj)\n return ans",
"def _schedule_winograd(cfg, s, op):\n # get ops and tensors\n output = op.output(0)\n\n Y = op.input_tensors[0]\n M, A = s[Y].op.input_tensors\n U, V = s[M].op.input_tensors\n d, B = s[V].op.input_tensors\n data_pad = s[d].op.input_tensors[0]\n\n # padding\n s[data_pad].compute_inline()\n\n # transform kernel\n if isinstance(U.op, tvm.te.ComputeOp):\n kernel, G = s[U].op.input_tensors\n s[G].compute_inline()\n (eps, nu, co, ci, vco) = s[U].op.axis\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n r_kh, r_kw = s[U].op.reduce_axis\n s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)\n _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]\n s[U].vectorize(vco)\n tile_and_bind(s, U, co, ci, 1, 256)\n\n # dilation\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # transform image\n s[B].compute_inline()\n VL = s.cache_write(V, \"local\")\n\n eps, nu, p, ci, vp = s[V].op.axis\n s[V].reorder(p, ci, eps, nu, vp)\n for axis in [eps, nu]:\n s[V].unroll(axis)\n s[V].vectorize(vp)\n fused = s[V].fuse(p, ci)\n\n bb, tt = cfg[\"tile_t1\"].apply(s, V, fused)\n s[V].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[V].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n eps, nu, p, ci, vp = s[VL].op.axis\n r_a, r_b = s[VL].op.reduce_axis\n for axis in [eps, nu, r_a, r_b]:\n s[VL].unroll(axis)\n s[VL].vectorize(vp)\n s[d].compute_at(s[V], tt)\n s[VL].compute_at(s[V], tt)\n\n # batch gemm\n bna = cfg[\"tile_bna\"].val\n bnb = cfg[\"tile_bnb\"].val\n\n eps, nu, k, b = s[M].op.axis\n alpha = eps.dom.extent\n c = s[M].op.reduce_axis[0]\n yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)\n c, c_unroll = cfg[\"c_unroll\"].apply(s, M, c)\n s[M].reorder(yo, xo, c, c_unroll, yi, xi)\n s[M].unroll(c_unroll)\n s[M].unroll(yi)\n s[M].vectorize(xi)\n z = s[M].fuse(eps, nu)\n tile_and_bind3d(s, M, z, yo, xo, 1, cfg[\"yt\"].val, 1)\n\n # inverse transform\n s[A].compute_inline()\n k, b, vh, vw = s[Y].op.axis\n r_a, r_b = s[Y].op.reduce_axis\n for axis in [vh, vw, r_a, r_b]:\n s[Y].unroll(axis)\n\n # schedule output and fusion\n if output.op not in s.outputs:\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n m = alpha - 3 + 1\n h, w, hi, wi = s[output].tile(h, w, m, m)\n s[output].unroll(hi)\n s[output].unroll(wi)\n fused = s[output].fuse(n, co, h, w)\n bb, tt = cfg[\"tile_t2\"].apply(s, output, fused)\n s[output].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n s[Y].compute_at(s[output], tt)",
"def p2(self, i):\n j = 0 if i == 1 else 1\n self.edges[i].m_v = exp(dot(self.edges[j].m_f, self.tp.F))",
"def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)",
"def Matrix_G(r,xi,E,ops,j):\r\n #Array of multipliers for operators in V14\r\n #----------------------------------------\r\n raw_pot = av14.V14(r)\r\n #----------------------------------------\r\n \r\n #Operator Values \r\n #---------------------------------------- \r\n op00,op01,op10,op11 = ops\r\n \r\n #Matrix Values\r\n #----------------------------------------\r\n G00 = (j-1)*j/r**2 + xi*(np.sum(op00*raw_pot) - E)\r\n G01 = xi*(np.sum(op01*raw_pot))\r\n G10 = G01#xi*(np.sum(operators10*raw_pot))\r\n G11 = (j+1)*(j+2)/r**2 + xi*(np.sum(op11*raw_pot) - E)\r\n #Generate and return (2x2)\r\n #----------------------------------------\r\n return np.array([[G00,G01],[G10,G11]])",
"def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out",
"def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]",
"def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i",
"def TimeEvolution(w: np.ndarray, t: float):\n # pylint: disable=expression-not-assigned\n n_modes = len(w)\n\n @operation(n_modes)\n def op(q):\n\n theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi)\n\n for i in range(n_modes):\n sf.ops.Rgate(theta[i]) | q[i]\n\n return op()",
"def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here",
"def gru_cell_decoder(self, Xt, h_t_minus_1,context_vector):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z_decoder) + tf.matmul(h_t_minus_1,self.U_z_decoder) +tf.matmul(context_vector,self.C_z_decoder)+self.b_z_decoder) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r_decoder) + tf.matmul(h_t_minus_1,self.U_r_decoder) +tf.matmul(context_vector,self.C_r_decoder)+self.b_r_decoder) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h_decoder) +r_t * (tf.matmul(h_t_minus_1, self.U_h_decoder)) +tf.matmul(context_vector, self.C_h_decoder)+ self.b_h_decoder) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t,h_t",
"def gru_cell(self, Xt, h_t_minus_1):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,self.U_z) + self.b_z) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,self.U_r) + self.b_r) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h) +r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t",
"def perform_gauss_jordan_elimination(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n\n r, c = 0, 0\n rows = len(m)\n cols = len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n _swap = False\n\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n if m[r][c] == 0:\n ## Swap\n for i in range(rows):\n if r != i and i > r: ## Avoid comparing the same row and do not swap to upper rows\n if m[i][c] == 1 and not _swap: ## Check if a swap is not performed before in the same column\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n #m = swap(m,r,i)\n temp = m[r]\n m[r] = m[i]\n m[i] = temp\n _swap = True\n if show:\n print_matrix(m)\n if not _swap: ## If not swap, means there is no 1 to swap, so go to the next column\n c+=1\n\n if m[r][c] == 1:\n ## XOR\n for i in range(rows):\n if r != i: ## Avoid comparing the same row\n if m[i][c] == 1:\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n for e in range(len(m[0])):\n m[i][e] ^= m[r][e]\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column are treated)\n if r == rows or c >= cols-1:\n break\n \n return m",
"def switch(self, i, j0, j1):\n if not self._mutable:\n raise ValueError(\"this constellation is immutable.\"\n \" Take a mutable copy first.\")\n S = SymmetricGroup(list(range(self.degree())))\n tr = S((j0, j1))\n i = int(i)\n if i < 0 or i >= len(self._g):\n raise ValueError(\"index out of range\")\n\n ii = i + 1\n if ii == len(self._g):\n ii = 0\n self._g[i] = self._g[i] * tr\n self._g[ii] = tr * self._g[ii]",
"def _poputil_block_recompute_backward(op, grads):\n return grads",
"def MathonPseudocyclicMergingGraph(M, t):\n from sage.graphs.graph import Graph\n from sage.matrix.constructor import identity_matrix\n assert len(M) == 4\n assert M[0] == identity_matrix(M[0].nrows())\n A = sum(x.tensor_product(x) for x in M[1:])\n if t > 0:\n A += sum(x.tensor_product(M[0]) for x in M[1:])\n if t > 1:\n A += sum(M[0].tensor_product(x) for x in M[1:])\n return Graph(A)",
"def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()",
"def ancmig_adj_2(params, ns):\n #7 parameters \n nu1, nuA, nu2, nu3, m1_1, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n return fs",
"def test_destroy_map4(self):\r\n Z = shared(self.rand(2, 2), name='Z')\r\n A = shared(self.rand(2, 2), name='A')\r\n one = T.constant(1.0).astype(Z.dtype)\r\n f = inplace_func([], gemm_inplace(Z, one, A, A, one))\r\n f()\r\n f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))\r\n f()",
"def forward(self,i,direction):\n \"\"\"the direction argument is used to dertermine the direcrtion of the forward function, designed for the equilibrium of the two classes of the datasets\"\"\"\n if(direction):\n self.mask_A = self.netG_Amask[self.orders[i]](self.real_A)\n self.A = self.netG_A[self.orders[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Bmask[self.orders[i]](self.fake_B)\n self.B = self.netG_B[self.orders[i]](self.fake_B)\n self.rec_A = self.B.mul(self.mask_B)+(1-self.mask_B).mul(self.fake_B) # G_B(G_A(A))\n else:\n self.mask_A = self.netG_Bmask[self.orders_rev[i]](self.real_A)\n self.A = self.netG_B[self.orders_rev[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Amask[self.orders_rev[i]](self.fake_B)\n self.B = self.netG_A[self.orders_rev[i]](self.fake_B)\n self.rec_A = self.B.mul(\n self.mask_B)+(self.mask_B).mul(1-self.fake_B) # G_B(G_A(A))"
] | [
"0.719273",
"0.57634723",
"0.5522466",
"0.509846",
"0.50796694",
"0.49862283",
"0.49686834",
"0.49493116",
"0.4947738",
"0.493212",
"0.48544836",
"0.48431808",
"0.48389664",
"0.48374176",
"0.4813507",
"0.4800392",
"0.47900787",
"0.47492748",
"0.47435454",
"0.47330758",
"0.47255751",
"0.47066548",
"0.47060645",
"0.47047192",
"0.46992213",
"0.46663573",
"0.4661605",
"0.46615353",
"0.4654679",
"0.46437886"
] | 0.80511445 | 0 |
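A minimal, self-contained usage sketch for the `_apply_two_mode_gate` row above (not part of the dataset): it assumes numpy, an illustrative 2x2 rotation as the gate G, and a 3-mode identity as the process matrix T.

import numpy as np

def _apply_two_mode_gate(G, T, i, j):
    # Mix rows i and j of T by the 2x2 gate G, in place (the right side is built before assignment).
    (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])
    return T

theta = np.pi / 4
G = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])   # illustrative 2x2 gate
T = np.eye(3, dtype=np.complex128)                # 3-mode process matrix
T = _apply_two_mode_gate(G, T, 0, 2)
assert np.allclose(T[0], [np.cos(theta), 0, -np.sin(theta)])
assert np.allclose(T[2], [np.sin(theta), 0, np.cos(theta)])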
Returns a list of the columns that are in our features dataframe that should not be used in prediction. These are essentially either metadata columns (team name, for example), or potential target variables that include the outcome. We want to make sure not to use the latter, since we don't want to use information about the current game to predict that same game. | def get_non_feature_columns():
return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',
'goals', 'op_goals', 'points', 'timestamp', 'team_name',
'op_team_name'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return col",
"def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)",
"def get_feature_columns(all_cols):\n return [col for col in all_cols if col not in get_non_feature_columns()]",
"def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]",
"def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df",
"def others(self) -> List[str]:\n exclude = self._obj._names[\"covariates\"] + DATA_COLS\n return [col for col in self._obj.columns if col not in exclude]",
"def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop",
"def ignored_columns(self):\n return self._parms.get(\"ignored_columns\")",
"def features(self):\n other_features = ['listen_type', 'is_context', 'is_context_flow', \n 'is_listened_context', 'is_listened_flow', \n 'is_listened_context_flow']\n \n drop_features = self.categorize_features + self.drop_features + other_features + self.features_bis\n features = np.setdiff1d(self.train.columns.tolist(), drop_features + ['is_listened'], assume_unique=True)\n \n return features",
"def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)",
"def get_column_excluded(self):\n return self.column_excluded or []",
"def get_all_contests(data_frame) -> list:\n return [contest for contest in data_frame.columns if contest != 'Ballot Style']",
"def old_non_pk_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self._pk_for_filter\n and col.name not in self.dropped_column_name_list\n ]",
"def get_cols_drop():",
"def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n #-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return",
"def get_all_hidden_columns(self):\n visible_columns_list = []\n column_headers = self.driver.find_elements_by_xpath('//thead/tr/th')\n for i in range(len(column_headers)):\n if column_headers[i].get_attribute('class') == 'ng-scope ng-hide':\n visible_columns_list.append(i + 1)\n return visible_columns_list",
"def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)",
"def _get_relevant_features(X):\n # FIXME utilize sklearn.utils.multiclass.type_of_target()\n continuous_cols = X.columns[~which_columns_are_binary(X)]\n return continuous_cols",
"def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns",
"def old_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self.dropped_column_name_list\n ]",
"def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns",
"def chose_only_hypothesis_colums(df):\n lst = ['abv', 'ibu', 'gravity', 'abv_min', 'abv_max', 'ibu_min',\n 'ibu_max', 'srm_min', 'srm_max', 'og_min', 'fg_min', 'fg_max']\n return df[lst]",
"def columns(self):\n\n return None",
"def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns",
"def drop_unnecessary_columns(df):\n df = df.drop([\n 'id',\n 'imdb_id',\n 'poster_path',\n 'video',\n 'status',\n 'weighted_rating', # Only average_rating was used for this project\n 'original_title',\n 'crew', # Used in production_score\n 'producers', # Used in production_score\n 'executive_producers', # Used in production_score\n 'cast', # Used in production_score\n 'director', # Used in production_score\n 'production_companies', # Used in production_score\n 'production_countries', # Binarized\n 'genres', # Binarized\n 'original_language', # Binarized\n 'adult', # No adult movies in the dataset, so no variance between movies\n 'release_date', # Not being considered for this project\n 'overview',\n 'title',\n 'tagline',\n 'vote_average', # Ratings have been binned\n 'popularity', # Only considering average_rating\n 'vote_count', # We are making a predictor, so it makes no sense to use vote counts as input\n 'revenue', # We are making a predictor, so it makes no sense to use revenue as input\n 'keywords', # Not considering keywords for this project\n 'revenue_divide_budget', # We are making a predictor, so it makes no sense to use revenue/budget as input\n ], 1)\n return df",
"def exclude_cols(self, *_, **__) -> Tuple[str, ...]:",
"def notable_features(self):\n return self._notable_features",
"def _columns(cls):\n columns = []\n for name, member in inspect.getmembers(cls):\n if (not name.startswith('_') and\n isinstance(member, InstrumentedAttribute)):\n columns.append(name)\n return columns"
] | [
"0.71959037",
"0.68640786",
"0.683394",
"0.6806973",
"0.67299575",
"0.6661575",
"0.6637758",
"0.65448886",
"0.65201235",
"0.64434034",
"0.6430318",
"0.6406203",
"0.63625467",
"0.6347281",
"0.6317178",
"0.6239724",
"0.621829",
"0.6213187",
"0.6197959",
"0.6065288",
"0.60188276",
"0.60165936",
"0.59574044",
"0.5952358",
"0.59392947",
"0.5929654",
"0.5906698",
"0.590606",
"0.5901749",
"0.5876959"
] | 0.80354255 | 0 |
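A hedged sketch (not from the dataset) of how the exclusion list in the row above might be used to split a match-level dataframe into predictive features and metadata/outcome columns; pandas and the extra column names (`shots_on_target`, `pass_accuracy`) are assumptions for illustration.

import pandas as pd

NON_FEATURE_COLS = ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',
                    'goals', 'op_goals', 'points', 'timestamp', 'team_name',
                    'op_team_name']

df = pd.DataFrame({
    'matchid': [10, 10], 'teamid': [1, 2], 'points': [3, 0],   # metadata / outcome
    'shots_on_target': [7, 4], 'pass_accuracy': [0.81, 0.74],  # illustrative features
})

feature_cols = [c for c in df.columns if c not in NON_FEATURE_COLS]
X = df[feature_cols]   # safe to feed to a model
y = df['points']       # outcome, deliberately excluded from X
assert feature_cols == ['shots_on_target', 'pass_accuracy']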
Returns a list of all columns that should be used in prediction (i.e. all features that are in the dataframe but are not in the features.get_non_feature_column() list). | def get_feature_columns(all_cols):
return [col for col in all_cols if col not in get_non_feature_columns()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return col",
"def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop",
"def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns",
"def _get_relevant_features(X):\n # FIXME utilize sklearn.utils.multiclass.type_of_target()\n continuous_cols = X.columns[~which_columns_are_binary(X)]\n return continuous_cols",
"def get_non_feature_columns():\n return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',\n 'goals', 'op_goals', 'points', 'timestamp', 'team_name', \n 'op_team_name']",
"def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)",
"def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]",
"def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns",
"def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df",
"def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns",
"def features(self):\n other_features = ['listen_type', 'is_context', 'is_context_flow', \n 'is_listened_context', 'is_listened_flow', \n 'is_listened_context_flow']\n \n drop_features = self.categorize_features + self.drop_features + other_features + self.features_bis\n features = np.setdiff1d(self.train.columns.tolist(), drop_features + ['is_listened'], assume_unique=True)\n \n return features",
"def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr",
"def get_dense_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_embedding_feature_columns()",
"def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df",
"def get_returns_columns(df: pd.DataFrame) -> list:\n return [col for col in df.columns if '_period_return' in col]",
"def others(self) -> List[str]:\n exclude = self._obj._names[\"covariates\"] + DATA_COLS\n return [col for col in self._obj.columns if col not in exclude]",
"def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]",
"def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n #-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return",
"def get_all_hidden_columns(self):\n visible_columns_list = []\n column_headers = self.driver.find_elements_by_xpath('//thead/tr/th')\n for i in range(len(column_headers)):\n if column_headers[i].get_attribute('class') == 'ng-scope ng-hide':\n visible_columns_list.append(i + 1)\n return visible_columns_list",
"def columns(self):\n return self.__column_list",
"def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns",
"def column_names(self):\n return self.data.columns.values",
"def get_non_float_column_names(df):\n if not isinstance(df, pd.DataFrame):\n msg = 'df of type=\"{}\" is not a pandas DataFrame'\n raise TypeError(msg.format(str(type(df))))\n if len(set(df.columns)) != len(df.columns):\n msg = 'df contains duplicated column names which is not supported'\n raise ValueError(msg)\n return list(set(df.select_dtypes(exclude=[np.floating]).columns))",
"def columns(self):\n return list(self._scala.observationColumns())",
"def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df",
"def get_sparse_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_sparse_categorical_feature_columns()",
"def chose_only_hypothesis_colums(df):\n lst = ['abv', 'ibu', 'gravity', 'abv_min', 'abv_max', 'ibu_min',\n 'ibu_max', 'srm_min', 'srm_max', 'og_min', 'fg_min', 'fg_max']\n return df[lst]",
"def get_features(df, target=[], meta=[]):\n ############################################################\n # Type conversion\n ############################################################\n\n types = df[df.columns[~df.columns.isin(target+meta)]].dtypes\n for col_name, col_type in types.iteritems():\n if col_type == bool:\n df[col_name] = df[col_name].astype(float)\n\n ############################################################\n # Get features by type\n ############################################################\n \n features_cat = filter(lambda x: not np.issubdtype(x[1], np.number), types.iteritems())\n features_cat = sorted(list(map(lambda x: x[0], features_cat)))\n # target and meta should have already been removed. but just to be sure\n features_num = sorted(list(set(types.index) - set(features_cat) - set(target) - set(meta))) \n selected_features = df.columns.to_list()\n features_idx = dict(zip(selected_features, range(len(selected_features))))\n \n return selected_features, features_num, features_cat, features_idx",
"def columns(self):\n return self._column_names",
"def get_all_contests(data_frame) -> list:\n return [contest for contest in data_frame.columns if contest != 'Ballot Style']"
] | [
"0.7311638",
"0.71005136",
"0.7081749",
"0.6978075",
"0.6938612",
"0.6809944",
"0.67643905",
"0.6764281",
"0.67626035",
"0.6757341",
"0.67021",
"0.6644742",
"0.6602799",
"0.64940643",
"0.64777064",
"0.6425499",
"0.64037734",
"0.6395039",
"0.6363723",
"0.6360488",
"0.6316431",
"0.6314124",
"0.6288864",
"0.6284879",
"0.62807053",
"0.6261158",
"0.6253158",
"0.62519044",
"0.62340724",
"0.622875"
] | 0.7818545 | 0 |
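A small self-contained check (assumed, not taken from the dataset) showing the two helpers from this row and the previous one working together; the sample column list is illustrative only.

def get_non_feature_columns():
    return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',
            'goals', 'op_goals', 'points', 'timestamp', 'team_name',
            'op_team_name']

def get_feature_columns(all_cols):
    # Keep only columns that are neither metadata nor outcome variables.
    return [col for col in all_cols if col not in get_non_feature_columns()]

all_cols = ['matchid', 'teamid', 'shots_on_target', 'pass_accuracy', 'points']
assert get_feature_columns(all_cols) == ['shots_on_target', 'pass_accuracy']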
Setup cache object for wallet | def setup_cache(self):
if self.walletname not in cache:
cache[self.walletname] = {
"raw_transactions": {},
"transactions": [],
"tx_count": None,
"tx_changed": True,
"last_block": None,
"raw_tx_block_update": {},
"addresses": [],
"change_addresses": [],
"scan_addresses": True
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}",
"def __init__(self, *args, **kwargs):\n self._cachedict = {}",
"def __init_cache__(self) -> None:\n try:\n self.cache = caches[CACHE_NAME]\n logging.info(\"GeoIP2 - successfully initialised cache\")\n except InvalidCacheBackendError as ex:\n raise MiddlewareNotUsed(f\"GeoIP2 - cache configuration error: {ex}\") from ex",
"def __initCacheSection(self):\n m = hashlib.md5()\n for i in [self.AUTHZ_ENDPOINT, self.CLIENT_ID]:\n m.update(bytes(self.conf[i], \"utf-8\"))\n self.cacheSection = str(m.hexdigest())",
"def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return",
"def load_cache():\n return {}",
"def __init__(self,cacheLocation):\n self.cacheLocation = cacheLocation\n if not os.path.exists(self.cacheLocation):\n os.mkdir(self.cacheLocation)",
"def __init__(self):\n if Config.USEMEMCACHED is True:\n self.mc = MCache(server = Config.MEMCACHED_SERVER,\n username = Config.MEMCACHED_USERNAME,\n password = Config.MEMCACHED_PASSWORD)\n else:\n self.mc = None\n self.api = DozensApi()",
"def __init__(self, cache_location=None):\n CacheManager.cache_location = None # The dir that holds the whole cache.\n CacheManager.cache_file_location = None # The JSON file that contains posts, etc.\n\n if cache_location is not None:\n CacheManager.create_cache(cache_location)",
"def __init__(self, persistent=True):\n super().__init__()\n self.name_cache = {}\n self.run_cache = {}\n self.row_cache = {}\n self.persistent = persistent\n\n if self.persistent:\n self.load_cache()",
"def connect(self):\n storage_type = self.config.get('cache', 'type')\n self.__log.info(\"Creating the storage cache of type {}\".format(storage_type))\n cache = Storage(storage_type, self.config) #.cache\n self.__log.info(\"Connected to cache\")\n return cache",
"def setup_cache(self):\n train_cache_path = self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)\n dev_cache_path = self.cache.get_cache_path_and_check(DEV_STR, self.task_name)\n test_cache_path = self.cache.get_cache_path_and_check(TEST_STR, self.task_name)\n\n self.train_cache_writer = None\n self.dev_cache_writer = None\n self.test_cache_writer = None\n\n if os.path.exists(train_cache_path):\n f = h5py.File(train_cache_path, 'r')\n self.train_cache = (torch.tensor(f[str(i)][()]) for i in range(len(f.keys())))\n else:\n self.train_cache_writer = h5py.File(train_cache_path, 'w')\n if os.path.exists(dev_cache_path):\n f2 = h5py.File(dev_cache_path, 'r')\n self.dev_cache = (torch.tensor(f2[str(i)][()]) for i in range(len(f2.keys())))\n else:\n self.dev_cache_writer = h5py.File(dev_cache_path, 'w')\n if os.path.exists(test_cache_path):\n f3 = h5py.File(test_cache_path, 'r')\n self.test_cache = (torch.tensor(f3[str(i)][()]) for i in range(len(f3.keys())))\n else:\n self.test_cache_writer = h5py.File(test_cache_path, 'w')",
"def __init__(self, config):\n # Initialize key variables\n connection_string = (\n '{}:{}'\n ''.format(\n config.memcached_hostname(), config.memcached_port()))\n self.cache = memcache.Client([connection_string], debug=0)",
"def cache(cls):\n return Cache(cls, cls.cache_regions, cls.cache_label)",
"def __init__(self, location, option):\n super(MyCache, self).__init__(location, option)\n self.dcreate('ttl')",
"def __init__(self, access_token_cache, account_id, credentials):\n super(AccessTokenStore, self).__init__(lock=None)\n self._access_token_cache = access_token_cache\n self._account_id = account_id\n self._credentials = credentials",
"def __init__(self, config, cache_filename, path):\n self.config = config\n self.cache_path = os.path.join(path, cache_filename)\n self._cache = None",
"def setUp(self):\n self.expire_time = 1\n self.cache = Cacher(self.expire_time)\n self.key = 'test'\n self.value = {1:2}",
"def __init__(self, storage=default_storage, prefix=\"assets\", cache_name=\"optimizations.assetcache\"):\n self._storage = storage\n self._prefix = prefix\n self._cache = resolve_namespaced_cache(cache_name)",
"def __init__(self):\n self.ts = dict()\n self.cache = dict()",
"def setup_cache(backend: Literal[\"memory\", \"disk\"], expiry: int = 0):\n setup_cache_hooks(_set_val, _get_val, expiry == 0)\n _BackendOpt.BACKEND_DISK = backend == \"disk\"\n _BackendOpt.EXPIRY_SECONDS = expiry * 60",
"def fill_request_cache():\n if not request_cache.cache.get(\"bingo_request_cache_filled\"):\n\n # Assume that we're going to grab both BingoCache and\n # BingoIdentityCache from memcache\n memcache_keys = [\n BingoCache.CACHE_KEY,\n BingoIdentityCache.key_for_identity(identity())\n ]\n\n # Try to grab BingoCache from instance cache\n bingo_instance = instance_cache.get(BingoCache.CACHE_KEY)\n if bingo_instance:\n # If successful, use instance cached version...\n request_cache.cache[BingoCache.CACHE_KEY] = bingo_instance\n # ...and don't load BingoCache from memcache\n memcache_keys.remove(BingoCache.CACHE_KEY)\n\n # Load necessary caches from memcache\n dict_memcache = memcache.get_multi(memcache_keys)\n\n # Decompress BingoCache if we loaded it from memcache\n if BingoCache.CACHE_KEY in dict_memcache:\n dict_memcache[BingoCache.CACHE_KEY] = CacheLayers.decompress(\n dict_memcache[BingoCache.CACHE_KEY])\n\n # Update request cache with values loaded from memcache\n request_cache.cache.update(dict_memcache)\n\n if not bingo_instance:\n # And if BingoCache wasn't in the instance cache already, store\n # it with a 1-minute expiry\n instance_cache.set(BingoCache.CACHE_KEY,\n request_cache.cache.get(BingoCache.CACHE_KEY),\n expiry=CacheLayers.INSTANCE_SECONDS)\n\n request_cache.cache[\"bingo_request_cache_filled\"] = True",
"def setup(cls, path, cache_filename, **kwargs):\n cache_filepath = os.path.join(path, cache_filename)\n if not os.path.isfile(cache_filepath):\n with open(cache_filepath, 'w') as cache_file:\n json.dump({'start_time': None}, cache_file)",
"def test__cache(self):\n # Access to a protected member _cache of a client class\n # pylint: disable=W0212\n treadmill.zkutils.get.return_value = {}\n\n zkclient = kazoo.client.KazooClient()\n self.evmgr._cache(zkclient, 'foo#001')\n\n appcache = os.path.join(self.cache, 'foo#001')\n self.assertTrue(os.path.exists(appcache))",
"def _cache(self):\n return self._class(self.client_servers, **self._options)",
"def __init__(self):\n load_dotenv()\n mnemonic_phrase = os.getenv(\n \"MNEMONIC\", \"soccer cousin badge snow chicken lamp soft note ugly crouch unfair biology symbol control heavy\")\n\n # initialize w3\n self.w3 = Web3(Web3.HTTPProvider(\"http://127.0.0.1:8545\"))\n # support PoA algorithm\n self.w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\n self.coins = {}\n for coin in COINS:\n self.coins[coin] = self.derive_wallets(mnemonic_phrase, coin)",
"def __init__(self, accessor, settings, name=None):\n super(DiskCache, self).__init__(accessor, settings, name)\n\n path = settings.get(\"path\")\n assert path\n\n self.__env = None\n self.__path = os_path.join(path, \"biggraphite\", \"cache\", \"version0\")\n self.__size = settings.get(\"size\", self.MAP_SIZE)\n self.__ttl = int(settings.get(\"ttl\", 24 * 60 * 60))\n self.__sync = settings.get(\"sync\", True)\n self.__databases = {\"metric_to_meta\": None}\n self.__metric_to_metadata_db = None\n self._max_size.set(self.__size)",
"def _store_cache(self):\n assert self._already_generated, \"Must generate before storing to cache\"\n\n if self.variant_unit is not None:\n logger.warning(\"Cannot cache once variant_unit has been set\")\n return\n\n try:\n os.mkdir(os.path.dirname(self._cache_key))\n except FileExistsError:\n # Easier than checking and risking race conditions\n pass\n\n with open(self._cache_key, 'w') as f:\n json.dump(self.rows, f)\n\n logger.debug(\"Stored cache to {}\".format(self._cache_key))",
"def __init__(self, region=\"default\", cache_key=None):\n self.region = region\n self.cache_key = cache_key",
"def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)"
] | [
"0.70809096",
"0.67123383",
"0.66832745",
"0.6391737",
"0.6336041",
"0.619476",
"0.6180135",
"0.6171709",
"0.6163314",
"0.6139369",
"0.60808635",
"0.6058581",
"0.60361993",
"0.60249174",
"0.6023925",
"0.5965481",
"0.59415615",
"0.5929574",
"0.59244114",
"0.59184754",
"0.58754265",
"0.58665377",
"0.5860716",
"0.58524007",
"0.58502835",
"0.5849246",
"0.5847693",
"0.5822829",
"0.58178693",
"0.58145213"
] | 0.86449647 | 0 |
Cache `raw_transactions` (with full data on all the inputs and outputs of each tx) | def cache_raw_txs(self, cli_txs):
# Get list of all tx ids
txids = list(dict.fromkeys(cli_txs.keys()))
tx_count = len(txids)
# If there are new transactions (if the transactions count changed)
if tx_count != self.cache["tx_count"]:
for txid in txids:
# Cache each tx, if not already cached.
# Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching
if txid not in self.cache["raw_transactions"]:
# Call Bitcoin Core to get the "raw" transaction - allows to read detailed inputs and outputs
raw_tx_hex = self.cli.gettransaction(txid)["hex"]
raw_tx = self.cli.decoderawtransaction(raw_tx_hex)
# Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions`
# command is not available in the `getrawtransaction` - so add it "manually" here.
if "fee" in cli_txs[txid]:
raw_tx["fee"] = cli_txs[txid]["fee"]
if "category" in cli_txs[txid]:
raw_tx["category"] = cli_txs[txid]["category"]
if "time" in cli_txs[txid]:
raw_tx["time"] = cli_txs[txid]["time"]
if "blockhash" in cli_txs[txid]:
raw_tx["block_height"] = self.cli.getblockheader(cli_txs[txid]["blockhash"])["height"]
else:
raw_tx["block_height"] = -1
# Loop on the transaction's inputs
# If not a coinbase transaction:
# Get the output data corresponding to the input (that is: input_txid[output_index])
tx_ins = []
for vin in raw_tx["vin"]:
# If the tx is a coinbase tx - set `coinbase` to True
if "coinbase" in vin:
raw_tx["coinbase"] = True
break
# Otherwise, get the txid and output index that this input spends
vin_txid = vin["txid"]
vin_vout = vin["vout"]
try:
raw_tx_hex = self.cli.gettransaction(vin_txid)["hex"]
tx_in = self.cli.decoderawtransaction(raw_tx_hex)["vout"][vin_vout]
tx_in["txid"] = vin["txid"]
tx_ins.append(tx_in)
except:
pass
# For each output in the tx_ins list (the tx inputs in their output "format")
# Create object with the address, amount, and whether the address belongs to the wallet (`internal=True` if it does).
raw_tx["from"] = [{
"address": out["scriptPubKey"]["addresses"][0],
"amount": out["value"],
"internal": out["scriptPubKey"]["addresses"][0] in self.wallet_addresses
} for out in tx_ins]
# For each output in the tx (`vout`)
# Create object with the address, amount, and whether the address belongs to the wallet (`internal=True` if it does).
raw_tx["to"] = [({
"address": out["scriptPubKey"]["addresses"][0],
"amount": out["value"],
"internal": out["scriptPubKey"]["addresses"][0] in self.wallet_addresses
}) for out in raw_tx["vout"] if "addresses" in out["scriptPubKey"]]
# Save the raw_transaction to the cache
cache[self.walletname]["raw_transactions"][txid] = raw_tx
# Set the tx count to avoid unnecessary indexing
cache[self.walletname]["tx_count"] = tx_count
# Set the tx changed to indicate that there are new transactions to cache
cache[self.walletname]["tx_changed"] = True
else:
# Set the tx changed to False to avoid unnecessary indexing
cache[self.walletname]["tx_changed"] = False
# If unconfirmed transactions were mined, assign them their block height
blocks = self.cli.getblockcount()
if blocks != self.cache["last_block"]:
for txid in self.cache["raw_transactions"]:
if self.cache["raw_transactions"][txid]["block_height"] == -1 and "blockhash" in cli_txs[txid]:
height = self.cli.getblockheader(cli_txs[txid]["blockhash"])["height"]
cache[self.walletname]["raw_transactions"][txid]["block_height"] = height
cache[self.walletname]["raw_tx_block_update"][txid] = height
cache[self.walletname]["last_block"] = blocks
return self.cache["raw_transactions"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cache_txs(self, raw_txs):\n # Get the cached `raw_transactions` dict (txid -> tx) as a list of txs\n transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True))\n result = []\n\n # If unconfirmed transactions were mined, assign them their block height\n if len(self.cache[\"raw_tx_block_update\"]) > 0:\n for i in range(0, len(self.cache[\"transactions\"])):\n if self.cache[\"transactions\"][i][\"txid\"] in cache[self.walletname][\"raw_tx_block_update\"]:\n cache[self.walletname][\"transactions\"][i][\"block_height\"] = cache[self.walletname][\"raw_tx_block_update\"][cache[self.walletname][\"transactions\"][i][\"txid\"]]\n cache[self.walletname][\"raw_tx_block_update\"] = {}\n\n # If the `raw_transactions` did not change - exit here.\n if not self.cache[\"tx_changed\"]:\n return self.cache[\"transactions\"]\n\n # Loop through the raw_transactions list\n for i, tx in enumerate(transactions):\n # If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`)\n if tx[\"category\"] == \"send\" or tx[\"category\"] == \"receive\":\n is_send = True\n is_self = True\n\n # Check if the transaction is a `send` or not (if all inputs belong to the wallet)\n if len(tx[\"from\"]) == 0:\n is_send = False\n\n for fromdata in tx[\"from\"]:\n if not fromdata[\"internal\"]:\n is_send = False\n\n # Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet)\n for to in tx[\"to\"]:\n if not is_send or not to[\"internal\"]:\n is_self = False\n break\n\n tx[\"is_self\"] = is_self\n\n if not is_send or is_self:\n for to in tx[\"to\"]:\n if to[\"internal\"]:\n # Cache received outputs\n result.append(self.prepare_tx(tx, to, \"receive\", destination=None, is_change=(to[\"address\"] in self.change_addresses)))\n\n if is_send or is_self:\n destination = None\n for to in tx[\"to\"]:\n if to[\"address\"] in self.change_addresses and not is_self:\n # Cache change output\n result.append(self.prepare_tx(tx, to, \"receive\", destination=destination, is_change=True))\n elif not to[\"internal\"] or (is_self and to[\"address\"] not in self.change_addresses):\n destination = to\n for fromdata in tx[\"from\"]:\n # Cache sent inputs\n result.append(self.prepare_tx(tx, fromdata, \"send\", destination=destination))\n else:\n tx[\"is_self\"] = False\n # Cache coinbase output\n result.append(self.prepare_tx(tx, tx[\"to\"][0], tx[\"category\"]))\n\n # Save the result to the cache\n cache[self.walletname][\"transactions\"] = result\n return self.cache[\"transactions\"]",
"def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions",
"def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions",
"def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy",
"def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }",
"def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError",
"def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)",
"def transactions(self):\n return copy.deepcopy(self._transactions)",
"def update_txs(self, txs):\n # For now avoid caching orphan transactions. We might want to show them somehow in the future.\n cli_txs = {tx[\"txid\"]: tx for tx in txs if tx[\"category\"] != \"orphan\"}\n raw_txs = self.cache_raw_txs(cli_txs)\n cached_txs = self.cache_txs(raw_txs)\n\n return cached_txs",
"def get_pending_trust_transactions():\n with django.db.transaction.atomic():\n transactions = list(\n Transaction.objects.filter(\n kind=Transaction.KIND.deposit,\n status=Transaction.STATUS.pending_trust,\n pending_execution_attempt=False,\n )\n .select_related(\"asset\")\n .select_for_update()\n )\n Transaction.objects.filter(id__in=[t.id for t in transactions]).update(\n pending_execution_attempt=True\n )\n return transactions",
"def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions",
"def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions",
"def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()",
"def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)",
"def get_transactions(self, block_name):\n cmd = \"\"\" SELECT * FROM %s WHERE %s = '%s'; \"\"\" %(\n TABLE_TRANSACTIONS, COL_TRANSACTION_BLOCK, block_name)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()",
"async def check_transaction_receipts(self):\n async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()\n tasks = [self._check_transaction_receipt(tx_hash, self._pending_tx_dict[tx_hash]['timestamp'])\n for tx_hash in self._pending_tx_dict.keys()]\n transaction_receipts: List[AttributeDict] = [tr for tr in await safe_gather(*tasks)\n if (tr is not None and tr.get(\"blockHash\") is not None)]\n block_hash_set: Set[HexBytes] = set(tr.blockHash for tr in transaction_receipts)\n fetch_block_tasks = [async_scheduler.call_async(self._w3.eth.getBlock, block_hash)\n for block_hash in block_hash_set]\n blocks: Dict[HexBytes, AttributeDict] = dict((block.hash, block)\n for block\n in await safe_gather(*fetch_block_tasks)\n if block is not None)\n\n for receipt in transaction_receipts:\n # Emit gas used event.\n tx_hash: str = receipt.transactionHash.hex()\n gas_price_wei: int = self._pending_tx_dict[tx_hash]['gas_price']\n gas_used: int = receipt.gasUsed\n gas_eth_amount_raw: int = gas_price_wei * gas_used\n\n if receipt.blockHash in blocks:\n block: AttributeDict = blocks[receipt.blockHash]\n\n if receipt.status == 0:\n self.logger().warning(f\"The transaction {tx_hash} has failed.\")\n self.trigger_event(WalletEvent.TransactionFailure, tx_hash)\n\n self.trigger_event(WalletEvent.GasUsed, EthereumGasUsedEvent(\n float(block.timestamp),\n tx_hash,\n float(gas_price_wei * 1e-9),\n gas_price_wei,\n gas_used,\n float(gas_eth_amount_raw * 1e-18),\n gas_eth_amount_raw\n ))\n\n # Stop tracking the transaction.\n self._stop_tx_tracking(tx_hash)",
"def _gather_transactions(self, tx_pool):\n # Get a set of random transactions from pending transactions\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n # Put in coinbase transaction\n coinbase_tx = Transaction.new(\n sender=self.pubkey,\n receiver=self.pubkey,\n amount=Block.REWARD,\n privkey=self.privkey,\n comment=\"Coinbase\"\n )\n gathered_transactions = [coinbase_tx.to_json()]\n # No transactions to process, return coinbase transaction only\n if not tx_pool:\n return gathered_transactions\n num_tx = min(Miner.MAX_NUM_TX, len(tx_pool))\n while True:\n if num_tx <= 0:\n return gathered_transactions\n trans_sample = random.sample(tx_pool, num_tx)\n num_tx -= 1\n if self._check_transactions_balance(trans_sample):\n break\n gathered_transactions.extend(trans_sample)\n finally:\n self.added_tx_lock.release()\n self.all_tx_lock.release()\n return gathered_transactions",
"def transactions(self):\r\n return tx.Transactions(self)",
"def pending_transactions(self):\n self._update()\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n pending_tx = self._all_transactions - self._added_transactions\n finally:\n self.added_tx_lock.release()\n self.all_tx_lock.release()\n return copy.deepcopy(pending_tx)",
"def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}",
"def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting {wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200",
"def prepare_raw_tx(self, mn_address, change_address, inputs, total, fee=0.00001):\n raw_tx = {mn_address: self.send_amount, change_address: total - self.send_amount - fee}\n return self.rpc.createrawtransaction(inputs, raw_tx)",
"def transaction(self) -> Context:\n session = self.Session()\n if self.cache:\n with self._cache_lock:\n for i in self.cache:\n session.add(i)\n self.cache = []\n session.flush()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()",
"def get_rawtx(txid):\n return requests.get(BASE+f'/api/rawtx/{txid}').json()['rawtx']",
"def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }",
"def transactions(self):\r\n return tx.AccountTransactions(self)",
"def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]",
"def added_transactions(self):\n self._update()\n with self.added_tx_lock:\n added_tx_copy = copy.deepcopy(self._added_transactions)\n return added_tx_copy",
"def fetch_transactions(self, address, startblock=None, endblock=None, simplify=True, verbose=False):\n all_transactions = []\n while True:\n transactions = self.fetch_transactions_in_range(address, startblock, endblock)\n try:\n if simplify:\n transactions = list(map(simplify_tx, transactions))\n except TypeError:\n print('error', address, 'start block', startblock, 'end block', endblock, 'transactions', transactions)\n all_transactions.extend(transactions)\n if verbose:\n print('fetching block', startblock, 'total transactions', len(all_transactions))\n if len(transactions) < 1000:\n break\n # do not incremement the block, in case there are multiple transactions in one block\n # but spread across paginated results. we dedupe later.\n startblock = int(transactions[-1]['blockNumber'])\n return all_transactions",
"def _merge_block(internal_transactions, transactions, whitelist):\n transactions_by_id = {\n (transaction[\"hash\"], transaction[\"blockHash\"]): transaction\n for transaction in transactions\n }\n for transaction in internal_transactions:\n hash = transaction[\"transactionHash\"]\n block = transaction[\"blockHash\"]\n if (hash, block) in transactions_by_id:\n whitelisted_fields = {\n key: value\n for key, value in transactions_by_id[(hash, block)].items()\n if key in whitelist\n }\n transaction.update(whitelisted_fields)\n del transactions_by_id[(hash, block)]\n return internal_transactions"
] | [
"0.7925975",
"0.6223888",
"0.61566186",
"0.60971904",
"0.5936462",
"0.58497435",
"0.58128",
"0.57948434",
"0.57376546",
"0.5634292",
"0.5595399",
"0.5577145",
"0.55244076",
"0.54729325",
"0.5469587",
"0.54539764",
"0.5448407",
"0.5412813",
"0.5397462",
"0.53824824",
"0.5359702",
"0.5350589",
"0.53419816",
"0.53343654",
"0.53244734",
"0.5311953",
"0.52994925",
"0.5293144",
"0.5278731",
"0.52743715"
] | 0.79335445 | 0 |
Caches the transactions list. Cache the inputs and outputs which belong to the user's wallet for each `raw_transaction` | def cache_txs(self, raw_txs):
# Get the cached `raw_transactions` dict (txid -> tx) as a list of txs
transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True))
result = []
# If unconfirmed transactions were mined, assign them their block height
if len(self.cache["raw_tx_block_update"]) > 0:
for i in range(0, len(self.cache["transactions"])):
if self.cache["transactions"][i]["txid"] in cache[self.walletname]["raw_tx_block_update"]:
cache[self.walletname]["transactions"][i]["block_height"] = cache[self.walletname]["raw_tx_block_update"][cache[self.walletname]["transactions"][i]["txid"]]
cache[self.walletname]["raw_tx_block_update"] = {}
# If the `raw_transactions` did not change - exit here.
if not self.cache["tx_changed"]:
return self.cache["transactions"]
# Loop through the raw_transactions list
for i, tx in enumerate(transactions):
# If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`)
if tx["category"] == "send" or tx["category"] == "receive":
is_send = True
is_self = True
# Check if the transaction is a `send` or not (if all inputs belong to the wallet)
if len(tx["from"]) == 0:
is_send = False
for fromdata in tx["from"]:
if not fromdata["internal"]:
is_send = False
# Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet)
for to in tx["to"]:
if not is_send or not to["internal"]:
is_self = False
break
tx["is_self"] = is_self
if not is_send or is_self:
for to in tx["to"]:
if to["internal"]:
# Cache received outputs
result.append(self.prepare_tx(tx, to, "receive", destination=None, is_change=(to["address"] in self.change_addresses)))
if is_send or is_self:
destination = None
for to in tx["to"]:
if to["address"] in self.change_addresses and not is_self:
# Cache change output
result.append(self.prepare_tx(tx, to, "receive", destination=destination, is_change=True))
elif not to["internal"] or (is_self and to["address"] not in self.change_addresses):
destination = to
for fromdata in tx["from"]:
# Cache sent inputs
result.append(self.prepare_tx(tx, fromdata, "send", destination=destination))
else:
tx["is_self"] = False
# Cache coinbase output
result.append(self.prepare_tx(tx, tx["to"][0], tx["category"]))
# Save the result to the cache
cache[self.walletname]["transactions"] = result
return self.cache["transactions"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cache_raw_txs(self, cli_txs): \n # Get list of all tx ids\n txids = list(dict.fromkeys(cli_txs.keys()))\n tx_count = len(txids)\n\n # If there are new transactions (if the transations count changed)\n if tx_count != self.cache[\"tx_count\"]:\n for txid in txids:\n # Cache each tx, if not already cached.\n # Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching\n if txid not in self.cache[\"raw_transactions\"]:\n # Call Bitcoin Core to get the \"raw\" transaction - allows to read detailed inputs and outputs\n raw_tx_hex = self.cli.gettransaction(txid)[\"hex\"]\n raw_tx = self.cli.decoderawtransaction(raw_tx_hex)\n # Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions`\n # command is not available in the `getrawtransacion` - so add it \"manually\" here.\n if \"fee\" in cli_txs[txid]:\n raw_tx[\"fee\"] = cli_txs[txid][\"fee\"]\n if \"category\" in cli_txs[txid]:\n raw_tx[\"category\"] = cli_txs[txid][\"category\"]\n if \"time\" in cli_txs[txid]:\n raw_tx[\"time\"] = cli_txs[txid][\"time\"]\n\n if \"blockhash\" in cli_txs[txid]:\n raw_tx[\"block_height\"] = self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n else:\n raw_tx[\"block_height\"] = -1\n\n # Loop on the transaction's inputs\n # If not a coinbase transaction:\n # Get the the output data corresponding to the input (that is: input_txid[output_index])\n tx_ins = []\n for vin in raw_tx[\"vin\"]:\n # If the tx is a coinbase tx - set `coinbase` to True\n if \"coinbase\" in vin:\n raw_tx[\"coinbase\"] = True\n break\n # If the tx is a coinbase tx - set `coinbase` to True\n vin_txid = vin[\"txid\"]\n vin_vout = vin[\"vout\"]\n try:\n raw_tx_hex = self.cli.gettransaction(vin_txid)[\"hex\"]\n tx_in = self.cli.decoderawtransaction(raw_tx_hex)[\"vout\"][vin_vout]\n tx_in[\"txid\"] = vin[\"txid\"]\n tx_ins.append(tx_in)\n except:\n pass\n # For each output in the tx_ins list (the tx inputs in their output \"format\")\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"from\"] = [{\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n } for out in tx_ins]\n # For each output in the tx (`vout`)\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"to\"] = [({\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n }) for out in raw_tx[\"vout\"] if \"addresses\" in out[\"scriptPubKey\"]]\n # Save the raw_transaction to the cache\n cache[self.walletname][\"raw_transactions\"][txid] = raw_tx\n # Set the tx count to avoid unnecessary indexing\n cache[self.walletname][\"tx_count\"] = tx_count\n # Set the tx changed to indicate the there are new transactions to cache\n cache[self.walletname][\"tx_changed\"] = True\n else:\n # Set the tx changed to False to avoid unnecessary indexing\n cache[self.walletname][\"tx_changed\"] = False\n\n # If unconfirmed transactions were mined, assign them their block height\n blocks = self.cli.getblockcount()\n if blocks != self.cache[\"last_block\"]:\n for txid in self.cache[\"raw_transactions\"]:\n if self.cache[\"raw_transactions\"][txid][\"block_height\"] == -1 and \"blockhash\" in cli_txs[txid]:\n height = 
self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n cache[self.walletname][\"raw_transactions\"][txid][\"block_height\"] = height\n cache[self.walletname][\"raw_tx_block_update\"][txid] = height\n cache[self.walletname][\"last_block\"] = blocks\n\n return self.cache[\"raw_transactions\"]",
"def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }",
"def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)",
"def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions",
"async def check_transaction_receipts(self):\n async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()\n tasks = [self._check_transaction_receipt(tx_hash, self._pending_tx_dict[tx_hash]['timestamp'])\n for tx_hash in self._pending_tx_dict.keys()]\n transaction_receipts: List[AttributeDict] = [tr for tr in await safe_gather(*tasks)\n if (tr is not None and tr.get(\"blockHash\") is not None)]\n block_hash_set: Set[HexBytes] = set(tr.blockHash for tr in transaction_receipts)\n fetch_block_tasks = [async_scheduler.call_async(self._w3.eth.getBlock, block_hash)\n for block_hash in block_hash_set]\n blocks: Dict[HexBytes, AttributeDict] = dict((block.hash, block)\n for block\n in await safe_gather(*fetch_block_tasks)\n if block is not None)\n\n for receipt in transaction_receipts:\n # Emit gas used event.\n tx_hash: str = receipt.transactionHash.hex()\n gas_price_wei: int = self._pending_tx_dict[tx_hash]['gas_price']\n gas_used: int = receipt.gasUsed\n gas_eth_amount_raw: int = gas_price_wei * gas_used\n\n if receipt.blockHash in blocks:\n block: AttributeDict = blocks[receipt.blockHash]\n\n if receipt.status == 0:\n self.logger().warning(f\"The transaction {tx_hash} has failed.\")\n self.trigger_event(WalletEvent.TransactionFailure, tx_hash)\n\n self.trigger_event(WalletEvent.GasUsed, EthereumGasUsedEvent(\n float(block.timestamp),\n tx_hash,\n float(gas_price_wei * 1e-9),\n gas_price_wei,\n gas_used,\n float(gas_eth_amount_raw * 1e-18),\n gas_eth_amount_raw\n ))\n\n # Stop tracking the transaction.\n self._stop_tx_tracking(tx_hash)",
"def transactions(self):\n return copy.deepcopy(self._transactions)",
"def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy",
"def test_wallets_get_transaction_list(self):\n pass",
"def _save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")",
"def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []",
"def apply_transactions(\n self, transactions: List[TransactionMessage]\n ) -> \"OwnershipState\":\n new_state = copy.copy(self)\n for tx_message in transactions:\n new_state._update(tx_message)\n\n return new_state",
"def process_transaction(self, transaction):\n instrument = transaction.instrument\n if isinstance(instrument, Future):\n try:\n old_price = self._payout_last_sale_prices[instrument]\n except KeyError:\n self._payout_last_sale_prices[instrument] = transaction.price\n else:\n position = self.position_tracker.positions[instrument]\n amount = position.amount\n price = transaction.price\n\n self._cash_flow(\n self._calculate_payout(\n instrument.multiplier,\n amount,\n old_price,\n price,\n ),\n )\n\n if amount + transaction.amount == 0:\n del self._payout_last_sale_prices[instrument]\n else:\n self._payout_last_sale_prices[instrument] = price\n else:\n self._cash_flow(-(transaction.price * transaction.amount))\n\n self.position_tracker.execute_transaction(transaction)\n\n # we only ever want the dict form from now on\n transaction_dict = transaction.to_dict()\n try:\n self._processed_transactions[transaction.dt].append(\n transaction_dict,\n )\n except KeyError:\n self._processed_transactions[transaction.dt] = [transaction_dict]",
"def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}",
"def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions",
"def lock(self):\n self.words = None\n self.keys = {}\n self.passphrase = b''\n self.language = ''\n self.unspent_txs = {}\n self.spent_txs = []\n self.balance = 0\n self.last_shared_index = 0\n self.last_generated_index = 0",
"def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError",
"def transactions(self):\r\n return tx.AccountTransactions(self)",
"def mine_transactions(self, address):\n transaction = Transaction(walletoffrom=None, walletofto=address, amount=self.reward)\n self.current_transactions.append(transaction)\n\n block = Block(target=self.target, transactions=self.current_transactions, previoushash=self.last_block().__hash__())\n\n\n self.chain.append(block)\n self.current_transactions = []",
"def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions",
"def checks(transactions):\n txs = transactions.values_list('to_address', flat=True)\n addrs = ' '.join([tx for tx in txs if tx])\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": addrs}))\n\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history']\n except:\n [blockchain_set_tx_detail(transaction) for transaction in transactions]\n\n [set_tx_details(history_data, transaction) for transaction in transactions]",
"def __init__(self):\n self.chain = {}\n self.blocks = {}\n self.blocks_spending_input = {}\n self.blocks_containing_tx = {}\n self.all_transactions = {}",
"def update_txs(self, txs):\n # For now avoid caching orphan transactions. We might want to show them somehow in the future.\n cli_txs = {tx[\"txid\"]: tx for tx in txs if tx[\"category\"] != \"orphan\"}\n raw_txs = self.cache_raw_txs(cli_txs)\n cached_txs = self.cache_txs(raw_txs)\n\n return cached_txs",
"def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }",
"def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)",
"def _update_executed(self, tx: BaseTransaction) -> None:\n tx_meta = tx.get_metadata()\n assert tx.hash is not None\n assert not tx_meta.voided_by\n log = self.log.new(tx=tx.hash_hex)\n log.debug('update executed')\n # remove all inputs\n for tx_input in tx.inputs:\n spent_tx = tx.get_spent_tx(tx_input)\n spent_tx_output = spent_tx.outputs[tx_input.index]\n log_it = log.new(tx_id=spent_tx.hash_hex, index=tx_input.index)\n if _should_skip_output(spent_tx_output):\n log_it.debug('ignore input')\n continue\n log_it.debug('remove output that became spent')\n self._remove_utxo(UtxoIndexItem.from_tx_output(spent_tx, tx_input.index, spent_tx_output))\n # add outputs that aren't spent\n for index, tx_output in enumerate(tx.outputs):\n log_it = log.new(index=index)\n if _should_skip_output(tx_output):\n log_it.debug('ignore output')\n continue\n spent_by = tx_meta.get_output_spent_by(index)\n if spent_by is not None:\n log_it.debug('do not add output that is spent', spent_by=spent_by.hex())\n continue\n log_it.debug('add new unspent output')\n self._add_utxo(UtxoIndexItem.from_tx_output(tx, index, tx_output))",
"def _save_miner_transactions(self, blocks_traces):\n docs = [self._preprocess_internal_transaction(transaction) for transaction in blocks_traces if\n not transaction[\"transactionHash\"]]\n self.client.bulk_index(docs=docs, index=self.indices[\"miner_transaction\"], doc_type=\"tx\", id_field=\"hash\",\n refresh=True)",
"def added_transactions(self):\n self._update()\n with self.added_tx_lock:\n added_tx_copy = copy.deepcopy(self._added_transactions)\n return added_tx_copy",
"def filter_unspent_outputs(our_outputs, transactions):\n\n unspent_outputs = our_outputs.copy()\n for tx_id, tx in transactions.items():\n tx_inputs = tx[\"vin\"]\n for tx_input in tx_inputs:\n # ID of output spent by this input.\n spent_outpoint = \"{}:{}\".format(tx_input[\"txid\"], tx_input[\"vout\"])\n if spent_outpoint in our_outputs:\n del unspent_outputs[spent_outpoint]\n return unspent_outputs",
"def prepare_raw_tx(self, mn_address, change_address, inputs, total, fee=0.00001):\n raw_tx = {mn_address: self.send_amount, change_address: total - self.send_amount - fee}\n return self.rpc.createrawtransaction(inputs, raw_tx)",
"def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)"
] | [
"0.76756275",
"0.6882982",
"0.6274878",
"0.61497986",
"0.58140624",
"0.57319975",
"0.5679993",
"0.5659589",
"0.5585741",
"0.55252254",
"0.55109656",
"0.5479615",
"0.5473953",
"0.546898",
"0.5449675",
"0.54464066",
"0.542897",
"0.54221237",
"0.5411895",
"0.5377797",
"0.5366215",
"0.5365713",
"0.53612506",
"0.53462315",
"0.53100985",
"0.53091353",
"0.5277577",
"0.5246816",
"0.52442926",
"0.5243285"
] | 0.7736577 | 0 |
This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. | def hide_fields_in_newer_versions(obj):
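# Hide the audit start/end time fields when the request's API version does not allow them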
if not api_utils.allow_start_end_audit_time():
obj.start_time = wtypes.Unset
obj.end_time = wtypes.Unset
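# Hide the force field when the request's API version does not allow it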
if not api_utils.allow_force():
obj.force = wtypes.Unset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']",
"def remove_read_only_fields(self):\n self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if\n not field.read_only or not str_to_bool(field.read_only)])",
"def remove_access_request_field(self):\n self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if\n field.FIELD_CONTENT_ATTRIBUTES != Elements.ACCESS_REQUESTS])",
"def get_fields(self, exclude=('id',)):\n fields = {}\n for field in self._meta.fields:\n if not field.name in exclude and getattr(self, field.name):\n fields[field.name] = getattr(self, field.name)\n return fields",
"def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):\n if not(isinstance(record, dict) and fields):\n return record\n\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {k: '********' for k in keys if record[k]})",
"def non_editable_metadata_fields(self):\r\n # We are not allowing editing of xblock tag and name fields at this time (for any component).\r\n return [XBlock.tags, XBlock.name]",
"def fields(self):\n ...",
"def fields(self):\r\n pass",
"def test_dont_show_hidden_fields(self):\n class ExampleSerializer(serializers.Serializer):\n integer_field = serializers.IntegerField(max_value=10)\n hidden_field = serializers.HiddenField(default=1)\n\n class ExampleView(views.APIView):\n \"\"\"Example view.\"\"\"\n def post(self, request):\n pass\n\n def get_serializer(self):\n return ExampleSerializer()\n\n view = ExampleView.as_view()\n response = view(request=request)\n assert response.status_code == status.HTTP_200_OK\n assert set(response.data['actions']['POST'].keys()) == {'integer_field'}",
"def test_extra_field_when_not_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n assert 'other_course_settings' not in block_data\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )",
"def data_without(self, fields):\n without = {}\n data = json.loads(self.data())\n for field, value in data.items():\n if field not in fields:\n without[field] = value\n return json.dumps(without)",
"def json_ignore_attrs():\n return ['metadata']",
"def raw_fields(self):\n pass",
"def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .identity_set import IdentitySet\n from .notebook import Notebook\n from .onenote_entity_schema_object_model import OnenoteEntitySchemaObjectModel\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n from .identity_set import IdentitySet\n from .notebook import Notebook\n from .onenote_entity_schema_object_model import OnenoteEntitySchemaObjectModel\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(IdentitySet)),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"lastModifiedBy\": lambda n : setattr(self, 'last_modified_by', n.get_object_value(IdentitySet)),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields",
"def get_fields(self):\n fields = super().get_fields()\n fields['children'] = ForumListSerializer(read_only=True, many=True)\n return fields",
"def only(self, *fields):\n for field in fields:\n path = \".\".join(self.document._meta.resolve_subfield_hierarchy(field.split(\".\")))\n self._only_fields.add(path)\n if self.query._Cursor__fields is None:\n # Identifier and version fields must always be included\n self.query._Cursor__fields = { \"_id\" : 1, \"_version\" : 1 }\n\n self.query._Cursor__fields.update({ path : 1 })\n\n return self",
"def strip_useless_attributes(self):\n graph_dict = self.graph.graph\n if \"node\" in graph_dict and \"label\" in graph_dict[\"node\"]:\n graph_dict[\"node\"].pop(\"label\")\n if \"graph\" in graph_dict:\n graph_dict.pop(\"graph\")",
"def get_readonly_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ()",
"def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .install_state import InstallState\n\n from .entity import Entity\n from .install_state import InstallState\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"deviceId\": lambda n : setattr(self, 'device_id', n.get_str_value()),\n \"deviceName\": lambda n : setattr(self, 'device_name', n.get_str_value()),\n \"errorCode\": lambda n : setattr(self, 'error_code', n.get_str_value()),\n \"installState\": lambda n : setattr(self, 'install_state', n.get_enum_value(InstallState)),\n \"lastSyncDateTime\": lambda n : setattr(self, 'last_sync_date_time', n.get_datetime_value()),\n \"osDescription\": lambda n : setattr(self, 'os_description', n.get_str_value()),\n \"osVersion\": lambda n : setattr(self, 'os_version', n.get_str_value()),\n \"userName\": lambda n : setattr(self, 'user_name', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields",
"def fields(self):",
"def _fields(self):\n fields = [(\"serial\", self.serial), (\"active\", str(self.active)),\n (\"name\", self.name), (\"version\", self.version),\n (\"auto_update\", str(self.auto_update)),\n (\"new_version_available\", str(self.new_version_available)),\n (\"product_type\", self.product_type),\n (\"network_device\", str(self.network_device))]\n return fields",
"def fields(self, update: bool = False):\n if self.__fields is None or update:\n self.__fields = lib_fields(self)\n return self.__fields",
"def data_only(self, fields):\n only = {}\n data = json.loads(self.data())\n for field, value in data.items():\n if field in fields:\n only[field] = value\n return json.dumps(only)",
"def get_readonly_fields(self, request, obj=None):\n if obj and obj.source == DigitizedWork.HATHI:\n return self.hathi_readonly_fields + self.readonly_fields\n return self.readonly_fields",
"def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"assignedDateTime\": lambda n : setattr(self, 'assigned_date_time', n.get_datetime_value()),\n \"capabilityStatus\": lambda n : setattr(self, 'capability_status', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"service\": lambda n : setattr(self, 'service', n.get_str_value()),\n \"servicePlanId\": lambda n : setattr(self, 'service_plan_id', n.get_uuid_value()),\n }\n return fields",
"def get_readonly_fields(self, request, obj):\n # FIXME(matzf) conceptually, an AS can change the ISD. Not allowed for now\n # as I anticipate this may unnecessarily complicate the TRC/certificate\n # update logic. Should be revisited.\n # TODO(matzf): Changing is_core should also be possible, not yet implemented\n # Requires removing core links etc, bump signed certificates\n if obj:\n return ('isd', 'is_core', 'as_id',)\n return ()",
"def remove_all_fields(self):\n self.fields = None",
"def get_readonly_fields(self, request, obj):\n self.request = request\n # fields that won't be editable. Just remove one to make it editable\n readonly_fields = ('git_username','git_name','repo_synced','last_compiled','provider','site_url_long','build_url_long','slug')\n if obj:\n readonly_fields = ('git_url',)+readonly_fields\n return readonly_fields\n #return super(RepositoryAdmin, self).get_readonly_fields(request, obj)",
"def allow_version_invalid_attributes(self):\n return self._allow_version_invalid_attributes",
"def excludeObsolete(self) -> 'ElementsRequestBuilder':\n ..."
] | [
"0.63221574",
"0.6136071",
"0.60758066",
"0.5660327",
"0.560701",
"0.5569673",
"0.55660045",
"0.55565786",
"0.55165595",
"0.54647285",
"0.54489416",
"0.54486406",
"0.54375917",
"0.5407587",
"0.54063267",
"0.5390326",
"0.53891975",
"0.53873795",
"0.5364011",
"0.5361968",
"0.5360927",
"0.5316059",
"0.53085107",
"0.5303084",
"0.5287476",
"0.52569544",
"0.5252061",
"0.52468055",
"0.523562",
"0.52254194"
] | 0.6415202 | 0 |
Retrieve information about the given audit. | def get_one(self, audit):
if self.from_audits:
raise exception.OperationNotPermitted
context = pecan.request.context
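# Fetch the audit and enforce the 'audit:get' policy before returning it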
rpc_audit = api_utils.get_resource('Audit', audit)
policy.enforce(context, 'audit:get', rpc_audit, action='audit:get')
return Audit.convert_with_links(rpc_audit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=True, withScans=True)\n return audit",
"async def getAudit(self, auditid) -> GetAuditResponse:\n\n print(\"get audit 1\" + auditid)\n res = await self.stub.GetAudit(\n GetAuditRequest(_id=auditid\n ))\n print(res.status, res.message, res.audit)\n return res",
"def get_audit(self, query, session):\n raise NotImplementedError()",
"def get_auditlog_entry_report_status(session):\n\n url = session.get_url('audit', 'main')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Obtained audit log entry report status.')",
"def get(self, audit_uuid):\n\n schema = AuditDownloadInputSchema()\n params, errors = schema.load(request.args)\n if errors:\n abort(400, errors)\n\n audit_query = AuditTable.select().where(AuditTable.uuid == audit_uuid)\n\n audit = audit_query.dicts()[0]\n output = audit[\"name\"] + \"\\n\" + audit[\"description\"] + \"\\n\\n\"\n\n scan_ids = []\n for scan in audit_query[0].scans.dicts():\n if scan[\"processed\"] is True:\n scan_ids.append(scan[\"id\"])\n\n results = (\n ResultTable.select(ResultTable, ScanTable, VulnTable)\n .join(ScanTable)\n .join(VulnTable, on=(ResultTable.oid == VulnTable.oid))\n .where(ResultTable.scan_id.in_(scan_ids))\n .order_by(ResultTable.scan_id)\n )\n\n with tempfile.TemporaryFile(\"r+\") as f:\n writer = csv.DictWriter(f, AuditDownload.AUDIT_CSV_COLUMNS, extrasaction=\"ignore\")\n writer.writeheader()\n for result in results.dicts():\n result[\"started_at\"] = result[\"started_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"ended_at\"] = result[\"ended_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"description\"] = Utils.format_openvas_description(result[\"description\"])\n writer.writerow(result)\n f.flush()\n f.seek(0)\n output += f.read()\n\n headers = {\"Content-Type\": \"text/csv\", \"Content-Disposition\": \"attachment\"}\n return Response(response=output, status=200, headers=headers)",
"def get_test_audit(context, **kw):\n obj_cls = objects.Audit\n db_data = db_utils.get_test_audit(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)",
"def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit",
"def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])",
"def get(self):\n path = 'auditlogEntryReport'\n # status complete\n # download\n return self._session.get(path)",
"def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')",
"def detail(self, goal=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n context = pecan.request.context\n policy.enforce(context, 'audit:detail',\n action='audit:detail')\n # NOTE(lucasagomes): /detail should only work agaist collections\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != \"audits\":\n raise exception.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['audits', 'detail'])\n return self._get_audits_collection(marker, limit,\n sort_key, sort_dir, expand,\n resource_url,\n goal=goal)",
"def source_audit(self) -> SourceAudit:\n return self._source_audit",
"def get_order_audit_trail(order_guid):\n return linnapi.orders.get_processed_order_audit_trail(order_guid)",
"def test_audit_log_view(self):\n initial_datetime = now()\n with reversion.create_revision():\n company = CompanyFactory(\n description='Initial desc',\n )\n\n reversion.set_comment('Initial')\n reversion.set_date_created(initial_datetime)\n reversion.set_user(self.user)\n\n changed_datetime = now()\n with reversion.create_revision():\n company.description = 'New desc'\n company.save()\n\n reversion.set_comment('Changed')\n reversion.set_date_created(changed_datetime)\n reversion.set_user(self.user)\n\n versions = Version.objects.get_for_object(company)\n version_id = versions[0].id\n url = reverse('api-v4:company:audit-item', kwargs={'pk': company.pk})\n\n response = self.api_client.get(url)\n response_data = response.json()['results']\n\n # No need to test the whole response\n assert len(response_data) == 1\n entry = response_data[0]\n\n assert entry['id'] == version_id\n assert entry['user']['name'] == self.user.name\n assert entry['comment'] == 'Changed'\n assert entry['timestamp'] == format_date_or_datetime(changed_datetime)\n assert entry['changes']['description'] == ['Initial desc', 'New desc']\n assert not set(EXCLUDED_BASE_MODEL_FIELDS) & entry['changes'].keys()",
"def func(self):\n char = self.character\n # cmd = self.cmdstring\n loc = char.location\n # account = self.account\n args = self.args\n # lhs, rhs = self.lhs, self.rhs\n # opt = self.switches\n obj_list = char.search(args, quiet=True, candidates=[loc] + loc.contents + char.contents) if args else [char]\n if not obj_list:\n _AT_SEARCH_RESULT(obj_list, char, args, quiet=False)\n return # Trying to audit something that isn't there. \"Could not find ''.\"\n obj = obj_list[0]\n obj_name = obj.get_display_name(char)\n hosted = obj.db.hosted\n if hosted:\n import time\n from evennia.utils import utils, evtable\n now = int(time.time())\n table = evtable.EvTable(border='none', pad_width=0, border_width=0, maxwidth=92)\n table.add_header(obj_name, '|wTimes', '|cLast', '|gFrom')\n table.reformat_column(0, width=25, align='l')\n table.reformat_column(1, width=7, align='c')\n table.reformat_column(2, width=35, align='l')\n table.reformat_column(3, width=25, pad_right=1, align='l')\n for each in hosted:\n delta_t = now - hosted[each][0]\n v_name = each.get_display_name(char)\n v_count = hosted[each][2]\n from_name = hosted[each][1].get_display_name(char) if hosted[each][1] else '|where|n'\n table.add_row(v_name, v_count, utils.time_format(delta_t, 2), from_name)\n self.msg('[begin] Audit showing visits to:')\n self.msg(str(table))\n self.msg('[end] Audit of {}'.format(obj_name))\n else:\n self.msg('No audit information for {}.'.format(obj_name))",
"def getInfo(notification):",
"async def view_audit_actions(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # handle by `cog_check`\n\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n int,\n 'SELECT BITS FROM LOGGING WHERE GUILD_ID=?',\n (ctx.guild.id,))\n ):\n await ctx.send(embed=build_actions_embed(LoggingActions.all_enabled_actions((logging_info[0]))))\n else:\n await ctx.send('You must first set an audit channel before viewing audit actions.'\n '\\n_See `auditactions setchannel` for more information._')",
"def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp",
"def getLog(self):\n \n return self.resp[\"log\"]",
"def audit_action(self):\n return self._audit_action",
"def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()",
"def get_story_info(self, query):\n stories = self.get_stories(query)\n\n info = []\n\n for story in stories:\n story_info = {\n 'id': story['id'],\n 'name': story['name'],\n 'kind': story['kind'].capitalize(),\n 'state': story['current_state'],\n 'owner': self.get_person(story['owned_by_id'])['name'],\n 'pull': self.get_pull(story)\n }\n info.append(story_info)\n\n return info",
"def GetChangeDetail(host, change, o_params=None):\n path = '%s/detail' % _GetChangePath(change)\n if o_params:\n path = '%s?%s' % (path, '&'.join(['o=%s' % p for p in o_params]))\n return FetchUrlJson(host, path)",
"def audit(self, database=None):\n listOfErrors = []\n listOfWarnings = []\n\n for e in self.children:\n err, war = e.audit(database)\n listOfErrors += err\n listOfWarnings += war\n return listOfErrors, listOfWarnings",
"def get_entries(audit_id=None, start_time=None):\n al = []\n try:\n if start_time and audit_id:\n raise Exception('Incompatible parameters passed')\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if audit_id:\n query = 'select * from audit where audit_id=\"%d\" order by audit_id desc' % int(\n audit_id)\n else:\n if not start_time:\n query = 'select * from audit order by audit_id desc'\n else:\n query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(\n start_time)\n rows, err = db.get_multiple_rows(db_path, query)\n if err:\n raise Exception(err)\n if rows:\n for row in rows:\n audit_entry, err = _parse_audit_entry(row)\n if err:\n raise Exception(err)\n al.append(audit_entry)\n except Exception, e:\n return None, 'Error loading audit entries : %s' % str(e)\n else:\n return al, None",
"def getTenantAttributeUpdateAuditTrail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs",
"def get_table_info_from_revision_record(revision_record):\n\n if (\"payload\" in revision_record) and \"tableInfo\" in revision_record[\"payload\"]:\n return revision_record[\"payload\"][\"tableInfo\"]",
"def audit(request):\n\tif request.method == 'POST':\n\t\tsearch_term = request.POST['search_term']\n\t\tsamples = Sample.objects.filter(Q(participant_id__contains=search_term) | \n\t\t\t\t\t\t\t\t\t\tQ(group_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(laboratory_sample_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(receiving_rack__receiving_rack_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(comment__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(issue_outcome__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(holding_rack_well__holding_rack__holding_rack_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(holding_rack_well__holding_rack__plate__plate_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(holding_rack_well__holding_rack__plate__gel_1008_csv__consignment_number__contains=search_term)).prefetch_related('receiving_rack', 'holding_rack_well', \n\t\t\t\t'holding_rack_well__holding_rack', 'holding_rack_well__holding_rack__plate', \n\t\t\t\t'holding_rack_well__holding_rack__plate__gel_1008_csv', \n\t\t\t\t'receiving_rack__gel_1004_csv', 'receiving_rack__gel_1004_csv__gel_1005_csv').order_by('-sample_received_datetime')[0:1000]\n\telse:\n\t\tsamples = None\n\treturn render(request, 'platerplotter/audit.html', {\"samples\" : samples})",
"def describe_audit_records(\n self,\n request: dds_20151201_models.DescribeAuditRecordsRequest,\n ) -> dds_20151201_models.DescribeAuditRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_audit_records_with_options(request, runtime)"
] | [
"0.7213119",
"0.7043593",
"0.66991794",
"0.63289493",
"0.6216085",
"0.62059367",
"0.6160432",
"0.58675766",
"0.5810578",
"0.56947064",
"0.5672697",
"0.5560825",
"0.55349076",
"0.5424039",
"0.541629",
"0.53783417",
"0.5374017",
"0.53407764",
"0.533264",
"0.53322256",
"0.5264904",
"0.5232477",
"0.51841307",
"0.5177964",
"0.51777893",
"0.5166521",
"0.51609564",
"0.5160911",
"0.51298845",
"0.5115037"
] | 0.7407178 | 0 |
Check that the request is authenticated. If API_AUTH_SECRET_HEADER_NAME is not in the request headers, return 401. If API_AUTH_SECRET_HEADER_NAME is in the request headers but incorrect, return 403. Otherwise return None. | def is_authenticated_request(req: Request) -> Optional[Response]:
if API_AUTH_SECRET_HEADER_NAME not in req.headers:
return make_error_response(HTTP_STATUS_CODE.UNAUTHORIZED)
if req.headers[API_AUTH_SECRET_HEADER_NAME] != API_AUTH_SECRET:
return make_error_response(HTTP_STATUS_CODE.FORBIDDEN)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_authorization_header_not_present(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)",
"def test_authorization_header_not_present(self, _get_key_secret):\n request = Request(self.environ)\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)",
"async def authorization(request):\n # Decode tokens, load/check users and etc\n # ...\n # in the example we just ensure that the authorization header exists\n return request.headers.get(\"authorization\", \"\")",
"def unauthorized():\n return HttpError(401)",
"def authorized(fn):\n\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n # Unauthorized\n print(\"No token in header\")\n abort(401)\n\n\n if key not in request.headers['Authorization']:\n # Unauthorized\n print(\"Key not in auth header\")\n abort(401)\n\n return fn(*args, **kwargs)\n return _wrap",
"def check_authentication(self):\n try:\n cookies = os.environ['HTTP_COOKIE'].split('; ')\n except KeyError:\n cookies = []\n for c in cookies:\n prefix = Auth.AUTH_COOKIE_NAME + '='\n if (c.startswith(prefix) and\n self.is_authentication_token(c[len(prefix):])):\n return True\n print 'Status: 403 Forbidden'\n print 'Content-Type: application/json'\n print self.logout_headers()\n print json.JSONEncoder().encode({'error': 'Not authenticated.'})\n sys.exit(1)",
"def fusion_api_check_authorization(self, body=None, api=None, headers=None, sessionID=None):\n return self.auth.check(body=body, api=api, headers=headers, sessionID=sessionID)",
"def test_authorization_header_empty(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.authorization = \"bad authorization header\"\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)",
"def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})",
"def authenticate_header(self, request):\n return \"Api key authentication failed.\"",
"def test_authorization_header_empty(self, _get_key_secret):\n request = Request(self.environ)\n request.authorization = \"bad authorization header\"\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)",
"def test_security_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('X-Frame-Options') == 'DENY'\n assert headers.get('X-Content-Type-Options') == 'nosniff'",
"def should_skip_auth(flask_request):\n return flask_request.method in ['HEAD', 'OPTIONS']",
"def unauthorized():\n return {'errors': ['Unauthorized']}, 401",
"def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token",
"def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def check_api_key(x_api_key: str = Security(api_key_header_auth)):\n\n if x_api_key != API_KEY:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid API Key\",\n )",
"def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)",
"def jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")",
"def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})",
"def test_unauthorized_exception(exception_app):\n request, response = exception_app.test_client.get('/401')\n assert response.status == 401\n\n request, response = exception_app.test_client.get('/401/basic')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') is not None\n assert response.headers.get('WWW-Authenticate') == \"Basic realm='Sanic'\"\n\n request, response = exception_app.test_client.get('/401/digest')\n assert response.status == 401\n\n auth_header = response.headers.get('WWW-Authenticate')\n assert auth_header is not None\n assert auth_header.startswith('Digest')\n assert \"qop='auth, auth-int'\" in auth_header\n assert \"algorithm='MD5'\" in auth_header\n assert \"nonce='abcdef'\" in auth_header\n assert \"opaque='zyxwvu'\" in auth_header\n\n request, response = exception_app.test_client.get('/401/bearer')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') == \"Bearer\"",
"def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value",
"def authenticate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n access_token = request.headers.get('token', '')\n if access_token.strip(' '):\n decoded = decode_token(access_token)\n if decoded['status']:\n return func(*args, **kwargs)\n abort(http_status_code=401, message='Invalid token.Please login')\n abort(http_status_code=401,\n message='Token is missing')\n return wrapper",
"def check_authorized(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n if not self.sessions[addr].get(\"authorized\"):\n return Header.ERROR, Error.FORBIDDEN_REQUEST\n else:\n return f(self, addr, request)\n\n return wrapper",
"def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)",
"def require_http_auth(request):\n\n if http_auth_allowed(request) and not request.user.is_authenticated:\n site = get_current_site(request)\n response = HttpResponse(status=401)\n response['WWW-Authenticate'] = (\n 'Basic realm=\"{}\", charset=\"UTF-8\"'.format(site.name)\n )\n # Check whether the client supports cookies.\n response.set_cookie('testcookie', '1', secure=(not settings.DEBUG),\n httponly=True, samesite='Lax')\n return response\n else:\n raise PermissionDenied()",
"def authenticate():\n resp = {\"status\": 401, \"message\": \"Could not verify your access level for that URL\"}\n return Response(dumps(resp), status=404, mimetype='application/json')",
"def check_user():\n token = request.headers['Authorization'].replace('Bearer ', '')\n return jsonify({\"access_token\": token}), 200",
"def test_unauthorized_access(flask_test_client, http_method, endpoint):\n response = flask_test_client.open(\n method=http_method, path=endpoint, headers=get_headers()\n )\n assert response.status == \"401 UNAUTHORIZED\"\n assert response.content_type == \"application/json\"\n assert response.json[\"message\"] == \"Access token is invalid or expired.\""
] | [
"0.69187254",
"0.68816525",
"0.66055983",
"0.654799",
"0.6515225",
"0.64065534",
"0.63303685",
"0.63240373",
"0.63201493",
"0.6309157",
"0.6307791",
"0.62788045",
"0.6236683",
"0.6223184",
"0.6208542",
"0.6197912",
"0.6197359",
"0.6196875",
"0.61898404",
"0.6124128",
"0.61186516",
"0.61033416",
"0.6084603",
"0.60843873",
"0.60810524",
"0.605682",
"0.605031",
"0.60433537",
"0.6038789",
"0.6018673"
] | 0.81665546 | 0 |
Add the number of minutes represented by mins to the currentDate input and return the new date timestamp | def addMinutes(self, currentDate:str, dateFormat:str, mins:int) -> str:
inputDateTime = datetime.strptime(currentDate, dateFormat)
nextTime = inputDateTime + timedelta(minutes=mins)
return nextTime.strftime(dateFormat) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_datetime_before_given_minutes(minutes):\n from datetime import datetime\n import datetime as dt\n date_obj_before_3min = datetime.now()- dt.timedelta(minutes=minutes)\n return date_obj_before_3min",
"def get_today_start():\n return datetime.combine(datetime.today(), time.min)",
"def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )",
"def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )",
"def date_minute(date):\n return date.minute",
"def setMinute(self, *args):\n return _libsbml.Date_setMinute(self, *args)",
"def getMinute(self):\n return _libsbml.Date_getMinute(self)",
"def _get_interval_start_time(self):\n current_time = timezone.now()\n minutes = self._get_time_interval_in_minutes()\n time_delta = datetime.timedelta(minutes=minutes)\n return current_time - time_delta",
"def min(self):\n\n return time_stat(self, stat=\"min\")",
"def get_current_time_lag_min(self):\n self.current_time_lag_min = self.get_timelag()[0] // 60",
"def make_current():\n current = datetime.datetime.now()\n hour = '{:02d}'.format(current.hour)\n minute = '{:02d}'.format(current.minute)\n second = '{:02d}'.format(current.second)\n current_time = hour + minute + second\n return current_time",
"def multMinuteAlign(ts, min):\n\tintv = secInMinute * min\n\treturn int((ts / intv)) * intv",
"def _compute_next_update(self):\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)",
"def min_time(self, min_time: str):\n\n self._min_time = min_time",
"def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")",
"def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)",
"def set_Minute(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Minute', value)",
"def gen_date_with_mins(date):\n datetime_info = date.split(', ')\n time = convert_12_to_24(datetime_info[0])\n month, day = datetime_info[1].split(' ')\n year = datetime_info[2]\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n time = datetime.time(time.hour, time.minute)\n return date, time",
"def _next_update_time(self, seconds=10):\n now = get_aware_utc_now()\n next_update_time = now + datetime.timedelta(\n seconds=seconds)\n return next_update_time",
"def calculate_shorttimesince(d, now=None):\r\n chunks = (\r\n (60 * 60 * 24 * 365, lambda n: ungettext('yr', 'yr', n)),\r\n (60 * 60 * 24 * 30, lambda n: ungettext('mn', 'mn', n)),\r\n (60 * 60 * 24 * 7, lambda n : ungettext('wk', 'wk', n)),\r\n (60 * 60 * 24, lambda n : ungettext('d', 'd', n)),\r\n (60 * 60, lambda n: ungettext('hr', 'hr', n)),\r\n (60, lambda n: ungettext('min', 'min', n))\r\n )\r\n # Convert datetime.date to datetime.datetime for comparison\r\n if d.__class__ is not datetime.datetime:\r\n d = datetime.datetime(d.year, d.month, d.day)\r\n if now:\r\n t = now.timetuple()\r\n else:\r\n t = time.localtime()\r\n if d.tzinfo:\r\n tz = LocalTimezone(d)\r\n else:\r\n tz = None\r\n now = datetime.datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz)\r\n\r\n # ignore microsecond part of 'd' since we removed it from 'now'\r\n delta = now - (d - datetime.timedelta(0, 0, d.microsecond))\r\n since = delta.days * 24 * 60 * 60 + delta.seconds\r\n if since <= 0:\r\n # d is in the future compared to now, stop processing.\r\n return u'0' + ugettext('min')\r\n for i, (seconds, name) in enumerate(chunks):\r\n count = since // seconds\r\n if count != 0:\r\n break\r\n s = ugettext('%(number)d%(type)s') % {'number': count, 'type': name(count)}\r\n if i + 1 < len(chunks):\r\n # Now get the second item\r\n seconds2, name2 = chunks[i + 1]\r\n count2 = (since - (seconds * count)) // seconds2\r\n if count2 != 0:\r\n s += ugettext(', %(number)d%(type)s') % {'number': count2, 'type': name2(count2)}\r\n return s",
"def get_tommorows_noon_time():\n dt = datetime.combine(date.today() + timedelta(days=1), datetime.min.time())\n return dt",
"def mins_since_event(file):\n initial = initial_time(file)\n actual = time.localtime(time.time())\n if initial[3] == actual[3]:\n return actual[4] - initial[4]\n else:\n return (60 - initial[4]) + actual[4]",
"def reformat_date(all_data, min_date):\n all_data[\"date\"] = [datetime.timedelta(x) for x in all_data[\"date\"]]\n all_data[\"date\"] = all_data[\"date\"] + min_date",
"def min_retire_time(self):\n return self._min_retire_time",
"def get_timestamp(prev_ts=None):\n t = time.time()\n t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))\n if prev_ts is not None:\n t = t.laterThan(prev_ts)\n return t",
"def date_calculator(years, days, hours, minutes):\n now = datetime.datetime.now()\n\n modified_dt = datetime.datetime(now.year + years, now.month, now.day,\n now.hour, now.minute)\n delta = datetime.timedelta(days=days, hours=hours, minutes=minutes)\n modified_dt += delta\n\n print(format_date(modified_dt))",
"def earliestTime(self):\n return self.__class__(\n self._year, self._month, self._day, 0, 0, 0, self._tz)",
"def min_time(self) -> str:\n return self._min_time",
"def reservetime_min(self):\n return self._get_time_info([\"Reserve_Time_M\", \"reserveTimeMinute\"])",
"def start():\r\n beginning_of_min = False\r\n while beginning_of_min == False:\r\n start_at = datetime.now()\r\n start_time_sec = start_at.strftime(\"%H:%M:%S\")\r\n start_time_min = start_at.strftime(\"%H:%M\")\r\n if start_time_sec[-2:] == '00':\r\n beginning_of_min = True \r\n \r\n print(\"Starting at\", start_time_sec)\r\n return start_time_sec"
] | [
"0.58505815",
"0.5687333",
"0.56670386",
"0.56670386",
"0.5537341",
"0.5529319",
"0.54898757",
"0.54418087",
"0.5429182",
"0.5377847",
"0.53345233",
"0.53310037",
"0.5325793",
"0.5316861",
"0.53041273",
"0.52726483",
"0.5251411",
"0.5228076",
"0.5211206",
"0.5204495",
"0.51693785",
"0.5162242",
"0.51464427",
"0.51319194",
"0.5098429",
"0.5043835",
"0.50067765",
"0.49820638",
"0.49787408",
"0.49477777"
] | 0.6531147 | 0 |
Prints out ">>" to make the prompt look nice. | def prompt():
sys.stdout.write('>> ')
sys.stdout.flush() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])",
"def do_prompt(self, line):\n self.prompt = line + ': '",
"def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))",
"def print_cmd(cmd):\n padding = \" \" * 80\n sys.stdout.write(\"\\r\"+padding)\n sys.stdout.write(\"\\r\"+prompt+cmd)\n sys.stdout.flush()",
"def prompt(self):\n self.prompt_flag = True",
"def prompt() -> None:\n\n username = click.prompt(\n text=\"Please enter a username\",\n type=click.STRING\n )\n password = click.prompt(\n text=\"Please enter a new password\",\n hide_input=True,\n confirmation_prompt=True\n )\n newsletter_subscription = click.prompt(\n text=\"Would you like to subscribe to our newsletter?\",\n default=False,\n type=click.BOOL\n )\n favorite_color=click.prompt(\n text=\"What is your favorite color?\",\n type=click.Choice([\"blue\", \"green\", \"yellow\"], case_sensitive=False)\n )\n\n click.echo(\n f\"Username: {username} | Password: {'*' * len(password)} | \"\n + f\"Newsletter: {newsletter_subscription} | Favorite color: \"\n + click.style(favorite_color, fg=favorite_color)\n )",
"def prompt():\n program_info = ('Dice Rolling Simulator\\n'\n 'Author: Franklin Pinnock\\n'\n 'Language: Python 3.4\\n'\n 'Version: 1.0\\n')\n print(program_info)",
"def prompt(self):\n return input(self.message + \": \").strip()",
"def show_prompt(self, prompt=None):\n\n if prompt is None:\n prompt = self.prompt\n\n # Only insert the prompt if we don't have one already:\n #\n if self.find_prompt(prompt) == sublime.Region(-1, -1):\n self._write(0, prompt)",
"def prompt_for_input(prepend_prompt=''):\n if not prepend_prompt == '':\n prepend_prompt += ' '\n return raw_input(prepend_prompt + '> ').strip()",
"def write_output_prompt(self):\n # Use write, not print which adds an extra space.\n IPython.utils.io.Term.cout.write(self.output_sep)\n outprompt = str(self.prompt_out)\n if self.do_full_cache:\n IPython.utils.io.Term.cout.write(outprompt)",
"def render_input(self):\n prompt = \"Chat >>> \"\n message = self.input\n if len(message) + len(prompt) > self.w:\n message = message[len(message) + len(prompt) + 4 - self.w:]\n message = '...' + message\n self.stdscr.hline(self.h - 1, 0, ord(' '), self.w)\n self.stdscr.addstr(self.h - 1, 0, prompt + message)\n # Move cursor to the end of input",
"def render_prompt(self) -> str:\n # pylint: disable=no-member\n return '{}render: '.format(self.prompt)",
"def waitprompt(c):\n c.expect('\\n> ')\n time.sleep(0.1)",
"def prompt(self):\n\n p = f\"┌[Installed: {color('cyan')}{len(self.installed) + len(self.running)}{color('rs')}\"\n p += f\"]-[Running: {color('purple')}{len(self.running)}{color('rs')}]\\n\"\n p += f\"└╼{color('lblue')}RiotStar{color('rs')}> \"\n return p",
"def do_prompt(self, line):\n if line:\n self.prompt = \"(%s) \" %line\n\n else:\n print 'Please specify a prompt text'",
"def main_menu_for_testing():\n print(PROMPT_TEXT)",
"def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))",
"def print_interact_help():\n print(\"Commands:\")\n print(\"\\tj - up\")\n print(\"\\tk - down\")\n print(\"\\t<Space> - switch Bought to BoughtX\")\n print(\"\\t<Enter> - send Enter to Quicken\")\n print(\"\\t<Escape> - quit\")",
"def display_prompt(self, text):\n key = None\n res = ''\n while key != 'KEY_NEWLINE':\n if key == 'KEY_BACKSPACE':\n res = res[ : -1]\n elif ischar(key):\n res += key\n self.stdscr.erase()\n self.stdscr.addstr(f'{PADCHAR}{text}\\n')\n self.stdscr.addstr(f'{PADCHAR}{res}')\n key = self.get_key()\n return res",
"def __alt_prompt(self, prompt_text: str):\r\n if self.__use_windows_prompt:\r\n sys.stdout.write(prompt_text)\r\n sys.stdout.flush()\r\n i = sys.stdin.readline()\r\n return i.strip()\r\n return input(prompt_text)",
"def __window_prompt(self, text):\n return True",
"def console_mode():\n t = ''\n while True:\n string = input()\n t += string\n if string.strip() == 'exit()':\n break\n if is_not_full(string):\n t += '\\n'\n continue\n console(t)\n t = ''",
"def print_prompt(self):\n clear_term()\n\n print('Press \"w\", \"a\", \"s\", or \"d\" to move Up, Left, Down or Right',\n 'respectively.\\nEnter \"p\" or \"Q\" to quit.\\n')\n self.grid.draw_grid()\n print('\\nScore: ' + str(self.grid.score))",
"def hr() -> None:\n width, _ = click.get_terminal_size()\n click.echo('-' * width)",
"def make_prompt(self, location=None):\n prompt = '(acsploit : %s) ' % location if location is not None else '(acsploit) '\n return colorize(prompt, \"blue\")",
"def clear_screen(self,):\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()\n print \"\\n\\t\\tDo To - %s\\n\\n\" % self.user",
"def prompt(self, task, text='', print_=False):\n template = self.prompts[task]['prompt']\n res = self.format_prompt(task, template, text)\n if print_:\n print(res)\n else:\n return res",
"def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)",
"def text_input():\n return input(\">>>\")"
] | [
"0.7335863",
"0.6978222",
"0.6823395",
"0.67102325",
"0.6643498",
"0.6569211",
"0.649697",
"0.64917946",
"0.6482021",
"0.6433004",
"0.63896614",
"0.63162845",
"0.62998056",
"0.6265454",
"0.6237534",
"0.6233695",
"0.6232359",
"0.618937",
"0.6169687",
"0.61388206",
"0.6136785",
"0.6133169",
"0.6133052",
"0.61107635",
"0.6093827",
"0.609364",
"0.6071846",
"0.60701525",
"0.60400504",
"0.6023701"
] | 0.7840775 | 0 |
Takes a video file path and a transcode profile, transcodes the file, and returns the transcoded file as bytes, along with ffmpeg's stderr output and the command used. | def transcode_segment(self,
in_path: str,
profile: TranscodeProfile
) -> Tuple[bytes, str, str]:
out_filepath = f"/tmp/{uuid4()}.ts"
transcode_command = [
"ffmpeg",
"-i", in_path,
"-vf", f"scale={profile.video_width}:-1",
*profile.get_video_transcode_parameters(),
"-bsf:v", "h264_mp4toannexb",
*profile.get_audio_transcode_parameters(),
"-copyts", "-muxdelay", "0",
"-preset", profile.video_preset,
out_filepath
]
process = subprocess.Popen(transcode_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
stderr = process.stderr.read().decode("utf-8")
# Read new file back in and delete
try:
with open(out_filepath, "rb") as f:
file_out_bytes = f.read()
os.remove(out_filepath)
except FileNotFoundError:
raise TranscodeError("FFmpeg returned a non-zero code.\n" + stderr)
return file_out_bytes, stderr, transcode_command | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transcode(path, outpath):\n\n needs_transcode = determine_transcode(path)\n logger.info(f\"Transcoding {path} to {outpath}...\")\n\n cmd = [\n \"ffmpeg\", \"-y\",\n \"-i\", path,\n \"-an\",\n \"-metadata:s\", \"handler_name=tator\",\n \"-vcodec\", \"libx264\",\n \"-g\", \"25\",\n \"-preset\", \"fast\",\n \"-pix_fmt\", \"yuv420p\",\n \"-vf\", \"pad=ceil(iw/2)*2:ceil(ih/2)*2\",\n \"-movflags\",\n \"faststart+frag_keyframe+empty_moov+default_base_moof\",\n \"-tune\", \"fastdecode\",\n ]\n\n if needs_transcode[1]:\n #Resize to 720p\n cmd.extend([\"-vf\", \"scale=-2:720\"])\n\n cmd.append(outpath)\n logger.info('ffmpeg cmd = {}'.format(cmd))\n subprocess.run(cmd, check=True)\n logger.info(\"Transcoding finished!\")",
"def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath",
"def generate_still_from_video(self,\n in_path: str\n ) -> Tuple[bytes, float, str]:\n out_filepath = f\"/tmp/{uuid4()}.jpg\"\n command = [\n \"ffmpeg\",\n \"-i\", in_path,\n \"-vframes\", \"1\",\n out_filepath\n ]\n\n process = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n stderr = process.stderr.read().decode(\"utf-8\")\n\n # Parse start timecode\n timecode = self.parse_start_timecode_from_stderr(stderr)\n\n # Read new file back in and delete\n try:\n with open(out_filepath, \"rb\") as f:\n file_out_bytes = f.read()\n os.remove(out_filepath)\n except FileNotFoundError:\n raise TranscodeError(\"FFmpeg returned a non-zero code.\\n\" + stderr)\n\n return file_out_bytes, timecode, stderr",
"def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin",
"def transcode(filePath: str) -> str:\n asset_uuid = uuid.uuid4()\n outPath = os.path.join(\"/tmp\", str(asset_uuid)+'.mp4')\n ffmpeg.input(filePath).output(outPath).run()\n return outPath",
"def transcode(self) -> None:\n # Get source mediainfo to use in validation\n source_media_info = self.get_media_info(self.source)\n\n # Common ffmpeg flags\n ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')\n # Init source file\n ff < SourceFile(self.source)\n # Scaling\n fc = ff.init_filter_complex()\n fc.video | Scale(**TRANSCODING_OPTIONS[SCALE]) | fc.get_video_dest(0)\n\n # set group of pixels length to segment size\n gop = math.floor(source_media_info[VIDEO_FRAME_RATE] * GOP_DURATION)\n # preserve source audio sampling rate\n arate = source_media_info[AUDIO_SAMPLING_RATE]\n # preserve original video FPS\n vrate = source_media_info[VIDEO_FRAME_RATE]\n # codecs, muxer and output path\n\n cv0 = VideoCodec(\n gop=gop,\n vrate=vrate,\n **TRANSCODING_OPTIONS[VIDEO_CODEC])\n ca0 = AudioCodec(\n arate=arate,\n **TRANSCODING_OPTIONS[AUDIO_CODEC])\n out0 = Muxer(self.destination, format='mp4')\n\n # Add output file to ffmpeg\n ff.add_output(out0, cv0, ca0)\n\n # Run ffmpeg\n self.run(ff)\n\n # Get result mediainfo\n dest_media_info = self.get_media_info(self.destination)\n\n # Validate ffmpeg result\n self.validate(source_media_info, dest_media_info)",
"def convert(self, infile, outfile, opts, timeout=10, preopts=None, postopts=None):\n if os.name == 'nt':\n timeout = 0\n\n if not os.path.exists(infile):\n raise FFMpegError(\"Input file doesn't exist: \" + infile)\n\n cmds = [self.ffmpeg_path]\n if preopts:\n cmds.extend(preopts)\n cmds.extend(['-i', infile])\n\n # Move additional inputs to the front of the line\n for ind, command in enumerate(opts):\n if command == '-i':\n cmds.extend(['-i', opts[ind + 1]])\n del opts[ind]\n del opts[ind]\n\n cmds.extend(opts)\n if postopts:\n cmds.extend(postopts)\n cmds.extend(['-y', outfile])\n\n if timeout:\n def on_sigalrm(*_):\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n raise Exception('timed out while waiting for ffmpeg')\n\n signal.signal(signal.SIGALRM, on_sigalrm)\n\n try:\n p = self._spawn(cmds)\n except OSError:\n raise FFMpegError('Error while calling ffmpeg binary')\n\n yielded = False\n buf = ''\n total_output = ''\n pat = re.compile(r'time=([0-9.:]+) ')\n\n while True:\n if timeout:\n signal.alarm(timeout)\n\n ret = p.stderr.read(10)\n\n if timeout:\n signal.alarm(0)\n\n if not ret:\n # For small or very fast jobs, ffmpeg may never output a '\\r'. When EOF is reached, yield if we haven't yet.\n if not yielded:\n yielded = True\n yield 10\n break\n\n try:\n ret = ret.decode(console_encoding)\n except UnicodeDecodeError:\n try:\n ret = ret.decode(console_encoding, errors=\"ignore\")\n except:\n pass\n\n total_output += ret\n buf += ret\n if '\\r' in buf:\n line, buf = buf.split('\\r', 1)\n\n tmp = pat.findall(line)\n if len(tmp) == 1:\n timespec = tmp[0]\n if ':' in timespec:\n timecode = 0\n for part in timespec.split(':'):\n timecode = 60 * timecode + float(part)\n else:\n timecode = float(tmp[0])\n yielded = True\n yield timecode\n\n if timeout:\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n\n p.communicate() # wait for process to exit\n\n if total_output == '':\n raise FFMpegError('Error while calling ffmpeg binary')\n\n cmd = ' '.join(cmds)\n if '\\n' in total_output:\n line = total_output.split('\\n')[-2]\n\n if line.startswith('Received signal'):\n # Received signal 15: terminating.\n raise FFMpegConvertError(line.split(':')[0], cmd, total_output, pid=p.pid)\n if line.startswith(infile + ': '):\n err = line[len(infile) + 2:]\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n err, pid=p.pid)\n if line.startswith('Error while '):\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n line, pid=p.pid)\n if not yielded:\n raise FFMpegConvertError('Unknown ffmpeg error', cmd,\n total_output, line, pid=p.pid)\n if p.returncode != 0:\n raise FFMpegConvertError('Exited with code %d' % p.returncode, cmd,\n total_output, pid=p.pid)\n\n return outfile",
"def convert(fname_src, verbose=False):\n if not os.path.isfile(fname_src):\n raise IOError('File not found: %s' % fname_src)\n\n # File names.\n b, e = os.path.splitext(fname_src)\n fname_dst = b + '.m4a'\n\n # Build command.\n cmd = 'ffmpeg -y -i \"%s\" \"%s\"' % (fname_src, fname_dst)\n\n t0 = time.time()\n std_out, std_err = run_cmd(cmd)\n dt = time.time() - t0\n\n if dt < 0.01:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n if std_out.lower().find('error') >= 0:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n # Done.\n return fname_dst",
"def __run(srcfile):\n\n # Test out with:\n # probe() {\n # ffprobe -v quiet -hide_banner -of json -print_format json -show_format -show_streams -i \"$1\"\n # }\n\n cp = subprocess.run([BIN_FFPROBE, \"-v\", \"quiet\", \"-hide_banner\", \"-of\",\n \"json\", \"-print_format\", \"json\", \"-show_format\", \"-show_streams\", \"-i\", srcfile],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return cp.stdout.decode('utf-8')",
"def transcode(filename, enc_data):\n base = os.path.splitext(filename)[0]\n exe = g.muxapp if g.transcoder_path == \"auto\" else g.transcoder_path\n\n # ensure valid executable\n if not exe or not os.path.exists(exe) or not os.access(exe, os.X_OK):\n xprint(\"Encoding failed. Couldn't find a valid encoder :(\\n\")\n time.sleep(2)\n return filename\n\n command = shlex.split(enc_data['command'])\n newcom, outfn = command[::], \"\"\n\n for n, d in enumerate(command):\n\n if d == \"ENCODER_PATH\":\n newcom[n] = exe\n\n elif d == \"IN\":\n newcom[n] = filename\n\n elif d == \"OUT\":\n newcom[n] = outfn = base\n\n elif d == \"OUT.EXT\":\n newcom[n] = outfn = base + \".\" + enc_data['ext']\n\n returncode = subprocess.call(newcom)\n\n if returncode == 0 and g.delete_orig:\n os.unlink(filename)\n\n return outfn",
"def _transcode_ffmpeg_args(mpeg_filename, mp4_filename, res):\n\n \"\"\"\n 697 ffmpeg -i Chef\\ Wanted\\ With\\ Anne\\ Burrell\\:\\ \\\"The\\ Re-Launch\\\".mpg\n -strict experimental -acodec aac -ac 2 -ab 160k -s 960x540 -vcodec libx264\n -vpre iPod640 -b 1200k -f mp4 -threads 0 chef.conversionmatrixsettings.mp4\n \"\"\"\n return [FFMPEG, \"-i\", mpeg_filename, \"-strict\", \"experimental\",\n \"-acodec\", \"aac\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-s\", res,\n \"-vcodec\", \"libx264\", \"-vpre\", \"iPod640\", \"-b\", \"1200k\",\n \"-f\", \"mp4\", \"-threads\", \"0\", mp4_filename]",
"def _decode(item):\n tivo_filename = item.filename()\n logger.info(\"Decoding %s\" % tivo_filename)\n\n mpeg_filename = item.filename(ext=\"mpg\")\n videos_dir = item.vdir()\n\n p = subprocess.Popen([\"/usr/local/bin/tivodecode\", \"--mak\", os.environ[\"MAK\"], \n \"--out\", mpeg_filename, tivo_filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n logger.info(\"tivodecode returned %d\" % rc)\n logger.info(\"tivodecode output: '%s'\" % p.stdout.read())\n if rc == 0:\n # success!\n item.decoded = True\n item.save()\n else:\n raise Exception(\"Tivodecode failed on file '%s' with rc %d\" %\n (tivo_filename, rc))",
"def run(self, ff: FFMPEG) -> None:\n return_code, error = ff.run()\n if error or return_code != 0:\n # Check return code and error messages\n error = error or f\"invalid ffmpeg return code {return_code}\"\n raise TranscodeError(error)",
"def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: [email protected]: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file",
"def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()",
"def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file",
"def init_transcode():\n if not os.path.exists(g.TCFILE):\n config_file_contents = \"\"\"\\\n# transcoding presets for mps-youtube\n# VERSION 0\n\n# change ENCODER_PATH to the path of ffmpeg / avconv or leave it as auto\n# to let mps-youtube attempt to find ffmpeg or avconv\nENCODER_PATH: auto\n\n# Delete original file after encoding it\n# Set to False to keep the original downloaded file\nDELETE_ORIGINAL: True\n\n# ENCODING PRESETS\n\n# Encode ogg or m4a to mp3 256k\nname: MP3 256k\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -b:a 256k OUT.EXT\n\n# Encode ogg or m4a to mp3 192k\nname: MP3 192k\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -b:a 192k OUT.EXT\n\n# Encode ogg or m4a to mp3 highest quality vbr\nname: MP3 VBR best\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -q:a 0 OUT.EXT\n\n# Encode ogg or m4a to mp3 high quality vbr\nname: MP3 VBR good\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -q:a 2 OUT.EXT\n\n# Encode m4a to ogg\nname: OGG 256k\nextension: ogg\nvalid for: m4a\ncommand: ENCODER_PATH -i IN -codec:a libvorbis -b:a 256k OUT.EXT\n\n# Encode ogg to m4a\nname: M4A 256k\nextension: m4a\nvalid for: ogg\ncommand: ENCODER_PATH -i IN -strict experimental -codec:a aac -b:a 256k OUT.EXT\n\n# Encode ogg or m4a to wma v2\nname: Windows Media Audio v2\nextension: wma\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a wmav2 -q:a 0 OUT.EXT\"\"\"\n\n with open(g.TCFILE, \"w\") as tcf:\n tcf.write(config_file_contents)\n dbg(\"generated transcoding config file\")\n\n else:\n dbg(\"transcoding config file exists\")\n\n with open(g.TCFILE, \"r\") as tcf:\n g.encoders = [dict(name=\"None\", ext=\"COPY\", valid=\"*\")]\n e = {}\n\n for line in tcf.readlines():\n\n if line.startswith(\"TRANSCODER_PATH:\"):\n m = re.match(\"TRANSCODER_PATH:(.*)\", line).group(1)\n g.transcoder_path = m.strip()\n\n elif line.startswith(\"DELETE_ORIGINAL:\"):\n m = re.match(\"DELETE_ORIGINAL:(.*)\", line).group(1)\n do = m.strip().lower() in (\"true\", \"yes\", \"enabled\", \"on\")\n g.delete_orig = do\n\n elif line.startswith(\"name:\"):\n e['name'] = re.match(\"name:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"extension:\"):\n e['ext'] = re.match(\"extension:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"valid for:\"):\n e['valid'] = re.match(\"valid for:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"command:\"):\n e['command'] = re.match(\"command:(.*)\", line).group(1).strip()\n\n if \"name\" in e and \"ext\" in e and \"valid\" in e:\n g.encoders.append(e)\n e = {}",
"def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))",
"def screenDataToPNG(self, rawFile, destFile, ffmpeg):\n\n args = [ffmpeg, '-vcodec rawvideo', '-f rawvideo', '-pix_fmt rgb565', \n '-s 320*480', '-i', rawFile, '-f image2', '-vcodec png', '%s.png' % destFile]\n \n # Something tricky here, need args.split(' ')\n args = ' '.join(args)\n try:\n ffmpegProcess = subprocess.Popen(args.split(' '),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n except OSError, osErr:\n raise EmulatorClientError('-Failed to run ffmpeg command \\'%s\\': %s' % (args, osErr.strerror),\n theCode=EmulatorClientError.FFMPEG_RUN_ERROR,\n theBaseError=osErr)\n except:\n exc = traceback.format_exc()\n self.log.exce(exc)\n retval = ffmpegProcess.communicate()\n\n #adb.wait() \n self.log.info('-Result: %s' % str(retval))\n return retval",
"def ffmpeg_extract_frame(filename, t1, targetname):\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\n \"-i\", filename,\n \"-ss\", \"%0.2f\" % t1,\n \"-vframes\", \"1\", targetname]\n\n subprocess_call(cmd)",
"def frame_dump(filename, frametime, output_filename='out.png', \n meth='ffmpeg fast', subseek_cushion=20., verbose=False, dry_run=False,\n very_verbose=False):\n \n if meth == 'mplayer':\n raise ValueError(\"mplayer not supported\")\n elif meth == 'ffmpeg best':\n # Break the seek into a coarse and a fine\n coarse = np.max([0, frametime - subseek_cushion])\n fine = frametime - coarse\n syscall = 'ffmpeg -y -ss %r -i %s -ss %r -vframes 1 %s' % (\n coarse, filename, fine, output_filename)\n elif meth == 'ffmpeg accurate':\n syscall = 'ffmpeg -y -i %s -ss %r -vframes 1 %s' % (\n filename, frametime, output_filename)\n elif meth == 'ffmpeg fast':\n syscall = 'ffmpeg -y -ss %r -i %s -vframes 1 %s' % (\n frametime, filename, output_filename)\n \n if verbose:\n print(syscall)\n if not dry_run:\n #os.system(syscall)\n syscall_l = syscall.split(' ')\n syscall_result = subprocess.check_output(syscall_l, \n stderr=subprocess.STDOUT)\n if very_verbose:\n print(syscall_result)",
"def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()",
"def __format_run(arg):\n cp = subprocess.run([BIN_FFPROBE, arg, \"-v\", \"quiet\", \"-hide_banner\"],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return cp.stdout.decode('utf-8')",
"def preprocess_file(self, filename):\n rawfilename = ''\n for command in [self.mplayer_command, \n self.ffmpeg_command]:\n while True:\n rawfilename = self.random_string()\n if not os.path.exists(rawfilename):\n break\n \n if 0 != subprocess.call(\n command.format(self.SRATE, filename, rawfilename), \n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT,\n shell=True):\n os.remove(rawfilename)\n rawfilename = None\n continue\n \n break # file is successfully converted\n return rawfilename",
"def testSetVideoFrame():\n\n\t# create output\n\toutputFileName = \"testSetVideoFrame.mov\"\n\touputFile = av.OutputFile( outputFileName )\n\n\t# create video frame and codec\n\timageDesc = av.VideoFrameDesc()\n\timageDesc.setWidth( 1920 )\n\timageDesc.setHeight( 1080 )\n\timageDesc.setDar( 1920, 1080 )\n\n\tinputPixel = av.Pixel()\n\tinputPixel.setColorComponents( av.eComponentRgb );\n\tinputPixel.setPlanar( False );\n\n\timageDesc.setPixel( inputPixel );\n\n\tinputVideoCodec = av.VideoCodec( av.eCodecTypeEncoder, \"mpeg2video\" );\n\tinputVideoCodec.setImageParameters( imageDesc );\n\n\t# create transcoder and add a video stream\n\ttranscoder = av.Transcoder( ouputFile )\n\ttranscoder.add( \"\", 0, \"xdcamhd422\", inputVideoCodec )\n\tvideoEssence = transcoder.getStreamTranscoder( 0 ).getCurrentEssence()\n\n\t# start process\n\ttranscoder.init()\n\touputFile.beginWrap()\n\n\t# process 255 frames\n\tfor i in range(0,255):\n\t\ttranscoder.processFrame()\n\t\t# set video frame\n\t\tframe = av.VideoFrame( imageDesc )\n\t\tframe.getBuffer().assign(frame.getBuffer().size(), i)\n\t\tvideoEssence.setFrame( frame )\n\n\t# end process\n\touputFile.endWrap()\n\n\t# get dst file of transcode\n\tdst_inputFile = av.InputFile( outputFileName )\n\tprogress = av.NoDisplayProgress()\n\tdst_inputFile.analyse( progress, av.InputFile.eAnalyseLevelFast )\n\tdst_properties = dst_inputFile.getProperties()\n\tdst_videoStream = dst_properties.videoStreams[0]\n\n\tassert_equals( \"mpeg2video\", dst_videoStream.codecName )\n\tassert_equals( \"MPEG-2 video\", dst_videoStream.codecLongName )\n\tassert_equals( 1920, dst_videoStream.width )\n\tassert_equals( 1080, dst_videoStream.height )\n\tassert_equals( 16, dst_videoStream.dar.num )\n\tassert_equals( 9, dst_videoStream.dar.den )",
"def test_probe_video_from_file(self, test_video, config):\n full_path = os.path.join(VIDEO_DIR, test_video)\n probe_result = torch.ops.video_reader.probe_video_from_file(full_path)\n self.check_probe_result(probe_result, config)",
"def pix2pix_results_to_video(path, destination=\".\", name_out=\"out\"):\n files = list(map(str, get_files(path, '.png')))\n\n files.sort(key=get_id)\n\n img_array = img_list_from_files(files)\n frames = pix2pix_results_to_frames(img_array)\n write_video(frames, destination, name_out)",
"def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)",
"def shell_2_file(_cmd=None, _cwd=None, _timeout=5*60*60):\n try:\n try:\n out_path=os.path.join(getCurpath(), \"%s__tmp_out\"%str(time.time()))\n err_path=os.path.join(getCurpath(), \"%s__tmp_err\"%str(time.time()))\n fout=open(out_path, 'a+')\n ferr=open(err_path, 'a+')\n shell=subwork(cmd=_cmd, stdout=fout, stderr=ferr, cwd=_cwd, timeout=_timeout)\n req=check_zero(shell.run())\n # get media info from tmp_out\n fout.seek(0)\n out=fout.read()\n ferr.seek(0)\n err_out=ferr.read()\n #\n return req[0], str(out) + str(err_out)\n finally:\n if fout:\n fout.close()\n if ferr:\n ferr.close()\n deleteFile(out_path)\n deleteFile(err_path)\n except:\n return False, trace_back()",
"def mp4_to_webm(srcfile, destfile, overwrite=False):\n syspkgs.check_installs([\"ffmpeg\"])\n cmd = [\n \"ffmpeg\",\n \"-i\",\n srcfile,\n \"-c:v\",\n \"libvpx\",\n \"-crf\",\n \"10\",\n \"-b:v\",\n \"1M\",\n \"-c:a\",\n \"libvorbis\",\n destfile,\n ]\n if overwrite:\n cmd.insert(1, \"-y\")\n print(\" \".join(cmd))\n return subprocess.check_output(cmd, encoding=\"utf-8\")"
] | [
"0.6997698",
"0.6566929",
"0.64950305",
"0.6379619",
"0.6161119",
"0.61035687",
"0.5958369",
"0.58059055",
"0.576237",
"0.57498455",
"0.57029223",
"0.5672103",
"0.5670517",
"0.5635099",
"0.5540602",
"0.5490141",
"0.541302",
"0.5378603",
"0.53590256",
"0.5312127",
"0.52753246",
"0.5261128",
"0.51669043",
"0.51525116",
"0.514161",
"0.5012437",
"0.50100416",
"0.5006522",
"0.5004922",
"0.5001083"
] | 0.75541896 | 0 |
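The negative examples above all shell out to ffmpeg or ffprobe through subprocess; a minimal sketch of that pattern, assuming ffmpeg is available on PATH and using placeholder file names ("input.mp4", "output.wav") that are not part of the dataset:

import subprocess

def transcode_to_wav(src: str, dst: str) -> bool:
    """Return True if ffmpeg exits successfully, False otherwise."""
    result = subprocess.run(
        ["ffmpeg", "-y", "-i", src, dst],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return result.returncode == 0

if __name__ == "__main__":
    # Placeholder file names; replace with real paths when running.
    print(transcode_to_wav("input.mp4", "output.wav"))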
Get duration from an ffmpeg stderr dump. | def parse_duration_from_stderr(self, stderr: str) -> float:
pattern = "Duration: (\\d\\d):(\\d\\d):(\\d\\d\\.\\d\\d)"
pattern = re.compile(pattern)
result = pattern.search(stderr)
if result is None:
return None
# Parse result
hours = float(result.group(1))
minutes = float(result.group(2))
seconds = float(result.group(3))
duration = (
(hours * 60 * 60) +
(minutes * 60) +
seconds)
return duration | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_duration(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=duration -of default=nokey=1:noprint_wrappers=1 ' +\n filename).split()\n pid = subprocess.run(cmd, universal_newlines=True,\n stdout=subprocess.PIPE)\n if pid.returncode != 0:\n return None\n\n duration_exp = pid.stdout.rstrip()\n try:\n duration = float(duration_exp)\n except:\n duration = 0.\n return duration",
"def get_duration(file):\n cmd = 'ffprobe -i \"{}\" -show_entries format=duration -v quiet -of csv=\"p=0\"'.format(file)\n try:\n output = subprocess.check_output(\n cmd,\n shell=True, # Let this run in the shell\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print(e.output)\n output = 0\n # return round(float(output)) # ugly, but rounds your seconds up or down\n return float(output)",
"def _parse_duration(path):\n tag = \"[FlowShaper] Application complete after \" # xxx ms\n found = None\n with (path / \"stdout.txt\").open(mode=\"r\") as stdout:\n found = [line for line in stdout if line.startswith(tag)][-1]\n assert found, f\"Run never completed! {path}\"\n\n # Parse the next word as an integer\n return int(found[len(tag):].split()[0])",
"def duration(file_path):\n command = [\"ffprobe\", \"-show_entries\", \"format=duration\", \"-i\", file_path]\n pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)\n out, error = pipe.communicate()\n match_object = None if error else DURATION_REGEX.search(out.decode('utf-8'))\n if match_object is None:\n return 0\n length = float(match_object.group(1)) / 60\n return length",
"def parse_start_timecode_from_stderr(self, stderr: str) -> float:\n pattern = \"start: ([0-9]+\\.[0-9]+)\"\n pattern = re.compile(pattern)\n result = pattern.search(stderr)\n if result is None:\n return None\n\n # Parse result\n timecode = float(result.group(1))\n return timecode",
"def duration_seconds(self):\n duration = 0.0\n if self.is_video() or self.is_audio():\n if self.__dict__['duration']:\n try:\n duration = float(self.__dict__['duration'])\n except ValueError:\n raise FFProbeError('None numeric duration')\n return duration",
"def duration():\r\n elapsed_time, duration = video_time()\r\n return duration",
"def _duration(self):\n if getattr(self, '_duration_cache', None):\n return self._duration_cache\n duration = extractMetadata(guessParser(\\\n InputIOStream(self))).get('duration')\n if not duration:\n raise Exception(u'Not an audio file')\n else:\n duration = duration.seconds\n self._duration_cache = duration\n return duration",
"async def read_video_info(vid_fp: str, logger=None):\n args = ['-v', 'quiet', '-print_format', 'json', '-show_streams', '-sexagesimal', vid_fp]\n p = await asyncio.create_subprocess_exec('ffprobe', *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n stdout, _ = await p.communicate()\n if p.returncode != 0:\n err = f'Cannot get video info for {vid_fp}'\n if logger:\n logger.error(err)\n else:\n print(err)\n return\n # Find duration\n metadata = json.loads(stdout.decode())\n for stream in metadata['streams']:\n if stream['codec_type'] != 'video':\n continue\n # Good for H264\n dur = stream.get('duration')\n # H265\n if dur is None and stream.get('tags') is not None:\n dur = stream['tags'].get('DURATION')\n if dur is None:\n return\n return parse_duration(dur)\n return",
"def __get_duration_from_line(self, line):\n # TODO: catch exceptions\n duration_str = line.split('=')[1]\n return int(duration_str)",
"def __get_duration_from_string(cls, dstr):\n mtch = re.search(r'^(\\d+)$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)s(?:ec(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)m(?:in(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 60\n mtch = re.search(r'^(\\d+)h(?:r(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 3600\n mtch = re.search(r'^(\\d+)d(?:ay(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 86400\n raise FlashFileException(('String \"%s\" is not a known duration'\n ' format. Try 30sec, 10min, 2days etc.') %\n str(dstr))",
"def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration",
"def get_data_duration(meta_file_name):\n try:\n with open(meta_file_name) as meta_file:\n info = kaa_metadata.parse(meta_file)\n except IOError:\n config_pytomo.LOG.error('Unable to open tempfile for kaa_metadata')\n\n if (info and 'length' in info):\n data_duration = info.length\n return data_duration",
"def _duration_to_secs(duration):\n secs = int(duration[:-1])\n if duration[-1] == 's':\n pass\n elif duration[-1] == 'm':\n secs *= 60\n elif duration[-1] == 'h':\n secs *= 60 * 60\n elif duration[-1] == 'd':\n secs *= 60 * 60 * 24\n else:\n raise ValueError('Invalid duration: %r' % duration)\n\n return secs",
"def getVideoLengthFromVideoFile(videofileforlengthcheck):\n vprobe = []\n vprobe.extend(probe_header)\n vprobe.extend(['-i', videofileforlengthcheck])\n vprobe.extend(probe_arguments)\n vout = sp.check_output(\n vprobe\n )\n vint = vout.decode().strip()\n return vint",
"def parse_duration(duration: str) -> int:\n\n def _get_value(match_obj, group_name):\n val = match_obj.group(group_name)\n return int(val) if val is not None else 0\n\n match = DURATION_REGEX.match(duration)\n err_msg = DURATION_MSG.format(pattern=duration)\n\n if not match:\n raise ValueError(err_msg)\n\n hours = _get_value(match, \"hours\")\n minutes = _get_value(match, \"minutes\")\n seconds = _get_value(match, \"seconds\")\n\n result = (hours * 3600) + (minutes * 60) + seconds\n\n if result <= 0:\n raise ValueError(err_msg)\n\n return (hours * 3600) + (minutes * 60) + seconds",
"def _get_duration(self):\n try:\n dur = self.im.info[\"duration\"] / 1000.0\n except KeyError:\n dur = DEFAULT_DURATION / 1000.0 \n\n return dur",
"def _parse_test_duration(duration_str):\n try:\n if duration_str.endswith(\"s\"):\n duration_str = duration_str[:-1]\n return float(duration_str)\n except:\n return None",
"def _parse_ps_output(string):\n t = string.replace('-', ':').split(':')\n t = [0] * (4 - len(t)) + [int(i) for i in t]\n seconds = t[0] * 86400 + t[1] * 3600 + t[2] * 60 + t[3]\n return seconds",
"def result_stderr(result):\n return result[1][1]",
"def parse_duration(duration):\n command_parse = re.compile(r\"(!mute|/mute) ?(\\d+)? ?([\\w+\\D]+)?\")\n parsed = command_parse.match(duration.text)\n time = parsed.group(2)\n reason = parsed.group(3)\n\n if not time:\n time = 5\n time = int(time)\n\n if not reason:\n reason = 'for no reason'\n\n until_date = datetime.now() + timedelta(minutes=time)\n return until_date, reason, time",
"def media_duration(self):\n if 'duration' in self._status:\n return int(float(self._status['duration']))",
"def seconds(duration):\n if not duration:\n return 0\n try:\n h, m, s = duration_parts(duration)\n return s\n except (ValueError, TypeError):\n return 0",
"def get_audio_file_duration_sec(file_path):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n return audio_seg.duration_seconds",
"def duration(self):\n with audioread.audio_open(self.path) as f:\n return f.duration",
"def getDuration(fn: str) -> float:\n return QueryWav(fn).duration",
"def get_frame_durations(file):\n pos = file.tell()\n\n frame_durations = []\n last_frame_timestamp = None\n def collect_timestamps(frame, timestamp):\n timestamp = round(timestamp*1000)\n\n nonlocal last_frame_timestamp\n if last_frame_timestamp is not None:\n duration = timestamp - last_frame_timestamp\n frame_durations.append(duration)\n last_frame_timestamp = timestamp\n\n result = ExportMJPEG(frame_callback=collect_timestamps)\n mkvparse.mkvparse(file, result)\n\n # We don't have durations from the frame or a file duration. ugoira_downloader_mjpeg\n # duplicates the last frame with a zero duration to give the last frame its\n # duration so seamless looping works. Just match that here so everything round-trips\n # cleanly.\n frame_durations.append(0)\n\n # Return to the original file position.\n file.seek(pos)\n\n return frame_durations",
"def get_log_mediainfo():\n exec_version = float(str(xbmc.getInfoLabel(\"System.BuildVersion\"))[0:4])\n if exec_version < 14.0:\n logfn = xbmc.translatePath(r'special://logpath/xbmc.log')\n else:\n logfn = xbmc.translatePath(r'special://logpath/kodi.log')\n if is_xbmc_debug():\n lookbacksize = 6144\n lookbacklines = 60\n else:\n lookbacksize = 2560\n lookbacklines = 25\n ret = None\n numretries = 4\n while numretries > 0:\n xbmc.sleep(250)\n try:\n with open(logfn, \"r\") as f:\n f.seek(0, 2) # Seek @ EOF\n fsize = f.tell() # Get Size\n f.seek(max(fsize - lookbacksize, 0), 0) # Set pos @ last n chars\n lines = f.readlines() # Read to end\n lines = lines[-lookbacklines:] # Get last n lines\n\n for line in lines:\n if 'fps:' in line:\n start = line.find('fps:')\n sub = line[start:].rstrip('\\n')\n tret = dict(item.split(\":\") for item in sub.split(\",\"))\n ret = {}\n for key in tret:\n tmp = key.strip()\n try:\n if tmp == 'fps':\n ret['fps'] = float(tret[key])\n else:\n ret[tmp] = int(tret[key])\n except ValueError:\n pass\n if ret['pheight'] != 0:\n ret['par'] = float(ret['pwidth'])/float(ret['pheight'])\n if ret['dheight'] != 0:\n ret['dar'] = float(ret['dwidth'])/float(ret['dheight'])\n except Exception as e:\n xbmc.log('Error opening logfile: {0}'.format(logfn))\n if hasattr(e, 'message'):\n xbmc.log('Error message: {0}'.format(e.message))\n numretries = 0\n if ret is not None:\n numretries = 0\n if ret is None:\n xbmc.log('Could not retrieve video info from log')\n return ret",
"def readProcessStderrLog(self, name, offset, length):\r\n self._update('readProcessStderrLog')\r\n return self._readProcessLog(name, offset, length, 'stderr')",
"def find_average_duration(video: dict):\n global num_videos\n global total_duration\n\n if duration := video.get('duration'):\n with data_lock:\n num_videos += 1\n total_duration += (duration/1000)\n show_progress()"
] | [
"0.7139366",
"0.6637791",
"0.6516288",
"0.6415554",
"0.6111052",
"0.5992013",
"0.59010184",
"0.5797666",
"0.5752247",
"0.57105243",
"0.5628981",
"0.55969137",
"0.555516",
"0.5550522",
"0.551304",
"0.54656565",
"0.5385895",
"0.5377022",
"0.5360331",
"0.53306353",
"0.5317654",
"0.53172153",
"0.525319",
"0.5248714",
"0.5238223",
"0.52273595",
"0.5184157",
"0.5182558",
"0.51718515",
"0.516528"
] | 0.77763784 | 0 |
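A standalone sketch of the duration regex from the positive document above, exercised on a hand-written sample of a typical ffmpeg stderr line (the sample text is an assumption, not captured output):

import re

# Same pattern as the positive document, written as a raw string.
DURATION_RE = re.compile(r"Duration: (\d\d):(\d\d):(\d\d\.\d\d)")

def parse_duration(stderr: str):
    match = DURATION_RE.search(stderr)
    if match is None:
        return None
    hours, minutes, seconds = (float(g) for g in match.groups())
    return hours * 3600 + minutes * 60 + seconds

sample = "  Duration: 00:03:25.43, start: 0.000000, bitrate: 128 kb/s"
print(parse_duration(sample))  # 205.43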
Save this entry in the SugarCRM server. If the 'id' field is blank, it creates a new entry and sets the 'id' value. | def save(self):
# If 'id' wasn't blank, it's added to the list of dirty fields; this
# way the entry will be updated in the SugarCRM connection.
if self['id'] != '':
self._dirty_fields.append('id')
# nvl is the name_value_list, which has the list of attributes.
nvl = []
for field in set(self._dirty_fields):
# Define an individual name_value record.
nv = {}
nv['name'] = field
nv['value'] = self[field]
nvl.append(nv)
# Use the API's set_entry to update the entry in SugarCRM.
result = self._module._connection.set_entry(self._module._name, nvl)
self._fields['id'] = result['id']
self._dirty_fields = []
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()",
"def save(self):\n if self.id:\n self.update()\n else:\n self.create()",
"def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()",
"def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)",
"def save(self, id, **fields):\r\n # TODO : Effettuare la validazione prima di inserire\r\n user_id = current.auth.user_id\r\n record = self.table(auth_user=user_id, refs=id)\r\n if record:\r\n record.update(**fields)\r\n record.update_record()\r\n else:\r\n self.table.insert(auth_user=user_id, refs=id, **fields)",
"def save(self):\n pk = self.get_pk()\n if pk and not self._is_new_record and self._edited_fields:\n set_vars = self.get_field_dict(fields=self._edited_fields)\n self.update(**set_vars).filter(**{self.get_pk_name(): pk}).execute()\n elif self._is_new_record:\n insert_vars = self.get_field_dict()\n if self._meta.auto_increment:\n insert_vars.pop(self.get_pk_name())\n new_pk = self.insert(**insert_vars).execute()\n if self._meta.auto_increment:\n self.set_pk(new_pk)\n self.set_new_record_state(False)\n elif not pk and not self._is_new_record:\n raise ValueError('[Model.save] Primary key is not defined ' +\n 'while the data is stored')\n self._edited_fields.clear()",
"def save(self):\n if self.iid is not None:\n self.db().update(self.iid, self._attributes)\n else:\n self.iid = self.db().add(self._attributes)",
"def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)",
"def save(self, **with_extra):\n\t\tif self.id:\n\t\t\tnew_fields = getattr(self._client, \"save_\" + self.method)(self, **with_extra)\n\t\t\tself._create_fields(new_fields)\n\t\t\treturn True\n\t\treturn False",
"def save_to_db(self):\n result = self.db.newsdb.insert_one({\"name\": self.name})\n self.id = str(result.inserted_id)",
"def save(self):\n if self.get('_id'):\n return self.connection.update({'_id': self.get('_id')}, {'$set': self._export(without_id=True)})\n else:\n return self.connection.insert(self._export())",
"def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return self.id\n except Exception as e:\n db.session.rollback()\n return {\n \"message\": \"Ensure the object you're saving is valid\",\n \"help\": \"Has all fields and doesn't repeat unique values.\",\n \"exception\": str(e)\n }",
"def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()",
"def save(self):\n \n db.session.add(self)\n db.session.commit()",
"def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)",
"def save(self):\n params = self.to_params()\n if 'tweet_id' in params:\n params['tweet_ids'] = [params['tweet_id']]\n del params['tweet_id']\n\n if self.id:\n resource = self.RESOURCE.format(account_id=self.account.id, id=self.id)\n response = Request(self.account.client, 'put', resource, params=params).perform()\n return self.from_response(response.body['data'])\n\n resource = self.RESOURCE_COLLECTION.format(account_id=self.account.id)\n response = Request(self.account.client, 'post', resource, params=params).perform()\n return self.from_response(response.body['data'][0])",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"def save(self):\n db.session.add(self)\n db.session.commit()",
"async def save(self) -> None:\n if not hasattr(self, 'errors'):\n raise RuntimeError('you must call is_valid() before save instance')\n if self.errors:\n raise RoomValidationError(self.errors)\n if hasattr(self, '_id'):\n data = self.loads()\n room_id = data.pop('_id')\n await room_collection.replace_one({'_id': room_id}, data)\n else:\n result = await room_collection.insert_one(self.loads())\n self._id = result.inserted_id",
"def update(self):\n if not self.id:\n raise DataValidationError(\"Update called with empty ID field\")\n db.session.commit()\n db.session.refresh(self)",
"def save(self):\r\n db.session.add(self)\r\n db.session.commit()"
] | [
"0.7648956",
"0.7361002",
"0.6938848",
"0.6878406",
"0.66949844",
"0.6614205",
"0.6569775",
"0.6563675",
"0.6438659",
"0.6280113",
"0.62362057",
"0.62102276",
"0.61960924",
"0.6164901",
"0.6155119",
"0.6127607",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61145216",
"0.61006325",
"0.6085792",
"0.60753417"
] | 0.7521411 | 1 |
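A minimal sketch of how the name_value_list in the positive document is assembled from dirty fields, using plain dictionaries only; the field names and values below are hypothetical, and no SugarCRM connection or set_entry call is made:

def build_name_value_list(fields: dict, dirty_fields: set) -> list:
    # One {"name": ..., "value": ...} record per dirty field.
    return [{"name": name, "value": fields[name]} for name in sorted(dirty_fields)]

fields = {"id": "abc-123", "first_name": "Ada", "last_name": "Lovelace"}
print(build_name_value_list(fields, {"first_name", "id"}))
# [{'name': 'first_name', 'value': 'Ada'}, {'name': 'id', 'value': 'abc-123'}]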
Return the related entries in another module. | def get_related(self, module):
connection = self._module._connection
result = connection.get_relationships(self._module._name, self['id'],
module._name.lower(), '', ['id'])
entries = []
for elem in result['entry_list']:
entry = SugarEntry(module)
entry._fields['id'] = elem['id']
entries.append(entry)
return entries | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def relationships(self):",
"def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result",
"def associated_objects(self):\n return self._associated_objects",
"def getEntries(self):\n return self.entries",
"def get_entries(self):\n return self._netdis.loxone.entries",
"def associatedObjects (self):\n return self.__associatedObjects",
"def _parts(self):\n return [part for part in Package.__walkparts(self.__relationships)]",
"def MODULES(self):\n pass",
"def getAncestors():",
"def get_related_indicators(self):\n # imported here to prevent circular deps\n from fn_threatq.threatqsdk.indicator import Indicator\n return self.get_related_objects(Indicator)",
"def related_to(self, name=None):\n\t\treturn self.related(name, True)",
"def get_entries(self):\n return self.find_by_st(\"urn:schemas-denon-com:device:ACT-Denon:1\")",
"def related_entities(self):\n related_entities = []\n\n for prop in dir(self):\n if prop.endswith('_related'):\n related = getattr(self, prop).all()\n if related:\n for entity in related:\n record_type = entity.object_ref._meta.object_name\n entity_metadata = {\n 'name': str(entity),\n 'record_type': record_type,\n 'field_name': entity._meta.model_name.replace(record_type.lower(), '').title(),\n 'value': entity.value,\n 'url': None\n }\n # Links for top-level entities\n if record_type in ['Organization', 'Person', 'Violation']:\n entity_metadata['url'] = reverse_lazy(\n 'edit-{}'.format(record_type.lower()),\n args=[entity.object_ref.uuid]\n )\n # Standardized relationship links\n elif record_type in ['Emplacement', 'Association']:\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-{}'.format(record_type.lower()),\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.object_ref.pk\n }\n )\n # Irregular relationship links\n elif record_type == 'Composition':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-composition',\n kwargs={\n 'organization_id': entity.object_ref.parent.get_value().value.uuid,\n 'pk': entity.object_ref.pk\n }\n )\n elif record_type == 'MembershipPerson':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-personnel',\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.pk\n }\n )\n elif record_type == 'MembershipOrganization':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-membership',\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.pk\n }\n )\n related_entities.append(entity_metadata)\n return related_entities",
"def used_in_recipes(self):\n Recipe = apps.get_model('recipes','Recipe')\n values = {}\n rqset = Recipe.objects.filter(components__of_ingredient__pk=self.pk)\n\n while rqset.count(): # until no more child recipes\n values.update(rqset.values_list('slug','name')) # Add to return list\n rqset = Recipe.objects.filter(components__of_recipe__in=rqset) # Recurse\n\n return values",
"def __iter__(self):\n for rId in self._iter_rIds():\n yield self._presentation.related_parts[rId]",
"def modules(self):\n return self._modules.keys()",
"def getEntries(self):\n return self.__entries",
"def get_all_lessons(module) -> list:\n from core.models import DetailPage, TopicPage\n\n return [\n lesson\n for lesson in DetailPage.objects.live().specific().descendant_of(module)\n if isinstance(lesson.get_parent().specific, TopicPage)\n ]",
"def get_related_trackers(self):\n\n return Tracker.objects.filter(product=self.pk)",
"def modules(self):\n return self._modules",
"def test_get_related_nodes(self):\n pass",
"def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen",
"def related_entities(self):\n related_entities = []\n for point in self.accesspoint_set.all():\n related_entities.append({\n 'name': str(point),\n 'archive_url': point.archive_url,\n 'page_number': point.trigger,\n 'accessed_on': point.accessed_on,\n 'url': reverse_lazy(\n 'update-access-point',\n kwargs={'source_id': self.uuid, 'pk': point.uuid}\n )\n })\n return related_entities",
"def get_rel_elements(self):\n return self.merged_root.findall('OrgQuestion/Thread/RelQuestion')",
"def references(self):\n return self._get_related_resources(False)",
"def test_get_all_related(self):\n c1 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c1\")\n c2 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c2\")\n # if c1 is related to c2\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c2\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c1)\n self.assertEqual(set(expected_output), set(actual_output))\n # then c2 should be related to c1\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c1\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c2)\n self.assertEqual(set(expected_output), set(actual_output))",
"def worldobjects(self):\n return dict( self.domain.objects.items() | self.problem.objects.items() )",
"def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)",
"def entries(self):\n return self._entries",
"def entries(self):\n return self._entries"
] | [
"0.5929166",
"0.5709051",
"0.54539883",
"0.54411966",
"0.54079044",
"0.54058117",
"0.5341041",
"0.53391445",
"0.5284375",
"0.5259152",
"0.5229081",
"0.52277374",
"0.5226618",
"0.52161896",
"0.5207432",
"0.5207311",
"0.5198739",
"0.5186677",
"0.5138827",
"0.5136619",
"0.5127005",
"0.51243556",
"0.5108948",
"0.51033384",
"0.5092421",
"0.50922024",
"0.5083319",
"0.5082698",
"0.50784314",
"0.50784314"
] | 0.7334194 | 0 |
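A small sketch mirroring the loop in the positive document that turns a get_relationships-style result into entry ids; the payload below is hand-written and hypothetical, and no real API is called:

def related_ids(result: dict) -> list:
    # Collect the id of each element in the entry_list, as the document's loop does.
    return [elem["id"] for elem in result.get("entry_list", [])]

fake_result = {"entry_list": [{"id": "11"}, {"id": "42"}]}
print(related_ids(fake_result))  # ['11', '42']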
Partition list ``l`` in ``K`` partitions. Examples >>> l = [0, 1, 2] >>> list(clusters(l, K=3)) [[[0], [1], [2]], [[], [0, 1], [2]], [[], [1], [0, 2]], [[0], [], [1, 2]], [[], [0], [1, 2]], [[], [], [0, 1, 2]]] >>> list(clusters(l, K=2)) [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[], [0, 1, 2]]] >>> list(clusters(l, K=1)) [[[0, 1, 2]]] | def clusters(l, K): # noqa
if l:
prev = None
for t in clusters(l[1:], K):
tup = sorted(t)
if tup != prev:
prev = tup
for i in range(K):
yield tup[:i] + [
[l[0]] + tup[i],
] + tup[i + 1 :]
else:
yield [[] for _ in range(K)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result",
"def create_clusters(N, K):\n clusters = []\n centroids = create_points(N, K)\n for idx, centroid in enumerate(centroids):\n cluster = Cluster(centroid)\n cluster.label = _cluster_name(idx)\n clusters.append(cluster)\n return clusters",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\ttotal_clusters = len(cluster_list)\n\tclusters = sorted(cluster_list, key = lambda cluster: \\\n\t\t\t\t\t cluster.total_population(), reverse = True)\n\tk_clusters = clusters[:num_clusters]\n\tfor dummy_idx_i in range(num_iterations):\n\t\tk_empties = [Cluster(set([]), 0, 0, 0, 0) for \\\n\t\t\t\t\t dummy_idx in range(num_clusters)]\n\t\tfor idx_j in range(total_clusters):\n\t\t\tdist = [cluster_list[idx_j].distance(k_clusters[idx_f]) for \\\n\t\t\t\t\tidx_f in range(num_clusters)]\n\t\t\tidx_l = dist.index(min(dist))\n\t\t\tk_empties[idx_l].merge_clusters(cluster_list[idx_j])\n\t\tk_clusters = k_empties[:]\n\treturn k_clusters",
"def hierarchical_clustering(cluster_list, num_clusters):\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n\n return new_cluster_list",
"def neclusters(l, K): # noqa\n for c in clusters(l, K):\n if all(x for x in c):\n yield c",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return cluster_result",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings",
"def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res",
"def to_clusters_list(cluster_tags, k):\n converted = [[] for i in range(k)]\n for i in range(len(cluster_tags)):\n converted[cluster_tags[i]].append(i)\n return converted",
"def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p",
"def hierarchical_clustering(cluster_list, num_clusters):\n cluster_list_copy = list(cluster_list)\n\n if len(cluster_list) <= num_clusters:\n return cluster_list\n while len(cluster_list) > num_clusters:\n cluster_list_copy.sort(key=lambda cluster: cluster.horiz_center())\n dummy, cluster_i, cluster_j = fast_closest_pair(cluster_list)\n cluster_list[cluster_i].merge_clusters(cluster_list[cluster_j])\n cluster_list.remove(cluster_list[cluster_j])\n\n return cluster_list",
"def findClusters(l, scheme, clustertype='fluid'):\n # only convert items to labels if list of items, not list of lists\n if len(l) > 0:\n if isinstance(l[0], list):\n clusters=l\n else:\n clusters=labelClusters(l, scheme)\n else:\n clusters=[]\n \n csize=[]\n curcats=set([])\n runlen=0\n clustList=[]\n firstitem=1\n for inum, item in enumerate(clusters):\n if isinstance(item, list):\n clustList.append(findClusters(item, scheme, clustertype=clustertype))\n else:\n newcats=set(item.split(';'))\n if newcats.isdisjoint(curcats) and firstitem != 1: # end of cluster, append cluster length\n csize.append(runlen)\n runlen = 1\n else: # shared cluster or start of list\n runlen += 1\n \n if clustertype==\"fluid\":\n curcats = newcats\n elif clustertype==\"static\":\n curcats = (curcats & newcats)\n if curcats==set([]):\n curcats = newcats\n else:\n raise ValueError('Invalid cluster type')\n firstitem=0\n csize.append(runlen)\n if sum(csize) > 0:\n clustList += csize\n return clustList",
"def hierarchical_clustering(cluster_list, num_clusters):\n \n new_cluster_list = list(cluster_list)\n\n while len(new_cluster_list) > num_clusters:\n _, node1, node2 = fast_closest_pair(new_cluster_list)\n new_cluster_list[node1].merge_clusters(new_cluster_list[node2])\n del new_cluster_list[node2]\n\n return new_cluster_list",
"def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)",
"def hierarchical_clustering(cluster_list, num_clusters):\n\n total_clusters = len(cluster_list)\n\n while total_clusters > num_clusters:\n cluster_list.sort(key = lambda cluster: cluster.horiz_center())\n closest_pair = fast_closest_pair(cluster_list)\n cluster_1 = cluster_list[closest_pair[1]]\n cluster_2 = cluster_list[closest_pair[2]]\n merged_clusters = cluster_1.merge_clusters(cluster_2)\n cluster_list.append(merged_clusters)\n cluster_list.remove(cluster_1)\n cluster_list.remove(cluster_2)\n total_clusters = len(cluster_list)\n\n return cluster_list",
"def cluster_items(xs: np.ndarray, k: int):\n kmeans = KMeans(n_clusters=k).fit(xs)\n\n centroids = kmeans.cluster_centers_\n labels = kmeans.labels_\n\n return centroids, labels",
"def partition(n, k=None, zeros=False):\n if not zeros or k is None:\n for i in ordered_partitions(n, k):\n yield tuple(i)\n else:\n for m in range(1, k + 1):\n for i in ordered_partitions(n, m):\n i = tuple(i)\n yield (0,)*(k - len(i)) + i",
"def partition(n, ks):\n if type(ks) not in (list, tuple):\n raise TypeError('ks must be an iterable')\n if not ks:\n raise ValueError('ks must have at least one value')\n elif min(ks) < 0:\n raise ValueError('group size k must be non-negative')\n num = _math.factorial(n)\n den = 1\n for k in ks:\n den *= _math.factorial(k)\n return int(num / den)",
"def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters",
"def cluster(self, k=3, max_iter=10):\n\n # create a set of k random clusters as seeds\n old_clusters = [None] * k # just a placeholder\n clusters = self.random_clusters(k)\n\n iter = 0\n while (iter < max_iter) and not (old_clusters == clusters):\n print \"iteration %d...\" % iter\n # assign new clusters to old clusters\n for i in xrange(0, k):\n old_clusters[i] = copy(clusters[i])\n clusters[i].documents = []\n\n # for each document\n for document in self.documents:\n\n # determine the cluster with the highest similarity\n similarities = [cosine_similarity(document, cluster) for cluster in old_clusters]\n max_index = array(similarities).argmax()\n\n # assign document to that cluster\n clusters[max_index].add(document)\n\n # update cluster means\n for cluster in clusters:\n cluster.update_centroid()\n \n iter += 1\n \n return clusters",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n clusters = list(cluster_list)\n\n # position initial clusters at the location of clusters with largest populations\n clusters.sort(reverse = True,\n key = lambda cluster: cluster.total_population())\n old_clusters = [clusters[idx] for idx in range(num_clusters)]\n\n# Initialize old cluster using large population counties\n# For number of iterations\n# Initialize the new clusters to be empty\n# For each county\n# Find the old cluster center that is closest\n# Add the county to the corresponding new cluster\n# Set old clusters equal to new clusters\n# Return the new clusters\n\n for dummy_i in range(num_iterations):\n new_clusters = [alg_cluster.Cluster(set(), 0, 0, 0, 0) for dummy_k in range(num_clusters)]\n for county in cluster_list:\n county_x = county.horiz_center()\n county_y = county.vert_center()\n l_idx = [float('inf'), -1]\n for cluster in old_clusters:\n distance = math.sqrt((county_x - cluster.horiz_center()) ** 2 + (county_y - cluster.vert_center()) ** 2)\n l_idx = min(l_idx, [distance, old_clusters.index(cluster)])\n new_clusters[l_idx[1]] = new_clusters[l_idx[1]].merge_clusters(county)\n old_clusters = new_clusters\n\n return new_clusters",
"def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)",
"def part(n, k, prev_parts=None):\n if prev_parts is None:\n prev_parts = {}\n if n < k or k < 1:\n raise Exception(\"Invalid partition args\")\n if k == 1:\n return [[n]]\n if n == k:\n return [[1 for i in range(n)]]\n parts = []\n for i in range(math.ceil(float(n) / float(k)), n - k + 2):\n others = deepcopy(prev_parts.get((n - i, k - 1), part(n - i, k - 1, prev_parts)))\n for other in others:\n other.append(i)\n parts.extend(others)\n deduplicated = set(tuple(sorted(x)) for x in parts)\n uniq_parts = []\n for dedup in deduplicated:\n uniq_parts.append(list(dedup))\n if (n, k) not in prev_parts:\n prev_parts[(n, k)] = uniq_parts\n return uniq_parts",
"def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]",
"def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)",
"def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers",
"def partition(examples):\n\n cluster_examples = [[] for _ in range(0, cluster_count)]\n for example in examples:\n cluster_examples[example.type].append(example)\n\n return cluster_examples",
"def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs",
"def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters"
] | [
"0.6442916",
"0.6442183",
"0.6315446",
"0.6304946",
"0.6237085",
"0.617769",
"0.6146027",
"0.6125856",
"0.60932726",
"0.60206467",
"0.5957113",
"0.5909527",
"0.59080315",
"0.585992",
"0.58364034",
"0.58048147",
"0.58022964",
"0.57939726",
"0.5760786",
"0.5759672",
"0.57513547",
"0.57495826",
"0.57224643",
"0.5720571",
"0.5714274",
"0.57099193",
"0.56791675",
"0.5663382",
"0.56618917",
"0.566004"
] | 0.7760409 | 0 |
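The clusters() generator from the positive document, copied standalone and run on the K=2 doctest example to show the expected output:

def clusters(l, K):
    # Yield every way to distribute the items of l over K (possibly empty) parts.
    if l:
        prev = None
        for t in clusters(l[1:], K):
            tup = sorted(t)
            if tup != prev:
                prev = tup
                for i in range(K):
                    yield tup[:i] + [[l[0]] + tup[i]] + tup[i + 1:]
    else:
        yield [[] for _ in range(K)]

print(list(clusters([0, 1, 2], K=2)))
# [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[], [0, 1, 2]]]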
Partition list ``l`` in ``K`` partitions, without empty parts. >>> l = [0, 1, 2] >>> list(neclusters(l, 2)) [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]] >>> list(neclusters(l, 1)) [[[0, 1, 2]]] | def neclusters(l, K): # noqa
for c in clusters(l, K):
if all(x for x in c):
yield c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clusters(l, K): # noqa\n if l:\n prev = None\n for t in clusters(l[1:], K):\n tup = sorted(t)\n if tup != prev:\n prev = tup\n for i in range(K):\n yield tup[:i] + [\n [l[0]] + tup[i],\n ] + tup[i + 1 :]\n else:\n yield [[] for _ in range(K)]",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\ttotal_clusters = len(cluster_list)\n\tclusters = sorted(cluster_list, key = lambda cluster: \\\n\t\t\t\t\t cluster.total_population(), reverse = True)\n\tk_clusters = clusters[:num_clusters]\n\tfor dummy_idx_i in range(num_iterations):\n\t\tk_empties = [Cluster(set([]), 0, 0, 0, 0) for \\\n\t\t\t\t\t dummy_idx in range(num_clusters)]\n\t\tfor idx_j in range(total_clusters):\n\t\t\tdist = [cluster_list[idx_j].distance(k_clusters[idx_f]) for \\\n\t\t\t\t\tidx_f in range(num_clusters)]\n\t\t\tidx_l = dist.index(min(dist))\n\t\t\tk_empties[idx_l].merge_clusters(cluster_list[idx_j])\n\t\tk_clusters = k_empties[:]\n\treturn k_clusters",
"def create_clusters(N, K):\n clusters = []\n centroids = create_points(N, K)\n for idx, centroid in enumerate(centroids):\n cluster = Cluster(centroid)\n cluster.label = _cluster_name(idx)\n clusters.append(cluster)\n return clusters",
"def all_segmentations(l):\n for K in range(1, len(l) + 1):\n gen = neclusters(l, K)\n yield from gen",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return cluster_result",
"def slicem_cluster(self, community_detection, network_from, wt_steps, n_clust, neighbors, top, drop_nodes):\n #TODO: change to prevent cluster on exception\n global scores_update, drop, flat, clusters, G, colors \n \n if len(n_clust) == 0:\n n_clust = None # Cluster at optimum modularity\n else:\n n_clust = int(n_clust)\n \n if len(drop_nodes) > 0:\n try:\n drop = [int(n) for n in drop_nodes.split(',')]\n print('dropping nodes:', drop)\n scores_update = {}\n for pair, score in complete_scores.items():\n if pair[0] in drop or pair[1] in drop:\n next\n else:\n scores_update[pair] = score\n except:\n self.show_drop_list_msg()\n else:\n drop = []\n scores_update = complete_scores\n\n flat, clusters, G = self.create_network(\n community_detection=community_detection, \n wt_steps=wt_steps,\n n_clust=n_clust,\n network_from=network_from, \n neighbors=neighbors, \n top=top\n )\n colors = get_plot_colors(clusters, G)\n print('clusters computed!')",
"def hierarchical_clustering(cluster_list, num_clusters):\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n\n return new_cluster_list",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings",
"def to_clusters_list(cluster_tags, k):\n converted = [[] for i in range(k)]\n for i in range(len(cluster_tags)):\n converted[cluster_tags[i]].append(i)\n return converted",
"def partition(n, k=None, zeros=False):\n if not zeros or k is None:\n for i in ordered_partitions(n, k):\n yield tuple(i)\n else:\n for m in range(1, k + 1):\n for i in ordered_partitions(n, m):\n i = tuple(i)\n yield (0,)*(k - len(i)) + i",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n clusters = list(cluster_list)\n\n # position initial clusters at the location of clusters with largest populations\n clusters.sort(reverse = True,\n key = lambda cluster: cluster.total_population())\n old_clusters = [clusters[idx] for idx in range(num_clusters)]\n\n# Initialize old cluster using large population counties\n# For number of iterations\n# Initialize the new clusters to be empty\n# For each county\n# Find the old cluster center that is closest\n# Add the county to the corresponding new cluster\n# Set old clusters equal to new clusters\n# Return the new clusters\n\n for dummy_i in range(num_iterations):\n new_clusters = [alg_cluster.Cluster(set(), 0, 0, 0, 0) for dummy_k in range(num_clusters)]\n for county in cluster_list:\n county_x = county.horiz_center()\n county_y = county.vert_center()\n l_idx = [float('inf'), -1]\n for cluster in old_clusters:\n distance = math.sqrt((county_x - cluster.horiz_center()) ** 2 + (county_y - cluster.vert_center()) ** 2)\n l_idx = min(l_idx, [distance, old_clusters.index(cluster)])\n new_clusters[l_idx[1]] = new_clusters[l_idx[1]].merge_clusters(county)\n old_clusters = new_clusters\n\n return new_clusters",
"def find_knn(self, k, coordinate, threshold=0):\n def r_square(c1, c2):\n return (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2\n\n h = []\n for sno in self._coordinates:\n heapq.heappush(\n h, (r_square(coordinate, self._coordinates[sno]), sno))\n\n knn = []\n for unused_i in range(k):\n knn.append(self._stations[heapq.heappop(h)[1]])\n\n min_dist = r_square((float(knn[0]['lat']), float(knn[0]['lng'])),\n coordinate)\n if threshold and min_dist > threshold ** 2:\n return []\n\n return knn",
"def neighbor_list(i, j, k, nx):\n left_center = (i-1, j, k)\n right_center = (i+1, j, k)\n top_center = (i, j+1, k)\n bottom_center = (i, j-1, k)\n left_up = (i, j, k + 1)\n left_down = (i, j, k -1)\n return np.mod([left_center, right_center, top_center, bottom_center, left_up, left_down], nx)",
"def sequential_clustering(singleton_list, num_clusters):\n\tcluster_list = []\n\tcluster_idx = 0\n\ttotal_clusters = len(singleton_list)\n\tcluster_size = float(total_clusters) / num_clusters\n\n\tfor cluster_idx in range(len(singleton_list)):\n\t\tnew_cluster = singleton_list[cluster_idx]\n\t\tif math.floor(cluster_idx / cluster_size) != \\\n\t\t math.floor((cluster_idx - 1) / cluster_size):\n\t\t\tcluster_list.append(new_cluster)\n\t\telse:\n\t\t\tcluster_list[-1] = cluster_list[-1].merge_clusters(new_cluster)\n\n\treturn cluster_list",
"def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists",
"def hierarchical_clustering(cluster_list, num_clusters):\n cluster_list_copy = list(cluster_list)\n\n if len(cluster_list) <= num_clusters:\n return cluster_list\n while len(cluster_list) > num_clusters:\n cluster_list_copy.sort(key=lambda cluster: cluster.horiz_center())\n dummy, cluster_i, cluster_j = fast_closest_pair(cluster_list)\n cluster_list[cluster_i].merge_clusters(cluster_list[cluster_j])\n cluster_list.remove(cluster_list[cluster_j])\n\n return cluster_list",
"def clusters_connected( self):\n def check_connected( k, vertices, edges):\n dads = {}\n for p in vertices:\n dads[p] = p\n\n def Find( c):\n while c != dads[c]:\n c = dads[c]\n return c\n\n def Union( p, q):\n dads[Find(p)] = Find(q)\n\n for p,q in edges:\n Union( p, q)\n\n stuff = set([ Find(p) for (k,p) in dads.items()])\n assert len(stuff) == 1, \"More than one partition\"\n\n vertices = collections.defaultdict( list)\n for p in itertools.product( range(self.n), repeat=2):\n vertices[self.raster[p]].append( p)\n\n def X():\n for x in range(self.n-1):\n for y in range(self.n):\n yield (x,y),(x+1,y)\n\n def Y():\n for x in range(self.n):\n for y in range(self.n-1):\n yield (x,y),(x,y+1)\n\n connections = collections.defaultdict( list)\n for (p,q) in itertools.chain( X(), Y()):\n if self.raster[p] == self.raster[q]:\n connections[self.raster[p]].append( ( p, q))\n\n for (k,v) in vertices.items():\n check_connected( k, v, connections[k])",
"def hierarchical_k_means(X, n_clusters):\n\n n_big_clusters = int(np.sqrt(n_clusters))\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_big_clusters, batch_size=1000,\n n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X)\n coarse_labels = mbk.labels_\n fine_labels = np.zeros_like(coarse_labels)\n q = 0\n for i in range(n_big_clusters):\n n_small_clusters = int(\n n_clusters * np.sum(coarse_labels == i) * 1. / X.shape[0])\n n_small_clusters = np.maximum(1, n_small_clusters)\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_small_clusters,\n batch_size=1000, n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X[coarse_labels == i])\n fine_labels[coarse_labels == i] = q + mbk.labels_\n q += n_small_clusters\n\n return _remove_empty_labels(fine_labels)",
"def list_clusters(_filter=None):\n ecs_clusters = __paginate_call(ecs_client, 'list_clusters', 'clusterArns')\n if _filter:\n ecs_clusters = [cluster for cluster in ecs_clusters if _filter in cluster]\n return sorted(ecs_clusters)",
"def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters",
"def hierarchical_clustering(cluster_list, num_clusters):\n \n new_cluster_list = list(cluster_list)\n\n while len(new_cluster_list) > num_clusters:\n _, node1, node2 = fast_closest_pair(new_cluster_list)\n new_cluster_list[node1].merge_clusters(new_cluster_list[node2])\n del new_cluster_list[node2]\n\n return new_cluster_list",
"def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p",
"def generic_vertex_set_with_neighbourhood_iterator(self, S, k, N=None, skip_until_element=None):\n if k < len(S):\n return\n if N is None:\n N = self.neighbourhood(S)\n if k == len(S):\n yield (S, N)\n else:\n forbidden = map(lambda v: v[0], S)\n for X in self.L if skip_until_element is None else dropwhile(lambda x: x != skip_until_element, self.L):\n if X not in forbidden:\n for (v, NN) in self.neighbourhood_partition(X, N):\n for C in self.generic_vertex_set_with_neighbourhood_iterator(S + (v,), k, NN, X):\n yield C",
"def hierarchical_clustering(cluster_list, num_clusters):\n\twhile len(cluster_list) > num_clusters:\n\t\tcluster_list.sort(key = lambda cluster: cluster.horiz_center())\n\t\tdummy_dist, idx_i, idx_j = fast_closest_pair(cluster_list)\n\t\tcluster_list[idx_i].merge_clusters(cluster_list[idx_j])\n\t\tcluster_list.pop(idx_j)\n\treturn cluster_list",
"def dlk_partitions(totalD, totalL, totalK,\\\n minD = 0,minL = 0,minK = 0) :\n partitions = []\n## if goodDLK_2(totalD,totalL,totalK+1) and totalE >= 1:\n## partitions.append((((totalD,totalL,totalK,totalE-1),1),))\n if (totalD,totalL,totalK) == (0,0,0) :\n return [()]\n for d1 in range(minD, totalD +1):\n loD = totalD - d1\n for l1 in range(minL, totalL +1):\n loL = totalL - l1\n for k1 in range(minK, totalK +1):\n loK = totalK - k1\n if not goodDLK_2(d1,l1,k1+1) :\n continue\n \n rest = dlk_partitions(loD,loL,loK,d1,l1,k1)\n partitions += [updatePartition(r, (d1,l1,k1)) for r in rest]\n # this updating of the lower bound of iterations\n # is because bound is on lexicographical order.\n minK = 0\n minK = 0\n minL = 0\n return partitions",
"def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors",
"def discover_new_cluster(\n self,\n n: int,\n items: List[str],\n embeddings: np.ndarray,\n weights: Optional[List[float]] = None,\n k_neighbours: int = 10,\n ) -> List[Tuple[float, str]]:\n # Get all cross-similarities\n similarity = cosine_similarity(embeddings)\n \n # Calculate scores for every row\n scores = []\n sorted_idx = similarity.argsort(axis=1) # Get sorted indices (sort on corresponding values)\n for i, (item, weight) in enumerate(zip(items, weights)):\n # No point in calculating score if weight equals zero\n if not weight:\n scores.append(0)\n continue\n \n # Assign score of zero if labeled entity is in K nearest neighbours\n top_indices = sorted_idx[i, -k_neighbours:]\n if any(items[idx] in self._clusters.keys() for idx in top_indices):\n scores.append(0)\n \n # Use accumulated similarity of K nearest neighbours as score\n else:\n scores.append(weight * similarity[i, top_indices].sum())\n \n # Filter out the highest score item\n return list(sorted(zip(scores, items), key=lambda x: x[0], reverse=True))[:n]",
"def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]",
"def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res"
] | [
"0.7217303",
"0.63322186",
"0.625333",
"0.6235396",
"0.6019988",
"0.5918844",
"0.5856138",
"0.57952917",
"0.57450014",
"0.5674573",
"0.5655689",
"0.5525202",
"0.5506933",
"0.5506646",
"0.5503834",
"0.5496627",
"0.5489272",
"0.5465552",
"0.5463998",
"0.54601",
"0.54010504",
"0.5395626",
"0.5385965",
"0.5383908",
"0.53812057",
"0.53751945",
"0.53709483",
"0.5369598",
"0.5366698",
"0.53500074"
] | 0.74512917 | 0 |
Get all segmentations of a list ``l``. | def all_segmentations(l):
for K in range(1, len(l) + 1):
gen = neclusters(l, K)
yield from gen | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getIntersectorList(self, l):\n return [self.getIntersector(v) for v in l]",
"def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]",
"def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list",
"def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))",
"def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))",
"def get_all(self):\n return self._segments",
"def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list",
"def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)",
"def getSegments(self) -> List[int]:\n ...",
"def dividir(l):\n\n\ta = []\n\tfor i in range(len(l)):\n\t\ta += l[i].split(' ')\n\treturn a[:100]",
"def segmentline(l,u1,u2):\n p1=sampleline(l,u1)\n p2=sampleline(l,u2)\n return [p1,p2]",
"def segments(seg_type=None):\n\n for index in xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)",
"def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets",
"def get_intervals(l):\n intervals = len(l) * [0]\n # Initalize with 1\n intervals[0] = 1\n for k in range(1, len(l)):\n intervals[k] = (len(l[k]) + 1) * intervals[k - 1]\n\n return intervals",
"def get_all_segments(edfFiles):\n\n segments = []\n preprocessor = Preprocessor(config_startShift,\n config_endShift,\n config_powerLineFreq,\n config_bandLowCut,\n config_bandHighCut)\n for edf in edfFiles:\n print(\"getting the labeled segments from the recording \", str(edf.filename))\n segments.extend(get_segments_from_edf(edf, preprocessor))\n if edfFiles.index(edf) == 20: break\n return segments",
"def group_list_by_seg(listToGroup, divs):\n \n # Get the list of ROI numbers:\n roiNums = []\n \n for div in divs:\n roiNums.append(div[0])\n \n uniqueRoiNums = list(set(roiNums))\n \n if len(uniqueRoiNums) == 1:\n groupedList = [listToGroup]\n else:\n groupedList = []\n \n for roiNum in uniqueRoiNums:\n listThisRoi = [] # initialise\n \n for i in range(len(listToGroup)):\n if divs[i][0] == roiNum:\n listThisRoi.append(listToGroup[i])\n \n groupedList.append(listThisRoi)\n \n return groupedList",
"def segmentation_split(Y, X, Ls, n_sampels): \n n_seg = int(n_sampels/Ls) # Number of segments\n X = X.T[:n_seg*Ls] # remove last segement if too small\n Y = Y.T[:n_seg*Ls]\n \n Ys = np.split(Y.T, n_seg, axis=1) # Matrices with segments in axis=0\n Xs = np.split(X.T, n_seg, axis=1) # Matrices with segments in axis=0\n \n return Ys, Xs, n_seg",
"def ins_all_positions(x, l):\n res = []\n for i in range(0, len(l) + 1):\n res.append(l[:i] + [x] + l[i:])\n return res",
"def merge_segments(lst):\n ii = 0\n while True:\n jj = ii + 1\n if len(lst) <= jj:\n return lst\n seg1 = lst[ii]\n seg2 = lst[jj]\n if seg1.merge(seg2):\n if seg2.empty():\n del lst[jj]\n else:\n ii += 1\n else:\n ii += 1\n return lst",
"def segments(self):\n return self._segments",
"def list_segment_names(self) -> PagingList[str]:\n return PagingList(self._generate_segment_names, 128)",
"def unif_partition(l):\n return tuple(i/l for i in range(l+1))",
"def clusters(l, K): # noqa\n if l:\n prev = None\n for t in clusters(l[1:], K):\n tup = sorted(t)\n if tup != prev:\n prev = tup\n for i in range(K):\n yield tup[:i] + [\n [l[0]] + tup[i],\n ] + tup[i + 1 :]\n else:\n yield [[] for _ in range(K)]",
"def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))",
"def chunks(l, n):\n lists = []\n for i in range(n):\n list1 = np.arange( i*l/n+1 , (i+1)*l/n+1 )\n lists.append(list1)\n return lists",
"def get_segments(self, sets=None):\n if sets is None:\n if self.sets is not None:\n sets = self.sets\n else:\n raise ValueError(\"sets and self.sets attributes are None, \\\n you need either to pass an origin argument to get_segments or \\\n to use get_filtration method before\")\n segments = []\n for s in sets:\n if self.epsilon <= s.getRelevance():\n t, a, b = s.getPosition()\n for i, seg in enumerate(segments):\n tp, ap, bp = seg\n if t >= tp and bp > a:\n bp = a\n elif t <= tp and ap < b:\n ap = b\n segments[i] = (tp, ap, bp)\n segments.append((t, a, b))\n return segments",
"def filter_segs(self, segs, normalize=True):\n return list(filter(lambda seg: self.seg_known(seg, normalize), segs))",
"def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()",
"def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea",
"def split_rows(l):\n row0 = [l[0], l[3], l[7]]\n row1 = [l[1], l[4], l[8], l[12]]\n row2 = [l[2], l[5], l[9], l[13], l[16]]\n row3 = [l[6], l[10], l[14], l[17]]\n row4 = [l[11], l[15], l[18]]\n return [row0, row1, row2, row3, row4]"
] | [
"0.7343008",
"0.5786379",
"0.5737136",
"0.57333577",
"0.56923693",
"0.56832623",
"0.5526333",
"0.55071455",
"0.54750925",
"0.54672647",
"0.5410251",
"0.53292185",
"0.5260688",
"0.52098393",
"0.5078696",
"0.5060627",
"0.50543034",
"0.50465745",
"0.50461197",
"0.50444895",
"0.50383353",
"0.50059843",
"0.49926668",
"0.49475324",
"0.4923056",
"0.49107066",
"0.4894213",
"0.48751193",
"0.4858163",
"0.48436219"
] | 0.7035634 | 1 |
Test if ``s1`` and ``s2`` are in the same symbol, given the ``segmentation``. | def q(segmentation, s1, s2):
index1 = find_index(segmentation, s1)
index2 = find_index(segmentation, s2)
return index1 == index2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other: Segment) -> bool:\n return any(\n (\n self.start == other.start and self.end == other.end,\n self.start == other.end and self.end == other.start,\n )\n )",
"def segment_segment(s1, s2):\n l1=s1.line()\n l2=s2.line()\n i = line_line(l1, l2)\n if isinstance(i, bool): return False\n k = s1.affine(i)\n return k >= 0 and k <= 1 and i",
"def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)",
"def identical_cds(sc1,sc2):\n # Input 2 identical segment chains, return True if cds the same\n if sc1.covers(sc2) and sc2.covers(sc1):\n return True\n else:\n return False",
"def if2symbols(symbol1, symbol2, reel):\n for i in range(len(reel)-2):\n if reel[i] == symbol1 and reel[i+1] == symbol2:\n return True\n return False",
"def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = set(segment_pair)\n if seg1 in seg_set and seg2 in seg_set:\n return True\n return False",
"def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True",
"def __is_contained_in(first_symbol, second_symbol):\n\n first_symbol_top_left = first_symbol.top_left_corner\n first_symbol_top_right = first_symbol.top_right_corner\n first_symbol_bottom_left = first_symbol.bottom_left_corner\n first_symbol_bottom_right = first_symbol.bottom_right_corner\n\n second_symbol_top_left = second_symbol.top_left_corner\n second_symbol_top_right = second_symbol.top_right_corner\n second_symbol_bottom_left = second_symbol.bottom_left_corner\n second_symbol_bottom_right = second_symbol.bottom_right_corner\n\n if (\n second_symbol_top_left[0] <= first_symbol_top_left[0] and\n first_symbol_top_right[0] <= second_symbol_top_right[0] and\n second_symbol_bottom_left[0] <= first_symbol_bottom_left[0] and\n first_symbol_bottom_right[0] <= second_symbol_bottom_right[0] and\n\n second_symbol_top_left[1] <= first_symbol_top_left[1] and\n first_symbol_bottom_left[1] <= second_symbol_bottom_left[1] and\n second_symbol_top_right[1] <= first_symbol_top_right[1] and\n first_symbol_bottom_right[1] <= second_symbol_bottom_right[1]\n ):\n return True\n else:\n return False",
"def intersect_segment(self, p1, p2):\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n \n\n z1 = self.line * p1\n z2 = self.line * p2\n\n if np.sign(z1) != np.sign(z2):\n return True\n if self.contains(p1) or self.contains(p2):\n return True\n return False",
"def twoStrings(s1, s2):\n\n set1 = set(s1)\n set2 = set(s2)\n\n for char in set1:\n if char in set2:\n return True\n\n return False",
"def is_isomorphic_fast(self, s1, s2):\n # encode strings\n count1, count2 = 0, 0\n dict1, dict2 = dict(), dict()\n for i in range(len(s1)):\n char1, char2 = s1[i], s2[i] # current characters\n if char1 in dict1:\n curr1 = dict1[char1] # current index of character in s1\n else:\n count1 += 1\n dict1[char1] = count1\n curr1 = dict1[char1]\n if char2 in dict2:\n curr2 = dict2[char2] # current index of character in s2\n else:\n count2 += 1\n dict2[char2] = count2\n curr2 = dict2[char2]\n if curr1 != curr2:\n return False\n return True",
"def isSimilar(bin1, bin2, s):\n assert len(bin1) == len(bin2)\n for i in range(len(bin1)):\n if abs(bin1[i] - bin2[i]) > s:\n return False\n return True",
"def judge(self, s1, s2):\n if len(s2) < len(s1):\n return False\n index_of_s1 = 0\n index_of_s2 = 0\n while index_of_s1 < len(s1) and index_of_s2 < len(s2):\n if s1[index_of_s1] == s2[index_of_s2]:\n index_of_s1 += 1\n index_of_s2 += 1\n else:\n index_of_s2 += 1\n return True if index_of_s1 == len(s1) else False",
"def intersects_segment(A, B, X):\n \n AX = np.array([X.x - A.x, X.y - A.y])\n XB = np.array([B.x - X.x, B.y - X.y])\n equal_signs = np.array_equal(np.sign(AX), np.sign(XB))\n\n return equal_signs",
"def is_isomorphic(self, s1, s2):\n # encode strings\n enc1, enc2 = [], []\n count1, count2 = 0, 0\n dict1, dict2 = dict(), dict()\n for i in range(len(s1)):\n char1, char2 = s1[i], s2[i]\n if char1 in dict1:\n enc1.append(dict1[char1])\n else:\n count1 += 1\n dict1[char1] = count1\n enc1.append(dict1[char1])\n if char2 in dict2:\n enc2.append(dict2[char2])\n else:\n count2 += 1\n dict2[char2] = count2\n enc2.append(dict2[char2])\n return enc1 == enc2 # compare encodings",
"def __hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))",
"def contains_sequence(dna1, dna2):\n return dna2 in dna1",
"def segmentsIntersect(self, seg1, seg2):\n\t\ts1_x = seg1[1][0] - seg1[0][0]\n\t\ts1_y = seg1[1][1] - seg1[0][1]\n\t\ts2_x = seg2[1][0] - seg2[0][0]\n\t\ts2_y = seg2[1][1] - seg2[0][1]\n\n\t\tdenom = -s2_x * s1_y + s1_x * s2_y\n\n\t\tif (denom > 1e-10):\n\t\t\ts = (-s1_y * (seg2[0][0] - seg1[0][0]) + s1_x * (seg2[0][1] - seg1[0][1])) / (-s2_x * s1_y + s1_x * s2_y)\n\t\t\tt = ( s2_x * (seg2[0][1] - seg1[0][1]) - s2_y * (seg2[0][0] - seg1[0][0])) / (-s2_x * s1_y + s1_x * s2_y)\n\t\t\treturn (s >= 0 and s <= 1 and t >= 0 and t <= 1)\n\t\telse:\n\t\t\treturn False",
"def hamming(s1, s2):\n s1 = str(s1)\n s2 = str(s2)\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length.\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))",
"def fn(s1, s2):\n if len(s1) == 1: return s1 == s2\n if sorted(s1) != sorted(s2): return False #160ms -> 50ms\n return any(fn(s1[:i], s2[:i]) and fn(s1[i:], s2[i:]) or fn(s1[:i], s2[-i:]) and fn(s1[i:], s2[:-i]) for i in range(1, len(s1)))",
"def intersects_segment(\n self, a: Tuple[float, float], b: Tuple[float, float]\n ) -> bool:\n assert len(a) == 2\n assert len(b) == 2\n return bool(lib.cpBBIntersectsSegment(self, a, b))",
"def __isOneSEFound( se1, se2 ):\n if len( se1 ) >= len( se2 ):\n for i in se2:\n for j in se1:\n if i == j:\n return True\n return False\n elif len( se1 ) < len( se2 ):\n for i in se1:\n for j in se2:\n if i == j :\n return True\n return False",
"def contains_sequence(dna1, dna2):\r\n if dna2 in dna1:\r\n return True\r\n else:\r\n return False",
"def contains(s1, s2):\n\n return s2 in s1",
"def _is_equal(self, symbol):\n if symbol.type == self.scanner.EQUALS:\n return True\n else:\n return False",
"def end_other(s_1, s_2):\n str_1 = s_1[-3:]\n str_2 = s_2[-3:]\n\n if(str_1.lower() == s_2.lower()):\n \n isValid = True\n elif(str_2.lower() == s_1.lower()):\n isValid = True\n else:\n isValid = False\n return isValid",
"def are_equal(self, sp1, sp2):\n return",
"def same_as(self, space, in_space):\n if self.marks == space.marks and self.genus == space.genus:\n return True\n space = space.complementary_component(in_space)\n if self.marks == space.marks and self.genus == space.genus:\n return True\n return False",
"def equiv(subdiagram1, subdiagram2):\n # TODO: Make sure arguments are the right type\n # TODO: Make this work for subdiagrams of length >= 1\n # subdiagrams are not equivalent if they have different numbers of crossings\n # print \"sub1\\t\", subdiagram1, len(subdiagram1[0])\n # print \"sub2\\t\", subdiagram2, len(subdiagram2[0])\n if len(subdiagram1[0]) != len(subdiagram2[0]):\n return False\n # look for a match\n for i in range(len(subdiagram1[0])-1):\n crossing1 = subdiagram1[0][i]\n typeMatch = False\n for j in range(len(subdiagram2[0])-1):\n crossing2 = subdiagram2[0][j]\n print \"\\tc1 \",crossing1\n print \"\\tc2 \",crossing2\n # check for same crossing type\n # TODO: check for empty crossing\n if len(crossing1) == 5 and len(crossing2) == 5:\n if crossing1[0] == crossing2[0]:\n print \" :)\"\n typeMatch = True\n \n\n return True",
"def Stringchecker(s1, s2):\r\n\r\n if len(s1) != len(s2) or len(set(s1)) < len(set(s2)):\r\n return False\r\n d = dict()\r\n for idx,c in enumerate(s1):\r\n if not d.get(c):\r\n d[c] = s2[idx]\r\n elif d[c] != s2[idx]:\r\n return False\r\n return True"
] | [
"0.641324",
"0.6303756",
"0.62990654",
"0.6223713",
"0.60034645",
"0.59813756",
"0.59780806",
"0.5975479",
"0.58711016",
"0.5836675",
"0.5813403",
"0.5789767",
"0.5778101",
"0.57675564",
"0.5753647",
"0.5730708",
"0.56924576",
"0.5673696",
"0.5646707",
"0.5588982",
"0.5555126",
"0.5549565",
"0.554248",
"0.54712176",
"0.54555005",
"0.5446655",
"0.54199046",
"0.5417623",
"0.54147536",
"0.5405222"
] | 0.7217469 | 0 |
This builds your guide. Use Keyword to update any options at build time. | def build_guide(self, **kwargs):
# This builds your guide master and updates your options
self.create_guide_master(**kwargs)
prefix = self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes.
options = self.options # Build options
mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part.
mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2)
l_prefix = prefix.replace('C','L', 1)
r_prefix = prefix.replace('C','R', 1)
mirror_values = [1, -1]
enable_steering = options.get('enableSteering')
colors = ['green', 'red']
for mi, prefix in enumerate([l_prefix, r_prefix]):
mirror_value = mirror_values[mi]
color = colors[mi]
l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1)
# create hub
hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point')
hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point')
mc.xform(hub_end_zero, r=1, t=[1,0,0])
mc.parent(hub_end_jnt, hub_jnt)
mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value,0,0], u=[0,1,0], wu=[0,1,0], wut='vector')
mc.parentConstraint(hub_plc, hub_end_zero , mo=1)
# Create steering arm
steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent')
mc.xform(steer_zero, r=1, t=[-1,0,0])
mc.parent(hub_jnt, steer_jnt)
# Create shocks
shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point')
shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point')
mc.xform(shock_a_zero, ws=1, t=[-2,2,0])
mc.xform(shock_b_zero, ws=1, t=[-0.5,0.25,0])
mc.parent(shock_b_jnt, shock_a_jnt)
mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')
mc.aimConstraint(shock_a_plc, shock_b_jnt, aim=[-mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')
# upper arm
up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point')
up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point')
mc.xform(up_arm_end_zero, r=1, t=[-3.5,1,0])
mc.xform(up_arm_zero, r=1, t=[-1,0.5,0])
mc.parent(up_arm_end_jnt, up_arm_jnt)
mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=up_arm_plc)
# lower arm
lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point')
lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point')
mc.xform(lo_arm_end_zero, r=1, t=[-4,-0.5,0])
mc.xform(lo_arm_zero, r=1, t=[-1,-0.5,0])
mc.parent(lo_arm_end_jnt, lo_arm_jnt)
mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=lo_arm_plc)
# steeringArm
if enable_steering:
steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point')
steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point')
mc.xform(steeringArm_b_zero, r=1, t=[-1.5,0,1])
mc.xform(steeringArm_a_zero, r=1, t=[-4,0,1])
mc.parent(steeringArm_b_jnt, steeringArm_a_jnt)
mc.aimConstraint(steeringArm_b_plc, steeringArm_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')
# Create control
zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color, shape='circle', axis='X', scale=[3]*3, create_pivot=0)
mc.setAttr(ctrl+'.numOffsetCtrls', 1)
mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1)
mc.xform(ctrl.replace('_CTL','_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3)
control.create_shape('wheel', ctrl, axis='X', scale=[3]*3)
#suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt, axis='X', shape='pyramid', color=color, scale=[1.5,1,1], alt_prefix=prefix)
ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix)
mc.delete(mc.pointConstraint(hub_jnt, ground_zero))
# constraint to placer
childs = [prefix+'_wheelhub_JNT_PLC_ZERO',
prefix+'_steeringArm_JNT_PLC_ZERO',
prefix+'_shock_A_JNT_PLC_ZERO',
prefix+'_shock_B_JNT_PLC_ZERO',
prefix+'_upperArm_JNT_PLC_ZERO',
prefix+'_upperArm_end_JNT_PLC_ZERO',
prefix+'_lowerArm_JNT_PLC_ZERO',
prefix+'_lowerArm_end_JNT_PLC_ZERO']
for c in childs:
mc.parentConstraint(l_main_plc, c, mo=1)
mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5)
# ################3
# Place it all
hub_pos = mc.ls(options.get('hubCenter') or '')
if hub_pos:
loc = utils.snap_locator(hub_pos)
mc.delete(mc.pointConstraint(loc, self.guide_master))
mc.setAttr(self.guide_master+'.tx', 0)
mc.delete(mc.pointConstraint(loc, l_main_plc), loc)
hub_end_pos = mc.ls(options.get('hubEndCenter') or '')
if hub_end_pos:
loc = utils.snap_locator(hub_end_pos)
mc.delete(mc.pointConstraint(loc, hub_end_plc), loc)
else:
mc.xform(self.guide_master, ws=1, t=[0,2,10])
mc.xform(l_main_plc, r=1, t=[mirror_value*6,0,0])
mc.setAttr(self.guide_master+'.jointAxisVis', 1)
l = utils.snap_locator(hub_jnt)
mc.setAttr(l+'.ty', 0)
mc.delete(mc.pointConstraint(l, ground_zero), l)
chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1)
mc.setAttr(chassis_plc+'.radius', 1)
mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01)
mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string');
mc.setAttr(chassis_plc+'.type', 18)
mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero)
utils.set_attrs(chassis_plc, l=1, k=0)
# This finalizes your guide.
self.finalize_guide()
self.mirror_guide() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n num_joints = options.get('numberJoints')\n single_joint = options.get('singleJoint')\n pickWalk_parent = options.get('pickWalkParent')\n\n num_joints += 1\n if single_joint:\n num_joints = 1\n\n # Builde joints\n if single_joint:\n jnt_zero, plc, jnt = self.guide_joint(constraint_type='parent')\n zero, ctrl = self.guide_ctrl(shape='circle', color='light_blue', driver=jnt, axis='X')\n ctrls = [ctrl]\n zeros = [zero]\n\n else:\n jnt_zeros, plcs, jnts = self.guide_joint_chain('', num_joints=num_joints)\n zeros, ctrls = [], []\n for i, jnt in enumerate(jnts[:-1]):\n letter = utils.letters[i]\n zero, ctrl = self.guide_ctrl(name=letter, shape='circle',\n color='light_blue', driver=jnt, axis='X')\n zeros.append(zero)\n ctrls.append(ctrl)\n\n mc.xform(zeros, jnt_zeros, r=1, t=[-1*self.mirror_value, 0, 0])\n\n # lock stuff\n pivots = [mc.listRelatives(c, p=1)[0] for c in ctrls]\n utils.set_attrs(zeros, l=1, k=0)\n utils.set_attrs(pivots, 't s', l=1, k=0)\n\n mc.setAttr(self.guide_master+'.offsetTranslateX', -0.5*self.mirror_value)\n\n # This finalizes your guide.\n self.finalize_guide()",
"def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n number_mid_ctrl = options.get('numberMidCtrls')\n num_joints = options.get('numberJoints')\n create_jaw = options.get('createJaw')\n create_skull = options.get('createReverseJaw')\n surface = options.get('createSurfaceDriver')\n create_fk_ctrls = options.get('createFKShaperCtrls')\n\n noxform_grp = self.guide_master + '_NOX'\n\n if mc.objExists ('drivenNeck_chest_Mid_bind'):\n mc.delete ('drivenNeck_chest_Mid_bind')\n\n\n pp = env.get_parts_paths()[-1]\n branch = r'BidepAutoRig\\part_joints\\neck_skel.mb'\n import_path = pp.replace('partsLibrary', branch)\n mc.file(import_path, i=1)\n\n if mc.objExists ('snap_chest_Mid_jnt'):\n mc.delete (mc.parentConstraint ('snap_chest_Mid_bind', 'drivenNeck_chest_Mid_bind'))\n\n\n snaps=[u'head_Mid_bind', u'headEnd_Mid_jnt', u'eye_Lt_bind', u'eye_Rt_bind', u'headTop_Mid_bind',\n u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind', u'neck01_Mid_bind', u'neck02_Mid_bind',\n u'neck03_Mid_bind', u'neckEnd_Mid_jnt']\n\n for snap in snaps:\n target='snap_'+snap\n if mc.objExists (target):\n mc.delete (mc.parentConstraint (target, snap))\n\n\n\n\n # This finalizes your guide.\n self.finalize_guide()\n jnts_grp = self.guide_master + '_JNTS'\n mc.parent ('drivenNeck_chest_Mid_bind', jnts_grp)\n\n self.finalize_guide()",
"def finalize_options(self):\n self.build_dir = os.path.join(*DOC_BUILD_DIR.split(os.sep)[:-1])\n BuildDoc.finalize_options(self)",
"def cli(ctx, **kwds):\n invalid = _validate_kwds(kwds)\n if invalid:\n ctx.exit(invalid)\n tool_description = tool_builder.build(**kwds)\n tool_builder.write_tool_description(ctx, tool_description, **kwds)",
"def build():",
"def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)",
"def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)",
"def build(_):",
"def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])",
"def initialize_options(self):\n self.input_dir = getcwd()\n self.output_dir = path.join(getcwd(), 'dependency', 'static', 'apidocs')",
"def _build(self, **kwargs):",
"def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()",
"def build(self, *args, **kwargs):\n return",
"def _build(self):",
"def _build(self):",
"def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory",
"def docs_build(directory, site_name, view=True, assume_yes=False):\n context = toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )",
"def build(self):",
"def build(self):",
"def build(self):",
"def build(self):\n pass",
"def build(self):\n pass",
"def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)",
"def build(self) -> None:",
"def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self",
"def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)",
"def build_step(self):\n\n pass",
"def makecmd(self, options):",
"def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')",
"def register_adhocs(self):\n aboutform = self.plugin['xep_0004'].makeForm('form', \"About SleekBot\")\n aboutform.addField('about', 'fixed', value= self.__doc__)\n self.plugin['xep_0050'].addCommand('about', 'About Sleekbot', aboutform)\n pluginform = self.plugin['xep_0004'].makeForm('form', 'Plugins')\n plugins = pluginform.addField('plugin', 'list-single', 'Plugins')\n for key in self.cmd_plugins:\n plugins.addOption(key, key)\n plugins = pluginform.addField('option', 'list-single', 'Commands')\n plugins.addOption('about', 'About')\n #plugins.addOption('config', 'Configure')\n self.plugin['xep_0050'].addCommand('plugins', 'Plugins', pluginform, self.form_plugin_command, True)"
] | [
"0.6530746",
"0.6263703",
"0.61089903",
"0.60497785",
"0.5882108",
"0.5788378",
"0.5788378",
"0.5754908",
"0.56987226",
"0.56863075",
"0.56494266",
"0.5625547",
"0.561691",
"0.5596764",
"0.5596764",
"0.55608445",
"0.5489056",
"0.5478003",
"0.5478003",
"0.5478003",
"0.546438",
"0.546438",
"0.5458712",
"0.54449",
"0.543405",
"0.53859526",
"0.5369135",
"0.53685397",
"0.53351",
"0.53346705"
] | 0.63234144 | 1 |
Add fu code to list. | def add_fu(self, state):
self._fu_set.add(state) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_code(self, code):\n self.code += code",
"def add_code(self, id, code):\n self.codes[id] = code",
"def set_function_list(self, L):\n\t\tself.function_list = L",
"def add_hook(f, h):\n if f in hooks:\n hooks[f] += [h]\n else:\n hooks[f] = [h]",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def _add_sinnenschaerfe(skill_list):\n\n attr_list = [\"IN\", \"FF\"]\n # remove original sinnenschaerfe entry\n ss_orig = skill_list.pop(-1)\n\n # change original sinnenschaerfe entry and add it to skill list\n for _, value in enumerate(attr_list):\n ss_temp = copy.deepcopy(ss_orig)\n ss_temp.name = value + \" Sinnenschärfe\"\n ss_temp.attrs[2] = value\n skill_list.append(ss_temp)\n\n return skill_list",
"def add(lst):\n # TODO",
"def _set_instruction_code_23E(self, val):\n self.swift_obj.InstructionCode.append(val)\n for each_instruction_code in self.swift_obj.InstructionCode:\n each_instruction_code.swiftTag = \"23E\"",
"def add_handler ( handler_list, handler_function ):\n if not (handler_function in handler_list):\n handler_list.append ( handler_function )\n \n #cellblender_added_handlers",
"def add_function(self, function):\n self.functions.append(function)",
"def add_function(self, function):\n self.functions.append(function)",
"def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc",
"def code():",
"def append_function_index(self, node):\n ilist = self.function_index\n node._function_index = len(ilist)\n # node.fmtdict.function_index = str(len(ilist)) # debugging\n ilist.append(node)",
"def insert_codes(sess):\n # insert user permission types\n for t in lookups.PERMISSION_TYPES:\n permission_type = PermissionType(permission_type_id=t.id, name=t.name, description=t.desc)\n sess.merge(permission_type)",
"def handle_data(self, data):\r\n self.fed.append(data)",
"def fpa(tokens):\r\n varname = tokens[0][0]\r\n self.functions_used.add(varname)",
"def callback_extend_list(item):\n fisher_contingency_pval_parallel_insertion.extend(item)",
"def add():\n pass",
"def add_family(self, f):\n if f.fid in self.families.keys():\n print(f'US22 - {f.fid} id has a duplicate in line number {f._fid_line}')\n self.families[f.fid] = f\n return Family()",
"def _fix_up(self, cls, code_name):",
"def add(cls, name: str, code: int) -> None:\n setattr(cls, name, code)",
"def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])",
"def add_filter(self, f):\n raise NotImplementedError",
"def uf(self, uf):\n self._uf = uf",
"def af_list(self) -> List:\n ...",
"def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1",
"def add_code(self, doc):\n\t\timport os\n\t\tfrom webnotes.modules import scrub, get_module_path\n\t\timport conf\n\t\t\n\t\tmodules_path = get_module_path(doc.module)\n\n\t\tpath = os.path.join(modules_path, 'doctype', scrub(doc.name))\n\n\t\tdef _add_code(fname, fieldname):\n\t\t\tfpath = os.path.join(path, fname)\n\t\t\tif os.path.exists(fpath):\n\t\t\t\twith open(fpath, 'r') as f:\n\t\t\t\t\tdoc.fields[fieldname] = f.read()\n\t\t\t\n\t\t_add_code(scrub(doc.name) + '.js', '__js')\n\t\t_add_code(scrub(doc.name) + '.css', '__css')\n\t\t_add_code('%s_list.js' % scrub(doc.name), '__listjs')\n\t\t_add_code('help.md', 'description')\n\t\t\n\t\t# embed all require files\n\t\timport re\n\t\tdef _sub(match):\n\t\t\tfpath = os.path.join(os.path.dirname(conf.modules_path), \\\n\t\t\t\tre.search('[\"\\'][^\"\\']*[\"\\']', match.group(0)).group(0)[1:-1])\n\t\t\tif os.path.exists(fpath):\n\t\t\t\twith open(fpath, 'r') as f:\n\t\t\t\t\treturn '\\n' + f.read() + '\\n'\n\t\t\telse:\n\t\t\t\treturn '\\n// no file \"%s\" found \\n' % fpath\n\t\t\n\t\tif doc.fields.get('__js'):\n\t\t\tdoc.fields['__js'] = re.sub('(wn.require\\([^\\)]*.)', _sub, doc.fields['__js'])\n\t\t\n\t\t# custom script\n\t\tfrom webnotes.model.code import get_custom_script\n\t\tcustom = get_custom_script(doc.name, 'Client') or ''\n\t\tdoc.fields['__js'] = doc.fields.setdefault('__js', '') + '\\n' + custom",
"def getNewCodeList(self):\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines() \\\n if self._getNewCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.new_code_length < self.old_code_length):\n filling = [(None, self.color)] * (self.old_code_length - \\\n self.new_code_length)\n new_code.extend(filling)\n else: # deletion or addition\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()]\n return new_code",
"def addChange(change):"
] | [
"0.63861465",
"0.5658855",
"0.56266004",
"0.55539155",
"0.5490869",
"0.5488156",
"0.5473881",
"0.54088527",
"0.53393835",
"0.5329551",
"0.5329551",
"0.53095835",
"0.52636266",
"0.5237091",
"0.5221719",
"0.5194192",
"0.51796734",
"0.5135299",
"0.5118256",
"0.5104795",
"0.5077566",
"0.50638634",
"0.50614303",
"0.5056691",
"0.5042579",
"0.5033128",
"0.50304526",
"0.50275934",
"0.50254476",
"0.50246423"
] | 0.5944328 | 1 |
Allow to scroll through the raw data of the csv file selected | def see_raw_data(city):
while True:
try:
see_raw_data_input = input('\nIn addition of the stats above, would you like to scroll through the raw data? (y/n)\n')
if see_raw_data_input not in ('y', 'n'):
raise Exception ('Invalid answer')
if see_raw_data_input == 'n':
break
if see_raw_data_input == 'y':
with open (CITY_DATA[city], 'r') as f:
reader = csv.reader(f)
count_row_start_iteration = 0
count_row_read = 0
for row in reader:
print(row)
count_row_read += 1
if count_row_read == count_row_start_iteration +6:
continue_scroll = input('\nDo you want to continue scrolling 5 more rows through the raw data? (y/n): ')
if continue_scroll == 'n':
break
else:
count_row_start_iteration +=5
except Exception :
print ("Please answer 'y' or 'n'\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_loading_file(self):\n column_headers = []\n column_headers_all = []\n\n # Open the file once to get idea of the total rowcount to display progress\n with open(self.csv_file_path[0], newline='') as csv_file:\n self.progress_max.emit(len(csv_file.readlines()) - 2)\n\n with open(self.csv_file_path[0], newline='') as csv_file:\n\n self.csv_data_table.setRowCount(0)\n self.csv_data_table.setColumnCount(0)\n\n csv_file_read = csv.reader(csv_file, delimiter=',', quotechar='|')\n\n # Fetch the column headers and move the iterator to actual data\n column_headers = next(csv_file_read)\n\n # Reflect back the changes in the reference to the column headers\n for header in column_headers:\n self.column_headers.append(header)\n # A backup to keep a list of all the headers to toogle their view later\n self.column_headers_all.append(header)\n\n # TODO: Increase the reading speed by decreasing load on actual table population\n\n # self.csv_data_table.hide()\n\n for row_data in csv_file_read:\n\n self.relay.emit(self.csv_data_table.rowCount())\n # self.relay.emit(self.x)\n # self.x = self.x + 1\n row = self.csv_data_table.rowCount()\n self.csv_data_table.insertRow(row)\n self.csv_data_table.setColumnCount(len(row_data))\n for column, stuff in enumerate(row_data):\n item = QTableWidgetItem(stuff)\n self.csv_data_table.setItem(row, column, item)\n\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)\n\n # Set WordWrap to True to make the cells change height according to content\n # Currently set it to false as it looks very decent and makes cell size uniform throughout\n self.csv_data_table.setWordWrap(False)\n # Uncomment below line to stretch to fill the column width according to content\n # self.csv_data_table.resizeColumnsToContents()\n self.csv_data_table.resizeRowsToContents()\n\n # Update the bottom toolbar to reflect changes\n self.update_bottom_toolbar.emit()\n self.finished.emit()",
"def _open_file(self,path):\n \n print \"Open File %s\" % path\n \n mapping = self.mappings.GetClientData(self.mappings.GetSelection())\n try:\n delimiter=mapping['_params']['delimiter']\n except:\n delimiter=','\n try:\n skip_last=mapping['_params']['skip_last']\n except:\n skip_last=0\n self.grid_table = SimpleCSVGrid(path,delimiter,skip_last)\n self.grid.SetTable(self.grid_table)\n\tself.opened_path = path",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def open_with_skip_rows_window(self):\n skip_rows = self.skip_rows_window.ui.skip_rows_spinBox.value()\n if \".txt\" in self.filename[0]:\n self.file = np.loadtxt(self.filename[0], skiprows=skip_rows)\n \n if self.file.ndim == 1: # if there is only one trace, reshape to 2D\n self.file = self.file.reshape(self.file.shape[0], 1)\n \n elif \".csv\" in self.filename[0]:\n self.file = np.genfromtxt(self.filename[0], skip_header=skip_rows, delimiter=\",\")",
"def get_data(self, csv_file):\n pass",
"def load_csv(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(\n self,\n \"Select one or more files\",\n \"\",\n \"csv files (*.csv);;All Files (*)\",\n options=options,\n )\n self.show()\n\n if files:\n self.files_now = files\n else:\n self.files_now = None\n\n if self.files_now:\n self.lineEdit_file_name.setText(self.files_now[0])\n self.update_gui_from_csv()",
"def browse_1(self):\r\n file = QFileDialog()\r\n filter_name = \"Csv files (*.csv);;Text files (*.txt);;Xls files (*.xls);; Xlsx files (*.xlsx)\"\r\n file.setNameFilter(filter_name)\r\n if file.exec():\r\n filenames = file.selectedFiles()\r\n self.browseLine.setText(str(filenames[0]))",
"def read_csv_file(self):\n pass",
"def loadCsv(self):\n # Close any already opened file if any\n self.close_file()\n\n # Disable cell change check to avoid crashes\n self.check_cell_change = False\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.setSaveEnabled(False)\n\n csv_file_path = QFileDialog.getOpenFileName(self, \"Load CSV File\", \"\", 'CSV(*.csv)')\n\n # Proceed if and only if a valid file is selected and the file dialog is not cancelled\n if csv_file_path[0]:\n # Get only the file name from path. eg. 'data_file.csv'\n filepath = os.path.normpath(csv_file_path[0])\n filename = filepath.split(os.sep)\n self.csv_file_name = filename[-1]\n\n self.loading_progress = QProgressDialog(\"Reading Rows. Please wait...\", None, 0, 100, self)\n self.loading_progress.setWindowTitle(\"Loading CSV File...\")\n self.loading_progress.setCancelButton(None)\n\n # enable custom window hint\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n # disable (but not hide) close button\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)\n\n # Show waiting cursor till the time file is being processed\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n self.loading_worker = loader.CsvLoaderWorker(csv_file_path=csv_file_path, csv_data_table=self.csv_data_table,\n column_headers=self.column_headers,\n column_headers_all=self.column_headers_all)\n\n self.loading_thread = QThread()\n # Set higher priority to the GUI Thread so UI remains a bit smoother\n QThread.currentThread().setPriority(QThread.HighPriority)\n\n self.loading_worker.moveToThread(self.loading_thread)\n self.loading_worker.workRequested.connect(self.loading_thread.start)\n self.loading_thread.started.connect(self.loading_worker.processLoadingFile)\n self.loading_worker.finished.connect(self.on_loading_finish)\n\n self.loading_worker.relay.connect(self.update_loading_progress)\n self.loading_worker.progress_max.connect(self.set_maximum_progress_value)\n self.loading_worker.update_bottom_toolbar.connect(self.setBottomToolbarInfo)\n\n self.loading_progress.setValue(0)\n self.loading_worker.requestWork()\n\n self.check_cell_change = True\n\n # Close the start page tab and load the file tab\n self.tabWidget.removeTab(0)\n self.tabWidget.insertTab(1, self.tableTab, \"Main Document\")\n\n # Enable Column Layout menu option\n self.action_column_layout.setEnabled(True)\n self.action_add_data.setEnabled(True)\n self.action_add_column.setEnabled(True)\n self.action_toolbar_add_data.setEnabled(True)\n self.action_close_file.setEnabled(True)",
"def load_csv(self):\n\n # Close any already opened file if any\n self.close_file()\n\n # Disable cell change check to avoid crashes\n self.check_cell_change = False\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.set_save_enabled(False)\n\n csv_file_path = QFileDialog.getOpenFileName(self, \"Load CSV File\", \"\", 'CSV(*.csv)')\n\n # Proceed if and only if a valid file is selected and the file dialog is not cancelled\n if csv_file_path[0]:\n # Get only the file name from path. eg. 'data_file.csv'\n filepath = os.path.normpath(csv_file_path[0])\n filename = filepath.split(os.sep)\n self.csv_file_name = filename[-1]\n\n self.loading_progress = QProgressDialog(\"Reading Rows. Please wait...\", None, 0, 100, self)\n self.loading_progress.setWindowTitle(\"Loading CSV File...\")\n self.loading_progress.setCancelButton(None)\n\n # enable custom window hint\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n # disable (but not hide) close button\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)\n\n # Show waiting cursor till the time file is being processed\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n self.loading_worker = CsvLoaderWorker(csv_file_path=csv_file_path, csv_data_table=self.csv_data_table,\n column_headers=self.column_headers,\n column_headers_all=self.column_headers_all)\n\n self.loading_thread = QThread()\n # Set higher priority to the GUI Thread so UI remains a bit smoother\n QThread.currentThread().setPriority(QThread.HighPriority)\n\n self.loading_worker.moveToThread(self.loading_thread)\n self.loading_worker.workRequested.connect(self.loading_thread.start)\n self.loading_thread.started.connect(self.loading_worker.process_loading_file)\n self.loading_worker.finished.connect(self.on_loading_finish)\n\n self.loading_worker.relay.connect(self.update_loading_progress)\n self.loading_worker.progress_max.connect(self.set_maximum_progress_value)\n self.loading_worker.update_bottom_toolbar.connect(self.set_bottom_toolbar_info)\n\n self.loading_progress.setValue(0)\n self.loading_worker.request_work()\n\n self.check_cell_change = True\n\n # Close the start page tab and load the file tab\n self.tabWidget.removeTab(0)\n self.tabWidget.insertTab(1, self.csv_table_tab, \"Main Document\")\n\n # Enable Column Layout menu option\n self.action_column_layout.setEnabled(True)\n self.action_add_data.setEnabled(True)\n self.action_add_column.setEnabled(True)\n self.action_toolbar_add_data.setEnabled(True)\n self.action_close_file.setEnabled(True)",
"def openData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n self.createTab(pandaData, name=filename)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")",
"def reload_csv(self):\n self.load_csv()\n self.tableView.insert_data(self.database)\n self.update()",
"def get_contents(self, limit: int, offset: int = 0) -> \"RowSliceView\":\n contents = petl.fromcsv(self.download_path)\n return petl.rowslice(contents, offset, offset + limit)",
"def read_csv():",
"def show_data():\n with open(\"ScansforStudents.csv\", \"rU\") as csvfile:\n reader = csv.reader(csvfile, delimiter = ',', quotechar = '|')\n k = 0\n for row in reader:\n print(row)\n if k == 100:\n break\n k += 1",
"def _enumerate_csv(self, csv_input):\n csv_file = open(csv_input, 'rb') \n csv_reader = csv.reader(csv_file)\n next(csv_reader, None)\n for row in reader:\n yield row",
"def main():\r\n\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #read the file\r\n csv_reader = csv.reader(csvfile1)\r\n #jummp the first line\r\n next(csv_reader)\r\n #loop through the file\r\n for line in csv_reader:\r\n print(line)",
"def loadFile(self):\r\n logger.debug(\"loadFile\")\r\n fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Open File\", \"\", \"All Files (*);;CSV Files (*.csv);;TSV Files (*.txt; *.tsv);;Parquet Files (*.parc; *.parquet)\");\r\n\r\n\r\n if self.file_index_thread.isRunning():\r\n self.file_index_thread.terminate()\r\n if self.line_count_thread.isRunning():\r\n self.line_count_thread.terminate()\r\n if self.search_index_thread.isRunning():\r\n self.search_index_thread.terminate()\r\n\r\n self.reset_fileproperties()\r\n\r\n self.fileName = fileName\r\n logger.debug(f\"File name: {fileName}\")\r\n\r\n # self.pathLE.setText(self.fileName)\r\n self.setWindowTitle(\"Large File Reader \" + self.fileName)\r\n self.file_index_thread.filename = self.fileName\r\n self.line_count_thread.filename = self.fileName\r\n self.search_index_thread.filename = self.fileName\r\n\r\n self.fileFormat = Path(self.fileName).suffix\r\n logger.debug(\"File format is {}\".format(self.fileFormat))\r\n\r\n self.filelength = self._filelength()\r\n logger.debug(\"File length in bytes is {}\".format(self.filelength))\r\n\r\n self.loadFirst()\r\n self.estimate_lines()\r\n\r\n self.chunklines = self._chunklines()\r\n\r\n self.file_index_thread.start(QtCore.QThread.HighestPriority)\r\n self.line_count_thread.start(QtCore.QThread.HighPriority)\r\n self.search_index_thread.start(QtCore.QThread.NormalPriority)\r\n\r\n self.rawBtn.toggle() # toggle view as file button\r\n self.rawBtn.setEnabled(True)\r\n self.lastBtn.setEnabled(True)\r\n self.firstBtn.setEnabled(True)\r\n self.tableBtn.setEnabled(True)",
"def read_data(self,*args):\n doc = str(self.data_file.get())\n try:\n self.data = pd.read_csv(doc,sep=',')\n self.popup.destroy()\n \n except:\n tkMessageBox.showwarning(title='File not found',\n message='The file you entered does not exist in this location')\n return None\n self.interest_frame = InterestFrame(self.data)\n self.add_offer_frame = AddOfferFrame()",
"def browse(self):\n formats = [\n \"Text - comma separated (*.csv, *)\",\n \"Text - tab separated (*.tsv, *)\",\n \"Text - all files (*)\"\n ]\n\n dlg = QFileDialog(\n self, windowTitle=\"Open Data File\",\n acceptMode=QFileDialog.AcceptOpen,\n fileMode=QFileDialog.ExistingFile\n )\n dlg.setNameFilters(formats)\n state = self.dialog_state\n lastdir = state.get(\"directory\", \"\")\n lastfilter = state.get(\"filter\", \"\")\n\n if lastdir and os.path.isdir(lastdir):\n dlg.setDirectory(lastdir)\n if lastfilter:\n dlg.selectNameFilter(lastfilter)\n\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QFileDialog.Accepted:\n self.dialog_state[\"directory\"] = dlg.directory().absolutePath()\n self.dialog_state[\"filter\"] = dlg.selectedNameFilter()\n\n selected_filter = dlg.selectedNameFilter()\n path = dlg.selectedFiles()[0]\n # pre-flight check; try to determine the nature of the file\n mtype = _mime_type_for_path(path)\n if not mtype.inherits(\"text/plain\"):\n mb = QMessageBox(\n parent=self,\n windowTitle=\"\",\n icon=QMessageBox.Question,\n text=\"The '{basename}' may be a binary file.\\n\"\n \"Are you sure you want to continue?\".format(\n basename=os.path.basename(path)),\n standardButtons=QMessageBox.Cancel | QMessageBox.Yes\n )\n mb.setWindowModality(Qt.WindowModal)\n if mb.exec() == QMessageBox.Cancel:\n return\n\n # initialize dialect based on selected extension\n if selected_filter in formats[:-1]:\n filter_idx = formats.index(selected_filter)\n if filter_idx == 0:\n dialect = csv.excel()\n elif filter_idx == 1:\n dialect = csv.excel_tab()\n else:\n dialect = csv.excel_tab()\n header = True\n else:\n try:\n dialect, header = sniff_csv_with_path(path)\n except Exception:\n dialect, header = csv.excel(), True\n\n options = None\n # Search for path in history.\n # If found use the stored params to initialize the import dialog\n items = self.itemsFromSettings()\n idx = index_where(items, lambda t: samepath(t[0], path))\n if idx is not None:\n _, options_ = items[idx]\n if options_ is not None:\n options = options_\n\n if options is None:\n if not header:\n rowspec = []\n else:\n rowspec = [(range(0, 1), RowSpec.Header)]\n options = Options(\n encoding=\"utf-8\", dialect=dialect, rowspec=rowspec)\n\n dlg = CSVImportDialog(\n self, windowTitle=\"Import Options\", sizeGripEnabled=True)\n dlg.setWindowModality(Qt.WindowModal)\n dlg.setPath(path)\n dlg.setOptions(options)\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QDialog.Accepted:\n self.set_selected_file(path, dlg.options())",
"def csv_read_dict(filename,num=5):\n for chunk in pd.read_csv(filename, chunksize=num):\n print(style.BLUE,chunk,style.END)\n user_input = input(\" Would you like to view next 5 line? \"\n \"Type \\'y\\' or \\'n\\' \\n\")\n if user_input == 'Yes' or user_input == 'y' or user_input == 'yes':\n for chunk in pd.read_csv(filename, skiprows=5):\n bullet_empty_circle = u'\\u006F\\t'\n print(\" {} {}\".format(bullet_empty_circle,style.BLUE,chunk,style.END))\n #return csv_read_dict(filename,num)\n else:\n # Break for 'n' or any user input\n break",
"def _handleLoadFile(self) -> None:\n\n dialog: ChooseFileDialog = self._makeChooseFileDialog()\n result: DialogResult = dialog.show()\n if result == DialogResult.Ok:\n file: str = dialog.getSelectedFile()\n self._setWindowTitle(file)\n data: List[List[Any]] = csvReader.readFile(file)\n self.__spreadsheet.setData(data)",
"def edit_current_cell(self):\n cells = self.csv_data_table.selectionModel().selectedIndexes()\n if len(cells) == 1:\n for cell in sorted(cells):\n r = cell.row()\n c = cell.column()\n self.csv_data_table.editItem(self.csv_data_table.item(r, c))",
"def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)",
"def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()",
"def browseforcsv(self, entry):\r\n filename = filedialog.askopenfilename(title='Select CSV')\r\n if filename != '': # Doesn't change if no file name entered\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, filename)",
"def file_select(self):\n fname = QFileDialog.getSaveFileName(self,\n 'select file',\n '/home/pi/Documents/output.csv',\n \"csv file (*.csv)\")\n self.ui.qtBrowse.clear()\n self.ui.qtBrowse.setText(fname)",
"def loadData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n while self.tabWidget.count() != 0:\n self.closeTab()\n self.createTab(pandaData)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")",
"def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)",
"def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()"
] | [
"0.6083973",
"0.6001787",
"0.59245104",
"0.58940566",
"0.5861581",
"0.581427",
"0.5679473",
"0.56790644",
"0.5672839",
"0.565956",
"0.5653351",
"0.5615062",
"0.560097",
"0.5599851",
"0.5589927",
"0.55863386",
"0.5574873",
"0.5549084",
"0.55258",
"0.55049545",
"0.54869586",
"0.5474881",
"0.547371",
"0.5427521",
"0.5406159",
"0.53902704",
"0.53868157",
"0.53485155",
"0.5345767",
"0.533422"
] | 0.62122077 | 0 |
Initialization function. Sets the model name and function, path to input data, and the output filename. | def __init__(self, sfs, model, popnames, output):
self.sfs = self.load_sfs(sfs)
self.modelname = model
# Make an extrapolating version of the function
self.modelfunc = dadi.Numerics.make_extrap_log_func(
self.set_model_func(model))
self.params = self.set_parameters()
self.popnames = popnames
self.output = '_'.join(popnames + [output, model]) + '.txt'
self.figout = '_'.join(popnames + [output, model]) + '_Comp.pdf'
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,\n model_fn=cake_fn,\n model_dir: Optional[str] = \"model\",\n saved_path : Optional[str] = None,\n ):\n self.model_fn = model_fn \n self.model_dir = model_dir\n if saved_path == None:\n self.update_predictor()\n elif saved_path == \"most_recent\":\n subdirs = [x for x in Path('saved_model').iterdir() if x.is_dir()\\\n and 'temp' not in str(x)]\n self.saved_path = \"saved_model/\"+str(sorted(subdirs)[-1])\n self._build_predictor()\n else:\n self.saved_path = saved_path\n self._build_predictor()",
"def __init__(self, model, fn, log, imputer=None):\n self.model = model\n self.fn = fn\n self.log = log\n self.train_data = self.model.data.training_gen(model.params[\"BATCH_SIZE\"])\n self.test_data = self.model.data.testing_gen(model.params[\"BATCH_SIZE\"])\n self.imputer = imputer",
"def __init__(self, model_filename, sim_filename, include_paths = None):\n\n self.model_filename = model_filename\n self.sim_filename = sim_filename\n self.include_paths = include_paths\n \n self.simulation = None\n self.fit_input = None",
"def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )",
"def __init__(self, output_file, table_model):\n pass",
"def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')",
"def __init__(\n self,\n data_path: str,\n output_path: str\n ):\n\n self.data_path = data_path\n self.output_path = output_path",
"def init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))",
"def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n 
print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())",
"def __init__(self):\n self.model = {'mol':[], 'nmol':0}\n self.template = {} \n self.config = {}\n self.config['tfile'] = 'gau-template-bsse.gjf'\n self.config['xyzfile'] = 'model.xyz'\n self.config['jobfile'] = 'gau.gjf'\n self.config['job_prefix'] = self.config['jobfile'].split(\".\")[0]\n self.config['incr'] = 1\n \n self.rd_cmd_stream()\n return",
"def __init__(self, targetDir, model):\n \n self.categoryFolder = targetDir\n self.model = model\n self.inputsFolder = os.path.join(targetDir, \"Inputs\")",
"def build_model_fn(self):",
"def __init__(self, data_path = None, output_path = '.', \n\t\tdata_mode='matlab', cell=None):\n\n\t\t# tracking function executions\n\t\t# Useful for repr and user guidance\n\t\tself._function_tracking = dict()\n\n\t\t# Load files and assign to attributes\n\t\t\n\t\t# Treat Hephaistos object differently \n\t\t\n\t\t# Check if data_path is hephaistos object\n\t\tif data_path.__class__.__name__ == 'Hephaistos':\n\t\t\t# Take data directly from hephaistos object\n\t\t\tself.data = load_data(data_path, 'heph', cell_no=cell)\n\n\t\t# If not, treat as path / str\n\t\telse:\n\t\t\tself.Data_path = pthl.Path(data_path).absolute()\n\t\t\tassert self.Data_path.exists(), f'Does not exist; data_path: {self.Data_path}'\n\n\t\t\t# If path is to a pkl file, treat as a hephaistos save file\n\t\t\tif self.Data_path.suffix == '.pkl':\n\t\t\t\tunit = heph.load(self.Data_path)\n\t\t\t\tself.data = load_data(unit, 'heph', cell)\n\n\t\t\t# If a directory, then treat as directory of txt or matlab data files\n\t\t\telif self.Data_path.is_dir():\n\n\t\t\t\tself.data = load_data(data_path, data_mode)\n\n\n\n\t\tself.Output_path = pthl.Path(output_path)\n\n\t\tself._absolute_paths = {}\n\t\tself._absolute_paths['Output_path'] = self.Output_path.absolute()\n\n\t\tif not self.Output_path.absolute().is_dir():\n\t\t\tself.Output_path.absolute().mkdir()\n\n\n\n\n\t\t\n\t\ttry:\n\t\t\tself.markers = self.data['markers']\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.marker_codes = self.data['marker_codes']\n\t\texcept:\n\t\t\tpass\n\n\n\t\ttry:\n\t\t\tself.spikes = self.data['spikes']\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.shape = self.data['shape']\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\ttry:\n\t\t\tself.shape_SEM = self.data['shape_SEM']\n\t\texcept:\n\t\t\tpass\n\n\n\t\tself.parameters = {}\n\t\t\n\n\n\t\t# self.parameters['ops_directory'] = curr_dir\n\n\t\t# assert type(meta_data) == dict, 'meta data should be a ditionary from athena'\n\t\t\n\t\t\n\t\tprint('\\n\\n*********\\n')\t\n\t\tprint ('\\nRun the following methods:'\n\t\t\t\t'\\n self.sort() - to sort trials and generate PSTHs.'\n\t\t\t\t'\\n self.conditions() - to generate and define condition descriptions and labels'\n\t\t\t\t'\\nself._analyse() - to extract out the relevant measurement of response amplitude for each condition\\n\\n'\n\t\t\t\t'Saving and plotting functions may then be used.')",
"def initialize(self) -> None:\n self.model = load(self.path)",
"def __init__(self, data):\n self.data = data\n self.func = RandomForest._read_func_from_data(data[\"model\"])",
"def init_data(in_arg, model_param, phase=\"train\"): \n # Firstly, set the directories\n # PRE-REQUISITES: \n # train & valid sets (1 per folder) must exist within the in_arg.data_dir (to improve if I have some time later on)\n # train folder must be \"train\", validation folwer must be \"valid\"\n # each file must be correctly classified (=within the correct id folder). file name doesn't matter\n model_param['data_dir'] = in_arg.data_dir\n train_dir = model_param['data_dir'] + '/train'\n valid_dir = model_param['data_dir'] + '/valid'\n\n model_param['save_dir'] = in_arg.save_dir\n \n # Prepare the transformations for train & validation sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n try:\n # Load the datasets with ImageFolder\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transforms)\n\n model_param['class_to_idx'] = train_dataset.class_to_idx\n \n # TODO: Using the image datasets and the trainforms, define the dataloaders\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=in_arg.batch_size, shuffle = True)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=in_arg.batch_size, shuffle = True)\n\n # Initialize the cat_to_name catalog\n #with open(in_arg.cat_to_name, 'r') as f:\n #cat_to_name = json.load(f)\n # model_param['cat_to_name'] = json.load(f)\n\n except Exception as e:\n print(\"An exception occured: {}.\".format(e))\n sys.exit(0)\n\n print(\"Data loading completed!\")\n\n # Return all parameters we will need later on\n return train_loader, valid_loader, model_param",
"def __init__(\n self,\n dataset: Dataset,\n compiled_model_path: PathOrURI,\n modelframework: str,\n input_model_path: PathOrURI):\n self.modelframework = modelframework\n self.input_model_path = input_model_path\n self.outputtypes = [self.modelframework]\n super().__init__(dataset, compiled_model_path)",
"def __init__(self, input_directory, save_directory):\n self.input_directory = input_directory\n self.save_directory = save_directory\n self.__concatonate_files_controller()",
"def initialize(self, args):\n # You must parse model_config. JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9",
"def init_model(model_filename, doGPU):\n # set model attributes list\n ##print(\"Model-dataset =\", model_ds_name)\n ##if model_ds_name == 'modelRAP':\n ## model_labels = loader_rapdataset_yiqiang.ATTRIBUTES\n ##elif model_ds_name == 'modelPETA':\n ## model_labels = loader_peta_dataset.ATTRIBUTES\n ##elif model_ds_name == 'modelRAPPETA':\n ## model_labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n ##else:\n ## print(\"ERROR: unknown model-dataset.\")\n ## sys.exit()\n model_labels = loader_rap_plus_peta_dataset.ATTRIBUTES\n assert (len(model_labels) == 49)\n\n # create model\n person.NO_ATTRIBUTES = len(model_labels) #TODO-elo: ugly, attr. nbr should be a parameter of person.Net.__init__()\n net = person.Net()\n if doGPU:\n net = person.Net().cuda()\n\n # load model\n print('loading model \"' + model_filename + '\"')\n person.load_model(net, model_filename)\n\n return net, model_labels",
"def __init__(self):\n cwd = os.path.join(os.path.dirname(__file__), config.vosk_model_dir)\n self.model = Model(cwd)\n logger.info(f'Loaded speech recognition model from {cwd}')",
"def __init__(self, f, save_directory=None, function_name=None, version=None, input_name=None):\n self.f = f \n self.save_directory = save_directory\n self.function_name = function_name if function_name else f.__name__\n self.version = version if version else ''\n self.input_name = input_name\n \n if save_directory is None:\n self.save_directory = self.save_directory_default\n \n if not exists(self.save_directory):\n makedirs(self.save_directory)\n \n suf_len = len(self._suffix)\n self.cache = [f[:-suf_len] for f in listdir(self.save_directory) if isfile(f) and f[-suf_len:] == self._suffix]",
"def __init__(self, root='/tmp', url=None, name=None):\n if url is None:\n url = 'http://188.138.127.15:81/models/model_heavy_89acc.h5'\n if name is None:\n name = 'model_heavy_89acc.h5'\n if not isdir(root):\n makedirs(root)\n\n filepath = join(root, name)\n if not isfile(filepath):\n print('could not find model.. downloading it')\n dl.download(url, filepath)\n\n self.model = load_model(filepath)",
"def __init__(self, function, base_model=None, num_config_vars=0):\n self.wt_function = function\n self.work_tracker = WorkTracker()\n self.base_model = base_model\n self.num_config_vars = num_config_vars",
"def __init__(self, args):\n\n self.directory = args.directory\n \n # generate string with only base data directory\n dirstring = os.path.basename(os.path.normpath(self.directory))\n self.string_directory = \"Data: {}\".format(dirstring) \n\n # get location of this file\n self.filepath = os.path.dirname(os.path.realpath(__file__))\n\n # initialize attributes to hold files and (inferEM, sample) dirs\n self.files = []\n self.betas = []\n self.compare_segs_dict = {}\n self.sample_dirs = {}\n self.prob_dirs = {}\n self.full_dataset = None\n self.full_dataset_tuple = None",
"def __init__(self, load_model_dir=None):\n \n if load_model_dir:\n raise RuntimeError('Whoops. Not implemented yet')\n \n ## Load pickeled preprocessing function (applied to raw features)\n ## Load pickeled postprocessing function (applied to labels before output)\n ## Load tf model",
"def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()",
"def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()",
"def __init__(self, path, verbose=1):\n self.model = load_model(path)\n if verbose:\n self.model.summary()\n self.path = path",
"def __init__(self):\n super().__init__()\n self.indices_dir = ''\n self.split_file = ''\n\n self.model = '' # string identifying the model\n self.experiment = '' # string to describe experiment\n self.maps = [data.ID_MAP_T1H2O, data.ID_MAP_FF, data.ID_MAP_B1] # the used maps\n self.patch_size = [1, 32, 32]\n\n # training configuration\n self.loss = 'mse' # string identifying the loss function (huber, mse or mae)\n self.learning_rate = 0.01 # the learning rate\n self.dropout_p = 0.2\n self.norm = 'bn' # none, bn\n\n # we use the mean absolute error as best model score\n self.best_model_score_is_positive = True\n self.best_model_score_name = 'mae'"
] | [
"0.71706414",
"0.69452214",
"0.6822937",
"0.66740066",
"0.6421178",
"0.63648045",
"0.6306152",
"0.62590146",
"0.6255789",
"0.62344706",
"0.62207854",
"0.62166464",
"0.6183033",
"0.61792994",
"0.61410034",
"0.61369526",
"0.61364913",
"0.61285555",
"0.61272836",
"0.6064312",
"0.6054011",
"0.6005909",
"0.599429",
"0.5978322",
"0.59615684",
"0.5937952",
"0.59171677",
"0.5896271",
"0.58921754",
"0.588874"
] | 0.69727397 | 1 |
Parse the dadi SFS file and return it as a Spectrum object. Dadi will do basic checking of the spectrum, but we will be more thorough. | def load_sfs(self, sfs):
try:
fs = dadi.Spectrum.from_file(sfs)
except:
            print('The spectrum file you provided is not valid!')
exit(1)
return fs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_file(self):\n file_time = ''\n num_dir = 0\n num_freq = 0\n freq_w_band = 0.0\n freq_0 = 0.0\n start_dir = 0.0\n\n dspec_matrix = []\n\n # Extract the file time from the file name\n input_file_name = self._stream_handle.name\n\n match = FILE_NAME_MATCHER.match(input_file_name)\n\n if match:\n file_time = match.group(1)\n else:\n error_message = 'Unable to extract file time from DSpec input file name: %s '\\\n % input_file_name\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the first line in the file\n line = self._stream_handle.readline()\n\n # loop over all lines in the data file\n while line:\n\n if EMPTY_LINE_MATCHER.match(line):\n # ignore blank lines, do nothing\n pass\n\n elif HEADER_MATCHER.match(line):\n\n # we need header records to extract useful information\n for matcher in HEADER_MATCHER_LIST:\n header_match = matcher.match(line)\n\n if header_match is not None:\n\n # Look for specific header lines and extract header fields\n if matcher is DIR_FREQ_MATCHER:\n num_dir = int(header_match.group(1))\n num_freq = int(header_match.group(2))\n\n elif matcher is FREQ_BAND_MATCHER:\n freq_w_band = header_match.group(1)\n freq_0 = header_match.group(2)\n\n elif matcher is START_DIR_MATCHER:\n start_dir = header_match.group(1)\n\n else:\n #ignore\n pass\n\n elif DSPEC_DATA_MATCHER.match(line):\n\n # Extract a row of the Directional Surface Spectrum matrix\n sensor_match = DSPEC_DATA_MATCHER.match(line)\n data = sensor_match.group(1)\n values = [int(x) for x in data.split()]\n\n num_values = len(values)\n\n # If the number of values in a line of data doesn't match num_dir,\n # Drop the record, throw a recoverable exception and continue parsing\n if num_values != num_dir:\n error_message = 'Unexpected Number of directions in line: expected %s, got %s'\\\n % (num_dir, num_values)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n else:\n # Add the row to the dspec matrix\n dspec_matrix.append(values)\n\n else:\n # Generate a warning for unknown data\n error_message = 'Unexpected data found in line %s' % line\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the next line in the file\n line = self._stream_handle.readline()\n\n # Check to see if the specified number of frequencies were retrieved from the data\n dspec_matrix_length = len(dspec_matrix)\n if dspec_matrix_length != num_freq:\n error_message = 'Unexpected Number of frequencies in DSpec Matrix: expected %s, got %s'\\\n % (num_freq, dspec_matrix_length)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # Construct the parsed data list to hand over to the Data Particle class for particle creation\n parsed_data = [\n file_time, # ('file_time', 0, str),\n num_dir, # ('num_dir', 1, int),\n num_freq, # ('num_freq', 2, int),\n freq_w_band, # ('freq_w_band', 3, float),\n freq_0, # ('freq_0', 4, float),\n start_dir, # ('start_dir', 5, float),\n dspec_matrix # ('directional_surface_spectrum', 6, list)]\n ]\n\n # Extract a particle and append it to the record buffer\n particle = self._extract_sample(AdcptMDspecInstrumentDataParticle, None, parsed_data)\n self._record_buffer.append(particle)",
"def load_spectrum(inputfile):\n if inputfile.endswith(\"fits\"):\n wav, flux = spectrum_sdss_fits(inputfile)\n imodel = False\n inu = False\n\n else:\n f = open(inputfile, \"r\")\n # Read header\n try:\n nn = int(f.tell())\n f.readline()\n except BaseException:\n pass\n\n # Read first line\n f.readline()\n # Check format of second line\n test = f.readline()\n f.seek(0) # rewind to begining\n\n # Read data\n if (len(test.split()) == 10) or (len(test.split()) == 6): # test62\n wav, flux = spectrum_test62(f)\n imodel = True\n inu = True\n\n elif len(test.split(\",\")) == 2 or len(test.split(\",\")) == 4: # csv\n wav, flux = spectrum_csv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 2: # tsv\n wav, flux = spectrum_tsv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 3: # tsv with uncertainties\n wav, flux = spectrum_tsv3(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 5 or len(test.split()) == 7: # mics format\n wav, flux = spectrum_misc(f)\n imodel = False\n inu = False\n\n else:\n\n raise ValueError(f\"Unknown format for {inputfile}.\")\n\n f.close()\n\n return Spectrum(wav, flux, (imodel, inu))",
"def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)",
"def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)",
"def _read_sp(sp_file):\n content = sp_file.read()\n\n start_byte = 0\n n_bytes = 4\n signature = content[start_byte:start_byte + n_bytes]\n\n start_byte += n_bytes\n # the description is fixed to 40 bytes\n n_bytes = 40\n description = content[\n start_byte:start_byte + n_bytes].decode('utf8')\n\n meta = {'signature': signature,\n 'description': description}\n spectrum = []\n\n NBP = []\n start_byte += n_bytes\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n while block_id != 122 and start_byte < len(content) - 2:\n next_block_id = content[start_byte:start_byte + 2]\n if indexbytes(next_block_id, 1) == 117:\n start_byte = NBP[-1]\n NBP = NBP[:-1]\n while start_byte >= NBP[-1]:\n NBP = NBP[-1]\n else:\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n\n meta.update(_decode_5104(\n content[start_byte:start_byte + block_size]))\n\n start_byte = NBP[1]\n while start_byte < len(content):\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n if block_id in FUNC_DECODE.keys():\n decoded_data = FUNC_DECODE[block_id](\n content[start_byte:start_byte + block_size])\n if isinstance(decoded_data, dict):\n meta.update(decoded_data)\n else:\n spectrum = decoded_data\n start_byte += block_size\n\n wavelength = np.linspace(meta['min_wavelength'],\n meta['max_wavelength'],\n meta['n_points'])\n\n if isinstance(sp_file, string_types):\n meta['filename'] = basename(sp_file)\n else:\n meta['filename'] = basename(sp_file.name)\n\n return Spectrum(spectrum, wavelength, meta)",
"def spectrum_sdss_fits(f):\n\n hdul = fits.open(f)\n\n if \"SDSS\" in hdul[0].header[\"TELESCOP\"]:\n # .fits from SDSS\n data = hdul[1].data\n\n # log10(wav) in the .fits\n wav = 10.0 ** data.field(1) # Angstrom\n\n # flux F_lambda in units of de 1e-17 erg/...\n flux = data.field(0) * 1e-17 # erg/cm^2/s/Ang\n\n # c_ang = speed of light in angstrom / s\n # flux *= wav**2/sc.c_ang # erg/cm^2/s/Hz\n\n hdul.close()\n return wav, flux\n\n else:\n raise Exception(\"Unknown .fits format.\")",
"def SJSspectrum(d, pathtosed=\"{}/static/ML_SEDs/\".format(PACKAGEDIR)):\n try:\n sed = pd.read_csv(\"{}{}_SED.txt\".format(pathtosed, d.SpT), delim_whitespace=True, names=[\"wav\",\"val\",\"e_val\"])\n return sed.val.values, sed.e_val, sed.wav.values*u.angstrom\n except FileNotFoundError: #no spectrum found in library\n return np.nan, np.nan, np.nan",
"def read_spectrum(specfile):\n hdu = pyfits.open(specfile)\n w = [a[0] for a in hdu[0].data]\n f = [a[1] for a in hdu[0].data]\n if 'cassis' in specfile.name:\n ef = [a[2] for a in hdu[0].data]\n colS = 'b'\n elif 'sws' in specfile.name:\n ef = [a[3] for a in hdu[0].data]\n colS = 'g'\n \n f2, ef2 = [], []\n for i in range(0, len(f)):\n f2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[0])\n ef2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[1])\n \n wvlen = [a[0] for a in sorted(zip(w,f2))]\n flux = [a[1] for a in sorted(zip(w,f2))]\n eflux = [a[1] for a in sorted(zip(w,ef2))]\n \n return wvlen,flux,eflux,colS",
"def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set",
"def spectre_sdss_fits(f):\n hdul = fits.open(f)\n \n if 'SDSS' in hdul[0].header['TELESCOP']:\n # .fits from SDSS\n data = hdul[1].data\n \n # log10(wav) dans les .fits\n wav = 10.**data.field(1) # Angstrom\n \n # flux F_lambda en unités de 1e-17 erg/...\n flux = data.field(0)*1e-17 # erg/cm^2/s/Ang\n \n # c_ang = vitesse de la lumière en angstrom / s\n # flux *= wav**2/sc.c_ang # erg/cm^2/s/Hz\n \n hdul.close()\n return wav, flux\n \n else:\n raise Exception('.fits format inconnu')",
"def build_spectrum(spectrum_filename):\n hdulist = fits.open(spectrum_filename)\n data = hdulist[1].data\n \n spec = Spectrum(data['wave'], data['flux'], data['error'])\n \n return spec",
"def deimos_spectrum2D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n hdulist[1].header['CTYPE2'] = 'Spatial Y'\n wcs = WCS(hdulist[1].header)\n # original WCS has both axes named \"LAMBDA\", glue requires unique component names\n\n data.coords = coordinates_from_wcs(wcs)\n data.header = hdulist[1].header\n data.add_component(hdulist[1].data['FLUX'][0], 'Flux')\n data.add_component(1/np.sqrt(hdulist[1].data['IVAR'][0]), 'Uncertainty')\n return data",
"def import_sdf(self, fname):\n self.ftype = 'sdf'\n with open(fname) as f:\n lines = f.readlines()\n self.n_atom = int(lines[3].split()[0])\n self.n_connect = int(lines[3].split()[1])\n self.sym = []\n self.at_num = []\n self.xyz = np.zeros((self.n_atom, 3))\n for i, line in enumerate(lines[4:4+self.n_atom]):\n tmp = line.split()\n self.sym.append(tmp[3])\n self.at_num.append(self.sym2num(tmp[3]))\n self.xyz[i, 0] = float(tmp[0])\n self.xyz[i, 1] = float(tmp[1])\n self.xyz[i, 2] = float(tmp[2])\n self.connect = np.zeros((self.n_connect, 2))\n for i, line in enumerate(lines[4+self.n_atom:4+self.n_atom+self.n_connect]):\n tmp = line.split()\n self.connect[i, 0] = tmp[0]\n self.connect[i, 1] = tmp[1]",
"def deimos_spectrum1D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='1D Spectrum')\n data.header = hdulist[1].header\n\n full_wl = np.append(hdulist[1].data['LAMBDA'][0], hdulist[2].data['LAMBDA'][0])\n full_spec = np.append(hdulist[1].data['SPEC'][0], hdulist[2].data['SPEC'][0])\n full_ivar = np.append(hdulist[1].data['IVAR'][0], hdulist[2].data['IVAR'][0])\n\n data.add_component(full_wl, 'Wavelength')\n data.add_component(full_spec, 'Flux')\n data.add_component(1/np.sqrt(full_ivar), 'Uncertainty')\n\n return data",
"def read_sdss(name):\n flux=py.getdata(name,0)\n wdel=py.getval(name,'CD1_1',0)\n w0=py.getval(name,'CRVAL1',0)\n wave= 10.0**(w0+wdel*np.arange(len(flux[0])))\n \n return(wave,flux[0]*1e-17)",
"def parseDigitalSValRecord(self, f):\n try:\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n except IndexError:\n newsize = len(self.digitalsvalrecords) + DEFNDIGITALSVALRECORDS\n self.digitalsvalrecords.resize(newsize, refcheck=False)\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n #junk, junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('<hiqhhi', f.read(22))\n #junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('qqhhi', f.read(24))\n junk, r['TimeStamp'], r['SVal'], junk, junk = unpackdsvalrec(f.read(24))\n self.ndigitalsvalrecords += 1",
"def readAmesDustySpectrum(fname=''):\n print('Reading : ', fname)\n\n # Get the effective temperature, logg and metallicity from the file name\n ind = fname.find('lte')\n fname_tags = fname[ind+3:ind+13].split('-')\n teff = np.float(fname_tags[0]) * 100.\n logg = np.float(fname_tags[1]) * 100.\n mph = np.float(fname_tags[2]) * 100.\n\n wav = []\n inu = []\n bnu = []\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n while dum != '':\n dum = str(dum).replace('D', 'E')\n sdum = dum.split()\n wav.append(np.float(sdum[0]))\n inu.append(np.float(sdum[1]))\n bnu.append(np.float(sdum[2]))\n dum = rfile.readline()\n\n wav = np.array(wav)\n inu = np.array(inu)\n bnu = np.array(bnu)\n ii = wav.argsort()\n\n wav = wav[ii]\n inu = inu[ii]\n bnu = bnu[ii]\n\n # \"Decode\" the intensity arrays\n inu = 10.**(inu - 8.0) * wav\n bnu = 10.**(bnu - 8.0) * wav\n\n # Convert the wavelength to micron from Angstrom\n wav /= 1e4\n nwav = wav.shape[0]\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}",
"def read(cls, path_or_file_like):\n if type(path_or_file_like) is str and not path_or_file_like.endswith(cls.file_suffix):\n raise IOError('Can only read {} file.'.format(cls.file_suffix))\n\n tbl = _table.Table.read(path_or_file_like, format='ascii.ecsv')\n w, dw, y = [tbl[s].quantity for s in ['w', 'dw', 'y']]\n tbl.remove_columns(['w', 'dw', 'y'])\n if 'err' in tbl.colnames:\n e = tbl['err'].quantity\n tbl.remove_column('err')\n else:\n e = None\n\n refs = tbl.meta['references']\n if 'meta' in tbl.meta:\n meta = tbl.meta['meta']\n else: # backwards compatability\n if 'notes' in tbl.meta:\n meta = {'misc notes':tbl.meta['notes']}\n\n ynames = tbl.meta['ynames']\n\n if len(tbl.colnames) > 0:\n other_data = {}\n for key in tbl.colnames:\n other_data[key] = tbl[key].quantity\n else:\n other_data = None\n\n spec = Spectrum(w, y, e, dw=dw, other_data=other_data, yname=ynames, references=refs, meta=meta)\n return spec",
"def parse_linetools_spectrum_format(hdulist, **kwargs):\n if 'WAVELENGTH' not in hdulist:\n pdb.set_trace()\n xspec1d = XSpectrum1D.from_spec1d(spec1d)\n else:\n wave = hdulist['WAVELENGTH'].data * u.AA\n fx = hdulist['FLUX'].data\n\n # Error array\n if 'ERROR' in hdulist:\n sig = hdulist['ERROR'].data\n else:\n sig = None\n\n if 'CONTINUUM' in hdulist:\n co = hdulist['CONTINUUM'].data\n else:\n co = None\n\n xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, co), **kwargs)\n\n if 'METADATA' in hdulist[0].header:\n # Prepare for JSON (bug fix of sorts)\n metas = hdulist[0].header['METADATA']\n ipos = metas.rfind('}')\n try:\n xspec1d.meta.update(json.loads(metas[:ipos+1]))\n except:\n # TODO: fix this in a better manner, if possible\n print(\"Bad METADATA; proceeding without\")\n\n return xspec1d",
"def read_muscles(cls, path, format=None):\n try:\n path = _tbl.Table.read(path, hdu=1)\n except:\n pass\n try:\n w0, w1, f, e = [path[s].quantity for s in\n ['w0', 'w1', 'flux', 'error']]\n except KeyError:\n w0, w1, f, e = [path[s].quantity for s in\n ['WAVELENGTH0', 'WAVELENGTH1', 'FLUX', 'ERROR']]\n\n gaps = w0[1:] != w1[:-1]\n igaps, = _np.nonzero(gaps)\n f, e = [_np.insert(a, igaps, _np.nan) for a in [f, e]]\n wedges = _np.unique(_np.concatenate([w0.value, w1.value])) * w0.unit\n return Spectrum(None, f, e, wbins=wedges, yname=['f', 'flux'])",
"def load_sdss(sdss_filename=\"\", **extras):\n import astropy.io.fits as fits\n with fits.open(sdss_filename) as hdus:\n spec = np.array(hdus[1].data)\n info = np.array(hdus[2].data)\n line = np.array(hdus[3].data)\n return spec, info, line",
"def _read_sdsc_chunk(self, chunk):\n try:\n (s_size, acronym, paraname,\n unitname, snsamples, self._framerate,\n self._s_max, self._s_min, cmax, self._czero,\n imax, fmax) = unpack(\n '<L' # s_size 4\n 'L' # acronym 4\n '80s' # paraname 80\n '16s' # unitname 16\n 'L' # snsamples 4\n 'L' # _framerate 4 (Freq)\n 'h' # s_max 2\n 'h' # s_min 2\n 'h' # cmax 2\n 'h' # _czero 2\n 'i' # imax 4\n 'L', # fmax 8\n chunk.read(128)\n )\n except struct.error:\n raise EOFError from None\n\n # handle redundant characters\n self._paraname = paraname.replace(b'\\x00', b'').decode('ascii')\n self._unitname = unitname.replace(b'\\x00', b'').decode('ascii')\n\n # Calibration setting\n self._signaldynamic = float(cmax - self._czero)\n self._valueatmax = float(imax) + fmax / float(100000)",
"def nirspec_spectrum2d_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n data.header = hdulist['DATA'].header\n data.coords = coordinates_from_header(hdulist[1].header)\n data.add_component(hdulist['DATA'].data, 'Flux')\n data.add_component(np.sqrt(hdulist['VAR'].data), 'Uncertainty')\n\n return data",
"def from_file(fid, mask_infeasible=True, return_comments=False):\n newfile = False\n # Try to read from fid. If we can't, assume it's something that we can\n # use to open a file.\n if not hasattr(fid, 'read'):\n newfile = True\n fid = open(fid, 'r')\n\n line = fid.readline()\n # Strip out the comments\n comments = []\n while line.startswith('#'):\n comments.append(line[1:].strip())\n line = fid.readline()\n\n # Read the shape of the data\n shape,folded = line.split()\n shape = [int(shape)+1,int(shape)+1,int(shape)+1]\n\n data = np.fromstring(fid.readline().strip(), \n count=np.product(shape), sep=' ')\n # fromfile returns a 1-d array. Reshape it to the proper form.\n data = data.reshape(*shape)\n\n maskline = fid.readline().strip()\n mask = np.fromstring(maskline, \n count=np.product(shape), sep=' ')\n mask = mask.reshape(*shape)\n \n if folded == 'folded':\n folded = True\n else:\n folded = False\n\n # If we opened a new file, clean it up.\n if newfile:\n fid.close()\n\n fs = TLSpectrum(data, mask, mask_infeasible, data_folded=folded)\n if not return_comments:\n return fs\n else:\n return fs,comments",
"def _read(self, spec_file: IO[AnyStr], filename: str) -> List[Spectrum]:\n raise NotImplementedError(SpectrumReader._read.__qualname__)",
"def parse_DESI_brick(hdulist, select=0, **kwargs):\n fx = hdulist[0].data\n # Sig\n if hdulist[1].name in ['ERROR', 'SIG']:\n sig = hdulist[1].data\n else:\n ivar = hdulist[1].data\n sig = np.zeros_like(ivar)\n gdi = ivar > 0.\n sig[gdi] = np.sqrt(1./ivar[gdi])\n # Wave\n wave = hdulist[2].data\n wave = give_wv_units(wave)\n if wave.shape != fx.shape:\n wave = np.tile(wave, (fx.shape[0],1))\n # Finish\n xspec1d = XSpectrum1D(wave, fx, sig, select=select, **kwargs)\n return xspec1d",
"def fromFile(self,fn = None):\n\n while True:\n\n if fn == None:\n fn = getFilename(\"Spectrometer file\",\"spec\")\n else:\n fn = getExpandedFilename(fn) # Sort out logicals\n if not fn.endswith(\"spec\"): # Append \".spec\" if not given\n fn += \".spec\"\n\n try:\n sfile= open(fn,\"r\") # open file\n lines = sfile.readlines()\n sfile.close()\n break\n except FileNotFoundError:\n getLogger().error(\"Failed to find spectrometer file : \" + str(fn))\n fn = None\n\n\n # read file and process one line at a time\n #\n\n # Read through line at a time\n for line in lines:\n\n line = line.strip()\n if not line.startswith(\"#\") and len(line) > 0: # Kill comments and blanks\n token = line.split()\n\n if token[0].startswith(\"point\"):\n v = eval(token[1])\n self.setPoint(v)\n\n elif token[0].startswith(\"index\"):\n self.setIndex(token[1])\n\n elif token[0].startswith(\"angle\"):\n self.angle = math.radians(float(token[1]))\n self.setTilt(self.tilt) # Reset surfaces\n\n elif token[0].startswith(\"height\"):\n self.height = float(token[1])\n\n elif token[0].startswith(\"beam\"):\n self.beam = float(token[1])\n\n elif token[0].startswith(\"tilt\"):\n self.setTilt(math.radians(token[1]))\n\n elif token[0].startswith(\"setup\"):\n self.setUpWavelength(float(token[1]))\n\n else:\n raise ValueError(\"Sprectometer: illegal key : {0:s}\".format(token[0]))\n\n return self",
"def read_sdf(fname: Union[str, Path]) -> List[Chem.Mol]:\n supplier = Chem.SDMolSupplier(str(fname), removeHs=False)\n mols = [mol for mol in supplier]\n return mols",
"def spectrum(self):\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return spectrum",
"def load_spectrum(fname):\n\n #Get real path of spectrum\n fname = os.path.realpath(fname)\n\n # Load spectrum\n if fname.split('.')[1] == 'fits':\n spec_FITS = pyfits.open(fname)\n #Load flux\n flux = spec_FITS[0].data\n\n #Obtain parameters for wavelength determination from header\n ref_pixel = spec_FITS[0].header['CRPIX1'] # Reference pixel\n coord_ref_pixel = spec_FITS[0].header['CRVAL1'] # Wavelength at ref. pixel\n wave_pixel = spec_FITS[0].header['CDELT1'] # Wavelength per pixel\n\n #Get starting wavelength\n wstart = get_wstart(ref_pixel, coord_ref_pixel, wave_pixel)\n\n #Obtain array of wavelength\n wave = get_wavelength(wstart, wave_pixel, len(flux))\n\n return np.dstack((wave, flux))[0]\n else:\n return loadtxt_fast(fname, np.float)"
] | [
"0.69581074",
"0.647524",
"0.63855165",
"0.63855165",
"0.6374014",
"0.63154924",
"0.6295988",
"0.61925375",
"0.6125525",
"0.6066723",
"0.60588676",
"0.6026092",
"0.5996392",
"0.5974746",
"0.5853758",
"0.5807754",
"0.5777854",
"0.57754326",
"0.5728101",
"0.57191104",
"0.57136464",
"0.5702443",
"0.56717926",
"0.56573695",
"0.5645897",
"0.5612351",
"0.56089354",
"0.5608517",
"0.55933857",
"0.55932766"
] | 0.7699114 | 0 |
Given a model name, set the function that has to be called to run that model. This should be safe because we restrict the user input for the models at the argument parsing stage. | def set_model_func(self, model):
if model == 'SI':
import cavefish_dadi.Models.si
return cavefish_dadi.Models.si.si
elif model == 'SC':
import cavefish_dadi.Models.sc
return cavefish_dadi.Models.sc.sc
elif model == 'IM':
import cavefish_dadi.Models.im
return cavefish_dadi.Models.im.im
elif model == 'AM':
import cavefish_dadi.Models.am
return cavefish_dadi.Models.am.am
elif model == 'SC2M':
import cavefish_dadi.Models.sc2m
return cavefish_dadi.Models.sc2m.sc2m
elif model == 'IM2M':
import cavefish_dadi.Models.im2m
return cavefish_dadi.Models.im2m.im2m
elif model == 'AM2M':
import cavefish_dadi.Models.am2m
return cavefish_dadi.Models.am2m.am2m
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function",
"def get_function(model_or_function, preprocess_function=None):\n from dianna.utils.onnx_runner import SimpleModelRunner # pylint: disable=import-outside-toplevel\n\n if isinstance(model_or_function, Path):\n model_or_function = str(model_or_function)\n\n if isinstance(model_or_function, (str, bytes, Path)):\n runner = SimpleModelRunner(model_or_function,\n preprocess_function=preprocess_function)\n elif callable(model_or_function):\n if preprocess_function is None:\n runner = model_or_function\n else:\n\n def runner(input_data):\n return model_or_function(preprocess_function(input_data))\n else:\n raise TypeError(\n 'model_or_function argument must be string (path to model), '\n 'bytes (serialized onnx model), or function')\n return runner",
"def set_model(self, model_name):\n pass",
"async def gpt2_set_model(self, ctx, *, arg=None):\n print('Command gpt2_set_model triggered')\n if arg:\n if arg in VALID_DEFAULT_MODELS:\n self.update_config(model_name=arg)\n else:\n await ctx.send(f\"ERROR: Invalid model name {arg}\")\n else:\n await ctx.send(\"ERROR: Argument required\")",
"def get_model_function(name: str):\n if name not in REGISTRY:\n names = \", \".join(sorted(REGISTRY.keys()))\n raise KeyError(f\"Model {name} not found in registry. Available names: {names}\")\n return REGISTRY[name]",
"def set_model_name(self, model_name: str = \"355M\") -> None:\n self.model_name = model_name",
"def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)",
"def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")",
"def test_model_processor():\n global model_processor_called\n\n model_str = 'first 34 45 7 A 45 65 B true C \"dfdf\"'\n\n metamodel = metamodel_from_str(grammar)\n metamodel.register_model_processor(model_processor)\n\n metamodel.model_from_str(model_str)\n\n assert model_processor_called",
"def apply_model(model: BaseModel, **kwargs):\n raise NotImplementedError(f'Unknown model: {model}')",
"def run_model(self, exe_name=None, nam_file=None, silent=False):\n from ..mbase import run_model\n\n if exe_name is None:\n exe_name = self._exe_name\n if nam_file is None:\n nam_file = os.path.join(self._name + self._extension)\n return run_model(\n exe_name, nam_file, model_ws=self._model_ws, silent=silent\n )",
"def set_model(*, name: str, model: typing.Type) -> None:\n setattr(open_alchemy.models, name, model)",
"def model_name(self, model_name: str):\n\n self._model_name = model_name",
"def build_model_fn(self):",
"def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()",
"def set_models_eval(self):\n raise NotImplementedError",
"def select_model(model_name: str):\r\n global predictor, currently_selected_model\r\n predictor = FeatureExtractor(model_name)\r\n currently_selected_model = model_name",
"def get_model_fn(model, num_classes, spatial_res):\n\n model = model.lower()\n if model == \"cnn\": return get_cnn_fn(model, num_classes)\n if model in RESNET_FNS: return get_resnet_fn(model, num_classes, spatial_res)\n if model in VIT_FNS: return get_vit_fn(model, num_classes, spatial_res)\n if model in EFFICIENTNET_FNS: return get_efficientnet_fn(model, num_classes,\n spatial_res)\n raise ValueError(f\"Model {model} not recognized.\")",
"def call(self, model):\n raise NotImplementedError('Define your score here')",
"def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()",
"def set_model_name(self, name):\n self.model_name = name",
"def main():\n model = Calculator()",
"def set_obj_func(self, funcName):\n if hasattr(funcName, '__call__'):\n self.func = funcName\n else:\n try:\n self.func = getattr(self, funcName)\n assert hasattr(self.func, '__call__'), 'Invalid function handle'\n except KeyError:\n print ('ERROR: The function specified does not exist in the ObjectiveFunction class or the _FUNC_DICT. Allowable methods are {}').format(self._FUNC_DICT)",
"def build_model(name, **model_params):\n assert name in globals().keys(),\\\n \"%s must be a model imported/defined in models/__init__.py\" % name\n return globals()[name](**model_params)",
"def check_model(func):\n def get_site(request):\n keyword_id = request.POST.get('keyword_id', None)\n if keyword_id:\n keyword = models.Keyword.objects.get(id=keyword_id)\n return keyword.site\n\n site_id = request.POST.get('site_id', None)\n if site_id:\n return models.Site.objects.get(id=site_id)\n\n msg = 'Can not get site by site_id'\n logger.info(msg)\n raise Exception(msg)\n\n def inner(request, *args, **kw):\n site = get_site(request)\n\n model_name = site.get_job_model()\n model = getattr(models, model_name, None)\n if model is None:\n msg = 'Wrong model name'\n logger.info(msg)\n raise Exception(msg)\n\n kw['job_model'] = model\n return func(request, *args, **kw)\n return inner",
"def default_invoke(self, func_name: str = \"main\"):\n funcs = [func_name]\n if \"main\" not in func_name:\n funcs.append(\"main\")\n\n state = next(self.ready_states)\n for name in funcs:\n func_inst: typing.Optional[FuncInst] = state.platform.get_export(name)\n if isinstance(func_inst, FuncInst):\n func_ty = func_inst.type\n\n args = []\n for idx, ty in enumerate(func_ty.param_types):\n if ty in {I32, F32}:\n args.append(state.new_symbolic_value(32, f\"arg{idx}_{ty.__name__}\"))\n elif ty in {I64, F64}:\n args.append(state.new_symbolic_value(64, f\"arg{idx}_{ty.__name__}\"))\n\n self.invoke(name=name, argv_generator=lambda s: args)\n break",
"def train_model(model, X_train, y_train, X_val, y_val, image_name):\n if MODEL == 1:\n return train_model_1(model, X_train, y_train, X_val, y_val, image_name)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return train_cv_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n else:\n return train_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n elif MODEL == 2:\n return train_model_2(model, X_train, y_train, X_val, y_val, image_name)\n else:\n # For models 4, 5 and 6\n return train_model_4(model, X_train, y_train, image_name)",
"def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return recurrent_model(model(*args), **kwargs)\n\n return wrapper",
"def setFunctionName(self, function_name):\r\n self.actualFunction = function_name",
"def runModel(quickLogger,\n\t base,\n modelFile=\"\",\n\t irfs=\"P7SOURCE_V6\",\n run=True):\n \n if(modelFile):\n model = modelFile\n else:\n model = base+\"_likeMinuit.xml\"\n\n\n try:\n checkForFiles(quickLogger,\n [base+\"_srcMaps.fits\",\n model,\n base+\"_ltcube.fits\",\n base+\"_BinnedExpMap.fits\"])\n except(FileNotFound):\n quickLogger.critical(\"One or more needed files do not exist.\")\n return\n\n model_map['srcmaps'] = base+\"_srcMaps.fits\"\n model_map['srcmdl'] = model\n model_map['outfile'] = base+\"_modelMap.fits\"\n model_map['expcube'] = base+\"_ltcube.fits\"\n model_map['irfs'] = irfs\n model_map['bexpmap'] = base+\"_BinnedExpMap.fits\"\n \n runCommand(model_map,quickLogger,run)"
] | [
"0.6543695",
"0.6317053",
"0.6138432",
"0.6007846",
"0.5799357",
"0.5794181",
"0.57566136",
"0.56700885",
"0.55889934",
"0.5577638",
"0.5568402",
"0.5544729",
"0.55088854",
"0.55060846",
"0.5459015",
"0.5454922",
"0.54414725",
"0.53864926",
"0.5364902",
"0.5340603",
"0.5336307",
"0.5238269",
"0.5237726",
"0.5237689",
"0.5231768",
"0.51978904",
"0.5187118",
"0.5186177",
"0.51802754",
"0.51753956"
] | 0.6588683 | 0 |
Summarize the replicate runs and convert the parameter estimates into meaningful numbers. | def summarize(self, locuslen):
# First, calculate the mean of the parameter estimates from each
# of the replicates
hot_means = []
for r_t in zip(*self.hot_params):
v = [x for x in r_t if not math.isnan(x)]
hot_means.append(sum(v)/len(v))
cold_means = []
for r_t in zip(*self.cold_params):
v = [x for x in r_t if not math.isnan(x)]
cold_means.append(sum(v)/len(v))
bfgs_means = []
for r_t in zip(*self.opt_params):
v = [x for x in r_t if not math.isnan(x)]
bfgs_means.append(sum(v)/len(v))
theta_mean = sum(self.theta) / len(self.theta)
# Then, convert the parameters into meaningful values
# the theta estimate is 4*Na*u*L
anc_ne = theta_mean / (4 * 3e-9 * locuslen)
# Then, the parameters are scaled by that. Population sizes are scaled
# by theta (4Na), and times and migration rates are given in units of
# 2N.
scaled_params = []
for name, val in zip(self.params['Names'], bfgs_means):
if name.startswith('N'):
scaled_params.append(val * anc_ne)
elif name.startswith('m'):
scaled_params.append(val /(anc_ne * 2))
elif name.startswith('T'):
scaled_params.append(val * anc_ne * 2)
else:
scaled_params.append(val)
# Write these values into the class data
self.hot_mean = hot_means
self.cold_mean = cold_means
self.bfgs_mean = bfgs_means
self.theta_mean = theta_mean
self.Na = anc_ne
self.scaled_params = scaled_params
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_mcts(self, runs_per_round):\n for i in range(runs_per_round):\n self.select(self.env, 'r')\n self.env_reset()\n counts = [self.Nsa[('r', a)] for a in range(self.actions)]\n # print(\"counts \", counts)\n # print(\"Q-values\", [self.Qsa[('r', a)] for a in range(self.actions)])\n # print()\n return np.argmax(counts)",
"def experiment_runs (ins, exp) :\n return experiment_info.experiment_runs(ins, exp)",
"def load_run_summary(self):\n vrun_attrs = {}\n print 'Loading summary of {:} runs for {:} from SQL database'.format( \\\n len(self.runs),self.exp)\n print 'Estimate loading time ~{:} sec'.format(len(self.runs)/4)\n for run in range(1,self.runs[-1]['num']+1):\n run_attr = experiment_info.run_attributes(self.instrument,self.exp,run)\n for a in run_attr:\n if a['name'] not in vrun_attrs:\n vrun_attrs[a['name']] = {'class': a['class'], 'desc': a['descr'], \n 'type': a['type'], 'val': \n [None for i in range(1,run)]} \n vrun_attrs[a['name']]['val'].append(a['val'])\n self.run_summary = vrun_attrs",
"def test(self, n_test_runs: int = 10) -> None:\n steps: np.ndarray = np.zeros(n_test_runs)\n rewards: np.ndarray = np.zeros(n_test_runs)\n for t in range(n_test_runs):\n steps[t], rewards[t] = self.step(collect=False)\n\n self.get_logger().warn('---------- TEST RUN RESULTS ----------')\n self.get_logger().warn(f'Average: {steps.mean()}')\n self.get_logger().warn(f'STD: {steps.std()}')\n self.get_logger().warn(f'Median: {np.median(steps)}')\n self.get_logger().warn(f'Average Reward: {rewards.mean()}')",
"def LogRun(ss, dt):\n run = ss.TrainEnv.Run.Cur # this is NOT triggered by increment yet -- use Cur\n row = dt.Rows\n dt.SetNumRows(row + 1)\n\n epclog = ss.TrnEpcLog\n epcix = etable.NewIdxView(epclog)\n # compute mean over last N epochs for run level\n nlast = 5\n if nlast > epcix.Len()-1:\n nlast = epcix.Len() - 1\n epcix.Idxs = go.Slice_int(epcix.Idxs[epcix.Len()-nlast:])\n\n params = ss.Learn.name + \"_\" + ss.Pats.name\n\n dt.SetCellFloat(\"Run\", row, float(run))\n dt.SetCellString(\"Params\", row, params)\n dt.SetCellFloat(\"FirstZero\", row, float(ss.FirstZero))\n dt.SetCellFloat(\"SSE\", row, agg.Mean(epcix, \"SSE\")[0])\n dt.SetCellFloat(\"AvgSSE\", row, agg.Mean(epcix, \"AvgSSE\")[0])\n dt.SetCellFloat(\"PctErr\", row, agg.Mean(epcix, \"PctErr\")[0])\n dt.SetCellFloat(\"PctCor\", row, agg.Mean(epcix, \"PctCor\")[0])\n dt.SetCellFloat(\"CosDiff\", row, agg.Mean(epcix, \"CosDiff\")[0])\n\n runix = etable.NewIdxView(dt)\n spl = split.GroupBy(runix, go.Slice_string([\"Params\"]))\n split.Desc(spl, \"FirstZero\")\n split.Desc(spl, \"PctCor\")\n ss.RunStats = spl.AggsToTable(etable.AddAggName)\n\n # note: essential to use Go version of update when called from another goroutine\n if ss.RunPlot != 0:\n ss.RunPlot.GoUpdate()\n if ss.RunFile != 0:\n if row == 0:\n dt.WriteCSVHeaders(ss.RunFile, etable.Tab)\n dt.WriteCSVRow(ss.RunFile, row, etable.Tab)",
"def evaluate(self, runs=100):\n score_record = []\n \n print('Evaluation in progress...')\n for i in range(runs):\n score = self.run_evaluation_episode()\n score_record.append(score)\n \n ave_score = np.mean(score_record)\n \n print('System evaluated with an average score of {} in {} runs'.format(ave_score, runs))",
"def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)",
"def make_runs(start_run=0, end_run=25,\n base_dir=DEFAULT_BASE_DIR):\n for i, reshuffle_mod in enumerate([1, 5, 25, 125, 10000]):\n for j in range(start_run, end_run):\n # Remove all handlers associated with the root logger object.\n # Allows to write the log in another folder.\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n path_exp = \"{}/exp_{}/run_{:02d}\".format(base_dir, i, j)\n me.make_exps(reshuffle_mod, path_exp)\n # If one wants to plot again already made results.\n # me.load_results_and_plot(path_exp)\n out_folder_list = [\"{}/exp_{}/run_{:02d}\".format(base_dir, i, j)\n for j in range(start_run, end_run)]\n me.load_all_results_and_plot(out_folder_list, type_plot=\"average\")\n me.load_all_results_and_plot(out_folder_list, type_plot=\"quantile\")",
"def run(self):\n # Convert the dataset, index: Recovered, column: log10(Susceptible)\n sr_df = self.sr_df.copy()\n sr_df[self.S] = np.log10(sr_df[self.S].astype(np.float64))\n df = sr_df.pivot_table(index=self.R, values=self.S, aggfunc=\"last\")\n # Convert index to serial numbers\n serial_df = pd.DataFrame(np.arange(1, df.index.max() + 1, 1))\n serial_df.index += 1\n df = pd.merge(\n df, serial_df, left_index=True, right_index=True, how=\"outer\"\n )\n series = df.reset_index(drop=True).iloc[:, 0]\n series = series.interpolate(limit_direction=\"both\")\n # Sampling to reduce run-time of Ruptures\n samples = np.linspace(\n 0, series.index.max(), len(self.sr_df), dtype=np.int64\n )\n series = series[samples]\n # Detection with Ruptures\n algorithm = rpt.Pelt(model=\"rbf\", jump=2, min_size=self.min_size)\n results = algorithm.fit_predict(series.values, pen=0.5)\n # Convert index values to Susceptible values\n reset_series = series.reset_index(drop=True)\n reset_series.index += 1\n susceptible_df = reset_series[results].reset_index()\n # Convert Susceptible values to dates\n df = pd.merge_asof(\n susceptible_df.sort_values(self.S),\n sr_df.reset_index().sort_values(self.S),\n on=self.S, direction=\"nearest\"\n )\n found_list = df[self.DATE].sort_values()[:-1]\n # Only use dates when the previous phase has more than {min_size + 1} days\n delta_days = timedelta(days=self.min_size)\n first_obj = self.to_date_obj(self.dates[0])\n last_obj = self.to_date_obj(self.dates[-1])\n effective_list = [first_obj]\n for found in found_list:\n if effective_list[-1] + delta_days < found:\n effective_list.append(found)\n # The last change date must be under the last date of records {- min_size} days\n if effective_list[-1] >= last_obj - delta_days:\n effective_list = effective_list[:-1]\n # Set change points\n self._change_dates = [\n date.strftime(self.DATE_FORMAT) for date in effective_list[1:]\n ]\n return self",
"def evaluate(self, runs = 5, use_gui = False):\n self.env.render(use_gui)\n\n evaluation_results = {\n \"runs\" : runs,\n \"unfinished_runs\" : 0,\n \"average_delay\" : [],\n \"episode_mean_delays\" : [],\n \"episode_delay_lists\" : []\n }\n\n for i in range(runs):\n\n print('Evaluate {} -- running episode {} / {}'.format(self.connection_label,\n i+1,\n runs))\n all_trans, mean_delay, vehicle_delays = self.ddqn.evaluate(env = self.env,\n policy = \"greedy\")\n\n evaluation_results[\"episode_delay_lists\"].append(vehicle_delays)\n evaluation_results[\"episode_mean_delays\"].append(mean_delay)\n\n if mean_delay != -1:\n evaluation_results[\"average_delay\"].append(mean_delay)\n else:\n evaluation_results[\"unfinished_runs\"] += 1\n\n runs -= evaluation_results[\"unfinished_runs\"]\n\n if runs == 0:\n evaluation_results[\"average_delay\"].append(-1)\n else:\n evaluation_results[\"average_delay\"] = sum(evaluation_results[\"average_delay\"])/runs\n\n # print(self.ddqn.q_network.get_weights())\n\n return evaluation_results",
"def replicate(self,simulation_run):\n\n return self._runModel(params=simulation_run.params)",
"def get_mojo_run_info():\n \n #get movies from the db and calulate run info\n run_info_df = movie_helper.get_movie_run_info()\n \n with tqdm(total=len(run_info_df)) as pbar:\n for index, row in run_info_df.iterrows():\n #update the database\n updates = {\"end_weekend\" : row['end_weekend'], \n \"total_weekends\" : row['total_weekends'], \n \"total_release_weeks\" : row['total_release_weeks'], \n \"first_run_end\" : row['first_run_end'],\n \"first_run_weeks\" : row['first_run_weeks']}\n \n selects = {\"movieId\" : row[\"movieId\"]}\n database_helper.update_data(\"movies\", update_params = updates, select_params = selects)\n \n pbar.update(1)",
"def run_simulation(self, number_runs = 1):\n for i in range(0, number_runs):\n self.ques = [self.start for i in range(0, self.numQueues)]\n run = self.__single_sim_results()\n run_results = pd.DataFrame({'simulation':i,\n 'num_items': len(run),\n 'wait_count': len(run[run['wait_time']>datetime.timedelta(seconds=0)]),\n 'avg_wait_time': run.wait_time.mean(),\n 'close_time': max(run['appt_end_time'])}, index=[i])\n self.results = pd.concat([self.results, run_results], ignore_index=True)\n self.results['last_appt_to_close_minutes'] = (self.results['close_time']-self.end).dt.total_seconds().div(60)\n return",
"def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()",
"def _get_traces(\n run, runs, running_runs, all_cses, trace_type='deconvolved', length_fr=15,\n pad_fr=31, offset_fr=1, running_threshold_cms=4., correct_trials=False,\n lick_cutoff=-1, lick_window=(-1, 0), running_fraction=0.3,\n max_n_onsets=-1, remove_stim=True, activity_scale=None):\n if run not in running_runs:\n # Prepare running baseline data.\n # NOTE: running thresholding is done differently here than later during\n # stimulus runs.\n out = {'other-running': _get_run_onsets(\n runs=running_runs,\n length_fr=length_fr,\n pad_fr=pad_fr,\n offset_fr=offset_fr,\n running_threshold_cms=running_threshold_cms)}\n\n for training_run in runs:\n t2p = training_run.trace2p()\n\n # Get the trace from which to extract time points\n trs = t2p.trace(trace_type)\n\n if activity_scale is not None:\n trs = (trs.T*activity_scale).T\n\n # If the target run is also a training run, make sure that we aren't\n # training on the same data that will later be used for comparison\n if remove_stim or training_run != run:\n # Search through all stimulus onsets, correctly coding them\n for ncs in t2p.cses(): # t.cses(self._pars['add-ensure-quinine']):\n if ncs in all_cses:\n # Remap cs name if needed\n # NOTE: blank trials are just labeled 'other' and not\n # checked for running.\n cs = all_cses[ncs]\n # Initialize output\n if cs not in out:\n out[cs] = []\n\n ons = t2p.csonsets(\n ncs, 0 if correct_trials else -1, lick_cutoff,\n lick_window)\n\n for on in ons:\n start = on + offset_fr\n toappend = trs[:, start:start + length_fr]\n # Make sure interval didn't run off the end.\n if toappend.shape[1] == length_fr:\n out[cs].append(toappend)\n\n # If the target run is in the training runs, don't use the times\n # that will later be used for comparison.\n if training_run != run:\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr, -1)\n\n if len(t2p.speed()) > 0:\n running = t2p.speed() > running_threshold_cms\n for ot in others:\n start = ot + offset_fr\n if nanmean(running[start:start + length_fr]) > \\\n running_fraction:\n out['other-running'].append(\n trs[:, start:start + length_fr])\n else:\n if 'other' not in out:\n out['other'] = []\n out['other'].append(\n trs[:, start:start + length_fr])\n\n # Selectively remove onsets if necessary\n if max_n_onsets > 0:\n for cs in out:\n if 'other' not in cs:\n print('WARNING: Have not yet checked new timing version')\n\n # Account for shape of array\n if len(out[cs]) > max_n_onsets:\n out[cs] = np.random.choice(\n out[cs], max_n_onsets, replace=False)\n\n for cs in out:\n out[cs] = np.array(out[cs])\n\n return out",
"def trial_setup(params):\n runs = []\n trials = []\n for run in range(params['runs']):\n runs = runs + [run]*params['trials_per_run']\n for trial in range(params['trials_per_run']):\n trials.append(trial)\n return(runs,trials)",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n #Initialization\n delayList = [300, 150, 75, 0]\n #delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': False, 'grimpex' : False }\n #mutProb = 0.005\n mutProb = 0.010\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n print \"Running trials for delay %(delay)d\" % {'delay' : n}\n for i in range(numTrials):\n #print \"Trial: \" + str(i)\n pop = runTrialTwoDrugs(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop < 50:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)",
"def add_runs(self,runs):\n for r in runs:\n self.add(r)",
"def Repeater(algorithm, runs, nationtxt, schemeIn):\n\n scores = {}\n\n # Make sure appropriate range is used for scores\n\n scoreRange = range(0, 10000)\n\n # score range has to be between these two numbers\n for i in scoreRange:\n scores.update({i : 0})\n\n #~ print \"Running \" + str(algorithm)[0:-18] + \"> \" + str(runs) + \" times...\\n\"\n\n\n minScore = 10**40\n\n\n scheme = schemeIn\n avg = (scheme[0] + scheme[1] + scheme[2] + scheme[3] + scheme[4] + scheme[5] + scheme[6]) / 7.\n p0 = (scheme[0] - avg)**2\n p1 = (scheme[1] - avg)**2\n p2 = (scheme[2] - avg)**2\n p3 = (scheme[3] - avg)**2\n p4 = (scheme[4] - avg)**2\n p5 = (scheme[5] - avg)**2\n p6 = (scheme[6] - avg)**2\n var = (p0 + p1 + p2 + p3 + p4 + p5 + p6) / 7.\n sDev = var**0.5\n\n\n q0 = scheme[1] - scheme[0]\n q1 = scheme[2] - scheme[1]\n q2 = scheme[3] - scheme[2]\n q3 = scheme[4] - scheme[3]\n q4 = scheme[5] - scheme[4]\n q5 = scheme[6] - scheme[5]\n\n for i in range(runs):\n nation = algorithm(nationtxt)\n\n score = randScoreFunction(nation, scheme)\n scores[score] += 1\n\n # keep track of best scores and nation\n if score < minScore:\n minScore = score\n bestNation = nation\n\n maxFreq = 0\n\n scoreCount = 0\n\n for score in scores:\n if scores[score] > maxFreq:\n maxFreq = scores[score]\n maxFreqScore = score\n if score == minScore:\n minScoreFreq = scores[score]\n if scores[score] >= 1:\n scoreCount += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in bestNation:\n\n if bestNation[province][1] == 1:\n one += 1\n if bestNation[province][1] == 2:\n two += 1\n if bestNation[province][1] == 3:\n three += 1\n if bestNation[province][1] == 4:\n four += 1\n if bestNation[province][1] == 5:\n five += 1\n if bestNation[province][1] == 6:\n six += 1\n if bestNation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if scheme[3] != scheme[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n\n return minScore, minScoreFreq, scheme, fivePlus, fivePlusNoDuplicate, usedTrans, scoreCount, sDev, q0, q1, q2, q3, q4, q5, avg",
"def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)",
"def _extract_results_from_run_history(self, run_history: RunHistory) -> None:\n\n for run_key, run_value in run_history.data.items():\n config = run_history.ids_config[run_key.config_id]\n self._update(config=config, run_key=run_key, run_value=run_value)\n\n self._check_null_in_optional_inference_choices()\n\n self.rank_opt_scores = scipy.stats.rankdata(\n -1 * self._metric._sign * self.opt_scores, # rank order\n method='min'\n )",
"def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )",
"def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results",
"def dict_of_recs_for_run (ins, exp, runnum) :\n return calibration_runs(ins, exp)[runnum]",
"def sum_simulated_test():\n f = open(\"./results/simulated_sigmoid_sum.csv\", \"w\")\n #f1 = open(\"./results/avg_pres.txt\", \"w\")\n #f.write(\"num. of qubits; precision\\n\")\n\n\n computable_qubits = 27\n num_subtest = 1000\n\n acum_precision = 0\n coeffs = []\n temp = -10\n while temp < 11:\n coeffs.append(temp)\n temp += 0.25\n #for coeff in coeffs:\n # variables.c_summation = coeff\n # print(coeff)\n for i in range(2, computable_qubits):\n #print(\"qubit: \", i)\n precision = 0\n x = []\n for j in range(num_subtest):\n\n random_dict = get_random_dict(i)\n\n # compute real answer\n real_answer = 0\n for value in random_dict.values():\n real_answer += value\n # f1.write(str(real_answer)+\";\")\n x.append(real_answer)\n\n # assign spin value to real_answer\n if real_answer < 0:\n real_answer = -1\n elif real_answer > 0:\n real_answer = 1\n else:\n real_answer = 0\n bqm = get_bqm()\n quantum_sigmoid_sum(bqm, random_dict, \"target\")\n sampler = get_simulated_sampler()\n result = sampler.sample(bqm)\n if real_answer == 0:\n precision += 1\n # f1.write(\"1\\n\")\n elif real_answer == result.first.sample['target']:\n precision += 1\n # f1.write(\"1\\n\")\n# else:\n # f1.write(\"0\\n\")\n\n precision /= num_subtest\n # acum_precision+= precision\n\n f.write(str(i) + \";\" + str(precision) + \"\\n\")\n f.close()\n #f1.write(str(coeff)+\";\"+ str(round(acum_precision/(computable_qubits-1), 4)) + \"\\n\")\n # acum_precision = 0\n #f1.close()",
"def train_multiple_runs_eps(self, env, runs=3, no_episodes=200, horizon=1000, lr=0.1):\n\t\tr_mat = []\n\t\tinfo = {}\n\t\t\n\t\tfor i in range(runs):\n\n\t\t\t# Resetting agent to default before each run\n\t\t\tself.reset()\n\n\t\t\t# Training the agent for ts_max\n\t\t\tr_vec, _ = self.train_multiple_eps(env, no_episodes=no_episodes, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the results in a matrix\n\t\t\tr_mat.append(r_vec)\n\n\t\t# Finding the mean and standard deviation \n\t\tinfo['mean'] = np.mean(np.array(r_mat), axis=0)\n\t\tinfo['std'] = np.std(np.array(r_mat), axis=0)\n\n\t\treturn r_mat, info",
"def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')",
"def num_trials(self):",
"def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models",
"def evaluate_model(args, eval_runs, warm_runs, metrics=['psnr', 'ssim', 'fps']):\n upsampler = Upsampler(args)\n if warm_runs > 0:\n print(\"Warming up for evaluation\")\n for i in range(warm_runs):\n print(\"Performing warm-up run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n upsampler.run_dir(bix_dir, reset=False)\n \n time = 0.\n psnrs = []\n ssims = []\n for i in range(eval_runs):\n run_psnrs = []\n run_ssims = []\n print(\"Performing evaluation run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n gt_dir = os.path.join(VID4_DIR, 'GT', sequence)\n print(\"Evaluating on\", bix_dir)\n time += upsampler.run_dir(bix_dir, reset=False)\n vid_psnrs, vid_ssims = _eval_sr_perf(os.path.join(bix_dir, 'up'), gt_dir)\n run_psnrs += vid_psnrs\n run_ssims += vid_ssims\n if i == eval_runs-1:\n with open(os.path.join(upsampler.get_model_dir(), \"psnr.txt\"), \"w\") as f:\n f.writelines(str(psnr) + '\\n' for psnr in run_psnrs)\n with open(os.path.join(upsampler.get_model_dir(), \"ssim.txt\"), \"w\") as f:\n f.writelines(str(ssim) + '\\n' for ssim in run_ssims)\n psnrs += run_psnrs\n ssims += run_ssims\n\n fps = VID4_LENGTH/ (time/eval_runs)\n return Performance(psnr=psnrs, ssim=ssims, fps=fps)"
] | [
"0.596659",
"0.5817768",
"0.57848805",
"0.5778032",
"0.5736855",
"0.5722777",
"0.5721804",
"0.56309265",
"0.5599941",
"0.5561327",
"0.5554734",
"0.5542044",
"0.55361605",
"0.55108637",
"0.5486883",
"0.5457525",
"0.544647",
"0.542683",
"0.5396689",
"0.53914917",
"0.5365846",
"0.53630924",
"0.5362303",
"0.53572017",
"0.53560716",
"0.53171176",
"0.5314839",
"0.530668",
"0.5274999",
"0.52711713"
] | 0.58705 | 1 |
Drive the UV plane combination. Functionally, this means performing concatenation, cleaning the concatenated MS in the UV plane, and imaging the concatenated MS. | def _drive_uv(param_dict, clargs, output_basename, casa_instance):
script = []
if glob.glob('{}.concat.ms'.format(output_basename)) and clargs.overwrite:
os.system('rm -rf {}.concat.ms'.format(output_basename))
# casa_instance.run_script(script)
# todo
# write an extension of the drivecasa command for imstat, which will let
# us do the imstat work to do the inference for clean params.
# perform concatenation
    # only build the concat step if the concatenated MS is not already on disk
    concat_vis = './{}.concat.ms'.format(output_basename)
    if not glob.glob(concat_vis):
        concat_vis = drivecasa.commands.reduction.concat(script,
                                                         [
                                                             param_dict[
                                                                 'twelve_meter_filename'],
                                                             param_dict[
                                                                 'seven_meter_filename']
                                                         ],
                                                         out_path=concat_vis)
# clean + image
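    # the clean threshold and arguments are derived from the parameter dict;
    # the spw selection below covers both the 7m and 12m spectral windows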
thresh, clean_args = utils.param_dict_to_clean_input(
param_dict, seven_meter=False)
clean_args.update(
{'spw': str(param_dict['seven_meter_spw'] + ',' + param_dict['twelve_meter_spw'])})
clean_image = drivecasa.commands.clean(
script,
concat_vis,
niter=10000,
threshold_in_jy=thresh,
other_clean_args=clean_args)
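    # optionally collapse the cleaned cube into the requested moment maps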
if param_dict['moments']:
for moment in param_dict['moments']:
_ = additional_casa_commands.immoments(
script, clean_image.image, clean_image.image, moment)
if clargs.verbose:
utils.eprint(script)
if not clargs.generate:
_ = casa_instance.run_script(script, timeout=None)
if clargs.generate:
utils.output_to_file(script, output_basename)
if clargs.verbose:
utils.eprint("Data products present in {}".format(clean_image)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_uvs(mesh, bsp_verts):\n\n mesh.uv_textures.new(\"UVs\")\n bm = bmesh.new()\n bm.from_mesh(mesh)\n\n if hasattr(bm.faces, \"ensure_lookup_table\"): \n bm.faces.ensure_lookup_table()\n\n uv_layer = bm.loops.layers.uv[0]\n\n for face_idx, current_face in enumerate(bm.faces):\n current_face.loops[0][uv_layer].uv = bsp_verts[current_face.loops[0].vert.index][1]\n current_face.loops[1][uv_layer].uv = bsp_verts[current_face.loops[1].vert.index][1]\n current_face.loops[2][uv_layer].uv = bsp_verts[current_face.loops[2].vert.index][1]\n \n bm.to_mesh(mesh)",
"def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 
0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral",
"def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]",
"def CreateBiPennate1():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((Vectors1.shape[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = 30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = -30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres30.dat\",Vectors2,header = header,comments='')",
"def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)",
"def CreateBiPennate2():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n Vectors1 = LongaxisOrtho(Vectors1)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/Project_Gastro/workflows/Cesim/musc_mod_v2/OutputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((np.shape(Vectors1)[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = -30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = 30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(211,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,1)\r\n\r\n ax2 = fig.add_subplot(212,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,1)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres.dat\",Vectors2,header = header,comments='')",
"def InterpolateSurfaceVectorsWithPlane():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Plane...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,Vectors1,50,8)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.5)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.5)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Plane\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfacePlaneVectorInterpolation.dat\",Vectors2,header = header,comments='')",
"def untangleUV(*args, mapBorder: AnyStr=\"\", maxRelaxIterations: int=0, pinBorder: bool=True,\n pinSelected: bool=True, pinUnselected: bool=True, relax: AnyStr=\"\",\n relaxTolerance: float=0.0, shapeDetail: float=0.0, **kwargs)->int:\n pass",
"def planeSliceTOAFig(uxmax, uymax, dso, dsl, f, dm, m, n, ax, ay, npoints, xax = True, yax = True):\n \n # Calculate coefficients\n rF2 = rFsqr(dso, dsl, f)\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n lc = lensc(dm, f)\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n tg0 = tg0coeff(dso, dsl)\n tdm0 = tdm0coeff(dm, f)\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args=(alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n # print(upcross)\n \n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n \n cdist = uxmax/(np.abs(50*lc))\n \n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n ncomplex = np.zeros(nzones) # don't care about complex solutions in this case\n print(nreal)\n \n # Find roots\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate TOAs\n alltoas = []\n for i in range(nzones):\n toas = obsCalc(deltat, allroots[i], int(nreal[i]), npoints, 1, args = (tg0, tdm0, alp, ax, ay)).real\n alltoas.append(toas)\n \n # Plots\n fig, ax1 = plt.subplots(figsize=(10, 8), dpi = 100)\n # grid = gs.GridSpec(2, 2, width_ratios=[4, 1])\n # ax0 = plt.subplot(grid[1:, 1])\n # ax1 = plt.subplot(grid[0, 1])\n \n \n # ax2 = plt.subplot(grid[:, 0]) # Plot results\n colors = assignColor(allroots, nreal)\n l = []\n for i in range(len(upxvecs)):\n zone = alltoas[i]\n for j in range(len(zone)):\n line = ax1.plot(upxvecs[i], zone[j], color = colors[i][j], lw = 3.)\n l.append(line)\n for i in range(ncross):\n ax1.plot([upcross[i][0], upcross[i][0]], [-100, 100], color = 'black', ls = 'dashed', scaley = False, scalex = False, lw = 2.5)\n label = r'$\\nu = $' + str(f/GHz) + ' GHz'\n ax1.text(0.05, 0.9, label, transform=ax1.transAxes, fontsize = 28, bbox=dict(facecolor = 'white', alpha=1.))\n # ax1.set_ylim(min(alltoas.flatten() - 1), max(alltoas.flatten() + 1))\n if not xax:\n ax1.xaxis.set_ticklabels([])\n else:\n ax1.set_xlabel(r\"$u'_x$\", fontsize=28)\n if not yax:\n ax1.yaxis.set_ticklabels([])\n else:\n ax1.set_ylabel(r'$\\Delta t \\: (\\mu s)$', fontsize=28)\n if dm > 0:\n ax1.set_ylim(-0.5, 15.)\n else:\n ax1.set_ylim(-2.5, 10.)\n ax1.tick_params(labelsize = 22)\n ax1.grid()\n \n ax2 = inset_axes(ax1, width='18%', height='23%', loc=1)\n rx = np.linspace(-uxmax, uxmax, 1000) # Plot caustic surfaces\n ry = np.linspace(-uxmax, uxmax, 1000)\n uvec = np.meshgrid(rx, ry)\n ucaus = causCurve(uvec, coeff)\n cs = ax2.contour(rx, ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = 
cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax2.plot(upx, upy, color = 'blue')\n ax2.plot(np.linspace(xmin, xmax, 10), np.linspace(ymin, ymax, 10), color = 'green')\n ax2.scatter(upcross.T[0], upcross.T[1], color = 'green')\n # ax2.set_xlabel(r\"$u'_x$\")\n # ax2.set_ylabel(r\"$u'_y$\")\n ax2.set_xlim(-uxmax, uxmax)\n ax2.tick_params(labelsize = 16)\n # ax1.set_title(\"Caustic curves\")\n # ax1.set_aspect('equal', anchor = 'N')\n ax2.grid()\n # ax2.tight_layout()\n \n plt.tight_layout()\n plt.show()\n return",
"def uvregister(self,v):\n return self.get('patchmesh.uvvertices').intern(v)",
"def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v",
"def add_subdivision(self):\n temp_sub_vertices = []\n for plane in (self.subdivision_list):\n current_mids = []\n mid_m_01 = Vec3d(0, 0, 0, 0)\n mid_m_12 = Vec3d(0, 0, 0, 0)\n mid_m_20 = Vec3d(0, 0, 0, 0)\n\n mid_m_01.x = (plane[0].x + plane[1].x) / 2\n mid_m_01.y = (plane[0].y + plane[1].y) / 2\n mid_m_01.z = (plane[0].z + plane[1].z) / 2\n mid_m_01.w = plane[0].w\n\n mid_m_12.x = (plane[1].x + plane[2].x) / 2\n mid_m_12.y = (plane[1].y + plane[2].y) / 2\n mid_m_12.z = (plane[1].z + plane[2].z) / 2\n mid_m_12.w = plane[1].w\n\n mid_m_20.x = (plane[2].x + plane[0].x) / 2\n mid_m_20.y = (plane[2].y + plane[0].y) / 2\n mid_m_20.z = (plane[2].z + plane[0].z) / 2\n mid_m_20.w = plane[2].w\n\n current_mids = [mid_m_01, mid_m_12, mid_m_20]\n temp_sub_vertices.append(current_mids)\n\n for index in range(len(current_mids)):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = plane[index].x\n v0.y = plane[index].y\n v0.z = plane[index].z\n\n v1.x = current_mids[index].x\n v1.y = current_mids[index].y\n v1.z = current_mids[index].z\n\n v2.x = current_mids[index - 1].x\n v2.y = current_mids[index - 1].y\n v2.z = current_mids[index - 1].z\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices",
"def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)",
"def blackbodyUV(temp):\n lam=lambda wl: planckian(temp, wl)\n xyz=spectrumToTristim(perfectrefl, lam)\n uvy=xyzTouvY(xyz)\n return [uvy[0], uvy[1]*2.0/3]",
"def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v",
"def levelsets_to_vector_field(levelsets, stepsize):\r\n vector_field_shape = levelsets[0][0].shape\r\n y_comp_combined = np.ndarray(vector_field_shape)\r\n x_comp_combined = np.ndarray(vector_field_shape)\r\n y_comp_combined.fill(np.nan)\r\n x_comp_combined.fill(np.nan)\r\n\r\n for source, target in levelsets:\r\n labels_present = set(np.array([source.flatten(),target.flatten()]).flatten())\r\n labels_present.remove(0)#relates to background\r\n\r\n #print(labels_present)\r\n for l in labels_present:\r\n\r\n source_cluster = source == l\r\n target_cluster = target == l\r\n\r\n\r\n \"\"\"plt.imshow(source_cluster.astype(np.int32)+target_cluster.astype(np.int32))\r\n plt.show()\r\n print(\"-----------\")\"\"\"\r\n\r\n #plot_gradient_field(source_cluster.astype(np.int32), target_cluster.astype(np.int32))\r\n\r\n y_comp, x_comp = array_to_vector_field(source_cluster, target_cluster, stepsize=stepsize)\r\n y_comp_combined[~np.isnan(y_comp)] = y_comp[~np.isnan(y_comp)]\r\n x_comp_combined[~np.isnan(x_comp)] = x_comp[~np.isnan(x_comp)]\r\n return y_comp_combined, x_comp_combined",
"def uvregister(self,v):\n return self.get('mesh.uvvertices').intern(v)",
"def vector_arrows(Out, x, y, z, plot_layer):\n\n x = sort_dim(x)\n y = sort_dim(y)\n z = sort_dim(z)\n\n # length of array in each dimension\n Ny = len(y)-1\n Nx = len(x)-1\n Nz = len(z)-1\n\n # coordinates of cell centres\n # (halfway between L and R edges)\n xm = 0.5 * (x[:-1] + x[1:])\n ym = 0.5 * (y[:-1] + y[1:])\n zm = 0.5 * (z[:-1] + z[1:])\n\n # create empty arrays for output\n U = np.zeros((len(Out.Qx[:,0,0,0]),len(Out.Qx[0,:,0,0]),len(Out.Qx[0,0,:,0]),len(Out.Qx[0,0,0,:])+1)) \n V = np.zeros((len(Out.Qy[:,0,0,0]),len(Out.Qy[0,:,0,0]),len(Out.Qy[0,0,:,0])+1,len(Out.Qy[0,0,0,:])))\n W = np.zeros((len(Out.Qz[:,0,0,0]),len(Out.Qz[0,:,0,0])+1,len(Out.Qz[0,0,:,0]),len(Out.Qz[0,0,0,:])))\n\n # create mesh\n X, Y, = np.meshgrid(xm, ym) # coordinates of cell centers\n Z = np.meshgrid(zm)\n\n # iterate through timesteps\n for t in range(len(Out.Qy[:,0,0,0])): # number of timesteps\n\n #grab relevant timestep from Out array\n Qx = Out.Qx[t,:,:,:]\n Qy = Out.Qy[t,:,:,:]\n Qz = Out.Qz[t,:,:,:]\n\n # Calculate flows at cell centers by interpolating between L and R faces\n Ut = np.concatenate((Qx[plot_layer, :, 0].reshape((1, Ny, 1)), \\\n 0.5 * (Qx[plot_layer, :, :-1].reshape((1, Ny, Nx-2)) +\\\n Qx[plot_layer, :, 1: ].reshape((1, Ny, Nx-2))), \\\n Qx[plot_layer, :, -1].reshape((1, Ny, 1))), axis=2).reshape((Ny,Nx))\n\n Vt = np.concatenate((Qy[plot_layer, 0, :].reshape((1, 1, Nx)), \\\n 0.5 * (Qy[plot_layer, :-1, :].reshape((1, Ny-2, Nx)) +\\\n Qy[plot_layer, 1:, :].reshape((1, Ny-2, Nx))), \\\n Qy[plot_layer, -1, :].reshape((1, 1, Nx))), axis=1).reshape((Ny,Nx))\n\n # average flow across vertical cell to get z flow at cell centre\n QzTop = Qz[0:-1,:,:]\n QzBot = Qz[1:,:,:]\n Wt = (QzTop+QzBot)/2\n \n # add results to output arrays\n U[t,:,:,:] = Ut\n V[t,:,:,:] = Vt\n W[t,1:-1,:,:] = Wt\n\n return X,Y,Z,U,V,W",
"def scale_uv(self):\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]",
"def render(self, scene):\n if self.degenerate:\n return\n # The number of subdivisions around the hoop's radial direction.\n if self.thickness:\n band_coverage = scene.pixel_coverage(self.pos, self.thickness)\n else:\n band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)\n if band_coverage < 0:\n band_coverage = 1000\n bands = sqrt(band_coverage * 4.0)\n bands = clamp(4, bands, 40)\n # The number of subdivisions around the hoop's tangential direction.\n ring_coverage = scene.pixel_coverage(self.pos, self.radius)\n if ring_coverage < 0:\n ring_coverage = 1000\n rings = sqrt(ring_coverage * 4.0)\n rings = clamp(4, rings, 80)\n slices = int(rings)\n inner_slices = int(bands)\n radius = self.radius\n inner_radius = self.thickness\n\n # Create the vertex and normal arrays.\n vertices = []\n normals = []\n\n outer_angle_step = 2 * pi / (slices - 1)\n inner_angle_step = 2 * pi / (inner_slices - 1)\n outer_angle = 0.\n for i in range(slices):\n cos_outer_angle = cos(outer_angle)\n sin_outer_angle = sin(outer_angle)\n inner_angle = 0.\n for j in range(inner_slices):\n cos_inner_angle = cos(inner_angle)\n sin_inner_angle = sin(inner_angle)\n\n diameter = (radius + inner_radius * cos_inner_angle)\n vertex_x = diameter * cos_outer_angle\n vertex_y = diameter * sin_outer_angle\n vertex_z = inner_radius * sin_inner_angle\n\n normal_x = cos_outer_angle * cos_inner_angle\n normal_y = sin_outer_angle * cos_inner_angle\n normal_z = sin_inner_angle\n\n vertices.extend([vertex_x, vertex_y, vertex_z])\n normals.extend([normal_x, normal_y, normal_z])\n inner_angle += inner_angle_step\n outer_angle += outer_angle_step\n\n # Create ctypes arrays of the lists\n vertices = (gl.GLfloat *len(vertices))(*vertices)\n normals = (gl.GLfloat * len(normals))(*normals)\n\n # Create a list of triangle indices.\n indices = []\n for i in range(slices - 1):\n for j in range(inner_slices - 1):\n pos = i * inner_slices + j\n indices.extend([pos, pos + inner_slices, pos + inner_slices +\n 1])\n indices.extend([pos, pos + inner_slices + 1, pos + 1])\n indices = (gl.GLuint * len(indices))(*indices)\n\n # Compile a display list\n self.list = gl.glGenLists(1)\n gl.glNewList(self.list, gl.GL_COMPILE)\n self.color.gl_set(self.opacity)\n\n gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.model_world_transform(scene.gcf,\n Vector([self.radius, self.radius,\n self.radius])).gl_mult()\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glNormalPointer(gl.GL_FLOAT, 0, normals)\n gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,\n indices)\n gl.glPopClientAttrib()\n\n gl.glEndList()\n gl.glCallList(self.list)",
"def planeSliceGFig2(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = int(len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n if comp == True:\n diff = difference(nreal) # determine number of complex solutions\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 1, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n \n fogain = np.zeros([nzones, npoints])\n zogain = np.zeros([nzones, npoints])\n for i in range(nzones):\n nroots = nreal[i]\n if nroots == 1:\n fogain[i] = np.abs(allfields[i])**2\n zogain[i] = np.abs(allfields[i])**2\n else:\n fogain[i] = np.abs(np.sum(allfields[i], axis = 0))**2\n zog = 0\n for j in range(nroots):\n zog = zog + np.abs(allfields[i][j])**2\n zogain[i] = zog\n \n fogain = fogain.flatten()\n zogain = zogain.flatten()\n\n # Construct uniform asymptotics\n # asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n # interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n # finx = np.linspace(xmin, xmax, 4*npoints)\n # asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n 
extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(2, 2)\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[:, 0]), plt.subplot(grid[0, 1])\n # ax0, ax2 = plt.subplot(grid[0]), plt.subplot(grid[1])\n ax2 = plt.subplot(grid[1, 1], sharex=ax1)\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n cbar.set_label('G', fontsize = 18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\", linewidth = 1.)\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n xaxis = upxvecs.flatten()\n ax1.plot(xaxis, zogain, color = 'red', label = r'$0^{th}$ order GO gain')\n ax1.set_ylim(-cdist, np.max(G) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n # ax1.set_xlabel(r\"$u'_x$\")\n ax1.set_ylabel('G', fontsize = 18)\n ax1.legend(loc = 1, fontsize = 12)\n ax1.tick_params(labelsize = 14)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n \n # Plot gain along observer motion\n ax2.plot(rx2, G, color='blue', label=\"FFT gain\", linewidth=1.)\n for caus in upcross.T[0]:\n ax2.plot([caus, caus], [-10, 1000], ls='dashed', color='black')\n ax2.plot(xaxis, fogain, color='orange', label=r'$1^{st}$ order GO gain')\n ax2.set_ylim(-cdist, np.max(G) + 1.)\n ax2.set_xlim(np.min(rx2), np.max(rx2))\n ax2.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax2.set_ylabel('G', fontsize = 18)\n ax2.legend(loc = 1, fontsize = 12)\n # ax1.set_title(\"Slice Gain\")\n ax2.tick_params(labelsize = 14)\n ax2.grid()\n grid.tight_layout(fig)\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", 
f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n plt.show()\n return",
"def check_uv_border_crossing(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n # skip if this is a representation\n v = staging.get(\"version\")\n if v and Representation.repr_separator in v.take_name:\n progress_controller.complete()\n return\n\n all_meshes = pm.ls(type=\"mesh\")\n mesh_count = len(all_meshes)\n progress_controller.maximum = mesh_count\n nodes_with_uvs_crossing_borders = []\n\n for node in all_meshes:\n all_uvs = node.getUVs()\n # before doing anything get all the uvs and skip if all of them are\n # in the same UV quadrant (which is the wrong name, sorry!)\n all_uvs_u = sorted(all_uvs[0])\n all_uvs_v = sorted(all_uvs[1])\n if int(all_uvs_u[0]) == int(all_uvs_u[-1]) and int(all_uvs_v[0]) == int(\n all_uvs_v[-1]\n ):\n # skip this mesh\n continue\n\n #\n # Group uvs according to their UV shells\n #\n # The following method is 20-25% faster than using getUvShellsIds()\n #\n num_uvs = node.numUVs()\n uv_ids = list(range(num_uvs))\n\n uv_shells_and_uv_ids = []\n uv_shells_and_uv_coords = []\n\n i = 0\n while len(uv_ids) and i < num_uvs + 1:\n current_uv_id = uv_ids[0]\n # the polyListComponentConversion takes 85% of the processing time\n # of getting the uvShells here\n shell_uv_group_ids = pm.polyListComponentConversion(\n \"%s.map[%s]\" % (node.name(), current_uv_id), toUV=1, uvShell=1\n )\n\n uv_shell_uv_ids = []\n uv_shell_uv_coords = [[], []]\n for uv_group_ids in shell_uv_group_ids:\n if \":\" in uv_group_ids:\n splits = uv_group_ids.split(\":\")\n start_uv_id = int(splits[0].split(\"[\")[1])\n end_uv_id = int(splits[1].split(\"]\")[0])\n else:\n splits = uv_group_ids.split(\"[\")\n start_uv_id = int(splits[1].split(\"]\")[0])\n end_uv_id = start_uv_id\n\n for j in range(start_uv_id, end_uv_id + 1):\n uv_ids.remove(j)\n uv_shell_uv_ids.append(j)\n uv_shell_uv_coords[0].append(all_uvs[0][j])\n uv_shell_uv_coords[1].append(all_uvs[1][j])\n\n # store the uv ids and uv coords in this shell\n uv_shells_and_uv_ids.append(uv_shell_uv_ids)\n uv_shells_and_uv_coords.append(uv_shell_uv_coords)\n\n # go to the next pseudo uv shell id\n i += 1\n\n # now check all uvs per shell\n try:\n for uv_shell_uv_coords in uv_shells_and_uv_coords:\n us = sorted(uv_shell_uv_coords[0])\n vs = sorted(uv_shell_uv_coords[1])\n\n # check first and last u and v values\n if int(us[0]) != int(us[-1]) or int(vs[0]) != int(vs[-1]):\n # they are not equal it is crossing spaces\n nodes_with_uvs_crossing_borders.append(node)\n break\n except (IndexError, RuntimeError) as e:\n print(\"%s\\nnode: %s\" % (e, node))\n raise RuntimeError()\n\n progress_controller.increment()\n\n progress_controller.complete()\n if len(nodes_with_uvs_crossing_borders):\n # get transform nodes\n tra_nodes = list(map(lambda x: x.getParent(), nodes_with_uvs_crossing_borders))\n pm.select(tra_nodes)\n raise RuntimeError(\n \"\"\"There are nodes with <b>UV-Shells</b> that are crossing\n <b>UV BORDERS</b>:<br><br>%s\"\"\"\n % \"<br>\".join(map(lambda x: x.name(), tra_nodes[:MAX_NODE_DISPLAY]))\n )",
"def clean_line_with_uvt_mask(cfg, spw, fullcube=False, parallel=True):\n log_post(':: Running clean with uv-taper ({0}, {1})'.format(targ.name, spw.name))\n imagename = cfg.get_basename(spw, ext='smask')\n maskname = cfg.get_basename(spw, ext='uvtaper') + '.image.smask'\n spw_id = cfg.get_spw_id(spw)\n # restart parameters\n if not fullcube and spw.line_win != -1:\n start = spw.nchan // 2 - spw.line_win\n nchan = spw.line_win * 2\n else:\n start = -1\n nchan = -1\n delete_all_extensions(imagename)\n tclean(\n vis=cfg.vis,\n imagename=imagename,\n field=targ.name,\n spw=spw_id,\n specmode='cube',\n outframe='lsrk',\n veltype='radio',\n restfreq=spw.restfreq,\n start=start,\n nchan=nchan,\n imsize=cfg.array_config.imsize,\n cell=cfg.array_config.cell,\n # gridder parameters\n gridder='standard',\n # deconvolver parameters\n deconvolver='multiscale',\n scales=[0, 5, 10], # point, 1, 2 beam hpbw's\n smallscalebias=0.6,\n restoringbeam='common',\n weighting='briggs',\n robust=2.0,\n niter=int(1e6),\n nsigma=2.0,\n interactive=False,\n parallel=parallel,\n # mask from smoothed uv-taper\n usemask='user',\n mask=maskname,\n verbose=True,\n )\n workdir = '{0}.workdirectory'.format(imagename)\n if os.path.exists(workdir):\n shutil.rmtree(workdir)",
"def CreateLandmask(Fieldset, test = False):\n \n \n \"\"\"\n This first set of lines creates a numpy array with u velocities and a numpy\n array with v velocities. First we get the U and V fields from the dataset. Then\n we compute a time chunk, which is needed because of the dataset. Then we only\n take the first slice of the U and V field (we do not need more for finding the land\n and ocean grids). As last we make an empty array which will be filled with zeros and \n ones.\n \"\"\"\n fU = Fieldset.U\n fV = Fieldset.V\n Fieldset.computeTimeChunk(fU.grid.time[0], 1) \n uvel_mask_c = fU.data[0,:,:] \n vvel_mask_c = fV.data[0,:,:]\n# vvel_mask_c = np.roll(vvel_mask_c, 1, axis = 0)\n landmask = np.zeros((uvel_mask_c.shape[0], uvel_mask_c.shape[1]))\n \n \"\"\"\n The first loop checks the value of the u and v velocitites. Notice that we get the\n values of two adjacent grid, since we're working with a C-grid.\n Visualizations of velocities in the C-grids(see below). So for a grid to be flagged identified\n as a land grid two U velocities and 2 V velocities need to be zero. The first loop makes all\n ocean grids 1 and land grids 0. \n ____ ____ ____ ____\n | V | V | \n | | | \n U T U T U\n | | | \n |____V____|_____V_____| \n \"\"\"\n \n for i in range (len(landmask[:,0])-1):\n for j in range (len(landmask[0,:])-1):\n u1 = uvel_mask_c[i,j]\n\n u2 = uvel_mask_c[i,j+1]\n\n v1 = vvel_mask_c[i,j]\n\n v2 = vvel_mask_c[i+1,j]\n\n if u1 != 0 or u2 != 0 or v1 != 0 or v2 != 0:\n landmask[i,j] = 1\n \n \n \"\"\"\n Change all zero to 1 and rest 0. since we want the land grids to be 1 and ocean\n grids to be 0. \n \"\"\"\n \n landmask = ChangeValues(landmask,0,1) \n \n \"\"\"\n The created landmask needs to be shifted upwards one grid. We will\n use the numpy roll function to do this.\n \"\"\"\n \n if test == True:\n plt.figure()\n plt.imshow(landmask)\n plt.colorbar()\n \n return landmask",
"def test_uv_degrid():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=None)\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 0.0",
"def set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8001 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)",
"def test_set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8003 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)",
"def draw_vectors(self):\r\n for v in self.vehicles:\r\n pg.draw.line(self.screen, (0, 255, 0), v.pos, v.pos + v.extent, 1)\r\n pg.draw.circle(self.screen, (0, 255, 0),\r\n (int(v.pos.x + v.extent.x),\r\n int(v.pos.y + v.extent.y)), 30, 1)\r\n start = v.pos + v.extent\r\n end = v.target\r\n d = end - start\r\n if d.length_squared() < 2000:\r\n pg.draw.line(self.screen, (0, 255, 0), start, end, 1)",
"def InterpolateSurfaceVectors():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(Centroids1,Vectors1,Centroids2)\r\n # Make the data sparser to display better.\r\n C1,V1 = SparseData(Centroids1,Vectors1,0.2)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.2)\r\n\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/NormalVectorInterpolation.dat\",Vectors2,header = header,comments='')",
"def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)"
] | [
"0.5860706",
"0.5756254",
"0.558495",
"0.55116755",
"0.5500914",
"0.54856575",
"0.5411383",
"0.53948486",
"0.53849983",
"0.5343461",
"0.5337835",
"0.5308709",
"0.53073806",
"0.53056586",
"0.5296829",
"0.5273831",
"0.52715695",
"0.5257271",
"0.52373564",
"0.5233863",
"0.52252555",
"0.5224352",
"0.5209211",
"0.5199574",
"0.5192895",
"0.5185242",
"0.51826525",
"0.51823306",
"0.5169408",
"0.51657516"
] | 0.61613876 | 0 |
Drive the feather combination. Functionally, this means cleaning the individual measurement sets (ms) separately, imaging each of them, and feathering the two together. | def _drive_feather(param_dict, clargs, output_basename, casa_instance):
# todo later -> the imstat stuff
script = []
thresh, seven_meter_clean_args = utils.param_dict_to_clean_input(
param_dict, seven_meter=True)
_, twelve_meter_clean_args = utils.param_dict_to_clean_input(
param_dict, seven_meter=False)
if clargs.verbose:
utils.eprint('Seven meter clean args {}'.format(
seven_meter_clean_args))
utils.eprint('Twelve meter clean args {}'.format(
twelve_meter_clean_args))
utils.eprint('Running individual cleaning...')
seven_meter_cleaned = drivecasa.commands.reduction.clean(
script,
niter=10000,
vis_paths=param_dict['seven_meter_filename'],
threshold_in_jy=thresh,
other_clean_args=seven_meter_clean_args,
out_path=os.path.abspath(output_basename))
twelve_meter_cleaned = drivecasa.commands.reduction.clean(
script,
niter=10000,
vis_paths=param_dict['twelve_meter_filename'],
threshold_in_jy=thresh,
other_clean_args=twelve_meter_clean_args,
out_path=os.path.abspath(output_basename))
if not clargs.generate:
_ = casa_instance.run_script(script, timeout=None)
if clargs.generate:
utils.output_to_file(script, output_basename)
if clargs.verbose:
utils.eprint('Individual cleanings complete. Now feathering.')
script = []
feathered_image = additional_casa_commands.feather(script,
output_basename=output_basename,
highres=twelve_meter_cleaned.image,
lowres=seven_meter_cleaned.image,
weighting=_calc_feather_weighting(param_dict))
if clargs.verbose:
utils.eprint("Feather script")
utils.eprint(script)
if not clargs.generate:
_ = casa_instance.run_script(script, timeout=None)
if clargs.generate:
utils.output_to_file(script, output_basename)
script = []
if param_dict['moments']:
for moment in param_dict['moments']:
_ = additional_casa_commands.immoments(
script, feathered_image, feathered_image, moment)
if clargs.verbose:
utils.eprint("Moments")
utils.eprint(script)
if not clargs.generate:
_ = casa_instance.run_script(script, timeout=None)
if clargs.generate:
utils.output_to_file(script, output_basename)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)",
"def step(self):\n self.world.slosh_oceans()\n self.world.transfer_energy_vertically()\n self.world.transfer_energy_horizontally()\n self.world.absorb_energy_from_core()\n self.world.absorb_energy_from_sun(self.sun)",
"def run(self):\n\n self.steer()\n self.drive()",
"def at_med_filt(self):\n\t fmed = self.fm - nd.median_filter(self.fm, size=self.header['tdurcad']*3)\n\n\t # Shift t-series so first transit is at t = 0 \n\t dt = t0shft(self.t,self.P,self.t0)\n\t tf = self.t + dt\n\t phase = np.mod(tf + 0.25 * self.P, self.P) / self.P - 0.25\n\t tPF = phase * self.P # Phase folded time\n\n\t # bin up the points\n\t for nbpt in [1,5]:\n\t # Return bins of a so that nbpt fit in a transit\n\t nbins = np.round( tPF.ptp()/self.tdur*nbpt ) \n\t bins = np.linspace(tPF.min(),tPF.max(),nbins+1)\n\t fmed = ma.masked_invalid(fmed)\n\t btPF,bfmed = hbinavg(tPF[~fmed.mask],fmed[~fmed.mask],bins)\n \n\t rbmed = np.rec.fromarrays([btPF,bfmed],names=['t','f'])\n\n\t self.add_dset('rbmed%i' % nbpt, rbmed, description='Binned phase-folded, median filtered timeseries, %i points per tdur'% nbpt) \n\n\t self.add_dset('fmed',fmed,description='Median detrended flux')",
"def at_med_filt(self):\n\t fmed = self.fm - nd.median_filter(self.fm, size=self.header['tdurcad']*3)\n\n\t # Shift t-series so first transit is at t = 0 \n\t dt = t0shft(self.t,self.P,self.t0)\n\t tf = self.t + dt\n\t phase = np.mod(tf + 0.25 * self.P, self.P) / self.P - 0.25\n\t tPF = phase * self.P # Phase folded time\n\n\t # bin up the points\n\t for nbpt in [1,5]:\n\t # Return bins of a so that nbpt fit in a transit\n\t nbins = np.round( tPF.ptp()/self.tdur*nbpt ) \n\t bins = np.linspace(tPF.min(),tPF.max(),nbins+1)\n\t fmed = ma.masked_invalid(fmed)\n\t btPF,bfmed = hbinavg(tPF[~fmed.mask],fmed[~fmed.mask],bins)\n \n\t rbmed = np.rec.fromarrays([btPF,bfmed],names=['t','f'])\n\n\t self.add_dset('rbmed%i' % nbpt, rbmed, description='Binned phase-folded, median filtered timeseries, %i points per tdur'% nbpt) \n\n\t self.add_dset('fmed',fmed,description='Median detrended flux')",
"def test_apply_father_wavelet_dirac(self):\n pass",
"def runBrighterFatter():\n RunData([getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/')[0],], out='I800nmlow',\n wavelength='l800l')\n RunData([getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/')[2],], out='I800nmmed',\n wavelength='l800m')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[4],], out='I800nmhigh',\n wavelength='l800h')",
"def applyMorphologicalCleaning(self, image):",
"def stop(self):\n self.stop_aperture()",
"def processFoil(self):\n \n # Split airfoil in upper and lower portions\n self.__airfoilSplit()\n \n # Interpolate\n self.__hinterpolate()",
"def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)",
"def moveFWto(self, filtpos):\n self.askFW(\"3FDE\") # Dumps errors \n self.askFW(\"3FMP \" + str(filtpos))\n self.waitFW()\n self.askFW(\"3FDE\") # Dumps errors \n self.askFW(\"1HDE\") # Dumps errors \n self.askFW(\"2HDE\") # Dumps errors",
"def drive(self, kilometres_driven):\n self.fuel -= (self.litres_per_kilometre * kilometres_driven)",
"def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))",
"def removeInsignificant(self):\n #TODO make sure this method now works AFTER meanCurves and analyseCures have been run\n \n # Searching for curves that are in the noise\n if len(self.plate.noProtein) > 0:\n thresholdm, i = rh.meanSd([self.originalPlate.wells[x].monoThresh for x in self.plate.noProtein])\n for well in self.originalPlate.wells:\n if not self.originalPlate.wells[well].contents.isControl and well not in self.delCurves:\n if self.originalPlate.wells[well].monoThresh > thresholdm/1.15:\n #self.wells[well].fluorescence = None\n self.delCurves.append(well)\n\n # Searching for curves that have overloaded the sensor\n for well in self.wells:\n if well not in self.delCurves:\n mini = self.wells[well].fluorescence[0]\n maxi = self.wells[well].fluorescence[0]\n\n maxInd = 0\n for i in range(len(self.wells[well].fluorescence)):\n if self.wells[well].fluorescence[i] > maxi:\n maxi = self.wells[well].fluorescence[i]\n maxInd = i\n if self.wells[well].fluorescence[i] < mini:\n mini = self.wells[well].fluorescence[i]\n\n diff = maxi - mini\n\n # A boundry defining how much the points can fluctuate and still be considered flat\n lowFlatBoundry = maxi - 0.005*diff\n\n # Look each way to see how many temperature steps the curve stays flat for\n count = 0\n ind = maxInd - 1\n while ind>=0:\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1\n ind -= 1\n else:\n break\n ind = maxInd+1\n while ind<len(self.wells[well].fluorescence):\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1 \n ind += 1\n else:\n break\n if well not in self.delCurves and count >= 10:\n self.delCurves.append(well) \n return",
"def step(self):\n if self.store_paths:\n leapfrog_steps = self._max_leapfrog_steps\n else:\n leapfrog_steps = torch.ceil(self._max_leapfrog_steps * torch.rand(1)).int()\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.momentum = self.resample_momenta()\n self.hamiltonian_ = self.get_hamiltonian()\n old_hamiltonian = self.hamiltonian_\n if self.shadow:\n if self.max_shadow is not None:\n old_shadow = torch.max(self.shadow_.clone() + self.max_shadow, old_hamiltonian)\n else:\n old_shadow = self.shadow_.clone()\n rejected = False\n for step in range(leapfrog_steps):\n if (self._integrator == 'RMHMC') and (self.lbfgs == False):\n self.momentum, rejected = self.implicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.parameters, rejected = self.implicit_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n elif self.lbfgs == True:\n self.momentum, rejected = self.lbfgs_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.parameters, rejected = self.lbfgs_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.momentum = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n else:\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n self.parameters = self.explicit_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if self.store_paths:\n self.paths.append(self.parameters.detach())\n new_hamiltonian = self.get_hamiltonian()\n ratio = old_hamiltonian - new_hamiltonian\n self.hamiltonian_error.append(ratio.detach().unsqueeze(0))\n if self.shadow:\n if self.max_shadow is not None:\n new_shadow = torch.max(self.get_shadow() + self.max_shadow, new_hamiltonian)\n else:\n new_shadow = self.get_shadow()\n shadow_error = old_shadow - new_shadow\n newratio = ratio + shadow_error\n self.shadow_hamiltonian_error.append(newratio.detach().unsqueeze(0))\n ratio = newratio\n\n uniform_rand = torch.rand(1)\n if uniform_rand >= torch.exp(ratio):\n # Reject sample\n rejected = True\n\n if rejected:\n if (len(self.momenta) > 10) and (self.momenta[-1] == self.momenta[-10]).sum().item():\n self.degenerate = True\n self.rejected += 1\n self.momentum = self.momenta[-1]\n self.parameters = self.samples[-1].clone().detach().requires_grad_(True)\n if self.shadow:\n radon_nikodym = torch.exp(old_shadow).unsqueeze(0)\n \n if self.verbose:\n print(\"(Rejected)\", int(self.acceptance_rate() * 100), \"%; Log-ratio: \",\n ratio.detach())\n else:\n self.accepted += 1\n if self.shadow:\n radon_nikodym = torch.exp(new_shadow).unsqueeze(0)\n if self.verbose:\n print(\"(Accepted)\", int(self.acceptance_rate() * 100), \"%; Log-ratio: \",\n ratio.detach())\n self.samples.append(self.parameters.detach())\n self.momenta.append(self.momentum)\n self.hamiltonians.append(self.hamiltonian_.detach())\n self.rands_.append(uniform_rand)\n self.shadows.append(self.shadow_.detach())\n if self.shadow:\n self.radon_nikodym.append(radon_nikodym.detach())\n return None",
"def clean_tod(self,d,ifeed,feed):\n scan_edges = d[f'{self.level2}/Statistics/scan_edges'][...]\n nscans = scan_edges.shape[0]\n\n feed_tod = d[f'{self.level2}/averaged_tod'][ifeed,:,:,:]\n weights = np.zeros(feed_tod.shape)\n mask = np.zeros(feed_tod.shape[-1],dtype=bool)\n az = d['level1/spectrometer/pixel_pointing/pixel_az'][ifeed,:]\n el = d['level1/spectrometer/pixel_pointing/pixel_el'][ifeed,:]\n\n # Statistics for this feed \n medfilt_coefficient = d[f'{self.level2}/Statistics/filter_coefficients'][ifeed,...]\n atmos = d[f'{self.level2}/Statistics/atmos'][ifeed,...]\n atmos_coefficient = d[f'{self.level2}/Statistics/atmos_coefficients'][ifeed,...]\n wnoise_auto = d[f'{self.level2}/Statistics/wnoise_auto'][ifeed,...]\n fnoise_fits = d[f'{self.level2}/Statistics/fnoise_fits'][ifeed,...]\n\n # then the data for each scan\n last = 0\n scan_samples = []\n for iscan,(start,end) in enumerate(scan_edges):\n scan_samples = np.arange(start,end,dtype=int)\n median_filter = d[f'{self.level2}/Statistics/FilterTod_Scan{iscan:02d}'][ifeed,...]\n N = int((end-start))\n end = start+N\n tod = feed_tod[...,start:end]\n mask[start:end] = True\n # Subtract atmospheric fluctuations per channel\n for iband in range(4):\n for ichannel in range(64):\n #if self.channelmask[ifeed,iband,ichannel] == False:\n amdl = Statistics.AtmosGroundModel(atmos[iband,iscan],az[start:end],el[start:end]) *\\\n atmos_coefficient[iband,ichannel,iscan,0]\n if self.median_filter:\n tod[iband,ichannel,:] -= median_filter[iband,:N] * medfilt_coefficient[iband,ichannel,iscan,0]\n if self.atmosphere:\n tod[iband,ichannel,:] -= amdl\n tod[iband,ichannel,:] -= np.nanmedian(tod[iband,ichannel,:])\n\n\n wnoise = wnoise_auto[:,:,iscan,0]\n weights[...,start:end] = 1./wnoise[...,None]**2\n bad = np.isnan(weights) | np.isinf(weights) | ~np.isfinite(feed_tod)\n feed_tod[bad] = 0\n weights[bad] = 0\n\n return feed_tod, weights, mask",
"def stop_aperture(self):\n self.aperture_id = None\n self.mode = \"\"",
"def climb(self):\n print(\"Inside WoodElf.climb\")",
"def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... \\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... 
done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output",
"def start_fare(self):\n self.current_fare_distance = 0",
"def _remove_flux_extinction(self):\n self.fluxUnred = self.flux.copy()\n self.fluxErrUnred = self.fluxErr.copy()\n self.fluxRenorm = self.flux.copy()\n self.fluxErrRenorm = self.fluxErr.copy()\n\n # Using negative a_v so that extinction.apply works in reverse and removes the extinction\n if self.mwebv:\n extinctions = extinction.fitzpatrick99(wave=self._good_filter_wave, \\\n a_v=-3.1 * self.mwebv, r_v=3.1, unit='aa')\n\n for i, pb in enumerate(self._good_filters):\n mask = (self.passband == pb)\n\n flux_pb = self.flux[mask]\n fluxerr_pb = self.fluxErr[mask]\n npbobs = len(flux_pb)\n\n if npbobs < 1:\n return\n\n if self.mwebv:\n flux_out = extinction.apply(extinctions[i], flux_pb, inplace=False)\n fluxerr_out = extinction.apply(extinctions[i], fluxerr_pb, inplace=False)\n else:\n flux_out = flux_pb\n fluxerr_out = fluxerr_pb\n self.fluxUnred[mask] = flux_out\n self.fluxErrUnred[mask] = fluxerr_out\n\n if npbobs > 1:\n # there's at least enough observations to find minimum and maximum\n minfluxpb = flux_out.min()\n maxfluxpb = flux_out.max()\n norm = maxfluxpb - minfluxpb\n self.fluxRenorm[mask] = (flux_out - minfluxpb) / norm\n self.fluxErrRenorm[mask] = fluxerr_out / norm\n elif npbobs == 1:\n # deal with the case with one observation in this passband by setting renorm = 0.5\n norm = self.fluxUnred[mask] / 0.5\n self.fluxRenorm[mask] /= norm\n self.fluxErrRenorm[mask] /= norm\n\n self._default_cols = ['time', 'flux', 'fluxErr', 'fluxUnred', 'fluxErrUnred', \\\n 'fluxRenorm', 'fluxErrRenorm', 'photflag', 'zeropoint', 'obsId']\n return",
"def expand_slicer_aperture(system):\n\n # First of all, we need to find the Surface Number for the IMAGE SLICER\n N_surfaces = system.LDE.NumberOfSurfaces\n surface_names = {} # A dictionary of surface number -> surface comment\n for k in np.arange(1, N_surfaces):\n surface_names[k] = system.LDE.GetSurfaceAt(k).Comment\n # find the Slicer surface number\n try:\n # The naming convention for this surface has changed. Not the same for Nominal Design as Monte Carlos\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('Slicer Mirror')]\n except ValueError:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('IFU ISA')]\n slicer = system.LDE.GetSurfaceAt(slicer_num)\n\n # Read Current Aperture Settings\n apt_type = slicer.ApertureData.CurrentType\n # print(\"Aperture type: \", apt_type)\n if apt_type == 4: # 4 is Rectangular aperture\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # print(\"Current Settings:\")\n x0 = current_apt_sett._S_RectangularAperture.XHalfWidth\n y0 = current_apt_sett._S_RectangularAperture.YHalfWidth\n # If the Y aperture hasn't been changed already, we change it here to 999 mm to get all rays through\n if y0 != 999:\n # Change Settings\n aperture_settings = slicer.ApertureData.CreateApertureTypeSettings(\n constants.SurfaceApertureTypes_RectangularAperture)\n aperture_settings._S_RectangularAperture.XHalfWidth = x0\n aperture_settings._S_RectangularAperture.YHalfWidth = 999\n slicer.ApertureData.ChangeApertureTypeSettings(aperture_settings)\n\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # Notify that we have successfully modified the aperture\n print(\"Changing aperture of surface: \", slicer.Comment)\n print(\"New Settings:\")\n print(\"X_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.XHalfWidth)\n print(\"Y_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.YHalfWidth)\n\n return",
"def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))",
"def enemy_waves(self):\n\n pass",
"def make_flats(side='blue',overwrite=False):\r\n\r\n iraf.unlearn('flatcombine')\r\n iraf.flatcombine.ccdtype = \"\"\r\n iraf.flatcombine.process = \"no\"\r\n iraf.flatcombine.subsets = \"no\"\r\n iraf.flatcombine.rdnoise = \"RON\"\r\n iraf.flatcombine.gain = \"GAIN\"\r\n for aperture in ['0.5', '1.0', '1.5', '2.0']:\r\n flats = find_flats(aperture, side=side)\r\n if len(flats) > 0:\r\n if overwrite:\r\n iraf.delete('flat_%s_%s.fits' % (side, aperture), verify='no')\r\n iraf.delete('temp.fits' , verify='no')\r\n iraf.delete('tempsmooth.fits', verify='no')\r\n iraf.delete('norm_temp.fits', verify='no')\r\n # normalize the flat\r\n if side == 'blue': \r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 2\r\n # iraf.unlearn('response')\r\n # iraf.response.function = 'legendre'\r\n iraf.response.order = 100\r\n # iraf.response.high_rej = 5\r\n # iraf.response.low_rej = 2\r\n # iraf.response.niterate = 10\r\n # iraf.response('temp[0]', 'temp[0]',\r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n iraf.imfilter.boxcar('temp', 'tempsmooth', xwindow='1', ywindow='500')\r\n iraf.imarith('temp', '/', 'tempsmooth', 'norm_temp.fits')\r\n iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('norm_temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n else:\r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 1\r\n iraf.unlearn('response')\r\n iraf.response.function = \"spline3\" \r\n iraf.response.order = 100\r\n iraf.response.high_rej = 3\r\n iraf.response.low_rej = 3\r\n iraf.response.niterate = 3\r\n iraf.response('temp[0]', 'temp[0]',\r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n # iraf.unlearn('response')\r\n # iraf.response.function = \"spline3\"\r\n # iraf.response.order = 100\r\n # iraf.response.niterate = 3\r\n # iraf.response.low_rej = 3\r\n # iraf.response.high_rej = 3\r\n # if side == 'blue':\r\n # iraf.twodspec.longslit.dispaxis = 2\r\n # else:\r\n # iraf.twodspec.longslit.dispaxis = 1\r\n \r\n\r\n # measure flat-field error from sigma images\r\n iraf.unlearn('imcombine')\r\n iraf.imcombine.reject = 'avsigclip'\r\n iraf.imcombine(','.join(flats), output='flat', sigma='sigma', scale='mode')\r\n iraf.imarith('sigma', '/', 'flat', 'frac')\r\n s = iraf.imstat('frac.fits', fields=\"mean\", nclip=20, Stdout=1, format=\"no\")\r\n print 'Flat field error: ', np.float(s[0])\r\n iraf.delete('flat.fits', verify=\"no\")\r\n iraf.delete('sigma.fits', verify=\"no\")\r\n iraf.delete('frac.fits', verify=\"no\")\r\n else:\r\n print \"No dome or internal flats for the %s arcsec slit.\" % aperture",
"def step7(self):\n for indx, mr in enumerate(self.mrs):\n self.log.info(\"Set boot drive on controller:%d\"\n % (mr.ctrl_id))\n for vd in self.mr_vds[indx]:\n if (int(mr.cli.bootdrive_vd_get()) != vd):\n mr.cli.bootdrive_vd_set(vd_id=self.mr_vds[indx][indx],\n setting=\"On\")\n break",
"def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()",
"def fire_smelter(self):\n # Get the smelter\n screenshot = utils.take_screenshot()\n forge = screenshot[152:168, 168:184]\n\n # Check if the cold forge exists\n result = cv2.matchTemplate(forge, self.cold_forge_template, cv2.TM_CCORR_NORMED)\n max_val = cv2.minMaxLoc(result)[1]\n\n # Found cold forge, light it and wait\n if max_val > 0.9:\n pyautogui.moveTo(192, 159, 0.15)\n pyautogui.doubleClick()\n sleep(1.5)",
"def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))"
] | [
"0.53458524",
"0.5341943",
"0.5341767",
"0.51823235",
"0.51823235",
"0.51713705",
"0.51584935",
"0.50687635",
"0.50499785",
"0.5038589",
"0.50249314",
"0.5024092",
"0.5023892",
"0.50143516",
"0.49986392",
"0.49689344",
"0.49604985",
"0.49398032",
"0.49370384",
"0.49188516",
"0.49081752",
"0.49075094",
"0.48980296",
"0.48947164",
"0.48892295",
"0.48620415",
"0.48538738",
"0.4850283",
"0.4842821",
"0.4827529"
] | 0.72748023 | 0 |
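The record above runs two separate CASA clean jobs and then feathers the resulting images, passing the ratio from `_calc_feather_weighting` as a `weighting` argument. As a rough, hedged illustration only, the sketch below shows one way such a wrapper might translate that ratio into CASA's `feather` task through its `sdfactor` parameter; `additional_casa_commands.feather` is project-specific and its real implementation is not shown in this record, so the function body here is an assumption, not the project's code.

```python
# Hypothetical sketch (NOT the project's actual additional_casa_commands.feather):
# appends a CASA feather call to the drivecasa script list and returns the
# name of the feathered image, mirroring how the record uses the return value.
def feather(script, output_basename, highres, lowres, weighting=1.0):
    out_image = "{}.feather.image".format(output_basename)
    script.append(
        "feather(imagename='{}', highres='{}', lowres='{}', sdfactor={})".format(
            out_image, highres, lowres, weighting))
    return out_image
```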
Calculate weightings to use for the feather task | def _calc_feather_weighting(param_dict):
weightings = param_dict['weightings']
if not isinstance(weightings, (list, tuple)):
return 1.0
return float(weightings[1]) / float(weightings[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_weighted_results():\n pass",
"def weight(self):",
"def getWeight(self) -> float:\n ...",
"def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente",
"def get_weights(self):",
"def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights",
"def get_weight(self):\n pass",
"def get_weight(self):\n pass",
"def calcweight( self ):\n weight = 0\n zeroval = 0\n for sensor in ('right_top', 'right_bottom', 'left_top', 'left_bottom'):\n\t\treading = self.readings[sensor]\n\t\tcalibration = self.named_calibration[sensor]\n if sensor == 'right_top':\n zeroval = self.rtzv\n elif sensor == 'right_bottom':\n zeroval = self.rbzv\n elif sensor == 'left_top':\n zeroval = self.ltzv\n else:\n zeroval = self.lbzv\n\t\tif reading > calibration[2]:\n\t\t\tprint \"Warning, %s reading above upper calibration value\" % sensor\n\t\tif reading < calibration[1]:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[0]) / (calibration[1] - calibration[0])\n\t\telse:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[1]) / (calibration[2] - calibration[1]) + 1700\n\n if self.debug == 1:\n print \"weight calculated pre-conversion\", weight\n print \"return val\", self.converttolbs( weight / 100.0 )\n\n # return self.converttolbs( weight / 100.0 )\n return weight / 100.0",
"def weights(self):\r\n\t\treturn None",
"def weights(self) -> List[float]:",
"def _weigh_object(self, host_state, weight_properties):\n\n weight = 0.0\n if host_state.patch_prefer:\n weight += CONF.filter_scheduler.swmgmt_patch_weight_multiplier\n if host_state.upgrade_prefer:\n weight += CONF.filter_scheduler.swmgmt_upgrade_weight_multiplier\n return weight",
"def get_weight(self):\n # FIXME: BELUM ADA KEPUTUSAN\n return 0",
"def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight",
"def totalWeighting(distance, count, data, n):\n\n weighting = (data)*(distance)*count\n weighting = weighting/(np.sum(np.sum(weighting))) \n return weighting",
"def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)",
"def _get_weight(self, reaction: db.Reaction) -> Tuple[float, float]:\n for step in reaction.get_elementary_steps(self.db_manager):\n # # # Barrierless weights for barrierless reactions\n if step.get_type() == db.ElementaryStepType.BARRIERLESS:\n return self.barrierless_weight, self.barrierless_weight\n return 1.0, 1.0",
"def calculate_weight(self, element, total_cores_used, total_disk_used,\n total_memory_used):\n cpu_capacity = self.model.get_resource_from_id(\n resource.ResourceType.cpu_cores).get_capacity(element)\n\n disk_capacity = self.model.get_resource_from_id(\n resource.ResourceType.disk).get_capacity(element)\n\n memory_capacity = self.model.get_resource_from_id(\n resource.ResourceType.memory).get_capacity(element)\n\n score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /\n float(cpu_capacity))\n\n # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0\n if disk_capacity == 0:\n score_disk = 0\n else:\n score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /\n float(disk_capacity))\n\n score_memory = (\n 1 - (float(memory_capacity) - float(total_memory_used)) /\n float(memory_capacity))\n # TODO(jed): take in account weight\n return (score_cores + score_disk + score_memory) / 3",
"def weight(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"weighting_value\"):\n self.load_weighting_data()\n self.weighting_calculation()",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def weight(self) -> float:\r\n return self._weight",
"def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...",
"def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}"
] | [
"0.7639016",
"0.75578135",
"0.7499282",
"0.73658234",
"0.72466195",
"0.7217344",
"0.7214428",
"0.7214428",
"0.7117307",
"0.7097232",
"0.70433426",
"0.6888601",
"0.6846439",
"0.68106675",
"0.6809401",
"0.6799938",
"0.67643374",
"0.6751411",
"0.6691422",
"0.6686457",
"0.6686457",
"0.6686457",
"0.6686457",
"0.6686457",
"0.6686457",
"0.6686457",
"0.6686457",
"0.6682024",
"0.66690844",
"0.6659174"
] | 0.7724041 | 0 |
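A quick worked example of the `_calc_feather_weighting` helper shown in the record above, using made-up parameter dictionaries; the return values follow directly from the function body (second weighting divided by the first, or 1.0 when `weightings` is not a list or tuple).

```python
# Values follow from the function body shown above.
_calc_feather_weighting({'weightings': [4.0, 2.0]})    # -> 0.5  (2.0 / 4.0)
_calc_feather_weighting({'weightings': (1.0, 1.5)})    # -> 1.5  (1.5 / 1.0)
_calc_feather_weighting({'weightings': 'default'})     # -> 1.0  (not a list/tuple)
```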
Automatically generate a basename or else use the one provided. | def _gen_basename(param_dict, clargs):
if param_dict['output_basename'] in ['', 'auto']:
return clargs.input_fname.lower().split('.json')[0]
else:
return param_dict['output_basename'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_fullname(basename, _type=None):\n return '{}.{}'.format(basename, extensions.get(_type, None))",
"def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):\n if not basename:\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n\n if cwd is None:\n cwd = os.getcwd()\n if ext is None:\n ext = Info.output_type_to_ext(self.inputs.outputtype)\n if change_ext:\n suffix = \"\".join((suffix, ext)) if suffix else ext\n\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname",
"def genBaseName(fileName):\n return fileName.split(\"_\")[0].split(\".\")[0]",
"def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True,\n ext='.mif'):\n\n if basename == '':\n msg = 'Unable to generate filename for command %s. ' % self.cmd\n msg += 'basename is not set!'\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = ''.join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = ''\n fname = fname_presuffix(basename, suffix=suffix,\n use_ext=False, newpath=cwd)\n return fname",
"def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True,\n ext='.mif'):\n\n if basename == '':\n msg = 'Unable to generate filename for command %s. ' % self.cmd\n msg += 'basename is not set!'\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = ''.join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = ''\n fname = fname_presuffix(basename, suffix=suffix,\n use_ext=False, newpath=cwd)\n return fname",
"def gen_save_name(basename = os.getcwd()):\n fname, suffix = basename.split('.') # just assume this is true.\n qualifier = 1\n unique_fname = fname\n while (os.path.exists(unique_fname + '.' + suffix)):\n unique_fname = fname + '_{}'.format(qualifier)\n qualifier += 1\n return unique_fname + '.' + suffix",
"def purebasename(self):\n return self._getbyspec(\"purebasename\")[0]",
"def purebasename(self):\n return self.namebase",
"def built_file_basename(self, name, type=None, **kw):\n if not kw.get('bare'):\n if type == self.EXECUTABLE:\n name = name + self._exe\n elif type == self.STATIC_LIB:\n name = self.lib_ + name + self._lib\n elif type == self.SHARED_LIB:\n name = self.dll_ + name + self._dll\n return name",
"def basename(source_file) :\n if source_file is not None and source_file != '' :\n return os.path.basename(source_file)\n\n return ''",
"def basename(path: str) -> str:\n pass",
"def getBaseName(filepath):\n return os.path.basename(filepath)",
"def base_name(path):\n return os.path.basename(path)",
"def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"",
"def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))",
"def basename(self):\n return self._getbyspec(\"basename\")[0]",
"def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path",
"def basename(self):\n return get_basename(self.filename)",
"def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)",
"def fname( file_, base=None, new_base=None, new_ext=None ):\n if base and new_base:\n file_ = file_.replace(base, new_base, 1)\n if new_ext:\n file_ = os.path.splitext(file_)[0] + new_ext\n return file_",
"def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name",
"def basename(self) -> str:\n return self._basename",
"def _UrlBaseName(url):\n return url.rstrip('/').rpartition('/')[-1]",
"def new_filename(fname=None,ndigits=3):\n if fname is None:\n ext = (\"%%.%ii\" % ndigits) % 1\n fname = \"%s.%s\" % (random_string(6), ext)\n \n if os.path.exists(fname): \n fname = increment_filename(fname,ndigits=ndigits)\n\n return fname",
"def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix",
"def basename(path):\r\n return split(path)[1]",
"def basefname(fname):\n return os.path.splitext(fname.split(\"\\\\\")[-1])[0]",
"def basename(self):\n return self.name.basename",
"def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n # if self.suffix_time != \"\":\n # index = self.baseFilename.find(\".\" + self.suffix_time)\n # if index == -1:\n # index = self.baseFilename.rfind(\".\")\n # self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self._get_format_filename()\n\n self.mode = 'a'\n if not self.delay:\n self.stream = self._open()",
"def get_basename(absolute_file_path):\r\n return absolute_file_path.split('/')[-1]"
] | [
"0.7162985",
"0.71130204",
"0.71064943",
"0.70725805",
"0.70725805",
"0.6988176",
"0.6869497",
"0.6849551",
"0.672568",
"0.6704061",
"0.66934615",
"0.65571934",
"0.6534404",
"0.64742374",
"0.6473847",
"0.646789",
"0.6352444",
"0.6296653",
"0.6274127",
"0.62643063",
"0.62446624",
"0.62266135",
"0.6223458",
"0.62166315",
"0.62140113",
"0.6213255",
"0.6211356",
"0.6198727",
"0.61853725",
"0.61833555"
] | 0.77820414 | 0 |
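A small usage sketch for `_gen_basename` from the record above. The `clargs` object is assumed to be an argparse-style namespace exposing an `input_fname` attribute, as implied by the function body; the filenames are invented for illustration.

```python
from types import SimpleNamespace

clargs = SimpleNamespace(input_fname='M100_combine.JSON')

# 'auto' (or empty) falls back to the lower-cased input filename minus '.json'
_gen_basename({'output_basename': 'auto'}, clargs)      # -> 'm100_combine'
_gen_basename({'output_basename': ''}, clargs)          # -> 'm100_combine'

# anything else is used verbatim
_gen_basename({'output_basename': 'my_map'}, clargs)    # -> 'my_map'
```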
! Create ssh client. Creates an SSH client used to run commands on the host machine from inside the container. | def create_client():
hostname = "localhost"
username = "she393"
password = os.getenv("PASSWORD")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=hostname, username=username, password=password)
return client | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_sshclient(host, user, port, secret):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret, port=port)\n elif secret and port==0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client",
"def create_client(host, user, password):\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n client.connect(hostname=host, username=user, password=password, timeout=60)\n return client",
"def create_ssh_client(self, hostname, username, password):\n if self.ssh_client is None:\n self.ssh_client = paramiko.SSHClient()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh_client.connect(hostname, username=username, password=password)\n else:\n print(\"SSH client session exist.\")",
"def sshclient_from_instance(instance, ssh_key_file,\r\n host_key_file='~/.ssh/known_hosts',\r\n user_name='root', ssh_pwd=None):\r\n s = FakeServer(instance, ssh_key_file)\r\n return SSHClient(s, host_key_file, user_name, ssh_pwd)",
"def connect_instance(tag, key_name, user_name):\n inst = get_instance(tag)\n cmd = boto.manage.cmdshell.sshclient_from_instance(\n inst,\n SSH_FOLDER + key_name + \".pem\",\n user_name=user_name\n )\n return inst, cmd",
"def openSSH(target, user):\r\n ssh = paramiko.SSHClient()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n ssh.connect(target, username=user)\r\n return ssh",
"def ssh():\n vbox = Vbox(env.vm_name)\n with vbox as session:\n session.wait_for_ssh()\n open_shell()",
"def create_ssh_handle(xcnode):\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy)\n\n try:\n client.connect(\n hostname=xcnode.hostname,\n username=xcnode.username,\n password=xcnode.password,\n port=int(xcnode.port)\n )\n xcnode.fd.write('ssh\\'ed to {} @ {}\\n'.format(\n xcnode.hostname, datetime.now()))\n except Exception as e:\n print e\n client = None\n\n xcnode.client = client\n\n return xcnode",
"def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)",
"def _get_sshclient(host_name, ip, port=22):\n ssh_clt = paramiko.SSHClient()\n # Allow connection not in the known_host\n ssh_clt.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_clt.connect(ip, port, host_name,\n key_filename=conf.SSH_KEY_ARGS['path'])\n return ssh_clt",
"def connect(self,host, container):\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n attempts = 3\n count = 0\n while attempts:\n attempts -= 1\n count +=1\n try:\n if attempts > 0:\n print \"Attempting Connection to %s (%i/%i)\" % (host, count, attempts)\n logging.debug(\"\\t connecting to %s@%s\" % (args.user, host))\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n host,\n username=args.user,\n port=22,\n allow_agent=True,\n look_for_keys=True,\n timeout=5\n )\n logging.debug(\"Connected to %s\" % (host))\n chan = ssh.invoke_shell()\n # print(repr(ssh.get_transport()))\n if not container:\n logging.debug(\"*** Initiating Interactive Session\")\n interactive().rshell(chan)\n logging.debug(\"Closing SSH session to %s\" % (host))\n chan.close()\n interactive().disconnect()\n break\n else:\n print \"Max Connection attempts reached (%i/%i)\" % (count, attempts)\n logging.debug(\"Exiting with code 3\")\n sys.exit(3)\n except paramiko.AuthenticationException:\n print \"Authentication failed when connecting to %s\" % (host)\n sys.exit(1)\n except:\n print \"Connection (%i/%i) failed to %s, waiting 5s retry\" % (count, attempts, host)\n time.sleep(5)",
"def create_sftp_client(ssh_client):\n sftp_client = ssh_client.open_sftp()\n return sftp_client",
"def connect(self, instance):\n client = sshclient.SSHClient()\n client.set_missing_host_key_policy(sshclient.AutoAddPolicy())\n client.connect(instance.ip_address, username=\"core\",\n key_filename=self._ssh_keyfile)\n return client",
"def _ssh_connect():\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy)\n\n client.connect(**SSH_CONFIG)\n yield client\n\n client.close()",
"def connect_to_remote_host(host, username, password):\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n ssh_client.connect(host, username=username, password=password)\n return ssh_client",
"def cli(ctx, host, user, no_ask):\n from ._ssh import open_ssh\n from .vdi import vdi_ctl\n\n try:\n ssh, ssh_cfg = open_ssh(host, user, no_ask=no_ask)\n except:\n click.echo('Failed to connect to \"{}{}\"'.format(user+'@' if user else '', host))\n ctx.exit()\n\n ctl = vdi_ctl(ssh)\n\n ctx.obj = Ctx(ssh=ssh, ssh_cfg=ssh_cfg, ctl=ctl)",
"def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native 
hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")",
"def _build_ssh_client(self):\n # Create instance of SSHClient object\n # If not using SSH keys, we use noauth\n if not self.use_keys:\n remote_conn_pre = SSHClient_noauth()\n else:\n remote_conn_pre = SSHClient()\n\n # Load host_keys for better SSH security\n if self.system_host_keys:\n remote_conn_pre.load_system_host_keys()\n if self.alt_host_keys and path.isfile(self.alt_key_file):\n remote_conn_pre.load_host_keys(self.alt_key_file)\n\n # Default is to automatically add untrusted hosts (make sure appropriate for your env)\n remote_conn_pre.set_missing_host_key_policy(self.key_policy)\n return remote_conn_pre",
"def docker_enter(self, user, host, container):\n import os\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n ssh_host = user+\"@\"+host\n ssh_timeout = \"5\"\n ssh_options = \"-A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=\"+ssh_timeout+\" -o ConnectionAttempts=1 -tt\"\n docker_cmd = \"\\\"/opt/bin/docker-enter \"+container+\"\\\" \"\n cmd = \"ssh \"+ssh_options+\" \"+ssh_host+\" \"+docker_cmd\n logging.debug(\"Executing Command: %s\" % (cmd))\n returned = os.system(cmd)\n logging.debug(\"docker_enter func Exiting with code %i\" % (returned))\n sys.exit(returned)",
"def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)",
"def ssh_cmd(ctx):\n pass",
"def setup_machine():\n client = docker.from_env()\n if client.info().get(\"ServerVersion\") < \"18.09.2\":\n raise (\"Docker server needs to be at least 18.09.2\")\n ssh_path = os.path.join(expanduser(\"~\"), \".ssh\")\n cloud_path = os.path.join(ssh_path, \"cloud_keys\")\n config_path = os.path.join(cloud_path, \"config\")\n bash(\"mkdir -p {}\".format(cloud_path))\n bash(\"cp ~/.ssh/config ~/.ssh/{}/config\".format(\"cloud_keys\"))\n bash(\"sed -i '' '/.*UseKeychain.*/d' ~/.ssh/cloud_keys/config\")\n bash(\"sed -i '' '/.*ControlPath .*/d' ~/.ssh/cloud_keys/config\")\n\n config = \"\"\"\n Host *\n ControlPath /tmp/master-%r@%h:%p\n User {}\n \"\"\".format(\n getpass.getuser()\n )\n with open(config_path, \"r\") as h:\n conents = h.read()\n with open(config_path, \"w\") as h:\n h.write(config)\n with open(config_path, \"a\") as h:\n h.write(conents)\n keys = [\n splitext(x)[0]\n for x in glob.glob(os.path.join(ssh_path, \"*.pub\"))\n if not x.endswith(\"-cert.pub\") # filter out signed keys\n ]\n for key in keys:\n if not os.path.isfile(key):\n logger.warning(\"No private key for {}, skipping\".format(key))\n else:\n logger.info(\"Adding key {}\".format(key))\n dest = os.path.join(cloud_path, basename(key))\n if os.path.lexists(dest) is False:\n bash(\"cp {} {}\".format(key, dest))",
"def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)",
"def ssh_connect(cf):\n try:\n ssh = paramiko.SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(cf.server,username=cf.username)\n print(\"Connected to %s\" % cf.server)\n except paramiko.AuthenticationException as e:\n print(\"Authentication failed when connecting to %s\" % cf.server)\n print(\"error:\",e)\n sys.exit(1)\n except Exception as e:\n print(\"Couldn't establish an ssh connection to %s\" % cf.server)\n print(\"error:\", e)\n sys.exit(1)\n\n return ssh",
"def ssh_call ( server, identity, cmd ) :\n print \"Running SSH command on server \" + server + \": \" + cmd\n return subprocess.call( [ \"ssh\",\n ssh_opt,\n \"-tt\",\n \"-i\",\n identity,\n \"ec2-user@\" + server,\n cmd ] )",
"def get_ssh_client(self, ip, username, password, retries=10):\n try:\n ssh_client = SshClient(ip, 22, username, password, retries)\n except Exception as e:\n raise unittest.SkipTest(\"Unable to create ssh connection: \" % e)\n\n self.assertIsNotNone(\n ssh_client, \"Failed to setup ssh connection to ip=%s\" % ip)\n\n return ssh_client",
"def open_ssh():\n print('Opening SSH...')",
"def _new_client(self) -> paramiko.SSHClient:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self._paramiko_client = ssh\n return self._paramiko_client",
"def sendcommand(sshclient, ip, user, password, commands):\n\n # Trying to establish the SSH session, using a timeout of 3 seconds\n sshclient.connect(ip, username=user, password=password,\n look_for_keys=False, allow_agent=False, timeout=3)\n # To execute commands we'll need an input shell to execute them against\n sshsession = sshclient.invoke_shell()\n # Read current output buffer for hostname extraction.\n # Expected is something like 'hostname#'\n hostname = sshsession.recv(1000)\n # Decode output to UTF-8 encoding\n hostname = hostname.decode('utf-8')\n # Replace whitespaces and the expected '#' from the prompt with nothing\n hostname = hostname.replace('\\r\\n', '').replace('#', '')\n # Execute 'nopaging' function to disable paging\n nopaging(sshsession)\n # Run each command in commands list against the current session, using\n # a sleep timer of 3s after each command.\n for command in commands:\n command = textwrap.wrap(command)[0]\n sshsession.send(command)\n # Don't forget to press 'Enter' after each command. This will do.\n sshsession.send('\\n')\n # Might need more time for commands like 'show tech' but 3s should\n # do fine for most outputs.\n time.sleep(3)\n\n # Flush current output into output variable. Might need adjustment for\n # larger outputs.\n output = sshsession.recv(100000)\n # Say goodbye to the device.\n sshclient.close()\n\n # Return the SSH output and extracted hostname\n return output, hostname",
"def ssh(pi):\n command = \"ssh {0}\".format(pi)\n subprocess.Popen(command, shell=True)"
] | [
"0.69851446",
"0.6791386",
"0.6668736",
"0.6655599",
"0.6630987",
"0.65980434",
"0.6580812",
"0.65597147",
"0.655599",
"0.6490338",
"0.641064",
"0.6405131",
"0.6365197",
"0.63595355",
"0.6352704",
"0.6299737",
"0.62719107",
"0.62428546",
"0.6240554",
"0.6232706",
"0.6225897",
"0.6223149",
"0.6206834",
"0.6184578",
"0.6174426",
"0.6149093",
"0.6141177",
"0.613032",
"0.61184734",
"0.6107781"
] | 0.7121306 | 0 |
! Wrapper for HTTP responses. message: the content of the successful (200) HTTP response. Returns a Flask HTTP response object with the content of the message argument and status code 200. | def response(message):
res = Response(json.dumps(message))
res.status_code = 200
res.content_type = "application/json"
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def response(status, message, code):\n return make_response(jsonify({\n 'status': status,\n 'message': message\n })), code",
"def HandleResponse(data,message,success = True,err = 'no err',resp_status = status.HTTP_200_OK):\n return Response({\n 'success':success,\n \"error\":err,\n \"message\":message,\n \"data\":data\n },status = resp_status)",
"def _http_response(response, http_status_code):\n return make_response(jsonify(response), http_status_code)",
"def resp200(msg):\n return Resp({'message':msg, 'success':True})",
"def response(content=None, error_code='0', message=''):\n if error_code == '0':\n data = {\n 'success': True,\n 'errorCode': error_code,\n 'data': content\n }\n else:\n data = {\n 'success': False,\n 'errorCode': error_code,\n 'errorMsg': message,\n }\n resp = jsonify(data)\n\n return resp",
"def format_response(message, status, message_type=\"error\"):\n return make_response(\n jsonify({message_type: message}),\n status\n )",
"def build_response(message: str, status_code: int) -> str:\n response = current_app.response_class(\n response=json.dumps({\n 'code': status_code,\n 'message': message\n }, indent=2),\n status=status_code,\n mimetype='application/json'\n )\n return response",
"def build_response(http_status, message):\n return Response(data={'detail': message},\n status=http_status)",
"def create_response(data={}, status=200, message=''):\n response = {\n 'success': 200 <= status < 300,\n 'code': status,\n 'message': message,\n 'result': data\n }\n return jsonify(response), status",
"def make_success_response(status, content):\n return dict(status=status, content=content)",
"def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)",
"def ping_response():\n\n return Response(\"ok\", status=200)",
"def create_response(data={}, status=200, message=''):\n if type(data) is not dict:\n raise TypeError('Data should be a dictionary 😞')\n\n response = {\n 'success': 200 <= status < 300,\n 'code': status,\n 'message': message,\n 'result': data\n }\n return jsonify(response), status",
"def status(code=200):\n\treturn jsonify(server.status_data()), code",
"def error_response(status_code, message=None):\n payload = {'error': str(status_code)+\" : \"+HTTP_STATUS_CODES.get(status_code, \"Unknown Error\")}\n if message:\n payload['message'] = message\n response = jsonify(payload)\n response.status_code = status_code\n return response",
"def http501(message):\n response = HttpResponse(message)\n response.status_code = 501\n return response",
"def status(self, code, content_length=None):",
"def error_return(content, status):\n content = '{' + '\"status\":{},\"message\":\"{}\"'.format(status, content) + '}'\n return Response(content, status=status, mimetype='application/json')",
"def handle_error(self, message):\n data = {\n \"success\": False,\n \"error\": message\n }\n\n return JsonResponse(data, status=200)",
"def response_with(response, status=200):\n return make_response(jsonify(response), status)",
"def send_response(data: dict = None, error: str = None, status_code: int = 200):\n if data is None:\n data = {}\n\n response = {\"data\": data, \"error\": error}\n\n return jsonify(response), status_code",
"def custom_response(status, details):\n return app.response_class(status=status,\n mimetype='application/json',\n response=json.dumps({\"status\": status,\n \"details\": details}))",
"def httperror( status_code=500, message=b'' ):",
"def error_response(http_response_code: Union[HTTPStatus, int], message: Text) -> JSONResponse:\n\n if isinstance(http_response_code, HTTPStatus):\n http_response_code = http_response_code.value\n\n return JSONResponse(dict(\n code=str(http_response_code),\n message=message\n ), http_response_code)",
"def create_response(self, status, statusmsg, body):\n self.response.setStatus(status, statusmsg)\n return body",
"def return_payload(status_code: int, message: str):\n return {\n \"statusCode\": status_code,\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"body\": message,\n }",
"def response(code):\n\n def decorator(func):\n func.wsgi_code = code\n return func\n return decorator",
"def make_json_response(data, status=True, code=200):\n\n to_serialize = {}\n if status:\n to_serialize['status'] = True\n if data is not None:\n to_serialize['result'] = data\n else:\n to_serialize['status'] = False\n to_serialize['error'] = data\n response = app.response_class(\n response=json.dumps(to_serialize),\n status=code,\n mimetype='application/json'\n )\n return response",
"def handle_status(message):\n\n status = _build_status()\n message.reply(status)",
"def get_500_response(message):\n headers = HTTPHeaders.HTTPHeaders()\n add_default_headers(headers)\n headers[\"Connection\"] = \"close\"\n headers[\"Content-Length\"] = str(len(message))\n headers[\"Content-Type\"] = \"text/plain\"\n\n return HTTPResponse.HTTPResponse(version=1.0, status_code=500, phrase=\"Internal Error\",\n headers=headers, data=message)"
] | [
"0.8077968",
"0.7261069",
"0.7197937",
"0.71356225",
"0.7122571",
"0.7062746",
"0.70605296",
"0.70228976",
"0.7000634",
"0.69415414",
"0.6871562",
"0.67692024",
"0.6674502",
"0.6664123",
"0.65871406",
"0.65599144",
"0.6558028",
"0.65498847",
"0.6513427",
"0.6454303",
"0.6445546",
"0.6440139",
"0.6360144",
"0.63113296",
"0.628619",
"0.62825716",
"0.6280527",
"0.6280462",
"0.62779075",
"0.6227512"
] | 0.7737896 | 1 |
One-to-one identification of the snapshots. | def snapshot_identification(snapshot):
return {
'user_id': snapshot['user_id'],
'timestamp': snapshot['timestamp'],
'snapshot_id': snapshot['snapshot_id']} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unique_id(self):\n return f\"{DOMAIN}_{self._cam_name}_{self._obj_name}_snapshot\"",
"def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")",
"def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")",
"def snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_id\")",
"def snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_id\")",
"def snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_id\")",
"def identity(self):\n return self.id",
"def source_instant_snapshot_id(self) -> str:\n return pulumi.get(self, \"source_instant_snapshot_id\")",
"def get_from_snap_id(self):\n return self.from_snapshot_id",
"def get_snapshot_uuid(self) -> str:\n return self._snapshot_uuid",
"def get_image(self):\n logging.debug(\"%s get_image entered\" % str(self.machine_name))\n snapshots = cs.list_snapshots()\n # find the one for this server\n if self.cloudserver:\n server_id = self.cloudserver.id\n else:\n return self.image_id\n\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print \"XXX:\", img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n return img\n\n print \"Server %s has no snapshots\" % (server_id)\n return None",
"def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids",
"def identity(self, *args, **kwargs):\n return {\n 'id': self.drone_id,\n }",
"def get_primary_id(self):",
"def get_from_snap_id(self):\n raise NotImplementedError()",
"def getIdent (self) :\n return self.id",
"def getID():",
"def identifier(self):\r\n return self.id",
"def get_snapshot(self, name=None, snapshot_id=None):\n if snapshot_id:\n return self._search_snapshot(key=\"snapshot_id\", value=snapshot_id)\n elif name:\n return self._search_snapshot(key=\"name\", value=name)\n else:\n raise ValueError(\"name or snapshot_id must be provided\")",
"def snapshot(self, snapshot_id):\r\n return self.connection.create_dbsnapshot(snapshot_id, self.id)",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info['source'] == self.dataset_name:\n return info['id']\n else:\n super.image_reference(image_id)",
"def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds",
"def snapshot_by_id(self, snapshot_id: int) -> Optional[Snapshot]:\n try:\n return next(snapshot for snapshot in self.metadata.snapshots if snapshot.snapshot_id == snapshot_id)\n except StopIteration:\n return None",
"def getId(self):",
"def test_reservation_id_one_instance(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id)\n self.assertEqual(len(refs), 1)\n self.assertEqual(refs[0]['reservation_id'], resv_id)",
"def identify(self):\r\n if \"signature\" in self or \"signatures\" in self:\r\n if len(self._multisignature):\r\n missings = \\\r\n self._multisignature[\"min\"] - \\\r\n len(self.get(\"signatures\", []))\r\n if missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n elif self._secondPublicKey:\r\n if \"signSignature\" not in self:\r\n raise Exception(\"second signature is missing\")\r\n dict.pop(self, \"id\", False)\r\n self[\"id\"] = dposlib.core.crypto.getIdFromBytes(\r\n serialize(self, exclude_multi_sig=False)\r\n )\r\n else:\r\n raise Exception(\"transaction not signed\")",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"tampers\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def image_reference(self, image_id):\n\n info = self.image_info[image_id]\n if info[\"source\"] == \"openimage\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def identifier(self):\n ident = self._json['coredata']['dc:identifier'].split(\":\")[-1]\n if ident != self._id:\n text = \"Profile with ID {} has been merged and the new ID is \"\\\n \"{}. Please update your records manually. Files have \"\\\n \"been cached with the old ID.\".format(self._id, ident)\n warn(text, UserWarning)\n return ident",
"def current_snapshot(self) -> Optional[Snapshot]:\n if snapshot_id := self.metadata.current_snapshot_id:\n return self.snapshot_by_id(snapshot_id)\n return None"
] | [
"0.6157024",
"0.6145881",
"0.6145881",
"0.58339775",
"0.58339775",
"0.58339775",
"0.5751372",
"0.5697604",
"0.5678704",
"0.55947214",
"0.5580041",
"0.5498619",
"0.5444909",
"0.54207444",
"0.53875417",
"0.5353588",
"0.5305235",
"0.52906895",
"0.5282701",
"0.5255412",
"0.5254084",
"0.5218394",
"0.5204684",
"0.51923317",
"0.51632935",
"0.5161018",
"0.5130703",
"0.51238745",
"0.51203436",
"0.5115463"
] | 0.6568016 | 0 |
The AccountBroker initialize() function before we added the policy stat table. Used by test_policy_table_creation() to make sure that the AccountBroker will correctly add the table for cases where the DB existed before the policy support was added. | def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):
if not self.account:
raise ValueError(
'Attempting to create a new database with no account set')
self.create_container_table(conn)
self.create_account_stat_table(conn, put_timestamp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pre_init(self) -> None:\n self._check_and_set_network()\n self._check_and_apply_migrations()",
"def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()",
"def init():\n database.create_tables([Tracker])\n database.commit()",
"def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)",
"def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)",
"def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()",
"def initialise(self):\n\n if self.db_type == 'sqlite':\n try:\n # Attempt to create schema if not present, to cope with fresh DB file\n BaseSQLite.metadata.create_all(self.engine)\n except OperationalError:\n print(\"Error creating database schema, possible invalid path? ('\" + self.db_name + \"'). Quitting\")\n exit()\n elif self.db_type == 'postgres':\n try:\n # ensure that create schema scripts created before create table scripts\n event.listen(BasePostgres.metadata, 'before_create', CreateSchema('datastore_schema'))\n BasePostgres.metadata.create_all(self.engine)\n except OperationalError:\n print(f\"Error creating database({self.db_name})! Quitting\")\n exit()",
"def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()",
"def init_post_connection(self):\n\n if self.authorized and not self.post_initiated:\n self.create_tables_and_apply_patches()\n self.post_initiated = True\n\n PyFunceble.INTERN[\"mysql\"] = self.__dict__.copy()",
"def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)",
"def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()",
"def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados",
"def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise",
"def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()",
"def _initial_setup(self):\n logger.info(\"Performing initial database setup...\")\n\n # Set up the migration_version table\n self._execute(\n \"\"\"\n CREATE TABLE migration_version (\n version INTEGER PRIMARY KEY\n )\n \"\"\"\n )\n\n # Initially set the migration version to 0\n self._execute(\n \"\"\"\n INSERT INTO migration_version (\n version\n ) VALUES (?)\n \"\"\",\n (0,),\n )\n\n # Set up any other necessary database tables here\n\n logger.info(\"Database setup complete\")",
"def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)",
"def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")",
"def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)",
"def initialise(self):\n self.set_up()",
"def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')",
"def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True",
"def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)",
"def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.bucketlist = {'name':'Go to vacation'}\n\n # bind the app to current context\n with self.app.app_context():\n # create all tables\n db.create_all()",
"def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()",
"def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()",
"def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()",
"def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()",
"def init_tables(database_url, _metadata, checkfirst=True):\n import dpds.storages.db.tables.operations\n import dpds.storages.db.tables.block\n import dpds.storages.db.tables.meta\n with isolated_nullpool_engine(database_url) as engine:\n _metadata.create_all(bind=engine, checkfirst=checkfirst)",
"def _init_db():\n import alembic.config\n import alembic.command\n alembic_cfg = alembic.config.Config('alembic.ini')\n alembic_cfg.attributes['configure_logger'] = False\n alembic.command.upgrade(alembic_cfg, 'head')\n _reset_db(get_test_db_session())",
"def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()"
] | [
"0.66067785",
"0.6455682",
"0.6373156",
"0.63563114",
"0.63563114",
"0.62698793",
"0.6163393",
"0.6154188",
"0.60957396",
"0.60849506",
"0.60809666",
"0.5964424",
"0.5955808",
"0.5944223",
"0.5935643",
"0.5916183",
"0.591556",
"0.5893408",
"0.58916914",
"0.5885537",
"0.58717024",
"0.58418834",
"0.58233",
"0.58160424",
"0.5815057",
"0.5799095",
"0.57769054",
"0.5734957",
"0.5724158",
"0.5717349"
] | 0.73197085 | 0 |
Copied from AccountBroker before the metadata column was added; used for testing with TestAccountBrokerBeforeMetadata. Create the account_stat table, which is specific to the account DB. | def premetadata_create_account_stat_table(self, conn, put_timestamp):
conn.executescript('''
CREATE TABLE account_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
container_count INTEGER,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO account_stat (container_count) VALUES (0);
''')
conn.execute('''
UPDATE account_stat SET account = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, Timestamp.now().internal, str(uuid4()),
put_timestamp)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")",
"def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()",
"def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)",
"def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()",
"async def statinit(client):\n conn = client.bot.dbs[client.server_tag]\n print(('Initializing stat columns in \\'users\\''\n f' in /persist/db/{client.server_tag}.db...'))\n for attr in usr_attributes:\n db.add_column(conn, 'users', attr)\n db.ccache()\n print('User stat initialization complete.')",
"def load_status_table():",
"def _populate_table_status():\n [db_insert_or_get(Status, name=name) for name in app.config['STATUS_DICT'][1:]]\n db.session.commit()",
"def create_table(self):\n pass",
"def build_metadata():\n metadata = sa.MetaData()\n\n sa.Table(\n 'hive_blocks', metadata,\n sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('hash', CHAR(40), nullable=False),\n sa.Column('prev', CHAR(40)),\n sa.Column('txs', SMALLINT, server_default='0', nullable=False),\n sa.Column('ops', SMALLINT, server_default='0', nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('hash', name='hive_blocks_ux1'),\n sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),\n )\n\n sa.Table(\n 'hive_accounts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', VARCHAR(16), nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n #sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),\n\n sa.Column('display_name', sa.String(20)),\n sa.Column('about', sa.String(160)),\n sa.Column('location', sa.String(30)),\n sa.Column('website', sa.String(100)),\n sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),\n sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),\n\n sa.Column('followers', sa.Integer, nullable=False, server_default='0'),\n sa.Column('following', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),\n sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),\n sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('vote_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated\n sa.Column('rank', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('raw_json', sa.Text),\n\n\n sa.UniqueConstraint('name', name='hive_accounts_ux1'),\n sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks\n sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map\n sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup\n sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort\n sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep\n )\n\n sa.Table(\n 'hive_posts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('parent_id', sa.Integer),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.Column('depth', SMALLINT, nullable=False),\n sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n\n sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], 
name='hive_posts_fk1'),\n sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),\n sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),\n sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: author blog/comments\n sa.Index('hive_posts_ix4', 'parent_id', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: fetching children\n sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text(\"is_pinned = '1' AND is_deleted = '0'\")), # API: pinned post status\n sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'\")), # API: community pinned\n )\n\n sa.Table(\n 'hive_post_tags', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('tag', sa.String(32), nullable=False),\n sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core\n sa.Index('hive_post_tags_ix1', 'post_id'), # core\n )\n\n sa.Table(\n 'hive_follows', metadata,\n sa.Column('follower', sa.Integer, nullable=False),\n sa.Column('following', sa.Integer, nullable=False),\n sa.Column('state', SMALLINT, nullable=False, server_default='1'),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core\n sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),\n sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),\n )\n\n sa.Table(\n 'hive_reblogs', metadata,\n sa.Column('account', VARCHAR(16), nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),\n sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core\n sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used\n )\n\n sa.Table(\n 'hive_payments', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('tx_idx', SMALLINT, nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('from_account', sa.Integer, nullable=False),\n sa.Column('to_account', sa.Integer, nullable=False),\n sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),\n sa.Column('token', VARCHAR(5), nullable=False),\n\n sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),\n sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),\n )\n\n sa.Table(\n 'hive_feed_cache', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('account_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core\n sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)\n )\n\n sa.Table(\n 'hive_posts_cache', metadata,\n sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n\n # important/index\n 
sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('depth', SMALLINT, nullable=False, server_default='0'),\n sa.Column('children', SMALLINT, nullable=False, server_default='0'),\n\n # basic/extended-stats\n sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),\n sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),\n\n # basic ui fields\n sa.Column('title', sa.String(255), nullable=False, server_default=''),\n sa.Column('preview', sa.String(1024), nullable=False, server_default=''),\n sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),\n\n # core stats/indexes\n sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),\n\n # ui flags/filters\n sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_grayed', BOOLEAN, nullable=False, server_default='0'),\n\n # important indexes\n sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),\n sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),\n\n # bulk data\n sa.Column('body', TEXT),\n sa.Column('votes', TEXT),\n sa.Column('json', sa.Text),\n sa.Column('raw_json', sa.Text),\n\n # index: misc\n sa.Index('hive_posts_cache_ix3', 'payout_at', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # core: payout sweep\n sa.Index('hive_posts_cache_ix8', 'category', 'payout', 'depth', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: tag stats\n\n # index: ranked posts\n sa.Index('hive_posts_cache_ix2', 'promoted', postgresql_where=sql_text(\"is_paidout = '0' AND promoted > 0\")), # API: promoted\n\n sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending todo: depth=0\n sa.Index('hive_posts_cache_ix7a', 'sc_hot', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot todo: depth=0\n sa.Index('hive_posts_cache_ix6b', 'post_id', 'sc_trend', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending, filtered todo: depth=0\n sa.Index('hive_posts_cache_ix7b', 'post_id', 'sc_hot', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot, filtered todo: depth=0\n\n sa.Index('hive_posts_cache_ix9a', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout todo: rem depth\n sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout, filtered todo: rem depth\n\n sa.Index('hive_posts_cache_ix10', 'post_id', 'payout', postgresql_where=sql_text(\"is_grayed = '1' AND payout > 0\")), # API: muted, by 
filter/date/payout\n\n # index: stats\n sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: pending distribution; author payout\n\n # index: community ranked posts\n sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community trend\n sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community hot\n sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community created\n sa.Index('hive_posts_cache_ix33', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'\")), # API: community payout\n sa.Index('hive_posts_cache_ix34', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'\")), # API: community muted\n )\n\n sa.Table(\n 'hive_state', metadata,\n sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('db_version', sa.Integer, nullable=False),\n sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('dgpo', sa.Text, nullable=False),\n )\n\n metadata = build_metadata_community(metadata)\n\n metadata = build_metadata_blacklist(metadata)\n\n metadata = build_trxid_block_num(metadata)\n\n return metadata",
"def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )",
"def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)",
"def create_statistics(self):\n now = datetime.now()\n min_timestamp = Statistic.objects.all().aggregate(Max('timestamp_end'))[\"timestamp_end__max\"]\n max_timestamp = (now + ((datetime.min - now) % timedelta(minutes=60)) - timedelta(minutes=60)).replace(tzinfo=pytz.UTC)\n\n if min_timestamp is None:\n min_timestamp = datetime(2000, 1, 1, tzinfo=timezone('UTC'))\n\n aggregated_measurements = MeasurementService.get_aggregate_measurements(min_timestamp,max_timestamp)\n StatisticService.create_statistics(aggregated_measurements)",
"def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]",
"def generate_cap_table(logger: Logger,\n dbsession: Session,\n token_address: str,\n order_by: str,\n order_direction: str,\n identity_provider: IdentityProvider,\n include_empty: bool,\n TokenScanStatus: type,\n TokenHolderAccount: type,\n no_name=\"<Unknown>\") -> CapTableInfo:\n\n status = dbsession.query(TokenScanStatus).filter_by(address=token_address).one_or_none() # type: TokenScanStatus\n if not status or status.end_block is None:\n raise NeedsTokenScan(\n \"No token {} balances available in the local database. Please run tokfetch token-scan first.\".format(\n token_address))\n\n q = status.get_accounts(include_empty)\n\n results = []\n total_balance = Decimal(0)\n last_token_transfer_at = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)\n for holder in q:\n\n id_check = identity_provider.get_identity(holder.address)\n if id_check:\n name = id_check.name\n else:\n name = no_name\n\n decimal_balance = holder.get_decimal_balance()\n\n entry = CapTableEntry(name, holder.address, decimal_balance, holder.last_block_updated_at)\n\n if entry.updated_at > last_token_transfer_at:\n last_token_transfer_at = entry.updated_at\n results.append(entry)\n\n if decimal_balance > 0: # Ignore cases where we cannot detect mint transaction\n total_balance += decimal_balance\n\n sort_entries(results, order_by, order_direction)\n\n # Retrofit decimal balances after we know the total sum\n if total_balance > 0:\n for r in results:\n r.percent = r.balance / total_balance\n\n info = CapTableInfo(status, last_token_transfer_at, total_balance, results)\n\n return info",
"async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return",
"def _create_table(self):\n query = f\"\"\"CREATE TABLE IF NOT EXISTS {TABLE}(\n member_Id INT,\n memberName VARCHAR(50),\n amount INT,\n date datetime NOT NULL,\n time datetime NOT NULL,\n status VARCHAR(20) NOT NULL DEFAULT 'Completed'\n );\"\"\"\n\n self.cursor.execute(query)\n self.conn.commit()",
"def describe_account_attributes():\n pass",
"def create_meta_loan_table(self):\n table_exists = self.check_if_table_exists(\"meta_loan_tables\")\n\n if not table_exists:\n self.read_sql_from_file('create_meta_loan_tables.sql')\n return",
"def add_statistics(self, stat_col):\n self.module.add_statistics(stat_col)",
"def create_table(response_json):\n account_table = PrettyTable()\n account_table.field_names = ([\"Account ID\", \"Account Name\"])\n for account in response_json['result']['accounts']:\n account_id = account['accountID']\n account_name = account['username']\n account_table.add_row([account_id, account_name])\n return account_table",
"def init():\n database.create_tables([Tracker])\n database.commit()",
"def create_marker_table(self):\n if self.marker_table is None:\n self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')\n\n engine = self.engine\n\n with engine.begin() as con:\n metadata = sqlalchemy.MetaData()\n if not con.dialect.has_table(con, self.marker_table):\n self.marker_table_bound = sqlalchemy.Table(\n self.marker_table, metadata,\n sqlalchemy.Column(\"ParquetSource\", sqlalchemy.String(128), primary_key=True),\n sqlalchemy.Column(\"TargetTable\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"Environment\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"BackupDate\", sqlalchemy.DateTime),\n sqlalchemy.Column(\"InsertedDate\", sqlalchemy.DateTime, default=datetime.now()))\n metadata.create_all(engine)\n else:\n metadata.reflect(only=[self.marker_table], bind=engine)\n self.marker_table_bound = metadata.tables[self.marker_table]",
"def __init__(self, config_path: str = \"config.yml\", config_dict: dict = None,\n create_all: bool = True):\n\n # Prepare user_details configured in config.yml for user_details table creation\n self.config = Config(config_path, config_dict)\n user_details_list = []\n if \"twitter_user_details\" in self.config.config:\n for detail, sqldatatype in self.config.config[\"twitter_user_details\"].items():\n if sqldatatype is not None:\n user_details_list.append(detail + \" \" + sqldatatype)\n else:\n print(\"\"\"Key \"twitter_user_details\" could not be found in config.yml. Will not create\n a user_details table.\"\"\")\n\n # Table creation for SQLITE database type.\n # Note and TODO: the collector does not support sqlite (yet)\n if self.config.dbtype.lower() == \"sqlite\":\n try:\n self.engine = lite.connect(self.config.dbname + \".db\")\n print(\"Connected to \" + self.config.dbname + \"!\")\n except Error as e:\n raise e\n if create_all:\n try:\n create_friends_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS friends (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n burned TINYINT NOT NULL,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n create_friends_index_sql_1 = \"CREATE INDEX iFSource ON friends(source);\"\n create_friends_index_sql_2 = \"CREATE INDEX iFTimestamp ON friends(timestamp);\"\n create_results_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS result (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n create_results_index_sql_1 = \"CREATE INDEX iRSource ON result(source);\"\n create_results_index_sql_2 = \"CREATE INDEX iRTimestamp ON result(timestamp);\"\n c = self.engine.cursor()\n c.execute(create_friends_table_sql)\n c.execute(create_friends_index_sql_1)\n c.execute(create_friends_index_sql_2)\n c.execute(create_results_table_sql)\n c.execute(create_results_index_sql_1)\n c.execute(create_results_index_sql_2)\n if user_details_list != []:\n create_user_details_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_details\n (\"\"\" + \", \".join(user_details_list) + \"\"\",\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP);\"\"\"\n create_ud_index = \"CREATE INDEX iUTimestamp ON user_details(timestamp)\"\n c.execute(create_user_details_sql)\n c.execute(create_ud_index)\n else:\n # TODO: Make this a minimal user_details table?\n print(\"\"\"No user_details configured in config.yml. 
Will not create a\n user_details table.\"\"\")\n except Error as e:\n print(e)\n\n # Table creation for mysql database type\n elif self.config.dbtype.lower() == \"mysql\":\n try:\n self.engine = create_engine(\n f'mysql+pymysql://{self.config.dbuser}:'\n f'{self.config.dbpwd}@{self.config.dbhost}/{self.config.dbname}'\n )\n print('Connected to database \"' + self.config.dbname + '\" via mySQL!')\n except OperationalError as e:\n raise e\n if create_all:\n try:\n create_friends_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS friends (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n burned TINYINT NOT NULL,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n ON UPDATE CURRENT_TIMESTAMP,\n UNIQUE INDEX fedge (source, target),\n INDEX(timestamp)\n );\"\"\"\n create_results_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS result (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n UNIQUE INDEX redge (source, target),\n INDEX(timestamp)\n );\"\"\"\n self.engine.execute(create_friends_table_sql)\n self.engine.execute(create_results_table_sql)\n if user_details_list != []:\n create_user_details_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_details\n (\"\"\" + \", \".join(user_details_list) + \"\"\", timestamp TIMESTAMP\n DEFAULT CURRENT_TIMESTAMP,\n INDEX(timestamp));\"\"\"\n self.engine.execute(create_user_details_sql)\n else:\n print(\"\"\"No user_details configured in config.yml. Will not create a\n user_details table.\"\"\")\n except OperationalError as e:\n raise e",
"def _populate_new_notifications_imei_table(self, conn):\n with CodeProfiler() as cp:\n tblname = self._notifications_imei_new_tblname\n num_records = self._populate_new_blacklist_or_notifications_imei_table(conn, tblname, is_blacklist=False)\n\n return num_records, cp.duration",
"def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results",
"def get_table(base, engine):\n class w1_temp_table(base):\n __tablename__ = 'w1_temp'\n __table_args__ = {\"useexisting\": True}\n\n id = sa.Column(sa.types.Integer, primary_key=True)\n logger_id = sa.Column(sa.types.Integer)\n value = sa.Column(sa.types.String)\n datetime = sa.Column(sa.types.DateTime)\n return w1_temp_table",
"def populate_stat(self, table):\n myrow = table.row\n # HDF5 doesn't handle unicode strings, so we need to convert to \n # *byte* strings, which we can put in the HDF5 file \n addy = numpy.zeros(len(self.address), \n dtype=(numpy.str, glob.nchar_address))\n for i in range(len(addy)):\n addy[i] = (self.address[i]).encode('utf8')\n\n myrow[\"address\"] = addy\n myrow[\"bike_stands\"] = self.bike_stands\n myrow[\"number\"] = self.number\n myrow[\"position\"] = self.position\n myrow.append()\n table.flush()",
"def accounts():",
"def __create(self):\n cursor = self.conn.cursor()\n sql = 'CREATE TABLE IF NOT EXISTS speedlogs ' + \\\n '(id INTEGER primary key, measure_dt TIMESTAMP, ping REAL, download REAL, upload REAL)'\n cursor.execute(sql)\n self.conn.commit()",
"async def create_sys_tables(self) -> None:\n await self.conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tinymud_migrations (\n table_name TEXT,\n level INTEGER\n )\"\"\")"
] | [
"0.6411355",
"0.5693446",
"0.5648235",
"0.5506348",
"0.5451028",
"0.53575075",
"0.53320557",
"0.52726936",
"0.5267531",
"0.5266184",
"0.5225775",
"0.5171474",
"0.5141938",
"0.51097775",
"0.5103295",
"0.5092772",
"0.50799894",
"0.5075006",
"0.5058689",
"0.5051445",
"0.5033744",
"0.49713793",
"0.49707794",
"0.4965065",
"0.49502057",
"0.49417546",
"0.49387902",
"0.49206743",
"0.49206284",
"0.49006578"
] | 0.76754844 | 0 |
Copied from AccountBroker before the storage_policy_index column was added; used for testing with TestAccountBrokerBeforeSPI. Create container table which is specific to the account DB. | def prespi_create_container_table(self, conn):
conn.executescript("""
CREATE TABLE container (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
put_timestamp TEXT,
delete_timestamp TEXT,
object_count INTEGER,
bytes_used INTEGER,
deleted INTEGER DEFAULT 0
);
CREATE INDEX ix_container_deleted_name ON
container (deleted, name);
CREATE TRIGGER container_insert AFTER INSERT ON container
BEGIN
UPDATE account_stat
SET container_count = container_count + (1 - new.deleted),
object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used,
hash = chexor(hash, new.name,
new.put_timestamp || '-' ||
new.delete_timestamp || '-' ||
new.object_count || '-' || new.bytes_used);
END;
CREATE TRIGGER container_update BEFORE UPDATE ON container
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER container_delete AFTER DELETE ON container
BEGIN
UPDATE account_stat
SET container_count = container_count - (1 - old.deleted),
object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used,
hash = chexor(hash, old.name,
old.put_timestamp || '-' ||
old.delete_timestamp || '-' ||
old.object_count || '-' || old.bytes_used);
END;
""") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)",
"def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")",
"def create_table(self):\n pass",
"def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise",
"def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n 
virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration",
"def imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables (uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()",
"def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()",
"def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")",
"def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)",
"def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )",
"def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)",
"def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])",
"def create_table(user_id: int, jap_event_id: int) -> Table:\n table = Table(emperor=user_id,\n jap_event_id=jap_event_id,\n status=0)\n\n member = User.query.filter(User.id.__eq__(user_id)).first()\n table.members.append(member)\n\n db.session.add(table)\n db.session.commit()\n\n table_id = table.id\n command = CommandService.create_command(1, table_id)\n table.current_command_id = command.id\n\n db.session.add(table, command)\n db.session.commit()\n return table",
"def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()",
"def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)",
"def create_database():\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}/{}'.format(DB_DIRECTORY, DB_NAME))\n cursor = db.cursor()\n cursor.execute('''CREATE TABLE user_ids\n (row_id INTEGER PRIMARY KEY AUTOINCREMENT, uid TEXT, user_table_name TEXT)''')\n db.commit()\n cursor.close()\n db.close()",
"def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')",
"def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration",
"def create_database_tables():\n with APP.app_context():\n DB.create_all()",
"def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()",
"def parseToDb(self):\n self.cursor.execute('''DROP TABLE IF EXISTS policy''')\n self.cursor.execute('''CREATE TABLE policy\n (name text, src text, dst text, services text, action INTEGER)''')",
"def test_create_hyperflex_cluster_storage_policy(self):\n pass",
"def create_table(self, conn, create_table_sql):\n try:\n # create a Cursor object and call its .execute() method to perform SQL queries\n c = conn.cursor()\n # execute SQL queries: create a table named card\n c.execute(create_table_sql)\n except Error as e:\n print(e)",
"def create_table(self, schema, table):\n fields = \", \".join([\" \".join(t) for t in zip(self.schemas[schema][table][0], self.schemas[schema][table][1])])\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE TABLE IF NOT EXISTS {schema}.{table} ( {fields} );'\n return sql",
"def create_dataBase(conn, create_cmd):\n if conn:\n cursor = conn.cursor()\n cursor.execute(create_cmd)\n conn.commit()\n #print '[sql management] Table Created...'",
"def ensure_schema(client, table_name):\n query = ''.join([\n 'CREATE TABLE {cf} ',\n '(\"lockId\" ascii, \"claimId\" timeuuid, PRIMARY KEY(\"lockId\", \"claimId\"));'])\n\n def errback(failure):\n failure.trap(InvalidRequestException)\n\n return client.execute(query.format(cf=table_name),\n {}, ConsistencyLevel.QUORUM).addErrback(errback)",
"def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()",
"async def create_sys_tables(self) -> None:\n await self.conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tinymud_migrations (\n table_name TEXT,\n level INTEGER\n )\"\"\")",
"def create_container(cls, values):\n dbdriver = get_instance()\n return dbdriver.create_container(values)",
"def init_tables(database_url, _metadata, checkfirst=True):\n import dpds.storages.db.tables.operations\n import dpds.storages.db.tables.block\n import dpds.storages.db.tables.meta\n with isolated_nullpool_engine(database_url) as engine:\n _metadata.create_all(bind=engine, checkfirst=checkfirst)"
] | [
"0.7222646",
"0.66707873",
"0.5947068",
"0.5887795",
"0.58477587",
"0.5694725",
"0.56702393",
"0.56475055",
"0.56451815",
"0.56367904",
"0.5633519",
"0.55940306",
"0.55933654",
"0.55901736",
"0.5587355",
"0.55573237",
"0.555128",
"0.5534241",
"0.55316937",
"0.55145425",
"0.5506047",
"0.54992753",
"0.54602504",
"0.5443343",
"0.5436131",
"0.5422965",
"0.5420758",
"0.5395065",
"0.5394638",
"0.5386561"
] | 0.6937344 | 1 |
Copied from AccountBroker before the container_count column was added. Create policy_stat table which is specific to the account DB. Not a part of Pluggable Backends, internal to the baseline code. | def pre_track_containers_create_policy_stat(self, conn):
conn.executescript("""
CREATE TABLE policy_stat (
storage_policy_index INTEGER PRIMARY KEY,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0
);
INSERT OR IGNORE INTO policy_stat (
storage_policy_index, object_count, bytes_used
)
SELECT 0, object_count, bytes_used
FROM account_stat
WHERE container_count > 0;
""") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)",
"def premetadata_create_account_stat_table(self, conn, put_timestamp):\n conn.executescript('''\n CREATE TABLE account_stat (\n account TEXT,\n created_at TEXT,\n put_timestamp TEXT DEFAULT '0',\n delete_timestamp TEXT DEFAULT '0',\n container_count INTEGER,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0,\n hash TEXT default '00000000000000000000000000000000',\n id TEXT,\n status TEXT DEFAULT '',\n status_changed_at TEXT DEFAULT '0'\n );\n\n INSERT INTO account_stat (container_count) VALUES (0);\n ''')\n\n conn.execute('''\n UPDATE account_stat SET account = ?, created_at = ?, id = ?,\n put_timestamp = ?\n ''', (self.account, Timestamp.now().internal, str(uuid4()),\n put_timestamp))",
"def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)",
"def parseToDb(self):\n self.cursor.execute('''DROP TABLE IF EXISTS policy''')\n self.cursor.execute('''CREATE TABLE policy\n (name text, src text, dst text, services text, action INTEGER)''')",
"def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")",
"def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()",
"def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": 
c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)",
"def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')",
"def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]",
"def __get_metrics_adapted(self, policies):\n percent_min = 1 - policies['percent']\n percent_max = 1 + policies['percent']\n metrics = {'cpu_min':percent_min*policies['cpu'], 'cpu_max':percent_max*policies['cpu'],\n 'memory_min':percent_min*policies['ram'], 'memory_max':percent_max*policies['ram'],\n 'disk_min':percent_min*policies['disk'], 'disk_max':percent_max*policies['disk']}\n return metrics",
"def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')",
"def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n 
virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration",
"def test_update_hyperflex_cluster_storage_policy(self):\n pass",
"def Attributes(self) -> PolicyStatementAttribute:",
"def _add_policy(self, policy):\n self.by_name[policy.name.upper()] = policy\n self.by_index[int(policy)] = policy",
"def update_policy(self):\n pass",
"def initialize_policies(self, policy_collection, options):",
"def t_announceDbCount(self, *_):\n try: self.current_count=self.dbh.getRowCount()\n except: self.current_count=0\n \n self.dprint(\"* ratings_count: current count(%s)\" % self.current_count)\n self.pub(\"ratings_count\", self.current_count)",
"def curr_policy_time_calc(self):\n for policy in self.policy_list:\n if policy == self.current_policy:\n policy.current_policy_time += 1",
"def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)",
"def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)",
"def test_counts(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n ).format(tracker.table)\n\n pd.io.sql.execute(query, tracker.db)\n\n logins_hourly = tracker.count(event=\"logged_in\", resolution=\"hour\")\n logins_daily = tracker.count(event=\"logged_in\")\n logins_weekly = tracker.count(event=\"logged_in\", resolution=\"week\")\n logins_monthly = tracker.count(event=\"logged_in\", resolution=\"month\")\n logins_weekly_left_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", start=datetime(2016, 2, 1)\n )\n logins_weekly_right_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", end=datetime(2016, 2, 1)\n )\n logins_daily_full_range = tracker.count(\n event=\"logged_in\", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)\n )\n\n # Hourly\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly[\"count\"].values == [2, 1, 2, 1, 1, 1, 2, 2])\n\n # Daily\n assert len(logins_daily) == 7\n assert np.all(logins_daily[\"count\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n # Weekly\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly[\"count\"].values == [5, 2, 1, 2, 2])\n\n # Others\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2 # weeks start on Monday\n assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided\n assert len(logins_daily_full_range) == 2",
"def test_setup_db_for_use_retention_creation(self):\n\n expected_retention = {\n 'name': 'testRetention',\n 'duration': '1h0m0s',\n 'shardGroupDuration': '1h0m0s',\n 'replicaN': 1,\n 'default': True\n }\n assert expected_retention in self.test_client.get_list_retention_policies(\n )",
"def test_create_hyperflex_cluster_storage_policy(self):\n pass",
"def PolicyStatement(self) -> PolicyStatement:",
"def test_create_cluster_policy(self):\n pass",
"def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration",
"def get_counters(table_id):\n fields = [\"0\"]*BUCKET_NUM\n\n for pos, cntr_list in counter_bucket_dict.items():\n for counter_name in cntr_list:\n full_table_id = COUNTER_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = str(int(fields[pos]) + int(counter_data))\n\n cntr = NStats._make(fields)\n return cntr",
"def test_create_namespaced_policy(self):\n pass",
"def _populate_blocking_conditions_table(self, conn):\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blocking_conditions_new_tblname\n execute_values(cursor,\n sql.SQL(\"\"\"INSERT INTO {0}(cond_name, reason)\n VALUES %s\"\"\").format(sql.Identifier(tblname)).as_string(cursor),\n [(c.label, c.reason) for c in self._blocking_conditions])\n self._add_pk(conn, tblname=tblname, pk_columns=['cond_name'])\n self._analyze_helper(cursor, tblname)\n\n # Need to get table count since execute_values doesn't retain insert count\n num_records = self._get_total_record_count(conn, tblname)\n return num_records, cp.duration"
] | [
"0.6394596",
"0.6312397",
"0.52926415",
"0.5226492",
"0.521135",
"0.5126524",
"0.50752866",
"0.50496316",
"0.49747297",
"0.49588305",
"0.4934026",
"0.48855892",
"0.48768532",
"0.4860666",
"0.48166457",
"0.4806398",
"0.4796792",
"0.4787185",
"0.477994",
"0.47572404",
"0.47571683",
"0.4751286",
"0.47465718",
"0.4733808",
"0.47229534",
"0.46898544",
"0.46874636",
"0.46838748",
"0.46799147",
"0.4676652"
] | 0.8145411 | 0 |
Copied from AccountBroker before the container_count column was added (using old stat trigger script). Create container table which is specific to the account DB. | def pre_track_containers_create_container_table(self, conn):
# revert to old trigger script to support one of the tests
OLD_POLICY_STAT_TRIGGER_SCRIPT = """
CREATE TRIGGER container_insert_ps AFTER INSERT ON container
BEGIN
INSERT OR IGNORE INTO policy_stat
(storage_policy_index, object_count, bytes_used)
VALUES (new.storage_policy_index, 0, 0);
UPDATE policy_stat
SET object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used
WHERE storage_policy_index = new.storage_policy_index;
END;
CREATE TRIGGER container_delete_ps AFTER DELETE ON container
BEGIN
UPDATE policy_stat
SET object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used
WHERE storage_policy_index = old.storage_policy_index;
END;
"""
conn.executescript("""
CREATE TABLE container (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
put_timestamp TEXT,
delete_timestamp TEXT,
object_count INTEGER,
bytes_used INTEGER,
deleted INTEGER DEFAULT 0,
storage_policy_index INTEGER DEFAULT 0
);
CREATE INDEX ix_container_deleted_name ON
container (deleted, name);
CREATE TRIGGER container_insert AFTER INSERT ON container
BEGIN
UPDATE account_stat
SET container_count = container_count + (1 - new.deleted),
object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used,
hash = chexor(hash, new.name,
new.put_timestamp || '-' ||
new.delete_timestamp || '-' ||
new.object_count || '-' || new.bytes_used);
END;
CREATE TRIGGER container_update BEFORE UPDATE ON container
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER container_delete AFTER DELETE ON container
BEGIN
UPDATE account_stat
SET container_count = container_count - (1 - old.deleted),
object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used,
hash = chexor(hash, old.name,
old.put_timestamp || '-' ||
old.delete_timestamp || '-' ||
old.object_count || '-' || old.bytes_used);
END;
""" + OLD_POLICY_STAT_TRIGGER_SCRIPT) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")",
"def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")",
"def create_dataBase(conn, create_cmd):\n if conn:\n cursor = conn.cursor()\n cursor.execute(create_cmd)\n conn.commit()\n #print '[sql management] Table Created...'",
"def create_table(self):\n pass",
"def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)",
"def imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables (uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()",
"def premetadata_create_account_stat_table(self, conn, put_timestamp):\n conn.executescript('''\n CREATE TABLE account_stat (\n account TEXT,\n created_at TEXT,\n put_timestamp TEXT DEFAULT '0',\n delete_timestamp TEXT DEFAULT '0',\n container_count INTEGER,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0,\n hash TEXT default '00000000000000000000000000000000',\n id TEXT,\n status TEXT DEFAULT '',\n status_changed_at TEXT DEFAULT '0'\n );\n\n INSERT INTO account_stat (container_count) VALUES (0);\n ''')\n\n conn.execute('''\n UPDATE account_stat SET account = ?, created_at = ?, id = ?,\n put_timestamp = ?\n ''', (self.account, Timestamp.now().internal, str(uuid4()),\n put_timestamp))",
"def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()",
"def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n 
virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration",
"def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())",
"def create_and_insert_d(connection: DBConnection) -> None:\n print(\"\\n[-] creating table d\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE d AS\n SELECT COUNT(DISTINCT did) AS size FROM tfs\n \"\"\")\n print(\"\\r[+] creating table d\")",
"def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )",
"def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)",
"def create_and_insert_dls(connection: DBConnection) -> None:\n print(\"\\n[-] creating table dls\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE dls AS\n SELECT did, SUM(tf) AS len FROM tfs GROUP BY did\n \"\"\")\n print(\"\\r[+] creating table dls\")",
"def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()",
"def bd_createTable(self, _c):\n\n _c.execute('CREATE TABLE IF NOT EXISTS package (id TEXT, num INT, desc TEXT, status TEXT, source_env TEXT, dest_env TEXT, app TEXT, last_rev TEXT)')",
"def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration",
"def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()",
"def create_database_tables():\n with APP.app_context():\n DB.create_all()",
"def create_tables(self):\n con = self.connect()\n cursor = con.cursor()\n queries = self.tables()\n for query in queries:\n cursor.execute(query)\n cursor.close()\n con.commit()\n con.close()",
"def create_all_tables(self):\n pass",
"def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')",
"def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName 
VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query",
"def create_table(user_id: int, jap_event_id: int) -> Table:\n table = Table(emperor=user_id,\n jap_event_id=jap_event_id,\n status=0)\n\n member = User.query.filter(User.id.__eq__(user_id)).first()\n table.members.append(member)\n\n db.session.add(table)\n db.session.commit()\n\n table_id = table.id\n command = CommandService.create_command(1, table_id)\n table.current_command_id = command.id\n\n db.session.add(table, command)\n db.session.commit()\n return table",
"def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results",
"def create_table(self, conn, create_table_sql):\n try:\n # create a Cursor object and call its .execute() method to perform SQL queries\n c = conn.cursor()\n # execute SQL queries: create a table named card\n c.execute(create_table_sql)\n except Error as e:\n print(e)",
"def test_create_container(self):\n pass",
"def create_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Creating table %s\" % (opts.table_name,))\n print(\"--------------------------------------\")\n print(timestamp())\n create_table_ddl = \"CREATE TABLE %s (\" % (opts.table_name,)\n num_bigint_cols = opts.columns - opts.num_string_columns\n assert(num_bigint_cols > 0)\n for i in range(opts.columns):\n coltype = 'STRING'\n if i < num_bigint_cols: coltype = 'BIGINT'\n if i > 0: create_table_ddl += ', '\n create_table_ddl += \"f%d %s\" % (i, coltype)\n if i == 0: create_table_ddl += ' PRIMARY KEY'\n create_table_ddl += \") PARTITION BY HASH(f0) PARTITIONS %d STORED AS KUDU \" % \\\n (opts.partitions, )\n create_table_ddl += \"TBLPROPERTIES ('kudu.num_tablet_replicas' = '%d')\" % \\\n (opts.replication_factor, )\n\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (create_table_ddl, opts.impalad_address)\n run_command(opts, cmd)",
"def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()",
"def create_layers_table():\n\n table_name = f\"{BQ_LAYERS_TABLE}\""
] | [
"0.74409187",
"0.7198487",
"0.61117744",
"0.59451663",
"0.5910709",
"0.58744675",
"0.5785568",
"0.57599217",
"0.5757854",
"0.5751283",
"0.5730389",
"0.5673858",
"0.56548923",
"0.5640886",
"0.5640633",
"0.56349385",
"0.56337035",
"0.56274104",
"0.5623988",
"0.5606793",
"0.5597963",
"0.5573337",
"0.5571024",
"0.5565667",
"0.55620784",
"0.55570585",
"0.554801",
"0.55396",
"0.5525229",
"0.5510787"
] | 0.75585115 | 0 |
Compute the class count of ROIs for each sample. | def count_classes(self, index=None):
if index is None:
index = np.arange(self.Samples.shape[0])
elif isinstance(index, int):
index = [index]
    count = np.zeros((len(index), len(self._classes)), dtype=int)
for _ind in range(len(index)):
rois = self.__getrois__(index[_ind])
        count[_ind, :] = np.bincount(rois[:,4].astype(int),
minlength=len(self._classes))
return count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_num_classes(self):",
"def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter",
"def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts",
"def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts",
"def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts",
"def count_classes(labels):\n class_dict = {}\n for image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict",
"def get_roi_counts(self):\n counts = [[roi.counts for roi in group.rois] for group in self.roi_groups]\n return counts",
"def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()",
"def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()",
"def num_classes():\n return NUM_CLASSES",
"def summarize_classes(classes):\n u, indices = np.unique(classes,return_inverse=True)\n num_u=len(u)\n print(\"****************************\")\n print(\"Number of samples: {0}\".format(len(classes)))\n print(\"Number of Classes:{0}\".format(num_u))\n for c in u:\n num_c=np.sum(classes==c)\n print(\"Class {0}: {1} Samples\".format(c,num_c))\n print(\"****************************\")",
"def gen_img_counts(img_path, model):\n\n img = transform(Image.open(img_path).convert('RGB'))\n print(type(img))\n output = model(img.unsqueeze(0))\n pred_count = int(output.detach().cpu().sum().numpy())\n return pred_count",
"def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]",
"def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)",
"def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes",
"def num_classes(self):\n raise NotImplementedError",
"def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups",
"def test_class_counts(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n oz.fit(self.multiclass.X.train, self.multiclass.y.train)\n\n unique, counts = np.unique(self.multiclass.y.train, return_counts=True)\n npt.assert_array_equal(oz.classes_, unique)\n npt.assert_array_equal(oz.class_counts_, counts)",
"def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)",
"def __uniqueCounts(rows):\n results = {} #Initialize a dictionary to store the results\n for row in rows: #Iterate over all rows of data\n #The result is the last column\n r = row[-1]\n if r not in results: results[r] = 0 #Start the count for each class at zero\n results[r] += 1 #Increment the count for this row's class by 1\n return results",
"def _classify(self, sample):\n # This function is used so that we can reduce each row with respect \n # to the sample.\n def calc_dist(vector):\n return distance_utils.euclidean(vector, sample)\n\n distances = self.training_set.reduce_rows(calc_dist)\n \n votes = self._tally_votes(self.training_set.get_labels(), distances)\n \n return collection_utils.get_key_with_highest_value(votes)",
"def sample_count(self):",
"def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])",
"def num_of_classes(self):\n return len(self.classes_())",
"def num_of_classes(self):\n return len(self.classes_())",
"def get_num_cat(sample_by_cat, samples_in_otus):\r\n num_cat = defaultdict(int)\r\n for cat, samples in sample_by_cat.items():\r\n num_samples = len(set(samples_in_otus) & set(samples))\r\n num_cat[cat[0]] += (num_samples * (num_samples - 1)) / 2\r\n return num_cat",
"def num_classes(self):\n\t\treturn 10",
"def count_target_class_data(data, target_class):\n count = 0\n for row in data:\n if row[0] == target_class:\n count += 1\n\n return count",
"def get_class_count(Y_category):\n # Assertions\n assert isinstance(Y_category, np.ndarray), \\\n 'Input must be a numpy ndarray.'\n cls, counts = np.unique(Y_category, return_counts = True)\n cls_counts = dict(zip(cls, counts))\n\n return cls_counts",
"def count(self, cls=None):\n return len(self.all(cls))"
] | [
"0.6769185",
"0.6670084",
"0.6666405",
"0.6666405",
"0.6666405",
"0.6578789",
"0.6500897",
"0.64842236",
"0.6412822",
"0.63247687",
"0.6280898",
"0.6262814",
"0.62501544",
"0.6188363",
"0.6161805",
"0.6135146",
"0.6125166",
"0.6116448",
"0.60772496",
"0.6040804",
"0.60212356",
"0.6016649",
"0.60124046",
"0.600877",
"0.600877",
"0.6008631",
"0.59963614",
"0.59748554",
"0.59655774",
"0.5942847"
] | 0.73258656 | 0 |
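A minimal, self-contained sketch of the bincount pattern that the count_classes record above relies on. The ROI arrays, the column layout (class id in column 4), and the class count below are invented for illustration; they are not taken from the record's dataset class.

```python
import numpy as np

# Hypothetical per-sample ROI arrays; each row is (x1, y1, x2, y2, class_id),
# mirroring the column layout assumed by the count_classes record above.
rois_per_sample = [
    np.array([[0, 0, 10, 10, 1],
              [5, 5, 20, 20, 2],
              [1, 1, 4, 4, 1]]),
    np.array([[2, 2, 8, 8, 0]]),
]
num_classes = 3  # assumed size of the label set

counts = np.zeros((len(rois_per_sample), num_classes), dtype=int)
for i, rois in enumerate(rois_per_sample):
    # bincount over the class-id column, padded to a fixed width with minlength
    counts[i] = np.bincount(rois[:, 4].astype(int), minlength=num_classes)

print(counts)
# [[0 2 1]
#  [1 0 0]]
```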
Find the maximum number of ROIs per batch sample in the dataset. | def get_max_rois(self):
maxsize = 0
for index in self.SampleID:
        rois = self.__getrois__(index)
maxsize = max(maxsize, rois.shape[0])
return maxsize | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))",
"def max_num_batches(self):\n return self._max_num_batches",
"def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed",
"def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))",
"def get_max_rows_per_partition() -> int:\n pass",
"def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))",
"def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes",
"def __len__(self):\n return int(np.floor(len(self.ids) / self.batch_size))",
"def batch_size(self):\n if self._batch_size is not None:\n return self._batch_size # custom batch size defined\n if self.task == 'objdet':\n return 8\n annos_per_img = self._annos_per_img[self.dataset]\n if self.task in {'predcls', 'sgcls'}:\n annos_per_img = annos_per_img['pairs']\n elif self.task == 'objcls':\n annos_per_img = annos_per_img['objects']\n elif self.task == 'preddet' and self.filter_multiple_preds:\n annos_per_img = annos_per_img['predicates_filtered']\n elif self.task == 'preddet' and self.filter_duplicate_rels:\n annos_per_img = annos_per_img['duplicates_filtered']\n elif self.task in {'preddet', 'sggen'}:\n annos_per_img = annos_per_img['relations']\n batch_size = ceil(self._annotations_per_batch / annos_per_img)\n return max(batch_size, 2)",
"def num_of_cancerous_pixels(batch, max_num=10):\n stats = dict()\n n_print = min(max_num, len(batch))\n for i in range(n_print):\n stats.update({'Scan ' + str(i): int(np.sum(batch.get(i, 'masks')))})\n\n stats = {'Number of cancerous pixels: ': stats}\n stats_df = pd.DataFrame.from_dict(stats, orient='index').loc[:, ['Scan '+ str(i) for i in range(n_print)]]\n return stats_df",
"def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)",
"def batch_size(self) -> int:\n ...",
"def get_num_batches(self,batch_size):\r\n \r\n return len(self) // batch_size",
"def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))",
"def maximum_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"maximum_count\")",
"def load_max(self):\n return max(self.load_samples)",
"def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len",
"def getEpochCount(rawStimData, epochColumn=3):\n # get the max epoch count from the rawStimData\n # 4th column is the epoch number\n # add plus 1 since the min epoch no is zero\n \n # BG edit: Changed the previous epoch extraction, which uses the maximum \n # number + 1 as the epoch number, to a one finding the unique values and \n # taking the length of it\n epochCount = np.shape(np.unique(rawStimData[:, epochColumn]))[0]\n print(\"Number of epochs = \" + str(epochCount))\n\n return epochCount",
"def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)",
"def _min_sampled_from_batch(self):\n return min([col._last_batch_size for col in self._profile], default=0)",
"def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i",
"def find_max_nr_doc(data):\n queries = list(set(data[:, 1].astype(int)))\n max_nr = 0\n for query in queries:\n n_max = data[data[:,1] == query].shape[0]\n if n_max > max_nr:\n max_nr = n_max\n return max_nr",
"def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()",
"def get_max_readings( self ):\n return 2500",
"def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)",
"def ram_max(self):\n return max(self.ram_samples)",
"def max(self):\r\n\t\treturn max(self.sample)",
"def recommended_max_num_datapoints(self) -> int:\n # very large number, essentially no limit by default\n return 1e9",
"def _get_max_answers(self):\n return max([len(x) for x in self.labels])",
"def _max_col_samples_used(self):\n samples_used = 0\n for col in self._profile:\n samples_used = max(samples_used, col.sample_size)\n return samples_used"
] | [
"0.6792534",
"0.659393",
"0.65083873",
"0.64983654",
"0.6442182",
"0.6357087",
"0.6351913",
"0.63443124",
"0.6337801",
"0.630785",
"0.62681437",
"0.6250322",
"0.6208346",
"0.6205874",
"0.616179",
"0.61612123",
"0.6137554",
"0.6132003",
"0.612465",
"0.6107981",
"0.61064506",
"0.61027664",
"0.60993475",
"0.6096231",
"0.6074781",
"0.60655445",
"0.60606486",
"0.6056908",
"0.604342",
"0.6014732"
] | 0.78903097 | 0 |
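The get_max_rois record above reduces to a maximum over per-sample ROI counts, typically used to size a zero-padded batch. A tiny sketch with made-up ROI arrays (the shapes are assumptions, not values from the record):

```python
import numpy as np

# Hypothetical per-sample ROI arrays of shape (num_rois, 5), as in the record above.
rois_per_sample = [np.zeros((3, 5)), np.zeros((7, 5)), np.zeros((1, 5))]

# Largest ROI count in any sample, e.g. to size a zero-padded batch tensor.
max_rois = max(r.shape[0] for r in rois_per_sample)
padded = np.zeros((len(rois_per_sample), max_rois, 5))
for i, rois in enumerate(rois_per_sample):
    padded[i, :rois.shape[0]] = rois

print(max_rois, padded.shape)  # 7 (3, 7, 5)
```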
Makes a dictionary of the stations and their number of connections. | def make_station_dict(self):
self.station_dict = {}
    # iterates over stations and puts the number of connections in the dict
for station in self.stations:
length = len(self.stations[station].connections)
self.station_dict[station] = length
return self.station_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stations_dict(self):\n return self.__stations_dict",
"def get_online_count():\n return dict(online_user=get_online_users())",
"def getConnections():\n\n c = psutil.net_connections()\n connects = {}\n\n count = 0\n for connection in c:\n conn = {}\n status = connection.status\n if status == 'ESTABLISHED' or connection.status == 'CLOSE_WAIT':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n conn['remote'] = connection.raddr[0] + ':' + str(connection.raddr[1])\n connects[count] = conn\n count += 1\n elif status == 'LISTEN':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n connects[count] = conn\n count += 1\n else:\n pass\n\n return connects",
"def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }",
"def to_dict(self) -> dict:\n return {'Stations': [station.as_json_dict() for station in self.stations]}",
"def connected_component_statistics(self, printStats=False):\n lengths = self.connected_component_lengths()\n lengthDict = dict(collections.Counter(lengths))\n\n if printStats:\n orderedLengthDict = collections.OrderedDict(sorted(lengthDict.items()))\n numberOfGroups = nx.number_connected_components(self.return_undirected())\n for k, v in orderedLengthDict.iteritems():\n percent = round((100.00*v / numberOfGroups), 2)\n print str(k) + ' nodes: ' + str(v) + ' (' + str(percent) + '%) groups'\n print '-----------------------------------------'\n print 'TOTAL: ' + str(super(SynonymNetwork, self).number_of_nodes()) + ' nodes in network, ' + str(numberOfGroups) + ' distinct groups'\n else:\n return lengthDict",
"def connected_network_devices(self):\n connected = {'ip': self.ip, 'port': self.port}\n return connected",
"def num_stations(self) -> int:\n return self._num_stations",
"def to_dict(self):\n\n out = super().to_dict()\n out[\"connections\"] = self.connections\n return out",
"def to_dict(self):\n result = {'Id': self.id, 'Na': self.name, \\\n 'Sc': self.schedule.to_dict(), 'Lc': self.location.to_dict()}\n if len(self.connections)>0:\n result['Co'] = self.connections_to_string()\n return result",
"def station_list() -> List[Dict]:\n return STATIONS",
"def create_dict(info):\n \"\"\"\n dict = {ip: {counter:*}, {weekdays: []}, {hours: []}}\n \"\"\"\n dict_info = dict()\n for i in info:\n ip = i[0]\n hours = i[1]\n weekdays = i[2]\n if ip not in dict_info:\n dict_info[ip] = {}\n dict_info[ip]['counter'] = 0\n dict_info[ip]['hours'] = []\n dict_info[ip]['weekdays'] = []\n dict_info[ip]['counter'] += 1\n dict_info[ip]['hours'].append(hours)\n dict_info[ip]['weekdays'].append(weekdays)\n return dict_info",
"def stats(series):\n\td={}\n\tfor index in series[\"Country Code\"].unique():\n\t\td[index]={\n\t\t\"total servers\" : len(series.loc[series[\"Country Code\"]==index]),\n\t\t\"lat\" : series.loc[series[\"Country Code\"]==index][\"LAT\"].iat[0],\n\t\t\"long\" : series.loc[series[\"Country Code\"]==index][\"LONG\"].iat[0]\n\t\t}\n\treturn d",
"def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}",
"def load(self):\n total = sum(self.connections.values())\n return total",
"def network_to_dict(self):\n return reduce(lambda x,y: x.update(y) or x, \n [self.downstream(root) for root in self.roots])",
"def getUsersBySSID():\n\tstats = {}\n\tms = MobileStation.objects.filter(ssid__isnull=False)\n\tfor ssid in set(MobileStation.objects.values_list('ssid', flat=True)):\n\t\tstats[ssid] = MobileStation.objects.areAssociated().filter(ssid=ssid).count()\n\treturn stats",
"def get_connections_out(self) -> dict:\n return self.__ni_out",
"def get_connections_in(self) -> dict:\n return self.__ni_in",
"def totalConnections(analyzer):\n return model.totalConnections(analyzer)",
"def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq",
"def device_values(mac_address,base_stations=[],start=datetime.datetime(2014,1,1,0),end=datetime.datetime(2015,1,1),resolution=1,verbose=False,start_date=None,end_date=None):\n\tif not start_date:\n\t\tstart_date = datetime.datetime(2014,1,1,0,0)\n\tif not end_date:\n\t\tend_date = datetime.datetime(2015,1,1,0,0)\n\t\n\tstart_unix = int(start_date.strftime('%s'))\n\tend_unix = int(end_date.strftime('%s'))\n\t\n\tdjango_devices = FoundDevices.objects.filter(time__range=(start_unix, end_unix),mac_address=mac_address).exclude(mac_address__in=base_stations).values()\n\t\n\tfound_devices = {}\n\tfor entry in django_devices:\n\t\tfound_devices[entry['found_id']] = {'mac_address':entry['mac_address'].encode(\"utf8\"),'station':str(entry['stations_id']),'time':entry['time'].encode(\"utf8\")}\n\t\n\t\n\n\n\tcount = {}\n\n\tstations = set()\n\n\tfor key in found_devices.keys():\n\t\tfound_time = time.localtime(float(found_devices[key]['time']))\n\t\ttime_found = datetime.datetime(found_time.tm_year,found_time.tm_mon,found_time.tm_mday,int(math.floor((found_time.tm_hour/resolution)*resolution)))\n\t\tstation = found_devices[key]['station']\n\t\tstations.add(station)\n\n\t\tif start <= time_found <= end and found_devices[key]['mac_address'] == mac_address:\n\t\t\tif time_found not in count:\n\t\t\t\tcount[time_found] = {'stations':{}}\n\t\t\t\tif station not in count[time_found]['stations'].keys():\n\t\t\t\t\tcount[time_found]['stations'][station] = 1\n\t\t\t\telse:\n\t\t\t\t\tcount[time_found]['stations'][station] += 1\n\n\t\t\telse:\n\t\t\t\tif station not in count[time_found]['stations'].keys():\n\t\t\t\t\tcount[time_found]['stations'][station] = 1\n\t\t\t\telse:\n\t\t\t\t\tcount[time_found]['stations'][station] += 1\n\n\tstart = min(count.keys())\n\tcurrent = min(count.keys())\n\tend = max(count.keys())\n\tstep = datetime.timedelta(hours=resolution)\n\tstations = list(stations)\n\twhile current < end:\n\t\tif current not in count.keys():\n\t\t\tcount[current] = {'stations':{}}\n\n\t\t\n\n\t\t\n\n\t\tcurrent += step\n\ttimes = count.keys()\n\ttimes.sort()\n\tfor current in times:\n\t\tfor station in stations:\n\t\t\t\tif station not in count[current]['stations'].keys():\n\t\t\t\t\tcount[current]['stations'][station] = 0\n\t\t\n\t\tprint current,' ',count[current]['stations']\n\n\treturn count",
"def __len__(self):\n return len(self.stations)",
"def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict",
"def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict",
"def getNbStations(self) :\n return len(self._stations)",
"def current_queues(petrol_stations):\n current_queues = {}\n for number_of_station in petrol_stations:\n info = {}\n info['cars in the queue'] = 0\n info['max of queue'] = petrol_stations[number_of_station]['queue']\n current_queues[number_of_station] = info\n return current_queues",
"def reload_infos(self):\n self.networks = {}\n networks = self.client.waveform.getNetworkIds()\n # Get stations.\n for key in networks:\n if not key:\n continue\n self.networks[key] = {}\n stations = self.client.waveform.getStationIds(network_id=key)\n for station in stations:\n if not station:\n continue\n self.networks[key][station] = {}\n # Get locations.\n locations = self.client.waveform.getLocationIds(network_id=key,\n station_id=station)\n for location in locations:\n channels = self.client.waveform.getChannelIds(\\\n network_id=key , station_id=station,\n location_id=location)\n self.networks[key][station][location] = [channels]\n # Add current date to Dictionary.\n self.networks['Date'] = UTCDateTime()\n # Also add the server to it.\n self.networks['Server'] = self.client.base_url\n # Open file.\n file = open(self.pickle_file, 'wb')\n pickle.dump(self.networks, file, protocol = 2)\n file.close()",
"def get_socket_dictionary(self) -> dict:\n socket_dictionary = {\n \"action\": self.action,\n \"car_id\": self.car_id,\n \"username\": self.username,\n \"password\": self.password,\n \"usertoken\": self.usertoken,\n \"info_date_time\": self.info_date_time,\n \"current_location\": self.current_location,\n \"engineer_bluetooth\": self.engineer_bluetooth,\n \"engineer_code\": self.engineer_code\n }\n return socket_dictionary",
"def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq"
] | [
"0.66026574",
"0.6212001",
"0.61680955",
"0.61130697",
"0.60983795",
"0.60969037",
"0.6081789",
"0.6016051",
"0.59923977",
"0.58641666",
"0.5816404",
"0.58135796",
"0.5811761",
"0.5791444",
"0.5785143",
"0.577446",
"0.5770902",
"0.57675856",
"0.5764687",
"0.57554936",
"0.5753903",
"0.574948",
"0.5734664",
"0.5732042",
"0.5731708",
"0.5730051",
"0.57246375",
"0.57243097",
"0.57074004",
"0.5702683"
] | 0.8745632 | 0 |
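A standalone sketch of the connection-count dictionary built in the make_station_dict record above. The record's Station class is not shown, so a hypothetical nested dict stands in for the station objects here.

```python
# Hypothetical stand-in for the record's station objects: each entry maps a
# station name to its connections (neighbour name -> travel time).
stations = {
    "Amsterdam": {"connections": {"Utrecht": 25, "Haarlem": 15}},
    "Utrecht": {"connections": {"Amsterdam": 25}},
    "Haarlem": {"connections": {"Amsterdam": 15}},
}

# Station name -> number of connections, as in make_station_dict above.
station_dict = {name: len(info["connections"]) for name, info in stations.items()}
print(station_dict)  # {'Amsterdam': 2, 'Utrecht': 1, 'Haarlem': 1}
```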
Sorts the station dict based on the number of connections (value). | def create_station_list(self):
sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)
return sorted_station_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)",
"def _get_sorted_by_n_connections(m):\n small = nx.Graph()\n for k, v in m.items():\n small.add_edge(k[0], k[1])\n return sorted(small.adj, key=lambda x: len(small[x])), small",
"def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))",
"def make_station_dict(self):\n self.station_dict = {}\n\n # interates over stations and puts the amount of connections in the dict\n for station in self.stations:\n length = len(self.stations[station].connections)\n self.station_dict[station] = length\n \n return self.station_dict",
"def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations",
"def sortDistance(netlist):\n netlist_dictionary = {}\n for i in range(len(netlist)):\n start = chips[netlist[i][0]]\n end = chips[netlist[i][1]]\n\n delta_x = abs(start[0]-end[0])\n delta_y = abs(start[1]-end[1])\n distance = delta_x + delta_y\n\n netlist_dictionary[(netlist[i][0], netlist[i][1])] = distance\n\n sorted_dictionary = sorted(netlist_dictionary.items(), key=operator.itemgetter(1))\n sorted_netlist = []\n for j in range(len(sorted_dictionary)):\n sorted_netlist.append(sorted_dictionary[j][0])\n\n return sorted_netlist",
"def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)",
"def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True",
"def resort(self):\n self.items.sort(key=lambda node: node.path_weight, reverse=True)",
"def sort_values(self):\r\n for loopindex in range(0, self.population_size):\r\n index = self.cost_populations.index(min(self.cost_populations))\r\n \r\n if loopindex < int(self.population_size / 2):\r\n self.best_districts.append(self.district_population[index])\r\n self.best_costs.append(self.cost_populations[index])\r\n else:\r\n self.worst_districts.append(self.district_population[index])\r\n \r\n del self.cost_populations[index]\r\n del self.district_population[index]",
"def sortdb():\n return sorted(donor_db.items(), key=sumdbkey, reverse=True)",
"def compress(self):\n sorted_table = sorted(self.forwarding_table, reverse=True, key=lambda x: x[\"CIDR\"])\n i = 0\n while i + 1 < len(sorted_table):\n at_i = sorted_table[i]\n at_i_plus = sorted_table[i+1]\n if self.adj_numerically(at_i, at_i_plus) and self.same_attributes(at_i, at_i_plus):\n copy_of_route = self.aggregate_routes(at_i, at_i_plus)\n sorted_table[i] = copy_of_route\n sorted_table.pop(i+1)\n i += 1\n return sorted_table",
"def sort_vnet(model, option='traffic'): \n failed_dict = model.failed_dict\n vnet_info = model.get_vnet_info()\n vnets = model.vnets\n vnet_traffic = {}\n for vn in vnets:\n failed_id = failed_dict[vn.vnet_id]\n failed_node_traffic = vnet_info[vn.vnet_id]['traffic'][failed_id][1]\n vnet_traffic[vn] = round(failed_node_traffic, 5)\n sorted_vn = sorted(vnet_traffic.iteritems(), key=operator.itemgetter(1)) \n sorted_vn.reverse()\n return sorted_vn",
"def receive_routing_table(self, router):\n for network, distance in router.networks.items():\n # Only if the network doesn't exist in current routing table or\n # current distance is more than new info then add the new info\n if (network not in self.networks or\n self.networks[network] > distance + 1):\n self.networks[network] = distance + 1",
"def sort_table(table, sats_table):",
"def sorted_streams(streams):\n return sorted(streams, key=lambda s: len(streams[s][\"topic_data\"]), reverse=True)",
"def sort_by_default(self):\n self.data.sort()",
"def sorted_categories(self):\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l",
"def preference_ordering(self) -> None:\n for i in self._destinations:\n self._destinations[i] = sorted(self._destinations[i])",
"def sort_by_area():\n # Create a list where index --> neuron and value --> area\n matched = [areas_from_channels[int(c)] for c in channels]\n # Find the indices (aka neurons) where they have a score < 2\n bad_indices = [i for i, score in enumerate(quality) if score[0] < 2]\n # Create a dictionary to sort neurons according to areas\n d = {}\n for index, area in enumerate(matched): # Iterate index and value together\n # Discard bad recordings\n if index not in bad_indices:\n # If the area is already a key then append this neuron index\n if area in d.keys():\n d[area].append(index)\n # Else create a new key for a single element list\n else:\n d[area] = [index]\n return d",
"def make_tree(self):\n\n # list [station_name]\n visited = []\n\n # creates empty station object for each station and adds coordinates\n for station in self.stations:\n new_station = Station(station)\n coordinates = self.stations[station].get_coordinates()\n new_station.add_coordinates(coordinates[0], coordinates[1])\n\n # saves station in prims_tree dictionary\n self.prims_tree[station] = new_station\n\n # choose random beginning station\n random_station = random.choice(list(self.stations.values()))\n\n # sort station connections and retrieve shortest\n station_connections = random_station.get_connections()\n station_connections = sorted(station_connections.items(), key=operator.itemgetter(1))\n new_connection = station_connections.pop(0)\n new_station = new_connection[0]\n new_time = new_connection[1]\n\n # retrieve empty stations from prims_tree dictionary\n first_station = self.prims_tree[random_station.name]\n new_station = self.prims_tree[new_station.name]\n\n # add shortest connection to stations\n first_station.add_connection(new_station, new_time)\n new_station.add_connection(first_station, new_time)\n\n # add stations to visited\n visited.append(first_station.name)\n visited.append(new_station.name)\n\n # runs until all stations are visited\n while len(visited) is not len(self.prims_tree):\n # starts as arbitrarily high number\n min_connection_time = 9999\n\n # get connections of visited stations\n for station in visited:\n connections = self.stations[station].get_connections()\n\n # get time of connections\n for connection in connections:\n connection_time = connections[connection]\n\n # save smallest connection if time is smallest and station is not visited\n if connection.name not in visited and connection_time < min_connection_time:\n smallest_connection = self.prims_tree[connection.name]\n smallest_connection_station = self.prims_tree[station]\n min_connection_time = connection_time\n else:\n continue\n\n # add smallest connection to station in prims_tree dictionary\n smallest_connection_station.add_connection(smallest_connection, min_connection_time)\n smallest_connection.add_connection(smallest_connection_station, min_connection_time)\n\n # add new connection to visited list\n visited.append(smallest_connection.name)\n\n return self.prims_tree",
"def dag_topology_sort(self):\n mlist = []\n mod_wrapper = self.mod_wrapper.copy()\n while mod_wrapper:\n temp_list = []\n for mod, wrapper in mod_wrapper.items():\n if wrapper.is_root_mod():\n temp_list.append(mod)\n wrapper.remove_self_from_bindings()\n\n for mod in temp_list:\n mod_wrapper.pop(mod, None)\n\n mlist += temp_list\n\n mod_wrapper_sort = {}\n for mod, i in zip(mlist, range(len(mlist))):\n self.mod_wrapper[mod].set_idx_name(i)\n mod_wrapper_sort[mod] = self.mod_wrapper[mod]\n\n self.mod_wrapper = mod_wrapper_sort",
"def _sort_by_flows(stats_values):\n return sorted(stats_values, key=lambda entry: entry.flows, reverse=True)",
"def _sort_by_satellite(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n time = []\n satellite = []\n system = []\n for sat in sorted(self.dset.unique(\"satellite\"), reverse=True):\n idx = self.dset.filter(satellite=sat)\n time.extend(self.dset.time.gps.datetime[idx])\n satellite.extend(self.dset.satellite[idx])\n system.extend(self.dset.system[idx])\n \n return np.array([time]), np.array([satellite]), np.array([system])",
"def alter_connection_order(connections, order, chip):\n # Sort the connections by distance between gates from shortest to longest\n if order >= 2:\n length_order = {}\n\n for connect in connections:\n reorder = connect.strip(\"\\n\").split(\",\")\n source_coords = [chip.gates[reorder[0]][\"x\"], chip.gates[reorder[0]][\"y\"], 0]\n target_coords = [chip.gates[reorder[1]][\"x\"], chip.gates[reorder[1]][\"y\"], 0]\n gate_dif = abs(source_coords[0] - target_coords[0]) + abs(source_coords[1] - target_coords[1])\n\n # Check if there are gates with the same distance\n while gate_dif in length_order:\n gate_dif += .1\n\n length_order[gate_dif] = connect\n\n sort = sorted(length_order)\n connections = [length_order[key] for key in sort]\n\n # Reverse the connections order\n if order == 1 or order == 3:\n connections = connections[::-1]\n\n return connections",
"def hubs(self):\r\n cities = col.defaultdict(int)\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n cities[code] += 1\r\n heap = [(-value, key) for key, value in cities.items()]\r\n largest = heapq.nsmallest(5, heap)\r\n largest = [(key, -value) for value, key in largest]\r\n return largest",
"def sortby(self):\n ...",
"def connected_component_statistics(self, printStats=False):\n lengths = self.connected_component_lengths()\n lengthDict = dict(collections.Counter(lengths))\n\n if printStats:\n orderedLengthDict = collections.OrderedDict(sorted(lengthDict.items()))\n numberOfGroups = nx.number_connected_components(self.return_undirected())\n for k, v in orderedLengthDict.iteritems():\n percent = round((100.00*v / numberOfGroups), 2)\n print str(k) + ' nodes: ' + str(v) + ' (' + str(percent) + '%) groups'\n print '-----------------------------------------'\n print 'TOTAL: ' + str(super(SynonymNetwork, self).number_of_nodes()) + ' nodes in network, ' + str(numberOfGroups) + ' distinct groups'\n else:\n return lengthDict",
"def sort_mapping_by_size(cluster_mapping):\r\n\r\n return sorted(cluster_mapping.keys(),\r\n cmp=lambda a, b: cmp(len(a), len(b)),\r\n key=lambda k: cluster_mapping[k], reverse=True)",
"def sorted_nodes(self):\r\n def is_source(node, connections):\r\n for connection in connections:\r\n if node == connection[1]:\r\n return False\r\n return True\r\n\r\n def source_connections(node, connections):\r\n conns = set()\r\n for connection in connections:\r\n if node == connection[0]:\r\n conns.add(connection)\r\n return conns\r\n\r\n nodes = set(self.nodes.values())\r\n connections = self.connections.copy()\r\n sorted_nodes = []\r\n\r\n # Find source nodes:\r\n source_nodes = set([n for n in nodes if is_source(n, connections)])\r\n\r\n # while S is non-empty do\r\n while source_nodes:\r\n # remove a node n from S\r\n node = source_nodes.pop()\r\n # insert n into L\r\n sorted_nodes.append(node)\r\n\r\n # for each node m with an edge e from n to m do\r\n s_connections = source_connections(node, connections)\r\n for connection in s_connections:\r\n # remove edge e from the graph\r\n m = connection[1]\r\n connections.remove(connection)\r\n # if m has no other incoming edges then\r\n # insert m into S\r\n if is_source(m, connections):\r\n source_nodes.add(m)\r\n\r\n # if graph has edges then\r\n # output error message (graph has at least one cycle)\r\n # else\r\n # output message (proposed topologically sorted order: L)\r\n\r\n if connections:\r\n raise Exception(\"Steram has at least one cycle (%d connections left of %d)\" % (len(connections), len(self.connections)))\r\n\r\n return sorted_nodes"
] | [
"0.6438473",
"0.6063636",
"0.5954494",
"0.5753256",
"0.572934",
"0.5722744",
"0.55783653",
"0.54763424",
"0.5446469",
"0.54192704",
"0.5373764",
"0.531757",
"0.5296655",
"0.5287621",
"0.5285237",
"0.5215991",
"0.52076167",
"0.52006",
"0.5183816",
"0.51670825",
"0.51528376",
"0.5143027",
"0.5131351",
"0.51195717",
"0.51148933",
"0.5101579",
"0.51013666",
"0.5089672",
"0.5086954",
"0.5074658"
] | 0.6300883 | 1 |
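Continuing the hypothetical station_dict from the previous sketch: the create_station_list record above is a one-line sort by dictionary value, where sorted(...) with key=station_dict.get orders station names from fewest to most connections.

```python
station_dict = {"Amsterdam": 2, "Utrecht": 1, "Haarlem": 1}

# Names ordered by ascending connection count; ties keep insertion order
# because Python's sort is stable.
sorted_station_list = sorted(station_dict, key=station_dict.get)
print(sorted_station_list)  # ['Utrecht', 'Haarlem', 'Amsterdam']
```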
Tries all possible configurations starting at the first station and only adds the configuration with the best score. | def visit_all_possibilities(self, first_station, track, grid):
# loops over connections of station
for connection in first_station.connections:
        # keeps adding until the max length of a track is reached
if track.add_station(grid, self.stations[connection].name):
# calculates the quality of adding the station and remembers it if it is the best score yet
if grid.get_quality() > self.best_score:
self.best_score = grid.get_quality()
self.grid = copy.deepcopy(grid)
print(f"new best score: {self.best_score}:\n{self.grid}\n\n")
            # repeat until there are no more configurations left
self.visit_all_possibilities(self.stations[connection], track, grid)
track.remove_last_station() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()",
"def get_bests(self):\n set_names = [\"training\", \"hp_selection\", \"validation\"]\n run_tec_conf_set = recursivedict()\n validation = self._campaign_configuration['General']['validation']\n hp_selection = self._campaign_configuration['General']['hp_selection']\n if (validation, hp_selection) in {(\"All\", \"All\"), (\"Extrapolation\", \"All\"), (\"All\", \"HoldOut\"), (\"HoldOut\", \"All\"), (\"HoldOut\", \"HoldOut\"), (\"Extrapolation\", \"HoldOut\")}:\n # For each run, for each technique the best configuration\n run_tec_best_conf = recursivedict()\n\n # Hyperparameter search\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n technique = conf.technique\n run_tec_conf_set[run][technique][str(conf.get_signature()[4:])] = conf.mapes\n # First experiment for this technique or better than the current best\n if technique not in run_tec_best_conf[run] or conf.mapes[\"hp_selection\"] < run_tec_best_conf[run][technique].mapes[\"hp_selection\"]:\n run_tec_best_conf[run][technique] = conf\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"-->Printing results for run %s\", str(run))\n overall_run_best = None\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp.get_signature()[4:], temp.mapes[\"training\"], temp.mapes[\"hp_selection\"], temp.mapes[\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp.mapes[\"hp_selection\"] < overall_run_best.mapes[\"hp_selection\"]:\n overall_run_best = temp\n best_model_description = overall_run_best.print_model()\n self._logger.info(\"<--Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best.get_signature()[3:], \"(\" + best_model_description + \")\" if best_model_description else \"\", overall_run_best.mapes[\"training\"], overall_run_best.mapes[\"hp_selection\"], overall_run_best.mapes[\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"All\"), (\"KFold\", \"HoldOut\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each fold, for each technique, the best configuration\n run_fold_tec_best_conf = recursivedict()\n\n # Hyperparameter search inside each fold\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[1].replace(\"f\", \"\"))\n technique = conf.technique\n if \"hp_selection\" not in run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] + conf.mapes[set_name] / folds\n # First experiment for this fold+technique or better than the current best\n if technique not in run_fold_tec_best_conf[run][fold] or conf.mapes[\"hp_selection\"] < run_fold_tec_best_conf[run][fold][technique].mapes[\"hp_selection\"]:\n run_fold_tec_best_conf[run][fold][technique] = conf\n\n # Aggregate different folds (only the value of the mapes)\n run_tec_set = recursivedict()\n for 
run in run_fold_tec_best_conf:\n for fold in run_fold_tec_best_conf[run]:\n for tec in run_fold_tec_best_conf[run][fold]:\n if \"hp_selection\" not in run_tec_set[run][technique]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_fold_tec_best_conf[run][fold][tec].mapes[set_name]\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", str(run))\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n # Overall best will contain as first argument the technique with the best (across runs) average (across folds) mape on validation; now we consider on all the runs and on all the folds the configuraiton of this technique with best validation mape\n\n elif (validation, hp_selection) in {(\"All\", \"KFold\"), (\"HoldOut\", \"KFold\"), (\"Extrapolation\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each technique, for each configuration, the aggregated mape\n run_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + conf.mapes[set_name] / folds\n\n # Select the best configuration for each technique across different folders\n run_tec_best_conf = recursivedict()\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n if tec not in run_tec_best_conf[run] or run_tec_conf_set[run][tec][conf][\"hp_selection\"] < run_tec_best_conf[run][tec][1][\"hp_selection\"]:\n run_tec_best_conf[run][tec] = (conf, run_tec_conf_set[run][tec][conf])\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = () # (technique, configuration, mapes)\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp[0], temp[1][\"training\"], 
temp[1][\"hp_selection\"], temp[1][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp[1][\"hp_selection\"] < overall_run_best[2][\"hp_selection\"]:\n overall_run_best = (technique, temp[0], temp[1])\n\n self._logger.info(\"---Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1], overall_run_best[2][\"training\"], overall_run_best[2][\"hp_selection\"], overall_run_best[2][\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each external fold, for each technique, the aggregated mape\n run_efold_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over internal folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n ext_fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n if configuration not in run_efold_tec_conf_set[run][ext_fold][technique]:\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n\n # Select the best configuration for each technique in each external fold across different internal folders\n run_efold_tec_best_conf = recursivedict()\n for run in run_efold_tec_conf_set:\n for efold in run_efold_tec_conf_set[run]:\n for tec in run_efold_tec_conf_set[run][efold]:\n for conf in run_efold_tec_conf_set[run][efold][tec]:\n if conf not in run_efold_tec_best_conf[run][efold][tec] or run_efold_tec_conf_set[run][efold][tec][conf][\"hp_selection\"] < run_efold_tec_best_conf[run][efold][tec][1][\"hp_selection\"]:\n run_efold_tec_best_conf[run][efold][tec] = (conf, run_efold_tec_conf_set[run][efold][tec][conf], run_efold_tec_conf_set[run][efold][tec][conf])\n\n # Aggregate on external folds\n run_tec_set = recursivedict()\n for run in run_efold_tec_best_conf:\n for efold in run_efold_tec_best_conf[run]:\n for tec in run_efold_tec_best_conf[run][efold]:\n if \"hp_selection\" not in run_tec_set[run][tec]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_tec_set[run][tec][set_name] + run_efold_tec_best_conf[run][efold][tec][1][set_name]\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the 
best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n else:\n self._logger.error(\"Unexpected combination: %s\", str((validation, hp_selection)))\n sys.exit(1)\n best_confs = {}\n best_technique = None\n for conf in self._exp_confs:\n technique = conf.technique\n if technique not in best_confs or conf.mapes[\"validation\"] < best_confs[technique].mapes[\"validation\"]:\n best_confs[technique] = conf\n for technique in best_confs:\n if not best_technique or best_confs[technique].mapes[\"validation\"] < best_confs[best_technique].mapes[\"validation\"]:\n best_technique = technique\n if bool(self._campaign_configuration['General']['details']):\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n assert \"hp_selection\" in run_tec_conf_set[run][tec][conf]\n assert \"validation\" in run_tec_conf_set[run][tec][conf], \"training MAPE not found for \" + str(run) + str(tec) + str(conf)\n self._logger.info(\"Run %s - Technique %s - Conf %s - Training MAPE %f - Test MAPE %f\", str(run), ec.enum_to_configuration_label[tec], str(conf), run_tec_conf_set[run][tec][conf][\"hp_selection\"], run_tec_conf_set[run][tec][conf][\"validation\"])\n return best_confs, best_technique",
"def pick_first_connection(self):\n self.best_connection = []\n stations = list(self.grid.stations.values())\n\n # add a first station to the track \n for station in stations:\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, station.name)\n\n lookahead_1 = station.connections\n\n # calculate quality of all connections and save the best connection\n for la1 in lookahead_1: \n next_station = stations[int(la1)].name\n self.track.add_station(self.grid, next_station)\n lookahead_2 = stations[int(la1)].get_connections()\n \n for la2 in lookahead_2:\n # if adding the connection exceeds the track's max time length \n if self.track.add_station(self.grid, la2[0].name) is False:\n break\n \n quality = self.grid.get_quality()\n self.track.remove_last_station()\n\n # checks if the quality of the track is the best one yet and remembers it\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [station.name, stations[int(la1)].name, la2[0].name]\n self.track.remove_last_station()\n \n # if adding another track does not lead to a better quality, stop algorithm\n if self.best_connection == []:\n return False\n \n # add best connection to the track\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, self.best_connection[0])\n\n self.count += 1\n\n return station",
"def best_config(self):\n if self.total_propose == 0:\n idx = random.randint(0, len(self.config_list))\n result = {'config_id': idx,\n 'score': -1 * float('inf'),\n 'configs': self.config_list[idx]}\n return [result]\n else:\n pareto_board = self.sieve_board.copy()\n pareto_board = pareto_board.dropna()\n nondominated = pareto.eps_sort([list(pareto_board.itertuples(False))],\n objectives=self.pareto_cols,\n epsilons=None,\n maximize=self.max_object_ids)\n pareto_list = []\n for tmp_list in nondominated:\n result = {}\n for i, value in enumerate(tmp_list):\n if i == 1:\n result['config_id'] = value\n result['configs'] = self.config_list[int(value)]\n elif i >= 3:\n result[self.sieve_columns[i]] = value\n pareto_list.append(result)\n return pareto_list",
"def get_best_model_configs(self):\n self.best_models = {}\n with self.database:\n cur = self.database.cursor()\n for model in self.active_models:\n if self.tuning_depth == 'minimal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results\")\n elif self.tuning_depth == 'normal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n elif self.tuning_depth == 'maximal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n # TODO not implimented, same as normal\n self.best_models[model] = list(a)[0][0]",
"def get_optimum_config(\n self, tested_configs, fold_operation=FoldOperations.MEAN\n ):\n\n list_of_config_vals = []\n list_of_non_failed_configs = [\n conf for conf in tested_configs if not conf.config_failed\n ]\n\n if len(list_of_non_failed_configs) == 0:\n raise Warning(\"No Configs found which did not fail.\")\n try:\n\n if len(list_of_non_failed_configs) == 1:\n best_config_outer_fold = list_of_non_failed_configs[0]\n else:\n for config in list_of_non_failed_configs:\n list_of_config_vals.append(\n MDBHelper.get_metric(\n config,\n fold_operation,\n self.best_config_metric,\n train=False,\n )\n )\n\n if self.maximize_metric:\n # max metric\n best_config_metric_nr = np.argmax(list_of_config_vals)\n else:\n # min metric\n best_config_metric_nr = np.argmin(list_of_config_vals)\n\n best_config_outer_fold = list_of_non_failed_configs[\n best_config_metric_nr\n ]\n\n # inform user\n logger.debug(\n \"Optimizer metric: \"\n + self.best_config_metric\n + \"\\n\"\n + \" --> Maximize metric: \"\n + str(self.maximize_metric)\n )\n\n logger.info(\n \"Number of tested configurations: \" + str(len(tested_configs))\n )\n logger.photon_system_log(\n \"---------------------------------------------------------------------------------------------------------------\"\n )\n logger.photon_system_log(\"BEST_CONFIG \")\n logger.photon_system_log(\n \"---------------------------------------------------------------------------------------------------------------\"\n )\n logger.photon_system_log(\n json.dumps(\n best_config_outer_fold.human_readable_config,\n indent=4,\n sort_keys=True,\n )\n )\n\n return best_config_outer_fold\n except BaseException as e:\n logger.error(str(e))",
"def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()",
"def save_final_config(self, finalCfg):\n if True:\n cfgDict = self.bestcfg\n else:\n cfgDict = finalCfg.data\n print 'best score was %d, discrepancies follow:' % self.best\n for p in sorted(cfgDict.keys()):\n if cfgDict[p] != self.goals[p]:\n print '(%s, weight %d) ' % (p, self.weight[p]),",
"def next_tune_cfg(self):\n # generate tuning space according to user chosen tuning strategy\n\n while True:\n op_cfgs = {}\n op_cfgs['calib_iteration'] = int(np.random.choice(self.calib_iter))\n op_cfgs['op'] = {}\n for op, configs in self.opwise_quant_cfgs.items():\n cfgs_len = len(configs)\n if cfgs_len > 0:\n op_cfgs['op'][op] = configs[np.random.choice(cfgs_len)]\n else:\n op_cfgs['op'][op] = self.opwise_tune_cfgs[op][np.random.choice(\n len(self.opwise_tune_cfgs[op]))]\n\n yield op_cfgs",
"def update_global_best(self, offsprings):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n # self.problem.archive += swarm\n\n return",
"def update_global_best(self, swarm):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n # self.problem.archive += swarm\n\n return",
"def save_final_config(self, configuration):\n optimal_cfg = ''\n for cfg in configuration.data.keys():\n if configuration.data[cfg] == \"on\":\n optimal_cfg += cfg\n optimal_cfg += ' '\n log.info(\n \"Optimal pass sequence seen so far: [{0}]\".format(optimal_cfg))",
"def _get_best_configs(\n self,\n configs: list[Configuration],\n bracket: int,\n stage: int,\n from_keys: list[InstanceSeedBudgetKey],\n ) -> list[Configuration]:\n try:\n n_configs = self._n_configs_in_stage[bracket][stage + 1]\n except IndexError:\n return []\n\n rh = self.runhistory\n configs = configs.copy()\n\n for config in configs:\n isb_keys = rh.get_instance_seed_budget_keys(config)\n if not all(isb_key in isb_keys for isb_key in from_keys):\n raise NotEvaluatedError\n\n selected_configs: list[Configuration] = []\n while len(selected_configs) < n_configs:\n # We calculate the pareto front for the given configs\n # We use the same isb keys for all the configs\n all_keys = [from_keys for _ in configs]\n incumbents = calculate_pareto_front(rh, configs, all_keys)\n\n # Idea: We recursively calculate the pareto front in every iteration\n for incumbent in incumbents:\n configs.remove(incumbent)\n selected_configs.append(incumbent)\n\n # If we have more selected configs, we remove the ones with the smallest crowding distance\n if len(selected_configs) > n_configs:\n all_keys = [from_keys for _ in selected_configs]\n selected_configs = sort_by_crowding_distance(rh, selected_configs, all_keys)[:n_configs]\n logger.debug(\"Found more configs than required. Removed configs with smallest crowding distance.\")\n\n return selected_configs",
"def get_config(self, **kwargs) -> dict:\n if self._first_is_default and (not self._results):\n # Try default config first\n new_config = self._params_default\n else:\n new_config = self._sample_config()\n num_tries = 1\n while self._pickle_config(new_config) in self._results:\n if num_tries > self.MAX_RETRIES:\n if self._num_configs is not None:\n num_results = len(self._results)\n logger.log(30, f\"Stopping HPO due to exhausted search space: {num_results} of {self._num_configs} possible configs ran.\")\n raise ExhaustedSearchSpaceError\n assert num_tries <= self.MAX_RETRIES, f\"Cannot find new config in LocalRandomSearcher, even after {self.MAX_RETRIES} trials\"\n new_config = self._sample_config()\n num_tries += 1\n self._add_result(new_config, self._reward_while_pending())\n return new_config",
"def update_global_best(self, swarm):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n self.archive += swarm\n\n return",
"def search_station(st):\n\n res = []\n for key, val in _STATIONS.items():\n score = fuzz.token_set_ratio(st, key)\n res.append(\n {\n 'station': key,\n 'score': score,\n 'station_id': val\n }\n )\n if not res:\n return {}\n else:\n res = sorted(res, key=lambda k: k['score'], reverse=True)\n res = res[0]\n return res",
"def _generate_best_stats(self):\n self._best_trip = self._trips_dict[self._primary_mode]\n self._duration = self._best_trip.get_duration()\n self._distance = self._best_trip.get_distance()\n self._price_range = self._best_trip.get_price_range()\n self.set_start_loc_from_dict(self._best_trip.get_start_location())\n self.set_end_loc_from_dict(self._best_trip.get_end_location())\n self._build_legs()\n self._build_directions()",
"def best_last_option(self):\n \n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get all possible blocks to make a move in\n winning_blocks = board.get_winning_blocks(affinity)\n print('total winning blocks:'+str(len(winning_blocks)))\n best_blocks = []\n best_block = None\n\n # find the largest blocks to place a stone in\n for block in winning_blocks:\n if affinity == BLUE_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n elif len(block.blue) > len(best_blocks[0].blue):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.blue) == len(best_blocks[0].blue):\n best_blocks.append(block)\n elif affinity ==RED_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n if len(block.red) > len(best_blocks[0].red):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.red) == len(best_blocks[0].red):\n best_blocks.append(block)\n\n # find the best block to place a stone in\n for block in best_blocks:\n if best_block is None: best_block = block \n elif block.tiles[0][0] <= best_block.tiles[0][0]: \n if (block.tiles[0][1] != block.tiles[1][1]):\n if block.direction == 'vertical':\n if block.tiles[WINNING_ROW_SIZE()-1][1] >= best_block.tiles[WINNING_ROW_SIZE()-1][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block\n else:\n if block.tiles[0][1] >= best_block.tiles[0][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n else:\n if block.tiles[0][1] >= best_block.tiles[0][1] and block.tiles[1][0] <= best_block.tiles[1][0]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n\n # find the best move to make out of the best block \n # print('best block:'+str(best_block.tiles))\n best_move = (7,-1)\n for tile_i in range(len(best_block.tiles)):\n tile = best_block.tiles[tile_i]\n next_tile = None\n prev_tile = None \n if tile_i+1 in range(len(best_block.tiles)):\n next_tile = best_block.tiles[tile_i+1]\n if tile_i-1 in range(len(best_block.tiles)):\n prev_tile = best_block.tiles[tile_i-1]\n if board.get_tile(tile[0],tile[1]) == BLANK_TILE():\n if prev_tile is not None and next_tile is None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is None:\n if board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is not None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity or \\\n board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n \n return best_move",
"def Alternating_Minimization(self):\n Losses = {'Total':[]}\n self._compute_loss_seperately()\n\n Losses['Total'].append(self.total_loss)\n\n for it in range(self.max_its):\n print('iteration {} starts'.format(it+1))\n\n print('Local Updates:')\n for k in range(self.K):\n self._update_L(k)\n self._update_local_X(k)\n self._update_local_W(k)\n\n print('Global Updates:')\n self._update_G()\n self._update_global_X()\n self._update_global_W()\n\n print('Compute Losses')\n self._compute_loss_seperately()\n self._imputation_error()\n self._forecasting_error()\n\n Losses['Total'].append(self.total_loss)\n\n print('iteration {} complete: SSE: {}, Total loss: {}, Imputation NRMSE: {}, Imputation ND: {}, Forecasting NRMSE {}, Forecasting ND {}'.format(it+1, self.SSE, self.total_loss, self.imputation_NRMSE, self.imputation_ND, self.forecasting_NRMSE, self.forecasting_ND))\n print('Best results so far is {}'.format(self.best_results))\n\n if Losses['Total'][-1] > Losses['Total'][-2]:\n print(\"Error: total loss increases and it is impossible\")\n break",
"def get_next_config(self):\n\n self.reset_trial()\n self._cur_config = self.get_default()\n return self._cur_config if len(self._results) == 0 else None",
"def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]",
"def set_optimal_parameters(self):\n # Getting the best trial based on the test errors\n idx = self.trial_losses.index(min(self.trial_losses))\n self.best_trial = self.trial_list[idx]\n self.objective.parse_trial(self.best_trial)",
"def test_get_all_configurations(self):\n\n time_series = ['test-all-conf-1', 'test-all-conf-2', 'test-all-conf-3']\n [timeserie_configuration.get_timeserie_configure(self.get_local_dynamo_cli(),\n ts) for ts in time_series]\n\n all_configurations = timeserie_configuration.get_all_configurations(\n self.get_local_dynamo_cli())\n self.assertEquals(3, len(all_configurations))\n self.assertTrue(all([conf.default for conf in all_configurations]))",
"def greedy(self, iterations):\n # turn houses into list\n random_houses = list(self.houses.values())\n\n iterations = int(iterations)\n\n prices = []\n count = 0\n misses = -iterations\n\n # Do untill we have <iterations> succesfull configurations\n while count < iterations:\n self.disconnect()\n # While one or more batteries are over their capacity or not every\n # house is linked to a battery\n while self.check_linked() is False or self.check_full() is True:\n misses += 1\n\n # shuffle order of houses\n random.shuffle(random_houses)\n\n # remove connections, if any\n self.disconnect()\n\n # for every house find closest battery to connect to provided\n # that this house wont over-cap the battery\n for house in random_houses:\n for i in range(len(self.batteries.values())):\n output = house.output\n curr = self.batteries[list(house.diffs)[i]].filled()\n cap = self.batteries[list(house.diffs)[i]].capacity\n if output + curr <= cap:\n batt = self.batteries[list(house.diffs)[i]]\n house.link = batt\n batt.linked_houses.append(house)\n break\n\n # calculate price\n for battery in self.batteries.values():\n if not battery.linked_houses:\n del battery\n price = self.calculate_cost()\n prices.append(price)\n\n count += 1\n\n if min(prices) < self.lowest:\n self.lowest = min(prices)\n with open(f\"weighted_clusters_WIJK{self.input}.dat\",\n \"wb\") as f:\n pickle.dump([self.houses, self.batteries], f)\n\n # self.plot_houses()\n return min(prices)",
"def find_best_free_param_configuration_LOO_adj_sen(p):\n\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n# measures_res = base_path +\"\\\\measures_res\"+setup+\"\\\\\"\n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n claim_dict = read_pickle(\"claim_dict\")\n claim_num_list = [4,7,17,21,36,37,39,40,41,42,45,46,47,50,51,53,54,55,57,58,59,60,61,62,66,69,70,79,80]\n# claim_num_list = [4,47,53,58,7,54]\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res = {} #key is left out claim and and value is the alpha,beta,lambda configuration that led to best measures - avg nDCG and AP across the train claims\n measures_res_of_left_out_in_its_best_conf = {} #key - left out claim num, and value is the measures of it, in the best configuration without it.\n \n k_val = 50\n prec_at_k_train = rcdtype.recordtype('prec_at_k_train', 'at_5 at_10')\n max_prec_at_k = rcdtype.recordtype('max_prec_at_k', 'max_val max_conf')\n try:\n for left_out_claim_indx in range(len(claim_num_list)):\n max_nDCG = 0\n max_MAP = 0\n max_nDCG_conf = []\n max_MAP_conf = []\n max_prec_at_5 = max_prec_at_k(0,\"\")\n max_prec_at_10 = max_prec_at_k(0,\"\")\n \n left_out_claim_num = claim_num_list[left_out_claim_indx]\n temp_claim_num_list = claim_num_list[:]\n temp_claim_num_list.remove(left_out_claim_num)\n for alpha in range(0,7,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n for delta_1 in range(0,10,1):\n for delta_2 in range(0,10,1):\n if not delta_1+delta_2 >9: \n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f,delta_1_f,delta_2_f) = turn_to_float([alpha,beta,delta_1,delta_2])\n measures_all_claims = utils_linux.read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_delta1_\"+str(delta_1_f)+\"_delta2_\"+str(delta_2_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n \n # AP_all_claims_curr_param_values = read_pickle(nDCG_MAP_res+\"AP_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n # nDCG_all_claims_curr_param_values = read_pickle(nDCG_MAP_res+\"NDCG_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f)+\"_at_\"+str(p))\n # prec_at_k_all_claims_params_values = read_pickle(nDCG_MAP_res+\"prec_at_k_all_claims_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n avg_nDCG_on_train = 0\n MAP_on_train = 0\n p_at_k_train_avg = prec_at_k_train(0,0)\n for clm_num_train in temp_claim_num_list:\n avg_nDCG_on_train += measures_all_claims[str(clm_num_train)][0]\n MAP_on_train += measures_all_claims[str(clm_num_train)][1] #in this config' -> get the measures\n p_at_k_train_avg.at_5 += measures_all_claims[str(clm_num_train)][2]\n p_at_k_train_avg.at_10 += measures_all_claims[str(clm_num_train)][3]\n avg_nDCG_on_train = float(float(avg_nDCG_on_train)/float(len(temp_claim_num_list)))\n MAP_on_train = float(float(MAP_on_train)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_5 = float(float(p_at_k_train_avg.at_5)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_10 = float(float(p_at_k_train_avg.at_10)/float(len(temp_claim_num_list)))\n \n if avg_nDCG_on_train > max_nDCG:\n max_nDCG = avg_nDCG_on_train\n max_nDCG_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if MAP_on_train > max_MAP:\n max_MAP = MAP_on_train\n max_MAP_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if p_at_k_train_avg.at_5 > max_prec_at_5.max_val:\n 
max_prec_at_5.max_val = p_at_k_train_avg.at_5\n max_prec_at_5.max_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if p_at_k_train_avg.at_10 > max_prec_at_10.max_val:\n max_prec_at_10.max_val = p_at_k_train_avg.at_10\n max_prec_at_10.max_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res[left_out_claim_num] = [(max_nDCG,max_nDCG_conf),(max_MAP,max_MAP_conf),(max_prec_at_5.max_val,max_prec_at_5.max_conf),(max_prec_at_10.max_val,max_prec_at_10.max_conf)]\n #finished leaving out,\n #now calculate the nDCG and MAP of the left out claims with its best configuration results\n avg_nDCG_on_left_out = 0\n MAP_on_left_out = 0\n avg_prec_at_5_on_left_out = 0\n avg_prec_at_10_on_left_out = 0\n for clm_num in claim_num_list:\n (best_alpha_nDCG,best_beta_nDCG,best_lambda_nDCG,best_delta1_nDCG,best_delta2_nDCG) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1]\n (best_alpha_MAP,best_beta_MAP,best_lambda_MAP,best_delta1_MAP,best_delta2_MAP) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1]\n (best_alpha_prec_at_5,best_beta_prec_at_5,best_lambda_prec_at_5,best_delta1_prec_at_5,best_delta2_prec_at_5) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1]\n (best_alpha_prec_at_10,best_beta_prec_at_10,best_lambda_prec_at_10,best_delta1_prec_at_10,best_delta2_prec_at_10) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1]\n #read the best config' dict\n best_config_of_nDCG_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_nDCG)+\"_beta_\"+str(best_beta_nDCG)+\"_delta1_\"+str(best_delta1_nDCG)+\"_delta2_\"+str(best_delta2_nDCG)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_nDCG))\n best_config_of_AP_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_MAP)+\"_beta_\"+str(best_beta_MAP)+\"_delta1_\"+str(best_delta1_MAP)+\"_delta2_\"+str(best_delta2_MAP)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_MAP))\n best_config_of_prec_at_5_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_prec_at_5)+\"_beta_\"+str(best_beta_prec_at_5)+\"_delta1_\"+str(best_delta1_prec_at_5)+\"_delta2_\"+str(best_delta2_prec_at_5)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_5)) #take only the first item in the tuple\n best_config_prec_of_at_10_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_prec_at_10)+\"_beta_\"+str(best_beta_prec_at_10)+\"_delta1_\"+str(best_delta1_prec_at_10)+\"_delta2_\"+str(best_delta2_prec_at_10)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_10)) #take only the second item in the tuple\n measures_res_of_left_out_in_its_best_conf[clm_num] = (best_config_of_nDCG_dict[str(clm_num)][0],best_config_of_AP_dict[str(clm_num)][1],best_config_of_prec_at_5_dict[str(clm_num)][2],best_config_prec_of_at_10_dict[str(clm_num)][3])\n avg_nDCG_on_left_out += best_config_of_nDCG_dict[str(clm_num)][0]\n MAP_on_left_out += best_config_of_AP_dict[str(clm_num)][1]\n avg_prec_at_5_on_left_out += best_config_of_prec_at_5_dict[str(clm_num)][2]\n avg_prec_at_10_on_left_out += best_config_prec_of_at_10_dict[str(clm_num)][3]\n \n save_pickle(measures_res+\"measures_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), measures_res_of_left_out_in_its_best_conf)\n #report the avg\n avg_nDCG_on_left_out = float(float(avg_nDCG_on_left_out)/float(len(claim_num_list))) \n MAP_on_left_out = 
float(float(MAP_on_left_out)/float(len(claim_num_list))) \n avg_prec_at_5_on_left_out = float(float(avg_prec_at_5_on_left_out)/float(len(claim_num_list)))\n avg_prec_at_10_on_left_out = float(float(avg_prec_at_10_on_left_out)/float(len(claim_num_list)))\n #write res to file:\n # claim text, the best nDCG conf and result on train, the nDCG it really has, and the same for AP\n with open(measures_res+\"nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p)+\".csv\", 'wb') as csvfile:\n w = csv.writer(csvfile)\n row = \"claim|best_nDCG|alpha,beta,lambda,delta_1,delta_2,delta_3|best_AP|alpha,beta,lambda,delta_1,delta_2,delta_3|best_prec_at_5|alpha,beta,lambda,delta_1,delta_2,delta_3|best_prec_at_10|alpha,beta,lambda,delta_1,delta_2,delta_3\"\n w.writerow([row])\n for (clm_num,(nDCG,AP,prec_at_5,prec_at_10)) in measures_res_of_left_out_in_its_best_conf.items():\n row = claim_dict[str(clm_num)]+\"&\"+'%.3f'%nDCG+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][3])\n row += \"&\"+'%.3f'%AP+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][3])\n row += \"&\"+'%.3f'%prec_at_5+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][3])\n row += \"&\"+'%.3f'%prec_at_10+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][3])\n w.writerow([row])\n w.writerow([\"avg_nDCG_on_left_out: \"+ '%.4f'%avg_nDCG_on_left_out ])\n w.writerow([\"MAP_on_left_out: \"+ '%.4f'%MAP_on_left_out])\n w.writerow([\"avg_prec_at_5_on_left_out: \"+ 
'%.4f'%avg_prec_at_5_on_left_out])\n w.writerow([\"avg_prec_at_10_on_left_out: \"+ '%.4f'%avg_prec_at_10_on_left_out])\n except Exception as err: \n sys.stderr.write('problem in LOO') \n print err",
"def find_best(self, num_iters: int, hparams_path: str, trials_path: str) -> None:\n for _ in range(num_iters):\n try:\n trials = pickle.load(open(trials_path, \"rb\"))\n self.last_best = trials.best_trial[\"result\"][\"loss\"]\n except FileNotFoundError:\n trials = Trials()\n logger.info(\n f\"Last best from previous iteration was: {self.last_best} on \"\n f\"[{datetime.now().replace(second=0, microsecond=0)}]\"\n )\n best_hparams = space_eval(\n self.search_space,\n fmin(\n self.objective,\n self.search_space,\n algo=tpe.suggest,\n max_evals=len(trials.trials) + 1,\n show_progressbar=False,\n trials=trials,\n ),\n )\n # Dump Trials object always\n with open(trials_path, \"wb\") as trials_file:\n pickle.dump(trials, trials_file)\n\n # Dump hparams only if better result was achieved\n if trials.best_trial[\"result\"][\"loss\"] < self.last_best:\n best_hparams[\"name\"] = self.name\n best_hparams[\"seed\"] = self.seed\n\n with open(hparams_path, \"w\") as yaml_file:\n YAML().dump(best_hparams, yaml_file)",
"def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time",
"def combine_all_populations(folder: str,\n max_v: int = None,\n neat: bool = False,\n neat_gru: bool = False,\n neat_lstm: bool = False,\n neat_sru: bool = False,\n neat_sru_s: bool = False,\n ):\n # Collect all the populations\n populations = []\n if neat: populations.append(D_NEAT)\n if neat_gru: populations.append(D_NEAT_GRU)\n if neat_lstm: populations.append(D_NEAT_LSTM)\n if neat_sru: populations.append(D_NEAT_SRU)\n if neat_sru_s: populations.append(D_NEAT_SRU_S)\n if len(populations) == 0: return\n \n # Collect all the measure options\n OPTIONS = ['distance', 'finished', 'fitness', 'score', 'time', 'training']\n # OPTIONS = ['fitness']\n \n # Go over all possibilities\n print(f\"\\n===> COMBINING POPULATIONS OF FOLDER {folder} <===\")\n path = f\"population_backup/storage/{folder}/\"\n path_images = get_subfolder(path, 'images')\n for option in OPTIONS:\n plt.figure(figsize=(8, 2.5))\n max_data = 0\n max_gen = 0\n for pop in populations:\n # Load the dictionary\n d = load_dict(f\"{path}{pop}/evaluation/{option}\")\n size = len(list(d.values())[0])\n if max_v: assert size == max_v\n \n # Prepare the data containers\n q1 = []\n q2 = [] # Median\n q3 = []\n idx_q1 = int(round(1 / 4 * size))\n idx_q2 = int(round(2 / 4 * size))\n idx_q3 = int(round(3 / 4 * size))\n \n # Loop over each iteration\n x = sorted([int(k) for k in d.keys()])\n for g in x:\n if g > max_gen: max_gen = g\n lst = sorted(d[str(g)]) # Sort values from low to high\n q1.append(lst[idx_q1])\n q2.append(lst[idx_q2])\n q3.append(lst[idx_q3])\n \n # Plot the results\n plt.plot(x, q1, color=COLORS[pop], linestyle=\":\", linewidth=.5)\n plt.plot(x, q3, color=COLORS[pop], linestyle=\":\", linewidth=.5)\n plt.plot(x, q2, color=COLORS[pop], linestyle=\"-\", linewidth=2, label=pop)\n plt.fill_between(x, q1, q3, color=COLORS[pop], alpha=0.2)\n \n # Update the max-counter\n if max(q3) > max_data: max_data = max(q3)\n \n # Finalize the figure\n leg = plt.legend(loc='upper center',\n bbox_to_anchor=(0.5, 1.25),\n fancybox=True,\n fontsize=10,\n ncol=len(populations))\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n # plt.xticks([i * 100 for i in range(11)]) # TODO\n plt.xlabel(\"generation\")\n plt.xlim(0, max_gen)\n # plt.yticks([i for i in range(7)]) # TODO\n plt.ylabel(option)\n plt.ylim(0, max(max_data * 1.05, 1.05))\n # plt.ylim(0, 6) # TODO\n plt.grid()\n plt.tight_layout()\n plt.savefig(f\"{path_images}comb_{option}.png\", bbox_inches='tight', pad_inches=0.02, dpi=500)\n # plt.savefig(f\"{path_images}comb_{option}.eps\", format=\"eps\", bbox_inches='tight', pad_inches=0.02)\n # plt.show()\n plt.close()",
"def save_final_config(self, configuration):\n # outfile = 'passes_final.json'\n # print \"Optimal passes written to \" + outfile + \":\", configuration.data\n # self.manipulator().save_to_file(configuration.data, outfile)\n msg = \"Tuned on program {0}, with priority {1}. \\nBest pass ordering found:\\n{2}\".format(\n self.args.makefile, OPT_LVL, self.build_options(configuration.data))\n print msg\n self.make(\"clean\")",
"def test_get_best_candidate(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n cand_one = LAss.get_next_candidate(name)\n cand_one.result = 1\n LAss.update(name, cand_one)\n\n cand_two = LAss.get_next_candidate(name)\n cand_two.result = 0\n LAss.update(name, cand_two)\n\n assert_equal(cand_two, LAss.get_best_candidate(name))"
] | [
"0.7005242",
"0.66232514",
"0.6464399",
"0.62459195",
"0.6219636",
"0.6025062",
"0.5821818",
"0.58028185",
"0.56950855",
"0.5568496",
"0.5530859",
"0.5526991",
"0.55262834",
"0.5522291",
"0.547059",
"0.5425894",
"0.54113275",
"0.5394614",
"0.5392455",
"0.5378019",
"0.53733134",
"0.5367146",
"0.5268899",
"0.52353597",
"0.5216201",
"0.52160203",
"0.5210259",
"0.5201441",
"0.5195899",
"0.51886064"
] | 0.71698874 | 0 |
creates a species identified by taxid and containing an empty dictionary of orthologs | def __init__(self, taxid, species_name = None, lineage=None):
self.genes = dict()
self.taxid = taxid
self.species = species_name
self.lineage = lineage | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as dumpfile:\n dumpfile.extractall(path='./data/')\n taxon_id = dict()\n data = list()\n name = dict()\n specie = list()\n son = dict()\n greatson = dict()\n parent = dict()\n rank = dict()\n global taxon\n taxon = list()\n with open('./data/names.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n if add[0] not in name or add[2] == 'scientific name':\n name[add[0]] = add[1]\n with open('./data/nodes.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n # 1696063|Sarcocystis corvusi||scientific name|\n taxon_id[add[0]] = add[1]\n rank[add[0]] = add[3]\n if add[2] == 'species':\n specie.append(add[0])\n for specie in specie:\n record = [specie, ]\n while taxon_id[specie] != '1':\n record.append(taxon_id[specie])\n specie = taxon_id[specie]\n # if '33090' in record:\n # record.pop()\n # record.pop()\n data.append(record)\n for data in data:\n for n in range(len(data)):\n if data[n] not in parent:\n parent[data[n]] = data[(n + 1):]\n if n == 0:\n continue\n if data[n] not in son:\n son[data[n]] = {data[n - 1], }\n else:\n son[data[n]].add(data[n - 1])\n if data[n] not in greatson:\n greatson[data[n]] = {data[0], }\n else:\n greatson[data[n]].add(data[0])\n for specie in name.items():\n if specie[0] not in son:\n son[specie[0]] = set()\n if specie[0] not in parent:\n parent[specie[0]] = list()\n if specie[0] not in greatson:\n greatson[specie[0]] = set()\n record = [specie[0], name[specie[0]], rank[specie[0]], son[specie[0]], parent[specie[0]], greatson[specie[0]]]\n taxon.append(record)\n\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS taxon (Id TEXT, Name TEXT, Rank TEXT, Son TEXT, Parent TEXT, GreatSon TEXT);')\n for line in taxon:\n son = ' '.join(line[3])\n parent = ' '.join(line[4])\n greatson = ' '.join(line[5])\n cur.execute('INSERT INTO taxon (Id, Name, Rank, Son, Parent, GreatSon) VALUES (?, ?, ?, ?, ?, ?);',\n (line[0], line[1], line[2], son, parent, greatson))\n con.commit()\n cur.close()\n con.close()\n print('Done.\\n')",
"def writeTaxonomies( self ):\n\n self.logger.info( 'writeTaxonomies: START' )\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): START' )\n\n organisms = self.reader.getAllOrganisms()\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): DONE' )\n\n taxonomies = {} \n\n taxonomyFile = self.openInsertFile( 'taxonomiesInsert.psql' )\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(organisms)) + ' organisms and our insert file is taxonomiesInsert.psql' )\n\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxonomies[ tax['name'] ] = { 'name': tax['name'], 'tax_id': tax['tax_id'], 'type': tax['type'] } \n\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(taxonomies)) + ' taxonomies.' )\n\n\n for taxonomy,taxData in taxonomies.iteritems():\n taxonomyInserted = self.writeFile( taxonomyFile, 'taxonomies', [ str(taxData['name']), str(taxData['tax_id']), str(taxData['type']) ] )\n self.taxonomiesInserted[ taxData['name'] ] = taxonomyInserted\n\n self.logger.info( 'writeTaxonomies: DONE' )",
"def load_taxdict():\n tax = {}\n with open(\"../../data/taxonomy/tree_taxid.txt\", 'r') as file:\n for line in file:\n current_line = line.split() \n current_taxid = current_line[0]\n current_name = current_line[1]\n tax[current_taxid] = current_name \n\n return tax",
"def add_taxonomy(tax_idx, pkl):\n for tax,v in tax_idx.items():\n for genome_id,genome_len in v.items():\n T = '|'.join(list(tax) + ['t__' + genome_id])\n pkl['taxonomy'][T] = ('', int(genome_len))\n return pkl",
"def species_table(self):\n if self.hdf5_data is None:\n return None\n species_section = self.hdf5_data.get('/species', None)\n if species_section is None:\n return None\n return dict(\n (id, dict(name=name, radius=radius, D=D))\n for id, name, radius, D in species_section.value)",
"def createTaxDict(taxFile):\n taxonomyDict = {}\n with open(taxFile, 'r') as f:\n for line in f:\n split = line.rstrip().split(\"\\t\")\n key = split[0]\n vals = split[1:]\n if \"Sendai virus\" in key:\n #print(key,flush=True)\n vals = [\"genus:Respirovirus\",\"family:Paramyxoviridae\",\"order:Mononegavirales\",\\\n \"class:Monjiviricetes\",\"phylum:Negarnaviricota\",\"resolution:genus\"]\n elif \"Bastrovirus\" in key:\n #print(key,flush=True)\n vals = [\"family:Astroviridae\",\"order:Stellavirales\",\"class:Stelpaviricetes\",\\\n \"phylum:Pisuviricota\",\"resolution:family\"]\n keyDict = {}\n for value in vals:\n splitVal = value.split(\":\")\n keyDict[splitVal[0]] = splitVal[1]\n taxonomyDict[key] = keyDict\n print(len(taxonomyDict))\n return taxonomyDict",
"def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g",
"def writeOrganismTaxonomies( self ):\n\n self.logger.info( 'writeOrganismTaxonomies: START' )\n\n organisms = self.reader.getAllOrganisms()\n\n taxonomies = {} \n\n self.logger.info( 'writeOrganismTaxonomies: insert file will be organismTaxonomiesInsert.psql' )\n\n taxonomyFile = self.openInsertFile( 'organismTaxonomiesInsert.psql' )\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxId = self.taxonomiesInserted[ tax['name'] ] \n organismId = self.importerOrganism.organismsInserted[ organism ] \n\n self.writeFile( taxonomyFile, 'organism_taxonomies', [ str(organismId), str(taxId) ] )\n\n\n self.logger.info( 'writeOrganismTaxonomies: DONE' )",
"def get_full_tax(idx):\n logging.info('Compiling the taxonomy for all genomes...')\n tax_idx = collections.defaultdict(dict)\n for cluster_id,v in idx.items():\n for tax,vv in v.items():\n for genome_id,x in vv.items():\n tax_idx[tax][genome_id] = x['genome_len']\n n_genomes = 0\n for tax,v in tax_idx.items():\n n_genomes += len(v.keys())\n logging.info(' Total number of genomes: {}'.format(n_genomes))\n # return\n return tax_idx",
"def make_homologues_mirnas(phylogenetic_tree, mirna_seqs):\n species = [leaf.taxon.label for leaf in phylogenetic_tree.leaf_iter()]\n mirhomologues = pd.DataFrame({sp: {mirid: mirna_seqs[mirid][:21]\n for mirid in mirna_seqs.keys()}\n for sp in species}).transpose()\n return mirhomologues",
"def as_dict(self):\n species_dict = dict()\n species_dict['force_field'] = self.force_field\n species_dict['is_ts'] = self.is_ts\n if self.e_elect is not None:\n species_dict['e_elect'] = self.e_elect\n if self.e0 is not None:\n species_dict['e0'] = self.e0\n species_dict['arkane_file'] = self.arkane_file\n if self.yml_path is not None:\n species_dict['yml_path'] = self.yml_path\n if self.is_ts:\n species_dict['ts_methods'] = self.ts_methods\n species_dict['ts_guesses'] = [tsg.as_dict() for tsg in self.ts_guesses]\n species_dict['ts_conf_spawned'] = self.ts_conf_spawned\n species_dict['ts_number'] = self.ts_number\n species_dict['ts_report'] = self.ts_report\n species_dict['rxn_label'] = self.rxn_label\n species_dict['successful_methods'] = self.successful_methods\n species_dict['unsuccessful_methods'] = self.unsuccessful_methods\n species_dict['chosen_ts_method'] = self.chosen_ts_method\n species_dict['chosen_ts'] = self.chosen_ts\n if self.run_time is not None:\n species_dict['run_time'] = self.run_time.total_seconds()\n species_dict['t1'] = self.t1\n species_dict['label'] = self.label\n species_dict['long_thermo_description'] = self.long_thermo_description\n species_dict['multiplicity'] = self.multiplicity\n if self.number_of_radicals is not None:\n species_dict['number_of_radicals'] = self.number_of_radicals\n species_dict['charge'] = self.charge\n species_dict['generate_thermo'] = self.generate_thermo\n if self.opt_level is not None:\n species_dict['opt_level'] = self.opt_level\n if self.final_xyz is not None:\n species_dict['final_xyz'] = self.final_xyz\n species_dict['number_of_rotors'] = self.number_of_rotors\n species_dict['rotors_dict'] = self.rotors_dict\n species_dict['external_symmetry'] = self.external_symmetry\n species_dict['optical_isomers'] = self.optical_isomers\n species_dict['neg_freqs_trshed'] = self.neg_freqs_trshed\n if self.conf_is_isomorphic is not None:\n species_dict['conf_is_isomorphic'] = self.conf_is_isomorphic\n if self.bond_corrections is not None:\n species_dict['bond_corrections'] = self.bond_corrections\n if self.mol is not None:\n species_dict['mol'] = self.mol.toAdjacencyList()\n if self.initial_xyz is not None:\n species_dict['initial_xyz'] = self.initial_xyz\n if self.checkfile is not None:\n species_dict['checkfile'] = self.checkfile\n if self.most_stable_conformer is not None:\n species_dict['most_stable_conformer'] = self.most_stable_conformer\n if self.cheap_conformer is not None:\n species_dict['cheap_conformer'] = self.cheap_conformer\n if self.recent_md_conformer is not None:\n species_dict['recent_md_conformer'] = self.recent_md_conformer\n if self.svpfit_output_file is not None:\n species_dict['svpfit_output_file'] = self.svpfit_output_file\n if self._radius is not None:\n species_dict['radius'] = self._radius\n if self.conformers:\n species_dict['conformers'] = self.conformers\n species_dict['conformer_energies'] = self.conformer_energies\n if self.conformers_before_opt is not None:\n species_dict['conformers_before_opt'] = self.conformers_before_opt\n if self.bdes is not None:\n species_dict['bdes'] = self.bdes\n return species_dict",
"def species_lookup_by_taxonid(self, taxon_id):\n return self.species_name_lookup(taxon_id)",
"def parse_taxonomy( seq_id, lineage, key_dictionary ):\n\tif seq_id in sti_dict:\n\t\ttax_id = sti_dict[ seq_id ]\n\t\ttax_names = [ tax_id ] #list of taxon names\n\telse:\n\t\ttax_id = str( seq_id )\n\t\ttax_names = [ tax_id ] #list of taxon names\n\ttax_numbers = [ seq_id ]\n\tis_A_list = [] #store is_A relationships\n\n\twhile lineage != '1': #forces traversal through the tri file until we get to the root of taxonomy\n\t\t#print lineage\n\t\tif lineage == '0': #need this to process the root in the tri file. \n\t\t\tbreak\n\t\tis_A_list = [lineage] + is_A_list\n\t\ttax_numbers = [lineage] + tax_numbers\n\t\tif lineage in sti_dict: #we have the next taxonomic representative in the sti file\n\t\t\ttax_id = sti_dict[ lineage ]\n\t\t\ttax_names = [tax_id] + tax_names #append tax_id to front of list\n\t\telse: #the taxon does not have a sequence representative. \n\t\t\ttax_id = str( lineage ) \n\t\t\ttax_names = [tax_id] + tax_names\n\t\t#now process to next lineage\n\t\tlineage = tri_dict[ lineage ] \n\n\n\ttax_names = ['root'] + tax_names #append tax_id to front of list\n\ttax_numbers = [lineage] + tax_numbers\n\tis_A_list = ['0'] + [lineage] + is_A_list\n\n\t#now append all of these reuslts to the final dictionary, which will be keyed \n\t#off of the tax_numbers list (unique IDs for each taxonomic level.\n\n\tfor i in xrange( len( tax_numbers ) ):\n\t\tid = tax_numbers[i]\n\t\tif id in key_dictionary:\n\t\t\tpass\n\t\telse:\n\t\t\tparent = is_A_list[i]\n\t\t\tlevel = i #taxonomic level (how far down in levels are we?)\n\t\t\tnames = process_names( tax_names[:i+1] )\n\t\t\tkey_dictionary[ id ] = [ parent, level, names ]\n\n\treturn( key_dictionary )",
"def construct_taxonomy(termID_list, dih, dih_metric_name=\"invCL\", taxorg_method=\"NoCyc\", \n graph_init_threshold=0.01, verbose=True):\n assert dih_metric_name in [\"weeds_prec\", \"clarkeDE\", \"invCL\"]\n assert taxorg_method in [\"NoCyc\", \"DMST\"]\n\n # DIH\n weighted_edges = [] # (hyper, hypo, score)\n for termID_pair in tqdm(list(combinations(termID_list, r=2))):\n score = 2.0 * dih.predict(termID_pair[0], termID_pair[1], dih_metric_name)\n if score >= graph_init_threshold:\n weighted_edges.append([termID_pair[1], termID_pair[0], score])\n\n # reverse order testing\n score = 2.0 * dih.predict(termID_pair[1], termID_pair[0], dih_metric_name)\n if score >= graph_init_threshold:\n weighted_edges.append([termID_pair[0], termID_pair[1], score])\n\n # Taxonomy organization\n G = construct_graph(termID_list, weighted_edges)\n if verbose:\n print(\"Before Taxonomy Organization\")\n describe_graph(G)\n print(\"=\"*89)\n\n if taxorg_method == \"NoCyc\":\n T = NoCyc(G)\n elif taxorg_method == \"DMST\":\n T = DMST(G)\n\n if verbose:\n print(\"After Taxonomy Organization\")\n describe_graph(T)\n \n return T",
"def genotype(rsid):\n if rsid[0] == 'I' or rsid[0] == 'i':\n return { 'error': 'Cannot find indicators, must use rs #s'}\n soup = BeautifulSoup(urllib.urlopen('http://snpedia.com/index.php/Special:Browse/' + rsid).read())\n trows = soup('table')[1].find_all('tr')\n if len(trows) < 2:\n return { 'error': 'That rsid does not have any data/does not exist.' }\n locations = getLocations(soup)\n genotypeData = getData(locations, soup)\n genotypeData['rsid'] = rsid\n return genotypeData",
"def test_speciesCreation():\n \n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n sys.addSpecies('fox')\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInitialCond('rabbit', 10)\n sys.setInitialCond('fox', 5)\n sys.setGrowthRate('rabbit', 1)\n sys.setGrowthRate('fox', -1)\n sys.setCarrCap('rabbit', 10000)\n sys.setCarrCap('fox', 10000)\n sys.setChangeRate('rabbit', 10)\n sys.setChangeRate('fox', 20) \n \n assert len(sys.species_list) == 2\n assert sys.species_list == ['rabbit','fox']\n assert sys.intMatrix == {('rabbit','fox'):-1, ('fox','rabbit'):1}\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')",
"def create_taxonomy(dataset_name, attr, dataset=[]):\n #path = os.getcwd()\n\n path_in = os.getcwd()\n pattern = '^.*/thesis-data-anonymisation/'\n path_top = re.search(pattern, path_in).group(0)\n\n path = path_top +'data'\n\n if len(dataset_name) > 0:\n prefix = '../data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '../data/hierarchy_'\n\n postfix = '.csv'\n\n try:\n file = open(path + '/' + prefix + attr + postfix, 'r')\n except FileNotFoundError:\n if len(dataset_name) > 0:\n prefix = '/data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '/data/hierarchy_'\n file = open(path+prefix + attr + postfix, 'r')\n\n taxonomy = {}\n #dataset_group = dataset.groupby(attr).groups\n\n lines_in = file.readlines()\n file.close()\n lines = [line.strip().split(';') for line in lines_in]\n max_height = max([len(line) for line in lines])\n try:\n float(lines[0][0])\n is_numeric = True\n except ValueError:\n is_numeric = False\n for line in lines:\n #try:\n # if is_numeric:\n # dataset_group[int(line[0])]\n # else:\n # dataset_group[line[0]]\n #except KeyError:\n # continue\n line.reverse()\n for i, val in enumerate(line):\n is_leaf = False\n if val == '*':\n node = TaxNode(val, None, is_numeric, is_leaf)\n else:\n if i == len(line) - 1:\n is_leaf = True\n\n node = TaxNode(val, taxonomy[line[i - 1]][-1], is_numeric, is_leaf)\n try:\n current_nodes = taxonomy[val]\n already_added = False\n for current_node in current_nodes:\n if current_node.parent is None:\n already_added = True\n elif current_node.parent.value == node.parent.value:\n already_added = True\n if not already_added:\n taxonomy[val].append(node)\n except KeyError:\n taxonomy[val] = [node] # Saves the nodes in a list in case of several parents (only valid for nodes with several parents!!!)\n hierarchy = Taxonomy(taxonomy, max_height)\n\n return hierarchy",
"def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict",
"def __getTaxonomnyChainMapping(self, siftsSummaryDirPath, csvFileName):\n fp = os.path.join(siftsSummaryDirPath, csvFileName)\n rowDL = self.__readSiftsSummaryFile(fp)\n logger.info(\"Length of SIFTS summary file %s %d\", csvFileName, len(rowDL))\n logger.debug(\"%r\", list(rowDL[0].items()))\n tD = {}\n for rowD in rowDL:\n entryId = rowD[\"PDB\"]\n chainId = rowD[\"CHAIN\"]\n taxId = rowD[\"TAX_ID\"]\n tD.setdefault(entryId.upper(), {}).setdefault(chainId, {}).update({taxId: True})\n #\n logger.info(\"Taxonomy for %d entries\", len(tD))\n return tD",
"def nsrTaxonomy():\r\n # Input file\r\n taxonomyFile = pd.read_csv(args.indir+\"/\"+args.infile1, header=2,\r\n sep=\"\\t\", encoding=\"utf8\")\r\n\r\n # Parse taxonomic names into their elementary components\r\n taxonomy = taxonomyFile.loc[taxonomyFile['rank'] == 'soort']\r\n taxonList = []\r\n for taxon in taxonomy['scientific_name']:\r\n parser = taxonParser(taxon)\r\n if not parser or parser is False:\r\n pass\r\n else:\r\n taxonList.append(parser)\r\n\r\n # Write taxonomy to file\r\n index = 0\r\n with io.open(par_path+\"/results/nsr_species.csv\", \"w\", encoding=\"utf-8\") as outfile:\r\n outfile.write('\"species_id\",\"species_name\",\"identification_reference\"\\n')\r\n for i in taxonList:\r\n binomial = ' '.join(str(i).split()[:2])\r\n authorship = ' '.join(str(i).split()[2:])\r\n outfile.write('%s,%s,\"%s\"\\n' % (index, binomial, authorship))\r\n index += 1\r\n\r\n return taxonList",
"def build_phenotype(phenotype_id, adapter):\n phenotype_obj = {}\n phenotype = adapter.hpo_term(phenotype_id)\n if phenotype:\n phenotype_obj[\"phenotype_id\"] = phenotype[\"hpo_id\"]\n phenotype_obj[\"feature\"] = phenotype[\"description\"]\n return phenotype",
"def __init__(\n self,\n gene_lists,\n taxon,\n requests_per_sec=10,\n padj_threshold=0.05,\n log2_fc_threshold=0,\n fc_threshold=None,\n enrichment_fdr=0.05,\n annot_col=\"Name\",\n ):\n Ontology.__init__(self)\n PlotGOTerms.__init__(self)\n\n self.gene_lists = gene_lists\n self.enrichment_fdr = enrichment_fdr\n\n # users can set the fold change threshold in the log2 scale or normal\n # scale.\n assert log2_fc_threshold >= 0, \"log2 fc_threshold must be >=0\"\n if fc_threshold is not None:\n log2_fc_threshold = pylab.log2(fc_threshold)\n\n from bioservices import panther, quickgo\n\n self.quick_go_graph = QuickGOGraph()\n\n self.panther = panther.Panther(cache=True)\n self.valid_taxons = [x[\"taxon_id\"] for x in self.panther.get_supported_genomes()]\n self.summary = {}\n\n self._taxon = None\n self.taxon = taxon\n\n self.quickgo = quickgo.QuickGO(cache=True)\n self.quickgo.requests_per_sec = requests_per_sec\n self.quickgo.services.settings.TIMEOUT = 120\n\n self._ancestors = {\n \"MF\": \"GO:0003674\",\n \"CC\": \"GO:0005575\",\n \"BP\": \"GO:0008150\",\n \"SLIM_MF\": \"GO:0003674\",\n \"SLIM_CC\": \"GO:0005575\",\n \"SLIM_BP\": \"GO:0008150\",\n }\n self.ontologies.extend(\n [\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_MF\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_BP\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_CC\",\n \"ANNOT_TYPE_ID_PANTHER_PC\",\n \"ANNOT_TYPE_ID_PANTHER_PATHWAY\",\n \"ANNOT_TYPE_ID_REACTOME_PATHWAY\",\n ]\n )\n\n self.ontology_aliases.extend(\n [\n \"SLIM_MF\",\n \"SLIM_BP\",\n \"SLIM_CC\",\n \"PROTEIN\",\n \"PANTHER_PATHWAY\",\n \"REACTOME_PATHWAY\",\n ]\n )\n\n # panther accepts onyl ~2-3000 genes at max. Let us restrict the analysis\n # to the first 2000 genes based on their log2 fold change 2000 + and\n # 2000 negatives\n\n msg = \"Ignoring DEGs with adjusted p-value > {} and fold change in [{}, {}]\".format(\n padj_threshold, 1 / (2**log2_fc_threshold), 2**log2_fc_threshold\n )\n logger.info(msg)\n\n # used in report module\n self.summary[\"fold_change_range\"] = [\n 1 / (2**log2_fc_threshold),\n 2**log2_fc_threshold,\n ]\n self.summary[\"padj_threshold\"] = padj_threshold\n\n fc_threshold = log2_fc_threshold\n\n for x in sorted(gene_lists.keys()):\n\n N = len(gene_lists[x])\n logger.info(f\"Starting with {N} genes from category '{x}'\")\n\n self.summary[\"DGE_after_filtering\"] = {k: len(v) for k, v in gene_lists.items()}\n\n self.enrichment = {}\n self.stats = {}\n self.obsolets = []",
"def testTaxaData(self):\n try:\n numEukaryota = 0\n numBacteria = 0\n numVirus = 0\n numArchaea = 0\n numOther = 0\n numUnclass = 0\n logger.info(\"Loading taxonomy data\")\n tU = TaxonomyUtils()\n logger.info(\"Done loading taxonomy data\")\n iCount = 0\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId in entryD:\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n taxId = eD[\"ncbi_taxonomy_id\"] if \"ncbi_taxonomy_id\" in eD else None\n if taxId is None:\n logger.debug(\"Missing taxId entryId %s entityId %s\", entryId, entityId)\n continue\n # lin = tU.getLineage(taxId)\n # nmL = tU.getLineageNames(taxId)\n ok1 = tU.isEukaryota(taxId)\n if ok1:\n numEukaryota += 1\n ok3 = tU.isVirus(taxId)\n if ok3:\n numVirus += 1\n ok2 = tU.isBacteria(taxId)\n if ok2:\n numBacteria += 1\n #\n ok4 = tU.isArchaea(taxId)\n if ok4:\n numArchaea += 1\n #\n ok5 = tU.isOther(taxId)\n if ok5:\n numOther += 1\n #\n ok6 = tU.isUnclassified(taxId)\n if ok6:\n numUnclass += 1\n\n if ok1 and (ok1 and ok2):\n logger.info(\"taxid %r conflicting lineage\", taxId)\n #\n if not ok1 and not ok2 and not ok3 and not ok4 and not ok5 and not ok6:\n logger.info(\"unassigned taxid %r\", taxId)\n\n logger.debug(\"taxId %r entryId %s entityId %s\", taxId, entryId, entityId)\n iCount += 1\n # if iCount > 5000:\n # break\n logger.info(\"Eukaryota %d Bacteria %d Virus %d Archaea %d Other/Syn %r Unclass %d\", numEukaryota, numBacteria, numVirus, numArchaea, numOther, numUnclass)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def create_specimen_taxonomy():\n\n form = TaxonomyForm()\n if session.get(\"link\") is not None:\n if form.validate_on_submit():\n session[\"common_name\"] = (\n form.common_name.data or Taxonomy.common_name.default.arg\n )\n session[\"species\"] = form.species.data or Taxonomy.species.default.arg\n session[\"genus\"] = form.genus.data or Taxonomy.genus.default.arg\n session[\"family\"] = form.family.data or Taxonomy.family.default.arg\n session[\"order\"] = form.order.data or Taxonomy.order.default.arg\n session[\"phylum\"] = form.phylum.data or Taxonomy.phylum.default.arg\n session[\"kingdom\"] = form.kingdom.data or Taxonomy.kingdom.default.arg\n session[\"authorship\"] = (\n form.authorship.data or Taxonomy.authorship.default.arg\n )\n\n return redirect(\"/specimen/new/details\")\n\n else:\n return render_template(\n \"newspecimen.html\", form=form, step=\"taxonomy\"\n )\n else:\n return (\"\", 403)",
"def create_all_taxonomic_keys(point_locations: dict, location_species: dict, location_range_species: dict,\n trait_data: dict, all_taxa_data: dict) -> dict:\n\n all_keys = {}\n\n # find all unique sets of species\n species_sets = set()\n for p in point_locations:\n loc = point_locations[p]\n all_species = set()\n all_species |= location_species[loc.name]\n if loc.n_direct_children() > 0:\n for c in loc.direct_children():\n all_species |= fetch_child_data(c, location_species)\n\n range_species = set(find_species_by_name(s) for s in location_range_species[loc])\n all_species |= range_species\n if len(all_species) > 0:\n species_sets.add(frozenset(all_species))\n\n # create keys for each unique set of species\n warnings = set()\n for sp_set in species_sets:\n taxa_data = {}\n for s in sp_set:\n try:\n taxa_data[\"Male \" + s.binomial()] = all_taxa_data[\"♂ Male {{\" + s.species + \"}}\"]\n taxa_data[\"Female \" + s.binomial()] = all_taxa_data[\"♀ Female {{\" + s.species + \"}}\"]\n except KeyError:\n report_error(\"Missing taxonomic key data: \" + s.species)\n\n all_keys[sp_set], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, taxa_data, verbose=False)\n warnings |= new_warning\n\n # global key for all species\n all_keys[\"all\"], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, all_taxa_data, verbose=False)\n warnings |= new_warning\n\n for w in sorted(warnings):\n report_error(w)\n\n return all_keys",
"def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name = taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes",
"def duplicate_names(self, taxonomy, check_species=True):\n\n # get lineages for each taxon name\n taxon_lineages = defaultdict(set)\n for taxa in taxonomy.values():\n for i, taxon in enumerate(taxa):\n if len(taxon) > 3:\n taxon_lineages[taxon].add(';'.join(taxa[0:i + 1]))\n\n # identify taxon belonging to multiple lineages\n duplicates = {}\n for taxon, lineages in taxon_lineages.items():\n if len(lineages) >= 2:\n if not taxon.startswith('s__') or check_species:\n duplicates[taxon] = lineages\n\n return duplicates",
"def fill_taxonomy_database(taxids, password):\r\n\r\n for taxid in taxids:\r\n lineage = ncbi.get_lineage(taxid)\r\n names = ncbi.get_taxid_translator(lineage)\r\n print(lineage)\r\n print([names[taxid] for taxid in lineage])\r\n\r\n previous = \"\"\r\n\r\n for lin in lineage:\r\n if int(lin) != 1: # skipping 'root'\r\n rank = ncbi.get_rank([lin])\r\n SQL_connection = set_connection(password)\r\n cursor = SQL_connection.cursor(buffered=True)\r\n cursor.execute(\r\n \"select * \"\r\n \"from Taxonomie \"\r\n \"where taxonomy_ID = {};\".format(\r\n lin))\r\n results = cursor.fetchone()\r\n if results is None:\r\n if previous == \"\":\r\n cursor.execute(\"insert into Taxonomie \"\r\n \"(rank_up, taxonomy_ID, naam, rang) \"\r\n \"values(NULL, {}, '{}', '{}');\".format(\r\n lin, names[lin], rank[lin]))\r\n SQL_connection.commit()\r\n else:\r\n cursor.execute(\"insert into Taxonomie \"\r\n \"(rank_up, taxonomy_ID, naam, rang) \"\r\n \"values({}, {}, '{}', '{}');\".format(\r\n previous, lin, names[lin], rank[lin]))\r\n SQL_connection.commit()\r\n cursor.close()\r\n SQL_connection.close()\r\n previous = lin",
"def create_city():\n city = {}\n city['biysk'] = {}\n city['biysk']['barnaul'] = 9\n city['biysk']['novosibirsk'] = 11\n city['biysk']['belokurikha'] = 8\n city['barnaul'] = {}\n city['barnaul']['tomsk'] = 4\n city['belokurikha'] = {}\n city['belokurikha']['novosibirsk'] = 2\n city['novosibirsk'] = {}\n city['novosibirsk']['barnaul'] = 2\n city['novosibirsk']['tomsk'] = 5\n city['novosibirsk']['omsk'] = 20\n city['tomsk'] = {}\n city['tomsk']['krasnoyarsk'] = 6\n city['krasnoyarsk'] = {}\n city['krasnoyarsk']['omsk'] = 7\n city['omsk'] = {}\n return city",
"def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind"
] | [
"0.62028277",
"0.5643144",
"0.55928254",
"0.5586047",
"0.5550129",
"0.5545237",
"0.55178034",
"0.54141647",
"0.5405542",
"0.5331211",
"0.528797",
"0.52788687",
"0.5253945",
"0.52389264",
"0.5223071",
"0.52124864",
"0.516965",
"0.51686347",
"0.51635945",
"0.49660262",
"0.49572685",
"0.49468812",
"0.4933717",
"0.49297443",
"0.49112213",
"0.49094617",
"0.48972872",
"0.4891772",
"0.48882136",
"0.48873076"
] | 0.6710074 | 0 |
add an entry in the dic with key "human gene ID" and value "ortholog gene ID" | def add_gene(self, human_gene, ortholog):
if human_gene not in self.genes:
self.genes[human_gene] = list()
self.genes[human_gene].append(ortholog) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_gene_info(ensembl_info, word, value):\n if \"gene\" in word:\n if \"id\" in word:\n ensembl_info[\"ensembl_gene_id\"] = value\n elif \"start\" in word:\n ensembl_info[\"gene_start\"] = int(value)\n elif \"end\" in word:\n ensembl_info[\"gene_end\"] = int(value)\n return ensembl_info",
"def _add_chebi_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding ChEBI identifiers...\\n\")\n all_chebi = [k for k in map_dict if k.lower().startswith('chebi')]\n\n ch = ChEBI()\n\n for chebi_id in tqdm.tqdm(all_chebi, total=len(all_chebi)):\n uid = chebi_id.split(':')[-1]\n\n try:\n # query ChEBI API\n result = ch.getCompleteEntity(uid)\n except Exception as x:\n print(\"%s: %s\" % (chebi_id, x.__class__.__name__))\n continue\n\n to_add = []\n\n if hasattr(result, 'SecondaryChEBIIds'):\n to_add += [str(s) for s in result.SecondaryChEBIIds]\n\n if hasattr(result, 'OntologyChildren'):\n to_add += [str(ent.chebiId) for ent in result.OntologyChildren\n if ent.type in ('is conjugate acid of',\n 'is conjugate base of',\n 'is tautomer of')]\n\n if hasattr(result, 'OntologyParents'):\n to_add += [str(ent.chebiId) for ent in result.OntologyParents\n if ent.type in ('is conjugate acid of',\n 'is conjugate base of',\n 'is tautomer of')]\n\n for ent_id in to_add:\n new_id = '{}:{}'.format('ChEBI', ent_id.split(':')[-1])\n map_dict[chebi_id].add(new_id)\n\n return map_dict",
"def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes",
"def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1",
"def addDic(dic, elt):\n pass",
"def test_addDict(self):\n lidi = []\n lidi.append({'term': 'foo', 'tags': 'a', 'value': '1'})\n lidi.append({'term': 'bar', 'tags': 'a, b', 'value': '2'})\n lidi.append({'term': 'gnark', 'tags': 'a, c', 'value': '3'})\n self.g.add_dict(lidi)",
"def addToHistogram(s,d):\n\n if s in d:\n #if the character is in the dictionary, the amount of that character record increases \n d[s] += 1\n else:\n #if not in the dictionary, a new key & value will be made\n d[s] =1\n\n #lastly returns the dictionary itself \n return d",
"def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict",
"def gene_ID_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene ID\"]\n resD[keyI] = valueI\n\n return resD",
"def _add_uniprot_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding UniProt identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n all_uniprot = [k for k in map_dict if k.lower().startswith('uniprot')]\n\n for uniprot_id in tqdm.tqdm(all_uniprot, total=len(all_uniprot)):\n db, uid = uniprot_id.split(':')\n\n try:\n # query UniProt API\n r = r_session.get(\n 'http://www.uniprot.org/uniprot/' + uid + '.xml'\n )\n except Exception as x:\n print(\"%s: %s\" % (uniprot_id, x.__class__.__name__))\n continue\n\n if r.content:\n root = etree.fromstring(r.content)\n if root:\n for s in root[0]:\n if s.tag.endswith('accession'):\n new_id = '{}:{}'.format('UniProt', s.text.split(':')[-1])\n map_dict[uniprot_id].add(new_id)\n else:\n break\n\n return map_dict",
"def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict",
"def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details",
"def _add_item(dic: dict, keys: list, value):\n\tfor key in keys[:-1]:\n\t\tdic = dic.setdefault(key, {})\n\n\tdic[keys[-1]] = value",
"def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind",
"def add(self, key, value):",
"def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()",
"def add_protogene(self, protogene):\n if protogene.name:\n name = protogene.name\n else:\n name = str(self.num_protogenes())\n self.protogenes[name] = protogene",
"def write_concat_GO_dicts(self, *GO_dict):\n\n for i, j in zip(self.IDs[0:3], GO_dict):\n with open(i, 'w') as f:\n f.write('transcript_id\\tgene_ontology\\n')\n for k, v in j.iteritems():\n f.write(k + '\\t' + '\\t'.join(v) + '\\n')",
"def gencode_dic(gencode_file,gene_type_dic):\n gen_dic = {}\n for i in range(1,len(gencode_file)):\n words_gen = gencode_file[i].strip().split('\\t')\n chr_no = words_gen[2]\n trans_id = words_gen[1]\n cds_info = words_gen[13]\n cde_info = words_gen[14]\n gene_type = gene_type_dic[trans_id]\n gene_name = words_gen[12]\n TSS_start = int(words_gen[4])\n TSS_end = int(words_gen[5])\n CDS_start = int(words_gen[6])\n CDS_end = int(words_gen[7])\n strand = words_gen[3]\n start_list = [int(x) for x in words_gen[9].split(',')[:-1]]\n end_list = [int(x) for x in words_gen[10].split(',')[:-1]]\n exon_no = int(words_gen[8])\n# if (chr_no,trans_id) in gen_dic: #Some trans_id are not unique, especially transcripts in chrX and chrY\n# print trans_id\n interval_list = [P.closedopen(start_list[x],end_list[x]) for x in range(0,exon_no)]\n interval_merge = P.empty()\n for i in range(0,len(interval_list)):\n interval_merge = interval_merge | interval_list[i]\n if gene_type == 'protein_coding':\n if (cds_info == 'cmpl') and (cde_info == 'cmpl'):\n # print (interval_merge)\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n else:\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n return gen_dic",
"def add(self, val: dict):\n keys = self.header or val.keys()\n vals = tuple(val.get(k) for k in keys)\n self[vals] += 1",
"def process_gene_line(self, line):\n kwargs = self.extract_gene_args(line)\n if not kwargs:\n return\n gene_id = kwargs['identifier']\n self.genes[gene_id] = Gene(**kwargs)",
"def _addCounterToMap(probeMap, counter, index):\n if counter.probe in probeMap:\n probeMap[counter.probe].append(index)\n else:\n probeMap.update({counter.probe : [index]})",
"def internal_id_append(json_keys, json):\n candidate_id = 0\n for name in list(json_keys):\n json[name]['internal_id'] = candidate_id\n candidate_id += 1\n return json",
"def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)",
"def addOmimAnnotation(merged_data, OmimAnnotationFile):\n omim_genes = dict.fromkeys(list(OmimAnnotationFile['ENSID']))\n has_omim = []\n for index, row in merged_data.iterrows():\n human_ensid = str(row['Human ENSID'])\n if human_ensid in omim_genes:\n has_omim.append('t')\n else:\n has_omim.append('f')\n\n merged_data['Has Omim Annotation'] = has_omim\n return",
"def _add_new_word(self, word):\n if word not in self.word_to_id:\n word_id = len(self.word_to_id)\n self.word_to_id[word] = word_id\n self.id_to_word[word_id] = word",
"def append_data(dic,key,value):\n if(dic.has_key(key)):\n dic[key].append(value)\n else:\n dic[key] = [value]\n return dic",
"def add_dict_entry(dictionary: dict, key: Any, value: Any) -> None:\n try:\n dictionary[key].append(value)\n except KeyError:\n dictionary[key] = [value]",
"def buildDict(self, dict):\n for word in dict:\n self.add(word)",
"def add(self, item):\n self._dict[item] = item"
] | [
"0.65119916",
"0.5768898",
"0.5745668",
"0.5678076",
"0.56137764",
"0.5577298",
"0.5574873",
"0.5562769",
"0.55310345",
"0.54852855",
"0.5448665",
"0.5448038",
"0.5416714",
"0.54129606",
"0.54046834",
"0.539394",
"0.5387726",
"0.53722453",
"0.5369394",
"0.53620994",
"0.5358258",
"0.53442824",
"0.5333245",
"0.5328457",
"0.53133875",
"0.5296068",
"0.5284008",
"0.52584064",
"0.5251175",
"0.5251093"
] | 0.7082176 | 0 |
Execute default analysis with baySeq | def run_bayseq(self):
try:
res = robjects.r('library("parallel")')
res = robjects.r('library("stats4")')
res = robjects.r('library("BiocGenerics")')
res = robjects.r('library("S4Vectors")')
res = robjects.r('library("IRanges")')
res = robjects.r('library("GenomeInfoDb")')
res = robjects.r('library("abind")')
# res = robjects.r('library("perm")')
res = robjects.r('library("GenomicRanges")')
res = robjects.r('library("baySeq")')
res = robjects.r('if(require("parallel")) cl <- makeCluster(4) else cl <- NULL')
ct = 'table <- read.csv("' + self._table_count + '", row.names = 1, header = TRUE, stringsAsFactors = FALSE)'
res = robjects.r(ct)
res = robjects.r('m <- as.matrix(table)')
replicates = ""
assert isinstance(self._replic, int)
for ind in iter(self._groups_name):
aux = "'" + ind + "', "
replicates = replicates + aux * self._replic
replicates = replicates[:(len(replicates) - 2)]
replicates = 'replicates <- c(' + replicates + ')'
res = robjects.r(replicates)
groups = 'groups <- list(NDE = c(' + "1," * (len(self._groups_name) * self._replic)  # one "1" per library so NDE matches the number of count columns
groups = groups[:(len(groups) - 1)] + ')'
groups = groups + ', DE = c('+ '1,' * self._replic
groups = groups + '2,' * self._replic
groups = groups[:(len(groups) - 1)] + "))"
print(groups)
res = robjects.r(groups)
res = robjects.r('CD <- new("countData", data = m, replicates = replicates, groups = groups)')
res = robjects.r('libsizes(CD) <- getLibsizes(CD)')
res = robjects.r('CD <- getPriors.NB(CD, samplesize = 1000, estimation = "QL", cl = cl, equalDispersions = TRUE)')
res = robjects.r('CD <- getLikelihoods(CD, prs=c(0.5, 0.5), pET="BIC", cl=cl)')
# CD.posteriors.DE <- exp(CD@posteriors)[, 2]
res = robjects.r('write.table(topCounts(CD, group = "DE", number = 65000, normaliseData = TRUE), "' + self._output +'", sep="\t", quote = FALSE)')
self._message.message_9("--- baySeq is completed!")
except RRuntimeError as rre:
self._message.message_9("Error in baySeq execution: " + str(rre))
raise rre | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1",
"def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()",
"def run(self) :\n# print \"evaluating with laban\"\n # currently, labanx reads from a preset file\n os.system('labanx '+str(self.rank)+\" \"+self.input+\" \"+self.output)",
"def run(self) -> None:\n barcoded = BarcodedFilename.from_sample(self.analysis.sample)\n\n if barcoded.analyte == Analyte.RNASEQ:\n if self.analysis.parameters[\"rnaseq_aligner\"] == RnaSeqAligner.STAR:\n self.star()\n else:\n raise Exception(\"unexpected aligner for this type of sample\")\n else:\n if self.analysis.parameters[\"aligner\"] == GenericAligner.NOVOALIGN:\n self.novoalign()\n elif self.analysis.parameters[\"aligner\"] == GenericAligner.BWA:\n self.bwa()\n else:\n raise Exception(\"unexpected aligner for this type of sample\")",
"def executeAnalysis(config, samples, visitor):\n # store cuts in \"info\" (re-created from TQCuts)\n # ROOT.xAOD.clearTransientTrees()\n #nEventsProcessed = 0\n\n CLI = config.getFolder(\"CLI+\")\n\n # flag indicating to run analysis in debug mode\n debug = CLI.getTagBoolDefault(\"debug\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n downmerge = CLI.getTagBoolDefault(\"downmerge\",False)\n downmergeTo = CLI.getTagStandardStringDefault(\"downmergeTo\",\"\")\n\n pathselect = CLI.getTagVStandardString(\"pathselect\")\n\n if debug:\n maxEvents = 100\n else:\n maxEvents = config.getTagIntegerDefault(\"maxEvents\",-1)\n\n # proceed with analysis\n appname = QFramework.TQLibrary.getApplicationName().Data()\n visitor.setVisitTraceID(appname)\n if maxEvents > 0:\n QFramework.WARN(\"setting maximum number of events per sample to {:d}\".format(maxEvents))\n visitor.setMaxEvents(maxEvents)\n QFramework.TQLibrary.allowRedirection(False)\n timer = ROOT.TStopwatch()\n nsamples = 0\n if pathselect.size() > 0:\n paths = ROOT.TString(\",\".join(map(str,pathselect)))\n else:\n # Read in sample folder restrictions and convert to a single comma-\n # separated string, the same format as it would be passed in via CLI.\n # Can't use `join` since this is a vector<TString>\n # Can't read in the field as a single string with getTagString,\n # perhaps since it has commas\n paths = \"\"\n for path in config.getTagVString(\"restrict\"):\n paths += path.Data() + \",\"\n paths = ROOT.TString(paths[:-1])\n if paths.Length() != 0:\n if not dummy:\n nsamples = samples.visitSampleFolders(visitor,paths)\n QFramework.TQLibrary.recordMemory()\n QFramework.TQObservable.clearAll()\n QFramework.TQLibrary.recordMemory()\n if downmerge or downmergeTo:\n downmergeTargets = downmergeTo\n if not downmergeTargets:\n downmergeTargets = paths\n samples.setTag(\".generalize.histograms\",True,downmergeTargets)\n samples.setTag(\".generalize.cutflow\",True,downmergeTargets)\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on paths '{:s}'\".format(pathselect))\n else:\n if not dummy:\n nsamples = samples.visitMe(visitor)\n QFramework.TQLibrary.recordMemory()\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on root sample folder\")\n\n # TODO: put the rest of this in a separate function like for post processing?\n # right now nsamples is returned but nothing is done with it\n if nsamples > 0:\n if downmerge or downmergeTo:\n samples.generalizeObjects(\".generalize\")\n timer.Stop()\n\n # TODO: put this section in its own function (with cuts available)\n # just get cuts from visitor? 
(will need to provide a channel in the MCASV case I think)\n if config.getTagBoolDefault(\"checkRun\",True):\n\n if dummy:\n allevents = QFramework.TQCounter(\"dummy\",0,0,0)\n else:\n if isinstance(visitor,QFramework.TQAnalysisSampleVisitor):\n allevents = samples.getCounter(\".\",visitor.getBaseCut().GetName())\n elif isinstance(visitor,QFramework.TQMultiChannelAnalysisSampleVisitor):\n channels = config.getTagVString(\"channels\")\n allevents = samples.getCounter(\".\",visitor.getBaseCut(channels[0]).GetName())\n\n if nsamples > 0:\n # debugging printout\n # TODO: make separate method?\n if config.getTagBoolDefault(\"printCounterValues\",False):\n samples.printListOfCounters()\n printhists = config.getTagVString(\"printHistogramsASCII\")\n for hist in printhists:\n h = samples.getHistogram(\".\",hist)\n if h:\n QFramework.TQHistogramUtils.printHistogramASCII(h)\n else:\n QFramework.ERROR(\"unable to access histogram '{:s}'\".format(hist))\n\n else:\n QFramework.ERROR(\"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\")\n runtime = config.getFolder(\"runtime+\")\n # store in runtime folder the fact that no samples were visited in the form of an error string\n analysisError = \"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\"\n runtime.setTagString(\"analysisError\", analysisError)\n #don't quit just now, but instead we'll write an alternative output file later which basically states \"job didn't crash but there is a small chance something went wrong\"\n #quit()\n\n #return nEventsProcessed\n return nsamples",
"def runsbeana(self):",
"def run_analys(global_config, sample_config):\n # check that what I am going to run is available on the path and on the \n # global config\n common._check_pipeline(sample_config, global_config)\n pipeline = sample_config[\"pipeline\"] # pipeline/analysis to be executed\n # this stopped to --> workgetattr(__import__(command), \"run\")\n command_fn = getattr(globals()[pipeline], \"run\")\n command_fn(global_config, sample_config)",
"def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table",
"def single_analysis(config, name):\n # graphviz = GephiOutput()\n graphviz = GraphvizOutput()\n graphviz.output_file = name\n\n print \"Preparing test case...\"\n radio, lines = _prepare_test_case()\n\n print \"Running test case...\"\n with PyCallGraph(output=graphviz, config=config):\n _run_test_case(radio, lines)",
"def test_pandaseq_assembly(self):\n\n # write temp files\n self.writeTmpFastq(self.test_fn1, self.test_fn2)\n\n ### Run with recomended default function params ##\n params = {}\n params['-f'] = self.test_fn1\n params['-r'] = self.test_fn2\n \n pandaseq_app = PandaSeq(params=params,\n WorkingDir=self.temp_dir_string)\n\n pandaseq_app.Parameters['-F'].on()\n\n res = pandaseq_app([self.test_fn1, self.test_fn2])\n\n # assembly is sent to StdOut, check output\n self.assertEqual(res['StdOut'].read(), expected_default_assembly)\n \n res.cleanUp()\n\n ### Run with altered params ###\n # run with out -F option (output is FASTA format)\n params2 = {}\n params2['-f'] = self.test_fn1\n params2['-r'] = self.test_fn2\n \n pandaseq_app2 = PandaSeq(params=params2,\n WorkingDir=self.temp_dir_string)\n \n res2 = pandaseq_app2([self.test_fn1, self.test_fn2])\n\n # assembly is sent to StdOut, check output\n self.assertEqual(res2['StdOut'].read(), expected_default_assembly_fasta)\n \n res2.cleanUp()\n shutil.rmtree(self.temp_dir_string)",
"def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)",
"def bct_analysis():\n # Detect the number of active documents.\n num_active_docs = detect_active_docs()\n # Get labels with their ids.\n id_label_map = \\\n FileManagerModel().load_file_manager().get_active_labels_with_id()\n\n # Fill in default options.\n if 'analyoption' not in session:\n session['analyoption'] = constants.DEFAULT_ANALYZE_OPTIONS\n if 'bctoption' not in session:\n session['bctoption'] = constants.DEFAULT_BCT_OPTIONS\n\n try:\n from lexos.models.bct_model import BCTModel\n # Use a black hole variable to hold the model to get rid of warning.\n _ = BCTModel()\n # Render the HTML template.\n return render_template(\n 'bct_analysis.html',\n itm=\"bct-analysis\",\n labels=id_label_map,\n numActiveDocs=num_active_docs\n )\n except ImportError:\n return render_template(\n 'bct_analysis_import_error.html',\n itm=\"bct-analysis\"\n )",
"def run(bmark):\r\n raise Exception(\"Not implemented\")",
"def call(args) :\n from caller import bam_call\n bam_call(args)",
"def run_analysis(self, query, key=None):\n logger.info(\"Running analysis on query...\")\n core_annotation = Annotation(query, key)\n clf_pipeline = AnalysisPipeline()\n entity_pipeline = AnalysisPipeline()\n clf = self.clf_accessor.get_classification_pipeline('multiclass', 'intent_classifier')\n\n \"\"\" Create the IntentClassificationAnnotator using the pipeline 'clf' \"\"\"\n clf_annotator = IntentClassificationAnnotator('clf', clf)\n clf_pipeline.add_annotator(clf_annotator)\n \"\"\" Run clf_pipeline to obtain intent classification \"\"\"\n core_annotation = clf_pipeline.analyze(core_annotation)\n \"\"\" Ensure classification results exists, otherwise raise AnalyzerError \"\"\"\n if core_annotation.annotations['results']['classification'] is []:\n raise AnalyzerError(\"No intent classification results.\")\n \"\"\" Create annotators based on entity types of intent classification \"\"\"\n entities = core_annotation.annotations['entity_types']\n\n \"\"\" Obtain gazetteers associated with the given key \"\"\"\n gazetteers = self.gaz_accessor.get_gazeteers(key)\n\n logger.debug(\"Core annotation intents: {0}\".format(core_annotation.annotations['results']['classification']))\n logger.debug(\"Core annotation entities: {0}\".format(core_annotation.annotations['entity_types']))\n logger.debug(\"Core annotation stopwords: {0}\".format(core_annotation.annotations['stopwords']))\n\n \"\"\" Iterate over entities and create an the appropriate Annotator based on the entity_type \"\"\"\n for entity in entities:\n \"\"\" Access the binary classifier for the appropriate entity types and create BinaryClassifierAnnotator\"\"\"\n if entity['entity_type'] == 'binary_classifier':\n logger.debug(\"Creating BinaryClassificationAnnotator for: {0}\".format(entity['entity_name']))\n clf = self.clf_accessor.get_classification_pipeline('binary_classifier', entity['entity_name'])\n binary_clf_annotator = BinaryClassificationAnnotator(entity['entity_name'], clf)\n entity_pipeline.add_annotator(binary_clf_annotator)\n\n \"\"\" Create a RegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'regex':\n logger.debug(\"Creating RegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = RegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a BinaryRegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'binary_regex':\n logger.debug(\"Creating BinaryRegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = BinaryRegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a NaiveNumberAnnotator for each number entity type\"\"\"\n if entity['entity_type'] == 'number':\n logger.debug(\"Creating NaiveNumberAnnotator for: {0}\".format(entity['entity_name']))\n number_annotator = NaiveNumberAnnotator(entity['entity_name'], NumberExtractor())\n entity_pipeline.add_annotator(number_annotator)\n\n \"\"\" Create a FuzzyMatchAnnotator for each fuzzy_match entity type\"\"\"\n if entity['entity_type'] == 'fuzzy_match':\n logger.debug(\"Creating FuzzyFindAnnotator for: {0}\".format(entity['entity_name']))\n logger.debug(\"Entity Keywords: {}\".format(entity['keywords']))\n fuzzy_matcher_annotator = FuzzyMatcherAnnotator(entity['entity_name'], FuzzyMatcher(), entity['keywords'])\n entity_pipeline.add_annotator(fuzzy_matcher_annotator)\n\n \"\"\" Create a DatetimeAnnotator for each 
number entity type\"\"\"\n if entity['entity_type'] == 'datetime':\n logger.debug(\"Creating DatetimeAnnotator for: {0}\".format(entity['entity_name']))\n duckling_instance = self.duckling_factory.getDucklingInstance()\n parser = DucklingDatetimeParser(duckling_instance)\n datetime_annotator = DatetimeAnnotator(entity['entity_name'], parser)\n entity_pipeline.add_annotator(datetime_annotator)\n\n \"\"\" Access the gazetteer for the appropriate entity types and create an GazetteerAnnotator \"\"\"\n if entity['entity_type'] == 'gazetteer' or entity['entity_type'] == 'simple_gazetteer':\n if gazetteers is not None:\n logger.debug(\"Creating GazetteerAnnotator for: {0}\".format(entity['entity_name']))\n \"\"\" Check to make sure gazetteers contains the gazetteer type to avoid key error \"\"\"\n if entity['entity_name'] in gazetteers.keys():\n gaz_annotator = GazetteerAnnotator(entity['entity_name'], gazetteers[entity['entity_name']])\n entity_pipeline.add_annotator(gaz_annotator)\n\n core_annotation = entity_pipeline.analyze(core_annotation)\n return core_annotation.annotations['results']",
"def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()",
"def main():\n args = parse_args(sys.argv[1:])\n\n if args.version:\n print(birdvoxclassify.version.version)\n return\n\n if args.quiet:\n logger_level = 30\n elif args.verbose:\n logger_level = 20\n else:\n logger_level = 25\n\n run(args.inputs,\n output_dir=args.output_dir,\n output_summary_path=args.output_summary_path,\n model_name=args.model_name,\n batch_size=args.batch_size,\n select_best_candidates=args.select_best_candidates,\n hierarchical_consistency=args.hierarchical_consistency,\n suffix=args.suffix,\n logger_level=logger_level)",
"def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")",
"def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()",
"def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!",
"def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n return",
"def run(self, fileStore):\n work_dir = fileStore.getLocalTempDir()\n fastaFile = os.path.join(work_dir, 'seq.fa')\n fileStore.readGlobalFile(self.fastaID, fastaFile)\n\n # download the model\n modelFile = os.path.join(work_dir, 'model.knm')\n assert os.environ.get(\"CACTUS_DNA_BRNN_MODEL_ID\") is not None \n modelID = os.environ.get(\"CACTUS_DNA_BRNN_MODEL_ID\")\n fileStore.readGlobalFile(modelID, modelFile)\n\n # ignore existing model flag\n if '-i' in self.dnabrnnOpts:\n i = self.dnabrnnOpts.index('-i')\n del self.dnabrnnOpts[i]\n del self.dnabrnnOpts[i]\n\n cmd = ['dna-brnn', fastaFile] + self.dnabrnnOpts.split() + ['-i', modelFile]\n \n if self.cores:\n cmd += ['-t', str(self.cores)]\n\n bedFile = os.path.join(work_dir, 'regions.bed')\n\n # run dna-brnn to make a bed file\n cactus_call(outfile=bedFile, parameters=cmd)\n\n if self.mergeLength is None:\n self.mergeLength = 0\n if self.minLength is None:\n self.minLength = 0\n \n # merge up the intervals into a new bed file\n mergedBedFile = os.path.join(work_dir, 'filtered.bed')\n merge_cmd = []\n merge_cmd.append(['awk', '{{if($3-$2 > {}) print}}'.format(self.minLength), bedFile])\n merge_cmd.append(['bedtools', 'sort', '-i', '-'])\n merge_cmd.append(['bedtools', 'merge', '-i', '-', '-d', str(self.mergeLength)]) \n cactus_call(outfile=mergedBedFile, parameters=merge_cmd)\n\n maskedFile = os.path.join(work_dir, 'masked.fa')\n \n if self.action in ('softmask', 'hardmask'):\n mask_cmd = ['cactus_fasta_softmask_intervals.py', '--origin=zero', bedFile]\n if self.minLength:\n mask_cmd += ['--minLength={}'.format(self.minLength)]\n if self.action == 'hardmask':\n mask_cmd += ['--mask=N']\n # do the softmasking\n cactus_call(infile=fastaFile, outfile=maskedFile, parameters=mask_cmd)\n else:\n assert self.action == \"clip\"\n # to clip, we need a bed of the regions we want to *keep*. 
We'll start with the whole thing\n allRegionsFile = os.path.join(work_dir, 'chroms.bed')\n cactus_call(parameters=['samtools', 'faidx', fastaFile])\n cactus_call(outfile=allRegionsFile, parameters=['awk', '{print $1 \"\\\\t0\\\\t\" $2}', fastaFile + '.fai'])\n # load the contig lengths\n contig_lengths = {}\n with open(fastaFile + '.fai', 'r') as fai:\n for line in fai:\n toks = line.strip().split('\\t')\n contig_lengths[toks[0]] = int(toks[1])\n # now we cut out the regions\n clippedRegionsFile = os.path.join(work_dir, 'clipped.bed')\n cactus_call(outfile=clippedRegionsFile, parameters=['bedtools', 'subtract', '-a', allRegionsFile, '-b', mergedBedFile])\n # now we make a fiadx input regions\n faidxRegionsFile = os.path.join(work_dir, 'faidx_regions.txt')\n with open(clippedRegionsFile, 'r') as clipFile, open(mergedBedFile, 'a') as mergeFile, open(faidxRegionsFile, 'w') as listFile:\n for line in clipFile:\n toks = line.strip().split(\"\\t\")\n if len(toks) > 2:\n seq, start, end = toks[0], int(toks[1]), int(toks[2])\n if end - start > self.minLength or contig_lengths[seq] <= self.minLength:\n region = seq\n if end - start < contig_lengths[seq]:\n # go from 0-based end exlusive to 1-based end inclusive when\n # converting from BED to samtools region\n region += ':{}-{}'.format(start + 1, end)\n else:\n assert start == 0 and end == contig_lengths[seq]\n listFile.write('{}\\n'.format(region))\n else:\n # the region was too small, we remember it in our filtered bed file\n mergeFile.write(line)\n # and cut the fasta apart with samtools\n cactus_call(outfile=maskedFile, parameters=['samtools', 'faidx', fastaFile, '-r', faidxRegionsFile])\n \n return fileStore.writeGlobalFile(maskedFile), fileStore.writeGlobalFile(bedFile), fileStore.writeGlobalFile(mergedBedFile)",
"def bwa(self) -> None:\n self.analysis.logger.info(\"Running alignment with BWA\")\n self.chdir()\n config = self.analysis.config\n executor = Executor(self.analysis)\n executor(\n f\"{config.bwa} mem -t 6 -L 5,10 -v 1 {{genome_ref}} \"\n f\"{{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n self.analysis.logger.info(\"Alignment finished. Aligner used: BWA\")",
"def _cmd_bintest(args):\n cnarr = read_cna(args.cnarray)\n segments = read_cna(args.segment) if args.segment else None\n sig = do_bintest(cnarr, segments, args.alpha, args.target)\n tabio.write(sig, args.output or sys.stdout)",
"def RUN(self):",
"def run(config=None):\n AlignmentWorkflow().run(config)",
"def __call__(self, seq_path, result_path=None, log_path=None):\r\n raise NotImplementedError(\"Aligner is an abstract class\")",
"def skesa_assemble(self):\n with progressbar(self.metadata) as bar:\n for sample in bar:\n # Initialise the assembly command\n sample.commands.assemble = str()\n try:\n if sample.general.trimmedcorrectedfastqfiles:\n # If the sample is a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline\n try:\n status = sample.run.Description\n except AttributeError:\n status = 'unknown'\n if status == 'metagenome':\n self.merge(sample)\n else:\n # Set the output directory\n sample.general.assembly_output = os.path.join(sample.general.outputdirectory,\n 'assembly_output')\n make_path(sample.general.assembly_output)\n sample.general.assemblyfile = os.path.join(sample.general.assembly_output,\n '{name}_unfiltered.fasta'\n .format(name=sample.name))\n sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output,\n '{name}.fasta'\n .format(name=sample.name))\n fastqfiles = sample.general.trimmedcorrectedfastqfiles\n\n # Set the the forward fastq files\n sample.general.assemblyfastq = fastqfiles\n forward = fastqfiles[0]\n gz = True if '.gz' in forward else False\n # If there are two fastq files\n if len(fastqfiles) == 2:\n # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--use_paired_ends --vector_percent 1 ' \\\n '--contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Same as above, but use single read settings for the assembler\n else:\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--vector_percent 1 --contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Specify that the files are gzipped\n if gz:\n sample.commands.assemble += ' --gz'\n # If there are no fastq files, populate the metadata appropriately\n else:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.bestassemblyfile = 'NA'\n except AttributeError:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.trimmedcorrectedfastqfiles = 'NA'\n sample.general.bestassemblyfile = 'NA'\n if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile):\n # Run the assembly\n out, err = run_subprocess(sample.commands.assemble)\n write_to_logfile(sample.commands.assemble,\n sample.commands.assemble,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)\n write_to_logfile(out,\n err,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)",
"def main():\n # Define Parser object and add to Toil\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)\n subparsers = parser.add_subparsers(dest='command')\n # Generate subparsers\n subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')\n subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')\n subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')\n # Run subparser\n parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')\n group = parser_run.add_mutually_exclusive_group()\n parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,\n help='Path to the (filled in) config file, generated with \"generate-config\".')\n group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,\n help='Path to the (filled in) manifest file, generated with \"generate-manifest\". '\n '\\nDefault value: \"%(default)s\".')\n group.add_argument('--sample', nargs='+', action=required_length(2, 3),\n help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')\n # Print docstring help if no arguments provided\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n Job.Runner.addToilOptions(parser_run)\n args = parser.parse_args()\n # Parse subparsers related to generation of config and manifest\n cwd = os.getcwd()\n if args.command == 'generate-config' or args.command == 'generate':\n generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)\n if args.command == 'generate-manifest' or args.command == 'generate':\n generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)\n # Pipeline execution\n elif args.command == 'run':\n require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))\n if not args.sample:\n args.sample = None\n require(os.path.exists(args.manifest), '{} not found and no sample provided. '\n 'Please run \"generate-manifest\"'.format(args.manifest))\n # Parse config\n parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}\n config = argparse.Namespace(**parsed_config)\n config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint\n samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)\n # Sanity checks\n require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))\n require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))\n # Launch Pipeline\n Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)",
"def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)"
] | [
"0.5965446",
"0.5941467",
"0.5888768",
"0.5804564",
"0.57392657",
"0.5699895",
"0.5644962",
"0.5548859",
"0.5535779",
"0.5506015",
"0.54572743",
"0.54180855",
"0.5404095",
"0.54018456",
"0.53995997",
"0.53812164",
"0.53697616",
"0.5368945",
"0.5345558",
"0.5340646",
"0.5333773",
"0.5324846",
"0.53215164",
"0.532145",
"0.5311923",
"0.5310135",
"0.5288766",
"0.5287111",
"0.52798647",
"0.5254399"
] | 0.6089528 | 0 |
Returns the requested income range view in full detail. | def GetIncomeRangeView(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getRange(self):\n return self.range",
"def displayActiveRange(selforcls):\n vRange = selforcls.activeRange()\n try:\n vRange = (selforcls.toDisplay(min(vRange)),\n selforcls.toDisplay(max(vRange)))\n except AttributeError:\n pass # toDisplay() only in ParameterFloat\n return vRange",
"def get_display_trange(self) -> float:\n return float(self.query(':timebase:range?'))",
"def range (self):\n return self._range",
"def range (self):\n return self._range",
"def range(self):\n return self.range_array",
"def _get_sight_range(self):\n raise NotImplementedError",
"def range(self):\n \n return self._range",
"def test_get_range(self):\n pass",
"def get_featureRange(self):\n\n return self.featureRange",
"def income_report_gen(start, end):\n payments = get_income(start, end)\n row_title = [\"Name\", \"Boat\", \"Rent Day\", \"Pay Day\", \"Amount\"]\n data = []\n for payment in payments:\n temp = []\n for title, value in payment.items():\n temp.append(str(value))\n data.append(temp)\n row_format = \"{:>15}\" * (len(row_title)+1)\n print(row_format.format(\"\", *row_title))\n total_income = 0\n for i in range(len(data)):\n print(row_format.format(i+1, *data[i]))\n total_income += int(data[i][4])\n print(row_format.format(\"SUM\", *([\"--------------\"] * 4), str(total_income)))",
"def get_range(self):\n if self.battery_size == 40:\n range = 150\n elif self.battery_size == 65:\n range = 225\n print(f\"This car can go about {range} miles on a full charge.\")",
"def range_(self):\n return self.bset.range_",
"def show_total(request):\n user_id = request.user\n end_date = datetime.datetime.utcnow()\n start_date = end_date.replace(day=1,\n hour=datetime.time(0, 0, 0).hour,\n minute=datetime.time(0, 0, 0).minute,\n second=datetime.time(0, 0, 0).second)\n total = 0\n incomes_to_date = IncomeHistory.objects.filter(date__range=(start_date, end_date),\n income_id__owner_id=user_id)\n if not incomes_to_date:\n return HttpResponse(0, status=200)\n\n for income in incomes_to_date:\n if income.is_active:\n total = total + income.value\n return HttpResponse(total, status=200)",
"def get_range(self):\r\n\t\tif self.battery_size == 70:\r\n\t\t\trange = 240\r\n\t\telif self.battery_size == 85:\r\n\t\t\trange = 270\r\n\t\t\t\r\n\t\tmessage = \"This car can go approx. \" + str(range)\r\n\t\tmessage += \" miles on a full charge.\"\r\n\t\tprint(message)",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge\")",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f'This car can go about {range} miles on a full charge.')",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go to about {range} miles on a full charge.\")",
"def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass",
"def search_geoloc_range_free_loc(request):\n\n template_var = {\n }\n\n return template_var",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge.\")",
"def roi(self):\n return super().get_queryset().exclude(\n outcome__isnull=True\n ).all().aggregate(\n roi=Sum('profit') / Sum('size_matched')\n )['roi']",
"def range(self):\n return self.timerange()",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n \n print(f\"This car can go about {range} miles on a full charge.\")",
"def range_table(self):\n raise NotImplementedError('Abstract method.')",
"def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_get_gain_range(self, *args)",
"def test_data_with_range_view(self):\n\n self.create_model()\n self.create_machine()\n self.insert_data()\n\n date_literal = '%Y-%m-%d'\n start_date = dt.today()\n end_date = start_date + datetime.timedelta(days=1)\n\n self.create_user_account_and_login()\n query_url = self.range_url + '/' + self.data['mid'] + \\\n '/?s=' + dt.strftime(start_date, date_literal) + \\\n '&e=' + dt.strftime(end_date, date_literal)\n\n response = self.client.get(query_url)\n results = json.loads(response.content)\n\n self.assertEquals(len(results), 2)",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def __set_range_to_show(self) -> None:\n cantus_firmus_positions = [\n line_element.scale_element.position_in_semitones\n for line_element in self.cantus_firmus\n ]\n cantus_firmus_lower_bound = min(cantus_firmus_positions)\n cantus_firmus_upper_bound = max(cantus_firmus_positions)\n\n counterpoint_lower_bound = self.lowest_element.position_in_semitones\n counterpoint_upper_bound = self.highest_element.position_in_semitones\n\n self.lowest_row_to_show = min(\n cantus_firmus_lower_bound,\n counterpoint_lower_bound\n )\n self.highest_row_to_show = max(\n cantus_firmus_upper_bound,\n counterpoint_upper_bound\n )",
"def get_income(start, end):\n\n payments = session.query(part2.Sailors.name, part2.Payments.bid, part2.Payments.day, part2.Payments.payDay, part2.Payments.amount).\\\n select_from(part2.Payments). \\\n join(part2.Sailors, part2.Sailors.id == part2.Payments.sid). \\\n filter(part2.Payments.day >= start). \\\n filter(part2.Payments.day <= end). \\\n all()\n results = []\n for payment in payments:\n results.append({\"name\": payment[0], \"boat\": payment[1], \"rent_day\": payment[2], \"pay_day\": payment[3], \"amount\": payment[4]})\n return results"
] | [
"0.5824725",
"0.5612662",
"0.55873716",
"0.54605365",
"0.54605365",
"0.5444421",
"0.5324229",
"0.5322225",
"0.5300629",
"0.5277442",
"0.5269223",
"0.5262803",
"0.5230335",
"0.52228105",
"0.5218109",
"0.51848346",
"0.5179088",
"0.51768607",
"0.5176457",
"0.51640767",
"0.51595587",
"0.5153438",
"0.51508856",
"0.5130879",
"0.51223975",
"0.5076467",
"0.50653094",
"0.5058624",
"0.5058521",
"0.50528425"
] | 0.700728 | 0 |
Runs a single byte through the packet parsing state machine. Returns NOT_DONE if the packet is incomplete. Returns SUCCESS if the packet was received successfully. Returns CHECKSUM if a checksum error is detected. | def process_byte(self, byte):
if self.index == -1:
if byte == 0xff:
self.index = 0
self.checksum = 0
elif self.index == 0:
if byte != 0xff:
self.checksum += byte
self.pkt_bytes[0] = byte
self.index += 1
else:
self.checksum += byte
self.pkt_bytes[self.index] = byte
self.index += 1
if self.index == 7: # packet complete
self.index = -1
if self.checksum & 0xff != 0xff:
return CommanderRx.CHECKSUM
self.lookv = self.pkt_bytes[0] - 128 # 0 - 255 ==> -128 - 127
self.lookh = self.pkt_bytes[1] - 128
self.walkv = self.pkt_bytes[2] - 128
self.walkh = self.pkt_bytes[3] - 128
self.button = self.pkt_bytes[4]
self.ext = self.pkt_bytes[5]
return CommanderRx.SUCCESS
return CommanderRx.NOT_DONE | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readCommand(self):\n while (True):\n time.sleep(1)\n # At least a package of 4 bytes (minimum)\n # [ Head | Length | Address | Data[0…N] | Check ]\n if (self._serial.inWaiting()>=4):\n # Gets only the first byte of the packet (it should be HEAD)\n packet_header = self._serial.read(1)\n if (packet_header != Ind903Packet.PACKET_HEAD):\n # the next one is the length of the packet\n packet_length_bytes = self._serial.read(1)\n packet_length = int.from_bytes(packet_length_bytes, byteorder='big')\n if (packet_length > 0):\n raw_packet = b\"\".join([packet_header, packet_length_bytes, self._serial.read(packet_length)]) \n result_packet = Ind903Packet.parsePacket(raw_packet)\n return (result_packet)",
"def _receive_packet(self):\n report = self._serial_read(1)\n if len(report) != 1:\n self.log(\"ERROR: Didn't read back a report!\")\n report = -1\n else:\n report = report[0]\n retval = self._serial_read(1)\n if len(retval) != 1:\n self.log(\"ERROR: Didn't read back a return value!\")\n retval = -1\n else:\n retval = retval[0]\n\n return_payload_len = self._serial_read(1)\n if len(return_payload_len) != 1:\n self.log(\"ERROR: Didn't read back a return payload length!\")\n return_payload_len = 0\n else:\n return_payload_len = return_payload_len[0]\n\n if return_payload_len != 0:\n return_payload = self._serial_read(return_payload_len)\n else:\n return_payload = []\n checksum = self._serial_read(1)\n if len(checksum) != 1:\n self.log(\"ERROR: Didn't read back a checksum!\")\n checksum = -1\n else:\n checksum = checksum[0]\n\n data = self.MAGIC_HEADER + [report, retval, return_payload_len] + return_payload\n data.append(checksum)\n\n our_checksum = self.generate_checksum(data[:-1])\n if our_checksum != checksum:\n self.log(\"ERROR: Our checksum didn't calculate properly! \"\n \"(Calculated {}, expected {})\".format(our_checksum, checksum))\n return -1, checksum, []\n else:\n if self.verbose:\n self.log(\"Checksum match! ({} == {})\".format(our_checksum, checksum))\n\n return report, retval, return_payload",
"def process_message(msg):\r\n print(\"received \")\r\n global bytes_in\r\n if len(msg) == 200: # is header or end\r\n print(\"found header\")\r\n msg_in = msg.decode(\"utf-8\")\r\n msg_in = msg_in.split(\",,\")\r\n print(msg_in)\r\n if msg_in[0] == \"end\": # is it really last packet?\r\n in_hash_final = in_hash_md5.hexdigest()\r\n if in_hash_final == msg_in[2]:\r\n print(\"File copied OK -valid hash \", in_hash_final)\r\n return -1\r\n else:\r\n print(\"Bad file receive \", in_hash_final)\r\n return False\r\n else:\r\n if msg_in[0] != \"header\":\r\n in_hash_md5.update(msg)\r\n return True\r\n else:\r\n return False\r\n else:\r\n bytes_in = bytes_in + len(msg)\r\n in_hash_md5.update(msg)\r\n print(\"found data bytes= \", bytes_in)\r\n return True",
"def _recv(self):\n result = self._con.receive()\n if result.startswith(Parser.NOT_OK_MSG) or len(result) == 0:\n return result\n while not result.endswith(Parser.OK_MSG + '\\n') and not result.startswith(Parser.OK_MSG):\n result += self._con.receive()\n return result",
"def packet_read(self):\n bytes_received = 0\n \n if self.sock == NC.INVALID_SOCKET:\n return NC.ERR_NO_CONN\n \n if self.in_packet.command == 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n if errnum == 0 and len(ba_data) == 1:\n bytes_received += 1\n byte = ba_data[0]\n self.in_packet.command = byte\n \n if self.as_broker:\n if self.bridge is None and self.state == NC.CS_NEW and (byte & 0xF0) != NC.CMD_CONNECT:\n print \"RETURN ERR_PROTOCOL\"\n return NC.ERR_PROTOCOL, bytes_received\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if not self.in_packet.have_remaining:\n loop_flag = True\n while loop_flag:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n \n if errnum == 0 and len(ba_data) == 1: \n byte = ba_data[0]\n bytes_received += 1\n self.in_packet.remaining_count += 1\n if self.in_packet.remaining_count > 4:\n return NC.ERR_PROTOCOL, bytes_received\n \n self.in_packet.remaining_length += (byte & 127) * self.in_packet.remaining_mult\n self.in_packet.remaining_mult *= 128\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if (byte & 128) == 0:\n loop_flag = False\n \n if self.in_packet.remaining_length > 0:\n self.in_packet.payload = bytearray(self.in_packet.remaining_length)\n if self.in_packet.payload is None:\n return NC.ERR_NO_MEM, bytes_received\n self.in_packet.to_process = self.in_packet.remaining_length\n \n self.in_packet.have_remaining = True\n \n if self.in_packet.to_process > 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, self.in_packet.to_process)\n if errnum == 0 and len(ba_data) > 0:\n readlen = len(ba_data)\n bytes_received += readlen\n for idx in xrange(0, readlen):\n self.in_packet.payload[self.in_packet.pos] = ba_data[idx]\n self.in_packet.pos += 1\n self.in_packet.to_process -= 1\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n\n #all data for this packet is read\n self.in_packet.pos = 0\n \n ret = self.packet_handle()\n \n self.in_packet.packet_cleanup()\n \n self.last_msg_in = time.time()\n \n return ret, bytes_received",
"def do(self, command):\r\n command += xsct_line_end\r\n logger.info('Sending command: %s ...', repr(command))\r\n self.send(command)\r\n ans = self.recv()\r\n if ans.startswith('okay'):\r\n return ans[5:]\r\n if ans.startswith('error'):\r\n raise PyXilException(ans[6:])\r\n raise PyXilException('Illegal start-string in protocol. Answer is: ' + ans)",
"def got_packet(self, pkt):\n self._log.debug(\"got a packet {}\".format(pkt))\n if pkt.is_syn():\n # this is a syn packet\n # set the sequence number to 0\n self.seqno = 0\n elif pkt.is_ack():\n # this is a plain ack\n # the sender got our data\n # just increment the sequence number\n self.seqno += 1\n return\n if pkt.empty():\n # this packet is emtpy?\n self._log.info(\"empty packet {}\".format(pkt))\n return\n # have the user recv the payload\n self._recv(pkt.payload)",
"def check_ack_or_nak(message):\n value = message.body[-1]\n\n if value == 0x06:\n return\n elif value == 0x15:\n raise CommandFailure(command_code=message.command_code)\n else:\n raise RuntimeError(\"Unexpected ACK/NAK value (0x%02x)\" % value)",
"def recvData(self) -> bytes:\n \n packet = self.recvPacket()\n if(packet.seq == Rudp.ackPlusOne(self.ack)):\n self.ack = Rudp.ackPlusOne(self.ack)\n self.acknowledgePacket(packet)\n return packet.payload\n else:\n return None",
"def _do_some_logic(self, packet):\n\n\n pass",
"def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)",
"def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")",
"def process(self, packet):\n pass",
"def read_versa5(self,addr,fullrepsonse=False):\n time.sleep(0.002)\n addr = addr & 0xff\n cmd = bytes([0x07,0xea,addr,0x00])\n res = self.command(0x3c,cmd)\n if fullresponse:\n return res\n else:\n return res.response_data & 0x0ff",
"def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF",
"def process(self, data):\n\n\t\t# Check if the 802.15.4 packet is valid\n\t\tif makeFCS(data[:-2]) != data[-2:]:\n\t\t\tprint(hue.bad(\"Received invalid packet\"))\n\t\t\treturn\n\n\t\tpacket = Dot15d4FCS(data)\n\n\t\tif packet.fcf_frametype == 2: # ACK\n\t\t\tself.last_ack = packet.seqnum",
"def checkChecksum(self):\n if not self.checkPacketLength():\n return False\n return CCSDS.DU.DataUnit.checkChecksum(self)",
"def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n # if packet.is_fin:\n # print(\"2nd wan sees a fin\")\n\n if packet.is_fin and len(packet.payload) == 0:\n # print(\"empty fin, foward fin\")\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash not in self.hash_to_raw_data.keys():\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = False)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # reset buffer\n self.send(packet, self.address_to_port[packet.dest]) # forward empty fin\n return\n \n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n \n if packet.is_raw_data:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n pack_buff += packet.payload\n\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n # print(\"sending1\")\n if block_hash in self.hash_to_raw_data.keys():\n # send extract data from hash in packet\n block_to_send = self.hash_to_raw_data[block_hash]\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n\n if remaining_buff:\n # print(\"wan to client remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n block_hash = get_hash(remaining_buff)\n block_to_send = remaining_buff\n # print(\"sending2\")\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n # print(\"sending fin1\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n if packet.is_fin:\n # print(\"sending fin2\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff\n else:\n block_hash = packet.payload\n block_to_send = self.hash_to_raw_data[block_hash]\n # print(\"sending3\")\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n if packet.is_fin:\n # print(\"sending fin3\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n # self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # TESTING\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_fin and len(packet.payload) == 0:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = 
pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = True)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n self.send(packet, self.wan_port)\n return\n\n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n\n pack_buff += packet.payload\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n\n # send off all completed blocks\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n\n if remaining_buff:\n # print(\"wan to wan remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n # print(\"finfin\")\n block_to_send = remaining_buff\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.wan_port)\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff",
"def EndOfPacket(self) -> bool:",
"def handle_packet(self, pkt):\n logger.info('got a message:{}'.format(pkt))\n self._sock_rep_to_server.send_pyobj(packet.Ack())\n \n state = True\n extradata = {}\n \n if hasattr(self, 'handle_action'):\n _tmp = self.handle_action(pkt)\n try:\n state, data = _tmp\n extradata['extra'] = data\n except ValueError:\n extradata['extra'] = _tmp\n if extradata:\n state = False\n \n return state, extradata",
"def process_message(msg):\n global fout\n print(\"received \")\n if len(msg)==200: #is header or end\n msg_in=msg.decode(\"utf-8\",\"ignore\")\n msg_in=msg_in.split(\",,\")\n if msg_in[0]==\"header\": #header\n filename=extract_file_data(msg_in[1])\n file_out=\"copy-\"+filename\n fout=open(file_out,\"wb\") #use a different filename\n\n if msg_in[0]==\"end\": #is it really last packet?\n in_hash_final=in_hash_md5.hexdigest()\n if in_hash_final==msg_in[2]:\n print(\"File copied OK -valid hash \",in_hash_final)\n else:\n print(\"Bad file receive \",in_hash_final)\n return False\n else:\n if msg_in[0]!=\"header\":\n in_hash_md5.update(msg)\n return True\n else:\n return False\n else:\n in_hash_md5.update(msg)\n #msg_in=msg.decode(\"utf-8\",\"ignore\")\n if len(msg) <100:\n print(msg)\n return True",
"def parse_message(buffer):\n _discard_until_message_start(buffer)\n\n if buffer and buffer[0] == MESSAGE_FAILURE_BYTE:\n buffer[:] = buffer[1:]\n return MessageFailure(\n 'Command send failure (probable collision). Expect a retry.',\n ), 2 - len(buffer)\n\n # It takes at least 2 bytes to move forward.\n if len(buffer) < 2:\n return None, 2 - len(buffer)\n\n try:\n command_code = CommandCode(buffer[1])\n except ValueError:\n logger.warning(\n \"Unrecognized command code (0x%02x). Ignoring invalid data.\",\n buffer[1],\n )\n buffer[:2] = []\n\n return None, 2\n\n extension = 0\n\n # If the message is an Insteon message and has the extended flag, we expect\n # 14 user-data more bytes.\n if command_code == CommandCode.send_standard_or_extended_message:\n if len(buffer) >= 6 and buffer[5] & (1 << 4):\n extension = 14\n\n body, expected = _extract_body(\n buffer,\n BODY_SIZES[command_code] + extension,\n )\n\n # Not enough bytes to process the message. Let's wait for more.\n if body is None:\n return None, expected\n\n return (\n IncomingMessage(command_code=command_code, body=body),\n max(2 - len(buffer), 1),\n )",
"def handle_flow(self, expected: [Flag]) -> Optional[dict]:\n try:\n segment = self.buffer.get(block=False)\n message = self.unpack_segment(segment)\n if message['flag'] in expected and self.valid_checksum(message):\n self.others_recv_win = message['win']\n return message\n except queue.Empty:\n pass\n return None",
"def unpack(self, pkt):\n if pkt[0]!='$' or pkt[-3]!='#':\n raise ValueError('bad packet')\n if (sum(ord(c) for c in pkt[1:-3]) % 256) != int(pkt[-2:],16):\n raise ValueError('bad checksum')\n pkt = pkt[1:-3]\n return pkt",
"def _parse_udp_packet(self, packet_bytes):\n opcode = packet_bytes[:2]\n if opcode == 5:\n reply = self.error_messages[int.from_bytes(packet_bytes[2:4], 'big')]\n print(reply)\n elif opcode == 4:\n reply = \"ACK\"\n else:\n reply = \"UNK\"\n return reply",
"def acknowledge(self, validity = True) -> int:\n (data, s) = self.socket.recvfrom(Rudp.Packet.buffer())\n (packet, validity) = Rudp.Packet.unpack(data)\n if(validity and s == self.server):\n return packet.ack\n else:\n return None",
"def ping(self):\n\t\t## NOTE: the Microblaze can only accept byte values between -128 and 127 (so 0xCF is too large)\n\t\trb = [0x00]\n\n\t\t# self.spi.transfer([0xCF], rb, 1)\n\t\t# mapped_cmd_byte = [_map_value(0xCF, 0, 255, -128, 127)]\n\t\tmapped_cmd_byte = [0xCF-128]\n\t\tself.spi.transfer(mapped_cmd_byte, rb, 1)\n\n\t\ttime.sleep(0.1)\n\t\tif rb[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\trb[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\t\treturn rb[0] == 0xF3",
"def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n if packet.is_raw_data:\n if not (packet.src, packet.dest) in self.buffer:\n self.buffer[packet.src, packet.dest] = \"\"\n start = len(self.buffer[(packet.src, packet.dest)])\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)] + packet.payload\n i = max(start, 47)\n while i < len(self.buffer[(packet.src, packet.dest)]):\n i += 1\n h = utils.get_hash(self.buffer[(packet.src, packet.dest)][i-48:i])\n if utils.get_last_n_bits(h, 13) == self.GLOBAL_MATCH_BITSTRING:\n block = self.buffer[(packet.src, packet.dest)][:i]\n self.cache[utils.get_hash(block)] = block\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)][i:]\n i = 47\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, False, block), self.address_to_port[packet.dest])\n\n # remainder = self.buffer[(packet.src, packet.dest)][self.BLOCK_SIZE:]\n \n if packet.is_fin:\n block = self.buffer[(packet.src, packet.dest)]\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, True, block), self.address_to_port[packet.dest])\n self.buffer[(packet.src, packet.dest)] = \"\"\n else:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, packet.is_fin, self.cache[packet.payload]), self.address_to_port[packet.dest])\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_raw_data:\n if not (packet.src, packet.dest) in self.buffer:\n self.buffer[packet.src, packet.dest] = \"\"\n start = len(self.buffer[(packet.src, packet.dest)])\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)] + packet.payload\n i = max(start, 47)\n while i < len(self.buffer[(packet.src, packet.dest)]):\n i += 1\n h = utils.get_hash(self.buffer[(packet.src, packet.dest)][i-48:i])\n if utils.get_last_n_bits(h, 13) == self.GLOBAL_MATCH_BITSTRING:\n block = self.buffer[(packet.src, packet.dest)][:i]\n if utils.get_hash(block) in self.cache:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, False, False, utils.get_hash(block)), self.wan_port)\n else:\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, False, block), self.wan_port)\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)][i:]\n i = 47\n\n if packet.is_fin:\n block = self.buffer[(packet.src, packet.dest)]\n if utils.get_hash(block) in self.cache:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, False, True, utils.get_hash(block)), self.wan_port)\n else:\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, True, block), self.wan_port)\n self.buffer[(packet.src, packet.dest)] = \"\"\n else:\n # 1/0\n self.send_block(packet, self.wan_port)",
"def _receive_check(self, length):\n data = self._receive(length)\n return data[:-1]",
"def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)"
] | [
"0.5792792",
"0.56804043",
"0.5637224",
"0.55658627",
"0.5561786",
"0.5408823",
"0.53773457",
"0.5322868",
"0.5277083",
"0.52678686",
"0.52137786",
"0.5208731",
"0.51964766",
"0.5184436",
"0.51623356",
"0.5156039",
"0.51016897",
"0.51004124",
"0.50996864",
"0.508539",
"0.5084296",
"0.5061333",
"0.5051528",
"0.50316834",
"0.5030048",
"0.5013841",
"0.49835753",
"0.4948625",
"0.4934323",
"0.49079862"
] | 0.6739887 | 0 |
Registers a function to run before each request. | def before_request(self, f):
self.before_request_funcs.append(f)
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)",
"def before_request(self, f):\n self.before_request_handlers.append(f)\n return f",
"def before_worker_start(func):\n _func_only(func)\n worker_methods_db.register_before_start(func)\n return func",
"def before_request():\n pass",
"def configure_before_request_funcs(app):\n @app.before_request\n def conf_set_user_cookie_id():\n return set_user_cookie_id()\n \n @app.before_request\n def check_for_maintenance():\n if config.DOWN_FOR_MAINTENANCE:\n return 'Sorry, we\\'re down momentarily for a teensey bit of maintenance!', 503\n \n @app.before_request\n def count_uniques():\n return\n statsd.set('unique_users', g.user_cookie_id)\n statsd.set('unique_ips', request.remote_addr)\n \n @app.before_request\n def set_statsd_context():\n g.statsd_context = \"%s.%s\" % (request.endpoint, request.method)\n g.total_request_timer = statsd.timer(g.statsd_context + \".response_time\")\n g.total_request_timer.start()",
"def hook_client_before_request(self, event):\r\n for functor in self._hooks['client_before_request']:\r\n functor(event)",
"def request_filter(self, fn):\n self.request_filters.append(fn)\n return fn",
"def register_before_first(app):\n app.before_first_request_funcs.append(\n partial(admin.utils.create_root_user, app))\n app.before_first_request_funcs.append(\n partial(admin.utils.load_messenger_config, app))\n app.before_first_request_funcs.append(\n partial(admin.utils.load_site_config, app))",
"def hook_server_before_exec(self, request_event):\r\n\r\n for functor in self._hooks['server_before_exec']:\r\n functor(request_event)",
"def before_call(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:",
"def process_before_request_hooks(self):\n\n hooks = []\n\n if self.resource:\n hooks.extend(self.resource.api.before_all_hooks)\n hooks.extend(self.resource.before_all_hooks)\n\n hooks.extend(self.before_all_hooks)\n hooks.extend(\n getattr(\n self,\n 'before_{method}_hooks'.format(method=self.meth),\n []\n )\n )\n\n for hook in chain(hooks):\n hook(self)",
"async def _pre_call(self, _request_id: int, request: fastapi.Request, *args, **kwargs) -> None:\n return",
"def register_method_before(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_before[phase].append(fn)",
"def after_request(self, f):\n self.after_request_funcs.append(f)\n return f",
"def before(self, before: Route.Decorator):\n pass",
"def before_test(self, func, *args, **kwargs):\n pass",
"def register_ajax_handler(self, request, function):\n if request in self.ajax_handlers:\n L.error(\"Error: request:\" + request + \" is already registered\")\n return False\n self.ajax_handlers[request] = function\n L.info(\"registered:\"+request)\n return True",
"def before_request():\r\n\r\n\tinit_classes()",
"def after_request_handle(self, func):\n self.after_request.append(func)\n return func",
"def get_request(func):\r\n func.request = True\r\n return func",
"def global_request_interceptor(self):\n # type: () -> Callable\n def wrapper(process_func):\n if not callable(process_func):\n raise SkillBuilderException(\n \"Global Request Interceptor process_func input parameter \"\n \"should be callable\")\n\n class_attributes = {\n \"process\": lambda self, handler_input: process_func(\n handler_input)\n }\n\n request_interceptor = type(\n \"RequestInterceptor{}\".format(\n process_func.__name__.title().replace(\"_\", \"\")),\n (AbstractRequestInterceptor,), class_attributes)\n\n self.add_global_request_interceptor(\n request_interceptor=request_interceptor())\n return process_func\n return wrapper",
"def register_request_hydrator(self):\n\n # pylint: disable=missing-return-doc, missing-return-type-doc\n def decorator(func):\n self.request_hydrator_func = func\n return func\n\n return decorator",
"def threaded_callback(self, func):\n\n self.th_func_map[func.__name__] = func",
"def register(self):\n REGISTERED_FUNCTIONS[self.path] = self",
"def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func",
"def after_request(self, f):\n self.after_request_handlers.append(f)\n return f",
"def after_request(self, f):\n self.after_request_handlers.append(f)\n return f",
"def register_request_hooks(app):\n\n @app.before_request\n def before_request():\n g.db = open_db()\n\n @app.teardown_request\n def after_request(exc):\n g.db.__exit__(type(exc), exc, None)",
"def pre_runroute_callable(self, route, request):\n return None",
"def api_request_globals(f):\n @wraps(f)\n def inner(*args, **kwargs):\n request.is_api_request = True\n return f(*args, **kwargs)\n return inner"
] | [
"0.8118946",
"0.7769813",
"0.69504994",
"0.67644304",
"0.6689539",
"0.65856653",
"0.65145713",
"0.64500964",
"0.6426918",
"0.6376406",
"0.6375528",
"0.63239646",
"0.6193873",
"0.6136782",
"0.59849536",
"0.5943737",
"0.58747",
"0.5869897",
"0.5857567",
"0.58538926",
"0.5827149",
"0.5812418",
"0.5764284",
"0.57346094",
"0.56955945",
"0.5689887",
"0.5689887",
"0.56871367",
"0.5681491",
"0.56393886"
] | 0.81964976 | 0 |
Register a function to be run after each request. | def after_request(self, f):
self.after_request_funcs.append(f)
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def after_request_handle(self, func):\n self.after_request.append(func)\n return func",
"def after_request(self, f):\n self.after_request_handlers.append(f)\n return f",
"def after_request(self, f):\n self.after_request_handlers.append(f)\n return f",
"def after_request(self, func: typing.Callable):\n return self.add_hook(type_=\"post\", hook=func)",
"def after_worker_start(func):\n _func_only(func)\n worker_methods_db.register_after_start(func)\n return func",
"def before_request(self, f):\n self.before_request_funcs.append(f)\n return f",
"def middleware_after(self):\n pass",
"def after_error_request(self, f):\n self.after_error_request_handlers.append(f)\n return f",
"def register_method_after(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_after[phase].append(fn)",
"def post(self):\n self.finish(self.register())",
"def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func",
"def register(self):\n REGISTERED_FUNCTIONS[self.path] = self",
"def register_ajax_handler(self, request, function):\n if request in self.ajax_handlers:\n L.error(\"Error: request:\" + request + \" is already registered\")\n return False\n self.ajax_handlers[request] = function\n L.info(\"registered:\"+request)\n return True",
"def before_request(self, f):\n self.before_request_handlers.append(f)\n return f",
"def threaded_callback(self, func):\n\n self.th_func_map[func.__name__] = func",
"def addHandler(self, fn):\n self.handlers.append(fn)",
"def Postcall(function_to_call_later): \n def postcall_inside(fun): \n @functools.wraps(fun)\n def relay(*args, **kwargs):\n return function_to_call_later(fun(*args, **kwargs))\n return relay\n return postcall_inside",
"def register_callback(self, func):\n self.callback = func",
"def after_test(self, func, *args, **kwargs):\n pass",
"def register(name, func):\n WebSocketRouter.funcmap[name] = func",
"def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)",
"def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func",
"def onfinish( request ):",
"def onfinish( request ):",
"def process_after_request_hooks(self, resp):\n\n hooks = []\n meth_hooks = getattr(\n self,\n 'after_{method}_hooks'.format(method=self.meth),\n []\n )\n\n hooks.extend(meth_hooks)\n hooks.extend(self.after_all_hooks)\n\n if self.resource:\n hooks.extend(self.resource.after_all_hooks)\n hooks.extend(self.resource.api.after_all_hooks)\n\n for hook in chain(hooks):\n resp = hook(self, resp)\n\n return resp",
"def after(self, after: Route.Decorator):\n pass",
"def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)",
"def addFunction(self, func):\n self.__functions.append(func)",
"def onRegister(self):\n pass",
"def onRegister(self):\n pass"
] | [
"0.7849521",
"0.7643819",
"0.7643819",
"0.75291634",
"0.62461793",
"0.618368",
"0.61611503",
"0.61363375",
"0.6126715",
"0.61173093",
"0.6038293",
"0.5960427",
"0.5939071",
"0.5817307",
"0.57547176",
"0.57469493",
"0.57465345",
"0.573892",
"0.56387985",
"0.5626159",
"0.56247324",
"0.55933654",
"0.5586124",
"0.5586124",
"0.5585159",
"0.5543857",
"0.55187714",
"0.5512837",
"0.5504014",
"0.5504014"
] | 0.79526246 | 0 |
Registers a template context processor function. | def context_processor(self, f):
self.template_context_processors.append(f)
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register(self):\n REGISTERED_FUNCTIONS[self.path] = self",
"def test_enable_extension_registers_context_processors(self):\n class TestExtension(Extension):\n context_processors = ['my_custom_processor']\n\n # Back up the list, so we can replace it later.\n if hasattr(settings, 'TEMPLATES'):\n orig_context_processors_list = \\\n list(settings.TEMPLATES[0]['OPTIONS']['context_processors'])\n else:\n orig_context_processors_list = \\\n list(settings.TEMPLATE_CONTEXT_PROCESSORS)\n\n # Sanity-check that the context processor didn't wind up in here.\n self.assertNotIn('my_custom_processor', orig_context_processors_list)\n\n try:\n extension = self.setup_extension(TestExtension)\n\n # We have to re-fetch these lists now, since they may have\n # been normalized to lists.\n if hasattr(settings, 'TEMPLATES'):\n context_processors_list = \\\n settings.TEMPLATES[0]['OPTIONS']['context_processors']\n else:\n context_processors_list = \\\n settings.TEMPLATE_CONTEXT_PROCESSORS\n\n # This should have been added, since the extension was enabled.\n self.assertIn('my_custom_processor', context_processors_list)\n\n # Shutting down the extension should remove the context\n # processor.\n self.manager.disable_extension(extension.id)\n self.assertNotIn('my_custom_processor',\n context_processors_list)\n finally:\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['OPTIONS']['context_processors'] = \\\n orig_context_processors_list\n else:\n settings.TEMPLATE_CONTEXT_PROCESSORS = \\\n orig_context_processors_list",
"def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator",
"def make_processor(cls, fnc):\n #def wrapper(**kw):\n # name = fnc.__name__\n # fnc.__name__ = 'run'\n # return type()\n # pass",
"def register_template_renderer(\n self, plugin, template_name, context=default_context\n ):\n self._renderers[plugin] = (template_name, context)",
"def context_processors(self):\n return [\n 'leonardo.module.web.processors.page.add_page_if_missing',\n 'leonardo.module.web.processors.config.leonardo',\n 'leonardo.module.web.processors.font.webfont_cookie',\n ]",
"def register(func):\n PLUGINS[func.__name__] = func\n return func",
"def add_processor(self, termprocessor):\n self.pipeline.append(termprocessor)",
"def register(self, prim, fn):\n assert prim not in self.mapping\n self.mapping[prim] = fn",
"def add_macro_context(self, name: str, context: dict = None):\r\n if context is None:\r\n context = dict()\r\n self.macros[name].add_instance(dict())",
"def register(prim):\n def deco(fn):\n vm_register(prim)(lambda vm, *args: fn(*args))\n return py_register(prim)(fn)\n return deco",
"def register(func):\n plugins[func.__name__] = func\n return func",
"def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)",
"def get_render_fn(self):\n def render(environment, ctxt_data, file_path):\n \"Renders a jinja2 template\"\n logging.debug(\"Rendering with context data %s\", ctxt_data)\n\n template = environment.get_template(file_path)\n return template.render(**ctxt_data)\n return render",
"def add_preprocess_callback(self, name, func, *args, **kwargs):\n\n self.preprocess[name] = (func, args, kwargs)",
"def template_extra_functions(self):\n\t\treturn []",
"def render_template():\n template_engine = engines['django']\n def func(template_string):\n load_tags_string = '{% load wagtailextensions_tags %}'\n return template_engine.from_string(load_tags_string + template_string).render()\n return func",
"def process(f: ProcessFunction) -> ProcessFunction:\n process_registry_040.add_function(f)\n process_registry_100.add_function(f)\n return f",
"def template_function2(self, node, ordered_functions):\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n\n new._generated = \"cxx_template\"\n\n new.cxx_template = {}\n # fmt.CXX_template = targs.instantiation # ex. <int>\n\n # self.push_instantiate_scope(new, targs)\n\n if new.ast.template_argument:\n iast = getattr(self.instantiate_scope, new.ast.template_argument)\n new.ast = new.ast.instantiate(node.ast.instantiate(iast))\n # Generics cannot differentiate on return type\n new.options.F_create_generic = False\n\n # Replace templated arguments.\n newparams = []\n for arg in new.ast.declarator.params:\n if arg.template_argument:\n iast = getattr(self.instantiate_scope, arg.template_argument)\n newparams.append(arg.instantiate(iast))\n else:\n newparams.append(arg)\n new.ast.declarator.params = newparams\n # self.pop_instantiate_scope()\n\n # Do not process templated node, instead process\n # generated functions above.\n node.wrap.clear()",
"def render_inclusion(func, file_name, takes_context, django_context, *args, **kwargs):\r\n\r\n if takes_context:\r\n args = [django_context] + list(args)\r\n\r\n _dict = func(*args, **kwargs)\r\n if isinstance(file_name, Template):\r\n t = file_name\r\n elif not isinstance(file_name, basestring) and is_iterable(file_name):\r\n t = select_template(file_name)\r\n else:\r\n t = get_template(file_name)\r\n\r\n nodelist = t.nodelist\r\n\r\n new_context = Context(_dict)\r\n csrf_token = django_context.get('csrf_token', None)\r\n if csrf_token is not None:\r\n new_context['csrf_token'] = csrf_token\r\n\r\n return nodelist.render(new_context)",
"def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function",
"def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'Person': person,\n 'Movie': movie,\n 'Actors': actors,\n 'Writers': writers,\n 'Directors': directors,\n 'Genres': genres\n }\n\n app.shell_context_processor(shell_context)",
"def register_entrypoints(self):\n for entrypoint in entrypoints.get_group_all(\"mlflow.run_context_provider\"):\n try:\n self.register(entrypoint.load())\n except (AttributeError, ImportError) as exc:\n warnings.warn(\n 'Failure attempting to register context provider \"{}\": {}'.format(\n entrypoint.name, str(exc)\n ),\n stacklevel=2\n )",
"def Map(context, funcname, *nodesets):\n (prefix, local) = ExpandQName(funcname, namespaces=context.processorNss)\n func = (g_extFunctions.get(expanded) or\n CoreFunctions.CoreFunctions.get(expanded, None))\n if not func:\n raise Exception('Dynamically invoked function %s not found.'%funcname)\n flist = [f]*len(nodesets)\n lf = lambda x, f, *args: apply(f, args)\n retlist = apply(map, (lf, flist) + nodesets)\n\n proc = context.processor\n result_nodeset = []\n for ret in retlist:\n proc.pushResult()\n proc.writers[-1].text(Conversions.StringValue(ret))\n frag = proc.popResult()\n context.rtfs.append(frag)\n result_nodeset.append(frag.childNodes[0])\n return result_nodeset",
"def env_reg_deco(func):\n envelopes[str(func.__name__)] = func\n return func",
"def register_render_tag(renderer):\n def tag(parser, token):\n class TagNode(template.Node):\n def render(self, context):\n return renderer(context, token)\n return TagNode()\n for copy_attr in (\"__dict__\", \"__doc__\", \"__name__\"):\n setattr(tag, copy_attr, getattr(renderer, copy_attr))\n return register.tag(tag)",
"def register_function(self, function, name=None):\n if name is None:\n name = function.__name__\n self.funcs[name] = function",
"def push_context(self, ctx):\n self._tpl_context = ctx",
"def dajaxice_register(*dargs, **dkwargs):\r\n\r\n if len(dargs) and not dkwargs:\r\n function = dargs[0]\r\n dajaxice_functions.register(function)\r\n return function\r\n\r\n def decorator(function):\r\n @functools.wraps(function)\r\n def wrapper(request, *args, **kwargs):\r\n return function(request, *args, **kwargs)\r\n dajaxice_functions.register(function, *dargs, **dkwargs)\r\n return wrapper\r\n return decorator",
"def preprocessor(f):\n f._is_preprocessor = True\n return staticmethod(f)"
] | [
"0.56674093",
"0.56304383",
"0.5513914",
"0.5493239",
"0.5407706",
"0.5372997",
"0.52977866",
"0.52085143",
"0.5204628",
"0.51973486",
"0.5171223",
"0.51613235",
"0.51562536",
"0.51132387",
"0.5089765",
"0.50798845",
"0.5026726",
"0.499174",
"0.4989556",
"0.49833864",
"0.49706504",
"0.49704543",
"0.4954364",
"0.49457413",
"0.49374548",
"0.493102",
"0.49267408",
"0.49232724",
"0.4909595",
"0.49087003"
] | 0.75228786 | 0 |
Enrolls a new profile in Azure Speaker ID. | def enroll_profile(region, subscription_key, wav_path):
fs, audio_data = _check_and_load_wav_file_length(wav_path)
profile_id = _add_profile(region, subscription_key)
url = "%s/speaker/identification/v2.0/text-independent/profiles/%s/enrollments" % (
_get_azure_endpoint(region), profile_id)
headers = {
"Ocp-apim-subscription-key": subscription_key,
"Content-Type": "audio/wav; codecs=audio/pcm; samplerate=%s" % fs,
}
session = requests.Session()
resp = session.post(url, headers=headers, data=audio_data)
print("Enrollment response status code: %s\n" % resp.status_code)
print(json.dumps(json.loads(resp.content), indent=2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put(self, id):\n adm = Administration()\n print(api.payload)\n lp = LearnProfile.from_dict(api.payload)\n if lp is not None:\n lp.set_id(id)\n adm.save_learnprofile(lp)\n return lp, 200\n\n else:\n return '', 500",
"def perform_create(self, serializer):\r\n serializer.save(user_type=\"SPEAKER\")",
"def perform_create(self, serializer):\n profile = models.Profile.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(profile=profile)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)",
"def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)",
"def perform_create(self, serializer):\n topic = models.ProfileTopic.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(topic=topic)",
"def set_speaker(self, identifier):\n self.up_to_date = False\n self._speaker = identifier",
"def add_profile(self, profile):\r\n self.profiles.append(profile)",
"def save_profile(sender, instance, **kwargs):\n instance.profile.save()",
"def update_profile(id):\n tags = request.form.get('tags')\n user = User.query.get(id)\n speaker = Speaker.query.filter_by(id_assigned_user=user.id).first()\n\n speaker.tags = tags\n try:\n db.session.commit()\n except:\n abort(500)\n\n return redirect(url_for('get_profile', id=user.id))",
"def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)",
"def new_profile(email):\n key = challenge_12.deterministic_random_key()\n profile = bytes(profile_for(email.decode()), 'ascii')\n\n return challenge_11.AES_ECB(key).encrypt(profile)",
"def enable(self,\n profile_id=None):\n if profile_id is None:\n self._enabled = True\n else:\n self._profiles[profile_id] = True",
"def perform_create(self, serializer):\n serializer.save(user_profile = self.request.user)",
"def save_profile(self):\n self.save()",
"def put(self, id ):\n adm = Administration()\n print(api.payload)\n p = Profile.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_profile(p)\n return p, 200\n else:\n return '', 500",
"def create(self, validated_data):\n return Speaker.objects.create(**validated_data)",
"def enrol(self, enrol_data):\n self.busy_wait(enrol_cost)\n return {'speaker': enrol_data['speaker'], 'room': enrol_data['room']}",
"def perform_create(self, serializer): # this method runs everytime a POST method is called\n serializer.save(user_profile=self.request.user)",
"def switch_profile(self, params):\n profile_id = params.get('profile_id', [''])[0]\n switch_profile = self.netflix_session.switch_profile(\n profile_id=profile_id,\n account=self.credentials)\n return switch_profile",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def perform_create(self,serializer):\n serializer.save(user_profile=self.request.user)",
"def perform_create(self,serializer):\n serializer.save(user_profile=self.request.user)",
"def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()",
"async def on_speaking(self, speaking, uid):\n pass"
] | [
"0.57121813",
"0.5669998",
"0.55642205",
"0.5468942",
"0.54388547",
"0.5423377",
"0.5405765",
"0.53824395",
"0.5380911",
"0.5366512",
"0.52692974",
"0.52692974",
"0.52692974",
"0.52692974",
"0.52692974",
"0.52692974",
"0.525241",
"0.5236725",
"0.5225078",
"0.52119666",
"0.5204216",
"0.52007973",
"0.51753956",
"0.51603705",
"0.51337224",
"0.5130088",
"0.5123543",
"0.5123543",
"0.5117897",
"0.51121044"
] | 0.72638845 | 0 |
Calculates the number of suicides for a type of agent given game mode, observability, and game seed. If the game seed passed is -1, then all game seeds are aggregated. | def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1):
event_id = "death"
# Keep only those games within given configuration
if game_seed != -1:
selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) &
(data['game_seed'] == game_seed)]
else:
selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)]
if agent != -1:
for index, row in selection.iterrows():
if agent not in row["agents"]:
selection.drop(index, inplace=True)
# print(selection.size)
team_kill_count = []
ngames = 0 # Number of games in which this agent dies
suicides = 0 # Number of games in which this agent commits suicide
events_per_sample = []
team_kills = 0
# Iterate through selected game data
for index, row in selection.iterrows():
if agent in row["agents"] and row['event_id'] == event_id: # This agent played in the game
# Find its agent ID depending on its position in the agent list. There may be more than 1 agent of this
# type in the game, so iterate over all and check individually.
ll = row["agents"]
indices = [i for i, el in enumerate(ll) if el == agent]
for agent_id in indices:
# teammate = (agent_id + 2) % 4
sample_event_counter = 0
for event in row["event_data"]:
if event["agent_id"] == agent_id: # This agent dies
if event["killer"] == agent_id: # Suicide
sample_event_counter += 1
# if event["killer"] == teammate: # Killed by teammate
# team_kills += 1
# if event["agent_id"] == teammate: # Teammate dies
# if event["killer"] == agent_id: # Killed by this agent
# team_kill_count += 1
ngames += 1
events_per_sample.append(sample_event_counter)
suicides += sample_event_counter
# suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides
# team_kill_count.append(100*team_kills/games)
# percentage = 100 * suicides / ngames
# mean = ngames * (percentage / 100)
# variance = mean * (1 - (percentage / 100))
# std_dev = math.sqrt(variance)
# std_err = std_dev / math.sqrt(ngames)
# h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval
# return percentage, h
# print(events_per_sample)
mean = suicides/ngames
variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample)
std_dev = math.sqrt(variance)
std_err = std_dev/math.sqrt(len(events_per_sample))
h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval
return mean * 100, h * 100 # , team_kill_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])",
"def test_winners_per_type_sum(self):\n sim = ss.Simulation()\n sim.run_simulation(14)\n winners = sim.winners_per_type()\n assert sum(winners.values()) == 14",
"def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents",
"def get_number_of_investors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'Investor'])\n return n_agents",
"def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number",
"def culggroup_thickestdonecount(As, Rps, group, dones):\n pairs = sorted(((get_culg_dimension(As, Rps, l), dones[l], l)\n for l in group),\n reverse=True)\n count = len(tuple(itt.takewhile(lambda p: p[1], pairs)))\n return count",
"def CountSuits(hand):\r\n numtrump = 0\r\n numss = 0\r\n numos1 = 0\r\n numos2 = 0\r\n\r\n for card in hand:\r\n if card < 7:\r\n numtrump += 1\r\n elif card < 12:\r\n numss += 1\r\n elif card < 18:\r\n numos1 += 1\r\n else:\r\n numos2 += 1\r\n \r\n numsuits = 0\r\n if numtrump != 0:\r\n numsuits += 1\r\n if numss != 0:\r\n numsuits += 1\r\n if numos1 != 0:\r\n numsuits += 1\r\n if numos2 != 0:\r\n numsuits += 1\r\n return [numtrump,numss,numos1,numos2,numsuits]",
"def EmpiricalValues(deals):\n global noOfFaceUpCardsPerGame, noOfLegalActionsPerGame\n noOfFaceUpCardsPerGame = []\n noOfLegalActionsPerGame = []\n\n for i in range(deals):\n print(\"\\n\\nDeal %d\" % i)\n newGame = Game()\n firstPlayer = newGame.firstPlayer() # choosing randomly the first player\n\n if firstPlayer == \"player\" :\n dealer = newGame.computer\n else:\n dealer = newGame.player\n\n tableCanBeSwept = newGame.checkIfInitialTableCanBeSwept()\n\n if tableCanBeSwept:\n newGame.sweepTheTable(dealer)\n\n newGame.run(firstPlayer)\n\n noOfFaceUpCardsPerGame.append(newGame.noOfFaceUpCardsPerPlay) # this stores the number of face up cards per play\n noOfLegalActionsPerGame.append(newGame.noOfLegalActionsPerPlay) # this stores the number of legal actions per play\n\n print_stats(deals, noOfFaceUpCardsPerGame, noOfLegalActionsPerGame)",
"def sixes(dice):\n return sum([x for x in dice if x == 6])",
"def get_suits(hand, board):\n suits = {}\n for card in hand + board:\n if card[1] in suits:\n suits[card[1]] += 1\n else:\n suits[card[1]] = 1\n return suits",
"def countModes(self,l_edges):\n\n\t\tassert l_edges is not None\n\n\t\t#Determine the multipole values of each bin in the FFT grid\n\t\tell = self.getEll()\n\n\t\t#Count how many of these pixels fall inside each bin\n\t\tmodes_on = ell[None] < l_edges[:,None,None]\n\t\tmodes_ly_0 = modes_on.copy()\n\t\tmodes_ly_0[:,:,1:] = 0\n\n\t\t#Count the total number of modes, and the number of modes with ly=0 \n\t\tnum_modes = np.diff(modes_on.sum((1,2)).astype(np.float))\n\t\tnum_modes_ly_0 = np.diff(modes_ly_0.sum((1,2)).astype(np.float))\n\n\t\t#Return the corrected number of modes that yields the right variance in the Gaussian case\n\t\treturn num_modes**2/(num_modes+num_modes_ly_0)",
"def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults",
"def get_number_of_char_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'CharitableSponsor'])\n return n_agents",
"def get_agent_count(self, i: int, j: int, dist: str = 'current') -> int:\n return int(self._dist[dist][i, j] / self._param['size_fraction'])",
"def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")",
"def run_tournament_(genes):\n\n n_genes = len(genes)\n scores = np.zeros(n_genes, dtype=np.uint32)\n for i, j in itertools.combinations(range(n_genes), 2):\n s_i, s_j = run_duel(genes[i], genes[j])\n scores[i] += s_i\n scores[j] += s_j\n continue\n\n return scores / (n_genes - 1)",
"def clairvoyant_agent(self, seeds): \n rewards = []\n for seed in seeds:\n self.env.seed(seed)\n self.env.reset()\n\n # store the initial generation levels\n initial_action = [self.env.state.generator_1_level, self.env.state.generator_2_level]\n\n while not self.env.state.is_done():\n # repeat constant action, just in order to get to the end\n self.env.step(initial_action)\n # read realised demand\n realised_demand = np.diagonal(np.array(env.state.agent_predictions_all))\n # optimise the run cost against (clairvoyant) realised demand, pretending to run at t=-1\n min_cost = agent.full_solution([-1] + initial_action + list(realised_demand))\n # collect (negative) cost\n rewards.append(- min_cost)\n return np.mean(rewards)",
"def count_sheeps(sheep):\n return sheep.count(True)",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass",
"def hives_count(self) -> int:\n return self.hives.count()",
"def choose_target(self, agents):\n\n number_of_suspects = [0]*(len(agents))\n number_of_suspects_per_agent = []\n\n index = 0\n for a1 in agents:\n if not a1.is_impostor():\n for a2 in agents:\n if self.km.suspects(a1.agent_id, a2.agent_id):\n number_of_suspects[index] = number_of_suspects[index] + 1\n else:\n number_of_suspects[index] = 999999\n number_of_suspects_per_agent.append((a1.agent_id,number_of_suspects[index]))\n index = index + 1\n\n self.target = min(number_of_suspects_per_agent, key = lambda t: t[1])[0]",
"def assess_progress():\n\tconn = pymongo.Connection(MASTER_SERVER)\n\tdb = conn.SocialLearning\n\tdb.authenticate(MONGO_USER, MONGO_PASSWORD)\n\n\tcoll_names = db.collection_names()\n\n\tresult = {}\n\n\tfor m in modes:\n\t\tresult[m[0]] = [0]*MAX_DEMES\n\t\tfor subm in [c for c in coll_names if c.startswith('gp_ '+m[0])]:\n\t\t\tidx = int(subm[4 + len(m[0]):])\n\t\t\tif idx < MAX_DEMES:\n\t\t\t\tcoll = db[subm]\n\t\t\t\tresult[m[0]][idx] = coll.count()\n\n\treturn result",
"def winners_per_type(self):\n winners = [winner[1] for winner in self.result]\n # making a list of the type of winners\n return Counter(winners)\n # Using the Counter tool from the standard library to count the\n # types in a dictionary",
"def synergy_counter_role(A_side, B_side, A_ban = [], B_ban = []):\n \n #Decide which team\n if len(A_side)%2 == 0 or (len(A_side)%2 == 1 and len(B_side) > len(A_side)):\n team_side = \"A\"\n print(\"AI is on A Side\")\n my_team = A_side\n enemy_team = B_side\n else:\n team_side = \"B\"\n my_team = B_side\n enemy_team = A_side\n print(\"AI is on B Side\")\n \n #Role accounting \n team_roles = {\"Carry\" : 3, \"Captain\" : 1, \"Jungler\" : 1}\n for name in my_team:\n data = [hero for hero in API_rates if hero['name'] == name]\n roles = data[0][\"roles\"]\n for role in roles: \n team_roles[role] -= 1/len(roles)\n print(team_roles)\n\n candidates = []\n #Obtain eligible candidates by roles\n for hero in API_rates:\n if (hero[\"name\"] in A_side) or (hero[\"name\"] in B_side) or (hero[\"name\"] in A_ban) or (hero[\"name\"] in B_ban) :\n pass\n else:\n for role in hero[\"roles\"]:\n if team_roles[role] > 0:\n candidates.append(hero[\"name\"])\n break\n else:\n pass \n \n nominees = get_nominees(candidates, my_team, enemy_team)\n return nominees[0][\"name\"]",
"def agentCounter(gameState, index, depth):\n if index == gameState.getNumAgents():\n return [depth-1, 0]\n else:\n return [depth, index]",
"def count_choices(self) -> dict:\r\n times_chosen = dict()\r\n\r\n # exclude the optimistic value when counting choices\r\n for arm, values in self.rewards.items():\r\n if self.optim_c not in values:\r\n times_chosen[arm] = len(values)\r\n else:\r\n times_chosen[arm] = 0\r\n\r\n return times_chosen",
"def _choose_clusters_num(database_type: str, synthetic_data_dim: int) -> int:\n data_dim: int = 1\n if database_type == DatabaseType.Synthetic:\n data_dim = synthetic_data_dim\n elif database_type in [DatabaseType.ThreeDRoadNetwork, DatabaseType.IndividualHouseholdElectricPowerConsumption]:\n data_dim = 2\n elif database_type == DatabaseType.HouseSalesInKingCounty:\n data_dim = 8\n return 2 * (data_dim + 1) ** 2 + 2"
] | [
"0.56811786",
"0.5137774",
"0.5076857",
"0.503548",
"0.4957199",
"0.4900915",
"0.48782182",
"0.4876376",
"0.48688662",
"0.48500103",
"0.47992226",
"0.47777793",
"0.475898",
"0.4754263",
"0.47437844",
"0.47234103",
"0.47161484",
"0.4714737",
"0.4711583",
"0.47112495",
"0.47099024",
"0.4708766",
"0.4707552",
"0.47072753",
"0.4702373",
"0.46961015",
"0.46789366",
"0.46784815",
"0.46740007",
"0.46648973"
] | 0.65967184 | 0 |
Create a module item. | def create_module_item(self, module_item, **kwargs):
unrequired_types = ["ExternalUrl", "Page", "SubHeader"]
if isinstance(module_item, dict) and "type" in module_item:
# content_id is not required for unrequired_types
if module_item["type"] in unrequired_types or "content_id" in module_item:
kwargs["module_item"] = module_item
else:
raise RequiredFieldMissing(
"Dictionary with key 'content_id' is required."
)
else:
raise RequiredFieldMissing("Dictionary with key 'type' is required.")
response = self._requester.request(
"POST",
"courses/{}/modules/{}/items".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})",
"def _create_module(self, rootdir):\n name = 'module_' + rootdir.get_name()\n moduleobj = Module(name, rootdir)\n rootdir.set_module(moduleobj)\n self._modules[name] = moduleobj",
"def create_module(cls, *args, **kwargs): # real signature unknown\n pass",
"def create_module(cls, *args, **kwargs): # real signature unknown\n pass",
"def create_item(self, user: User, **kwargs) -> None:",
"def create_module(self, body: list, **kwargs):\n return ast.Module(body=body)",
"def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")",
"def createItem(name, description, category_id, image, user_id):\n i = Item(name=name, description=description, category_id=category_id,\n image=image, user_id=user_id, pub_date=datetime.utcnow())\n db_session.add(i)\n db_session.commit()\n return i",
"def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")",
"def add_item(self):\n item = models.Item(item_name=self.test_item,\n list_id=1,\n description=self.test_item_desc)\n item.add()",
"def create_work_item(self):",
"def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )",
"def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")",
"def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)",
"def _create_module(name):\n module = new.module(name)\n sys.modules[name] = module\n return module",
"def test_create_module_invalid(self):\n payload = {'name': ''}\n res = self.client.post(MODULES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):\r\n location = self.old_course_key.make_usage_key(category, name)\r\n if not draft or category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n mongo.create_and_save_xmodule(location, data, metadata, self.runtime)\r\n if isinstance(data, basestring):\r\n fields = {'data': data}\r\n else:\r\n fields = data.copy()\r\n fields.update(metadata)\r\n if parent_name:\r\n # add child to parent in mongo\r\n parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)\r\n if not draft or parent_category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n parent = mongo.get_item(parent_location)\r\n parent.children.append(location)\r\n mongo.update_item(parent, self.userid)\r\n # create pointer for split\r\n course_or_parent_locator = BlockUsageLocator(\r\n course_key=self.split_course_key,\r\n block_type=parent_category,\r\n block_id=parent_name\r\n )\r\n else:\r\n course_or_parent_locator = self.split_course_key\r\n if split:\r\n self.split_mongo.create_item(course_or_parent_locator, category, self.userid, block_id=name, fields=fields)",
"def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))",
"def create_item_command(cog_href: str, destination: str) -> None:\n item = stac.create_item(cog_href)\n\n item.save_object(dest_href=destination)",
"def createItem(self, parentFolderId, name, description) :\n path = 'item'\n params = { 'folderId': parentFolderId,\n 'name': name,\n 'description': description }\n obj = self.sendRestRequest('POST', path, params)\n if '_id' in obj :\n return obj['_id']\n else :\n raise Exception('Error, expected the returned item object to have an \"_id\" field')",
"def edit(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)",
"def hfp_firmware_pack_item_add(handle, org_dn, hfp_name, hw_vendor, hw_model,\r\n type, version):\r\n\r\n from ucsmsdk.mometa.firmware.FirmwarePackItem import FirmwarePackItem\r\n\r\n dn = org_dn + \"/fw-host-pack-\" + hfp_name\r\n obj = handle.query_dn(dn)\r\n if obj is None:\r\n raise ValueError(\"HFP '%s' does not exist\" % dn)\r\n\r\n mo = FirmwarePackItem(hw_vendor=hw_vendor,\r\n hw_model=hw_model,\r\n type=type,\r\n version=version)\r\n handle.add_mo(mo)\r\n handle.commit()\r\n\r\n return mo",
"def newModule(name, swipl):\n if isinstance(name, str):\n name = Atom(name, swipl)\n\n return swipl.PL_new_module(name.handle)",
"def createItem(name, category, price, user_id):\n try:\n description = wikipedia.summary(name)\n except wikipedia.exceptions.DisambiguationError as e:\n description = wikipedia.summary(name + \" \" + category.name)\n\n i = Item(name=name, description=description,\n category_id=category.id, price=price, user_id=user_id)\n session.add(i)\n session.commit()\n print 'Item \"' + name + '\" added.'\n return i",
"def create(cls):\n return BasketItem(code=str(uuid.uuid4()))",
"def create_item(world: World, item_id: str, x: int, y: int, *args):\n item_id = ITEMS[item_id]\n if item_id == \"coin\":\n item = Coin()\n elif item_id == \"star\":\n item = Star()\n else:\n item = DroppedItem(item_id)\n\n world.add_item(item, x * BLOCK_SIZE, y * BLOCK_SIZE)",
"def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)",
"def create(self, item_type, uuid):\n return self.write.create(item_type, uuid)",
"def create_item(self, obj):\n logger.info('ItemProduct adding item initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n unit, = self.ProductUom.search([('name', '=', obj['units'])])\n template = self.ProductTemplate()\n try:\n if self.Product.search([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n except Exception:\n pass\n template.category = self.ProductCategory.search([('name', '=', obj['category'])])[-1]\n template.default_uom = unit\n template.purchase_uom = unit\n template.type = 'goods'\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price = Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n template.save()\n # transaction.cursor.commit()\n product = self.Product()\n product.template = template\n product.code = obj['id']\n product.description = 'Stock'\n product.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False",
"def test_create_module_successful(self):\n payload = {'name': 'Test Module'}\n self.client.post(MODULES_URL, payload)\n\n exists = Module.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)"
] | [
"0.7228198",
"0.65279996",
"0.63207406",
"0.63207406",
"0.6307136",
"0.6218153",
"0.6193564",
"0.61688536",
"0.6122195",
"0.61181813",
"0.60919625",
"0.6052571",
"0.6050587",
"0.6030838",
"0.6015427",
"0.6014498",
"0.6013799",
"0.59456086",
"0.5937084",
"0.5928567",
"0.5924971",
"0.5920281",
"0.5902033",
"0.5873777",
"0.5870397",
"0.5856607",
"0.58520806",
"0.5821009",
"0.57937807",
"0.57715744"
] | 0.7833921 | 0 |
Delete this module item. | def delete(self, **kwargs):
response = self._requester.request(
"DELETE",
"courses/{}/modules/{}/items/{}".format(
self.course_id, self.module_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __do_module_delete(item):\n\n file_path = DTF_MODULES_DIR + item.install_name\n\n if utils.delete_file(file_path) != 0:\n log.e(TAG, \"Error removing module file! Continuing.\")\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM modules '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n conn.commit()\n\n return 0",
"def delete(self):\r\n self.domain.delete_item(self)",
"def delete(self):\n response = settings.database.delete_item(Key={'id': str(self.id)})\n raise_for_response(response)",
"def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()",
"def delete(self):\n return self.items.delete(item_id=self.id)",
"def delete(self, item):\n self._createAction(item, \"delete\")",
"def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n\n raise NotImplementedError()",
"def _delete(self, item):\n self.cv.delete(item)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self, item):\n # eg. node=item to attrs, telling item type to Graphviz._setattr\n self.graph._del(self.parent.handle, **{self.type: item})",
"def delete(self):\n self.package = None",
"def delete_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n item.data().set_enabled(False)\n self.sync_tree_cache()",
"def uncomplete(self, **kwargs):\n response = self._requester.request(\n \"DELETE\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)",
"def delete(self, itemId):\n\n table = self.__getTable()\n table.delete_item(itemId = itemId)",
"def delete(self, *args, **kwargs):\n pass",
"def delete(self, *args, **kwargs):\n pass",
"def __delitem__(self,item):\n if item == self.lastKey: return\n installer = self.data[item]\n apath = self.dir.join(item)\n if isinstance(installer,InstallerProject):\n apath.rmtree(safety='Installers')\n else:\n apath.remove()\n del self.data[item]",
"def delete(self):\n raise NotImplementedError",
"def delete(self):\n ...",
"def delete(self):\n return self.parent.delete_instance(self.name)",
"def delete(self):\n os.system(\"rm \"+self._name)",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")",
"def delete(self):\n raise NotImplementedError()",
"def delete(self):\n raise NotImplementedError()"
] | [
"0.76490295",
"0.76386184",
"0.7293862",
"0.7082898",
"0.7029256",
"0.7006791",
"0.6655195",
"0.66186965",
"0.66186965",
"0.66186965",
"0.66186965",
"0.6595931",
"0.657923",
"0.65764403",
"0.65764403",
"0.65596324",
"0.65374285",
"0.6532332",
"0.65124965",
"0.6511554",
"0.6504799",
"0.6504799",
"0.64847076",
"0.6469156",
"0.6462438",
"0.6453846",
"0.6430069",
"0.6428038",
"0.63959616",
"0.63959616"
] | 0.7723441 | 0 |
One sample/paired sample permutation test based on a t-statistic. This function can perform the test on one variable or simultaneously on multiple variables. When applying the test to multiple variables, the "tmax" method is used for adjusting the p-values of each variable for multiple comparisons. Like Bonferroni correction, this method adjusts p-values in a way that controls the family-wise error rate. However, the permutation method will be more powerful than Bonferroni correction when different variables in the test | def permutation_t_test(
X, n_permutations=10000, tail=0, n_jobs=None, seed=None, verbose=None
):
from .cluster_level import _get_1samp_orders
n_samples, n_tests = X.shape
X2 = np.mean(X**2, axis=0) # precompute moments
mu0 = np.mean(X, axis=0)
dof_scaling = sqrt(n_samples / (n_samples - 1.0))
std0 = np.sqrt(X2 - mu0**2) * dof_scaling # get std with var splitting
T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))
rng = check_random_state(seed)
orders, _, extra = _get_1samp_orders(n_samples, n_permutations, tail, rng)
perms = 2 * np.array(orders) - 1 # from 0, 1 -> 1, -1
logger.info("Permuting %d times%s..." % (len(orders), extra))
parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs)
max_abs = np.concatenate(
parallel(
my_max_stat(X, X2, p, dof_scaling) for p in np.array_split(perms, n_jobs)
)
)
max_abs = np.concatenate((max_abs, [np.abs(T_obs).max()]))
H0 = np.sort(max_abs)
if tail == 0:
p_values = (H0 >= np.abs(T_obs[:, np.newaxis])).mean(-1)
elif tail == 1:
p_values = (H0 >= T_obs[:, np.newaxis]).mean(-1)
elif tail == -1:
p_values = (-H0 <= T_obs[:, np.newaxis]).mean(-1)
return T_obs, p_values, H0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \n count=0\n print(\"Running {:d} Permutations... 0%\".format(self.n_perm))\n \n # loop over different samplings\n for i in range(self.n_perm):\n \n # Print progress\n progress = int(round(((i+1)/self.n_perm)*100,0))\n progress_list = [25, 50, 75, 100]\n if count < len(progress_list) and progress == progress_list[count]:\n count+=1\n print(\"Running {:d} Permutations... {:d}%\".format(self.n_perm, progress))\n \n # Random permutations of U (sampling without replacement)\n x_resampled = shuffle(union_sample)\n # Assign first NB elements to Benchmark\n B_resampled = x_resampled[:self.NB]\n # Assign remaning NT elements to Trial\n T_resampled = x_resampled[self.NB:]\n \n # Compute the test statistic\n self.TS_tilde[i] = self.TestStatistic(B_resampled, T_resampled)",
"def post_hoc_perm(conditions, n_shuffles, dataframe, method = scipy.stats.ttest_rel, seed = 1010):\n \n np.random.seed(seed)\n\n pairs = [pair for pair in itertools.combinations(conditions, 2)]\n n_pairs = len(pairs)\n\n t = np.floor(n_pairs * 0.25)\n\n obs_cond = {}\n perm_cond = {}\n p_cond = {}\n p_ph = {}\n\n maxT = np.zeros(n_shuffles)\n\n #First loop: Generate permutations\n for n, pair in enumerate(pairs):\n\n if n % t == 0:\n print((n / n_pairs) * 100)\n\n term = pair[0] + '_vs_' + pair[1]\n obs, perm, p = t_perm(dataframe[pair[0]], dataframe[pair[1]], n_shuffles, term)\n obs_cond.update(obs)\n perm_cond.update(perm)\n p_cond.update(p)\n\n\n\n for n in range(0, n_shuffles):\n shuffle = np.array([shuffles[n] for shuffles in perm_cond.values()])\n maxT[n] = shuffle[np.squeeze(np.where(abs(shuffle) == np.max(np.abs(shuffle))))]\n\n p_ph = {cond: sum(abs(maxT) >= abs(obs_cond[cond])) / n_shuffles for cond in obs_cond.keys()}\n \n print('Complete')\n return(obs_cond, perm_cond, maxT, p_ph)",
"def test_all_pairs_t_test_few_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a one-sided (low) Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean < Group 2 mean\r\n# The nonparametric p-values were calculated using 5 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00354023978206\t0.0106207193462\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nfoo\tbaz\t-9.79795897113\t0.000304092472232\t0.000912277416695\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nbar\tbaz\t-3.0\t0.0288344428112\t0.0865033284337\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=5, tail_type='low')\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))",
"def compare_samples(populations,parametric=False):\n from scipy.stats import mannwhitneyu, ttest_ind, f_oneway, kruskal, ranksums\n from statsmodels.stats.multicomp import pairwise_tukeyhsd\n populations = [np.array(pop) for pop in populations] #obscure line to take out missing values\n populations = [pop[~np.isnan(pop)] for pop in populations]\n\n if len(populations) == 2:\n if parametric:\n stat, p_value = ttest_ind(*populations)\n print(\"P-value t-test: {0:2.10f}\".format(p_value))\n else:\n stat, p_value1 = mannwhitneyu(*populations)\n print(\"P-value MWW: {0:2.10f}\".format(p_value))\n stat, p_value2 = ranksums(*populations)\n print(\"P-value Ranksum: {0:2.10f}\".format(p_value))\n \n if len(populations) > 2:\n if parametric:\n stat, p_value = f_oneway(*populations)\n print(\"P-value anova: {0:2.10f}\".format(p_value))\n else:\n stat, p_value = kruskal(*populations) \n print(\"P-value kruskal: {0:2.10f}\".format(p_value))\n \n if p_value < 0.05:\n flatten_pop = []\n label_pop = []\n for i,pop in enumerate(populations):\n flatten_pop += list(pop)\n label_pop += [\"pop{0}\".format(i)]*len(pop)\n \n res2 = pairwise_tukeyhsd(np.asarray(flatten_pop),label_pop)\n print(\"Printing pair comparisons using Tukey HSD\")\n print(res2)\n res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='grups')\n \n print((\"Means: \" + \", {}\"*len(populations)).format(*[np.mean(_) for _ in populations]))\n print((\"STDs: \" + \", {}\"*len(populations)).format(*[np.std(_) for _ in populations]))\n \n \n return p_value",
"def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n x = mean(a)\r\n v = var(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v)/float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = betai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,min(a),max(a),\r\n statname,t,prob)\r\n return t,prob",
"def permutation_test_score(self, estimator, y, n_permutations=100):\n\n return nmf_permutation_test_score(estimator, y, n_permutations=n_permutations)",
"def permutation_test_score(self, estimator, y, n_permutations=100):\n\n return nmf_permutation_test_score(estimator, y, n_permutations=n_permutations, verbose=self.verbose)",
"def manual_perm_test(model: 'Fitted sklearn estimator',\n X: 'Pandas df',\n y: 'Pandas series',\n true_score: float,\n n_permutations: int=10000,\n plot: bool=True,\n clf: bool=False) -> 'p-value, null_counts':\n\n scores = [] # Empty list for null distribution scores\n n_perms = range(1, n_permutations, 1) # Range of values to permute\n for n in tqdm(n_perms, desc='Permutation test'): # tqdm for progress bar\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.90, random_state=n\n )\n model.fit(X_train, y_train)\n y_test_perm = np.random.permutation(y_test) # Permuting class labels\n chance_scores = round(model.score(X=X_test, y=y_test_perm), 4)\n scores.append(chance_scores)\n\n # Converting to a pandas dataframe\n perm_scores_df = pd.DataFrame(data=scores, columns=['null_dist'])\n perm_scores_df['null_dist'] *= 100\n null_counts = (\n perm_scores_df # Counts greater than or equal to our test set score\n .loc[(perm_scores_df['null_dist']) >= true_score]\n .count()\n .iloc[0]\n )\n p_value = (null_counts + 1) / (n_permutations + 1)\n p_value = np.round(p_value, decimals=5)\n\n if plot is True: # Plotting a histogram of permutation scores\n plt.figure(figsize=(10, 10))\n sns.distplot(a=perm_scores_df['null_dist'],\n hist=True,\n label='Permutation scores')\n ylim = plt.ylim()\n if clf is False:\n # True classifier score and p-value\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='R2 score %s (pvalue : %s)' %\n (true_score, p_value))\n else:\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='Multimodal AUC score: %s (pvalue = %s)' %\n (true_score, p_value))\n n_classes = np.unique(y).size\n chance = 2 * [100. / n_classes]\n plt.plot(chance,\n ylim,\n '--k',\n linewidth=3,\n label='Null model mean AUC score: %s' % 50.00)\n \n plt.ylim(ylim)\n plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.38))\n plt.tight_layout()\n\n if clf is False:\n plt.xlabel(xlabel='R2 Scores')\n else:\n plt.xlabel(xlabel='AUC Scores')\n plt.title(label='Null Distribution')\n plt.savefig('quadratic_null_dist.png', dpi=300, bbox_inches='tight')\n plt.show()\n\n return p_value, null_counts",
"def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n if type(a) != N.ndarray:\r\n a = N.array(a)\r\n x = amean(a)\r\n v = avar(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v) / float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n statname,t,prob)\r\n return t,prob",
"def test_permutation(perm):\n n_src = len(perm)\n perm_tensor = torch.Tensor(perm)\n source_base = torch.ones(1, n_src, 10)\n sources = torch.arange(n_src).unsqueeze(-1) * source_base\n est_sources = perm_tensor.unsqueeze(-1) * source_base\n\n loss_func = PITLossWrapper(pairwise_mse)\n loss_value, reordered = loss_func(est_sources, sources, return_est=True)\n\n assert loss_value.item() == 0\n assert_allclose(sources, reordered)",
"def test_permutations(experiment, verbose=False):\n topics = experiment.topics\n no_topics = len(topics) # The total number of topics used for the given experiment.\n no_permutations = experiment.n # The total number of possible permutations.\n\n if verbose:\n print \"Topics: {0} (total of {1})\".format(topics, no_topics)\n print \"Total permutations: {0}\".format(no_permutations)\n print\n\n for i in range(0, no_permutations):\n rotations = experiment.get_rotations(i)\n\n if verbose:\n print \"Permutation {0} ({1})\".format(i, rotations)\n\n for k in range(0, no_topics):\n rotation_topic = experiment.get_rotation_topic(i, k)\n\n if verbose:\n print \"\\tTopic {0} at permutation list position {1}\".format(rotation_topic, k)\n\n if experiment.get_rotations(i)[k] == experiment.get_rotation_topic(i, k):\n if verbose:\n print \"\\t\\tPASS\"\n else:\n if verbose:\n print \"\\t\\tFAIL\"\n return False\n\n if verbose:\n print \"Permutation check PASSED\"\n\n return True",
"def ttest(x, mu=0, alpha=0.05, is_bernoulli=False, two_sided=True, return_tuple=False):\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = f'X_bar = {mu}'\n h1 = f'X_bar != {mu}'\n else:\n quant_order = 1 - alpha\n h0 = f'X_bar <= {mu}'\n h1 = f'X_bar > {mu}'\n\n # Input vector as array\n x = np.asarray(x)\n # Sample size\n n = len(x)\n\n # Empirical mean\n x_bar = x.mean()\n # s estimator (variance)\n if is_bernoulli:\n s2 = x_bar * (1 - x_bar)\n else:\n s2 = desc.var(x)\n\n # Degrees of freedom\n df = n - 1\n\n # T statistic\n t = (x_bar - mu) / (math.sqrt(s2 / n))\n if two_sided:\n t = math.fabs(t)\n # p and critical values\n p = 2.0 * (1.0 - scp.t.cdf(t, df=df))\n\n if n > 30:\n cv = scp.norm.ppf(quant_order)\n else:\n cv = scp.t.ppf(quant_order, df=df)\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='One Sample Student test',\n h0=h0, h1=h1,\n alpha=alpha)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ",
"def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val",
"def permutation_test(overlap_bins, nonoverlap_bins, thresh, ntrials):\n X = num_top_snps(I(overlap_bins.values()), thresh)\n if X == 0:\n return thresh, 0, 0, 0, 1, 0, 0\n overlap_counts = {k: len(overlap_bins[k]) for k in overlap_bins}\n Y = [num_top_snps(match(overlap_counts, nonoverlap_bins), thresh) for _ in range(ntrials)]\n mean, variance = moments(Y)\n anderson, critical_values, _ = scipy.stats.anderson(Y)\n exact_p = (1 + len([y for y in Y if y >= X])) / (1 + ntrials)\n return thresh, X, mean, variance, exact_p, anderson, critical_values[2]",
"def test_p_tilda(self, test_inputs, random_inputs, training):\n \n self.batch_size = test_inputs.shape[0]\n \n self.num_samples = random_inputs.shape[0]\n \n self.add_p_tilda(training = training)\n \n var_list = [self.x, self.x_tilda]\n \n get_p_tilda = theano.function(inputs = var_list,\n outputs= self.p_tilda)\n \n probs = get_p_tilda(test_inputs, random_inputs)\n \n si = self.batch_size+self.np_rand_gen.choice(self.num_samples, 10, False)\n \n return probs[0:self.batch_size], probs[si]",
"def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.permutation((20,), 10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)",
"def paired_permutation_test(D1, a, b, tradeoff, threshold=0.05, R=10000, verbose=1):\n\n # extract the scores by example for each system\n A = D1[D1.policy == a]\n B = D1[D1.policy == b]\n assert (A.example == B.example).all()\n assert (A.index == B.index).all()\n\n W = B.want.sum() # number of thing we want is constant among permutations\n n = len(A.index)\n\n AC = np.array(A.want_and_got) * 1.0\n AG = np.array(A.got) * 1.0\n A_runtime = np.array(A.pushes) * 1.0\n\n BC = np.array(B.want_and_got) * 1.0\n BG = np.array(B.got) * 1.0\n B_runtime = np.array(B.pushes) * 1.0\n\n # observed value of test statistic -- the difference of rewards.\n T_observed = test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n np.zeros(n, dtype=np.int32), W, tradeoff)\n\n r = 0.0\n for _ in iterview(range(R), msg='perm test'):\n # randomly generate a vector of zeros and ones (uniformly).\n # Note: endpoint not included in np.random.randit (that's why theres a 2).\n flip = np.random.randint(0, 2, size=n).astype(np.int32)\n if test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n flip, W, tradeoff) >= T_observed:\n r += 1\n s = (r+1)/(R+1)\n\n # observed rewards\n ra = cgw_f(AC.sum(), AG.sum(), W) - tradeoff*A_runtime.mean()\n rb = cgw_f(BC.sum(), BG.sum(), W) - tradeoff*B_runtime.mean()\n\n if verbose:\n # which system has higher reward? is it significant?\n asig = (red % bold) if ra > rb and s <= 0.05 else '%s'\n bsig = (blue % bold) if rb > ra and s <= 0.05 else '%s'\n any_sig = bold if s <= threshold else yellow\n\n print asig % 'R(A) = %g (%s)' % (ra, a)\n print bsig % 'R(B) = %g (%s)' % (rb, b)\n print any_sig % 'confidence = %g' % (1-s)\n print\n\n if s <= threshold:\n return s, -1 if ra > rb else +1\n else:\n return s, 0 # \"statistical tie\"",
"def run_paired_t(data_generator):\r\n test_stats, pvals = [], []\r\n for b_data, a_data in data_generator:\r\n test_stat, pval = t_paired(b_data, a_data)\r\n test_stats.append(test_stat)\r\n pvals.append(pval)\r\n return test_stats, pvals",
"def mc_t_two_sample(x_items, y_items, tails=None, permutations=999,\r\n exp_diff=0):\r\n if tails is not None and tails != 'high' and tails != 'low':\r\n raise ValueError(\"Invalid tail type '%s'. Must be either None, \"\r\n \"'high', or 'low'.\" % tails)\r\n if permutations < 0:\r\n raise ValueError(\"Invalid number of permutations: %d. Must be greater \"\r\n \"than or equal to zero.\" % permutations)\r\n\r\n if (len(x_items) == 1 and len(y_items) == 1) or \\\r\n (len(x_items) < 1 or len(y_items) < 1):\r\n raise ValueError(\"At least one of the sequences of observations is \"\r\n \"empty, or the sequences each contain only a single \"\r\n \"observation. Cannot perform the t-test.\")\r\n\r\n # Perform t-test using original observations.\r\n obs_t, param_p_val = t_two_sample(x_items, y_items, tails=tails,\r\n exp_diff=exp_diff,\r\n none_on_zero_variance=False)\r\n\r\n # Only perform the Monte Carlo test if we got a sane answer back from the\r\n # initial t-test and we have been specified permutations.\r\n nonparam_p_val = nan\r\n perm_t_stats = []\r\n if permutations > 0 and not (isnan(obs_t) or isnan(param_p_val)):\r\n # Permute observations between x_items and y_items the specified number\r\n # of times.\r\n perm_x_items, perm_y_items = _permute_observations(x_items, y_items,\r\n permutations)\r\n perm_t_stats = [t_two_sample(perm_x_items[n], perm_y_items[n],\r\n tails=tails, exp_diff=exp_diff,\r\n none_on_zero_variance=False)[0]\r\n for n in range(permutations)]\r\n\r\n # Compute nonparametric p-value based on the permuted t-test results.\r\n if tails is None:\r\n better = (absolute(array(perm_t_stats)) >= absolute(obs_t)).sum()\r\n elif tails == 'low':\r\n better = (array(perm_t_stats) <= obs_t).sum()\r\n elif tails == 'high':\r\n better = (array(perm_t_stats) >= obs_t).sum()\r\n nonparam_p_val = (better + 1) / (permutations + 1)\r\n return obs_t, param_p_val, perm_t_stats, nonparam_p_val",
"def permutation(data, dataLabel=None, nperm=10000, decimals=4):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.permutation: data must be'\n + ' a dictionary with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n\n g1 = data[k[0]]\n g2 = data[k[1]]\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n\n combined = np.concatenate((g1, g2))\n diffobs = np.mean(g2)-np.mean(g1)\n diffs = np.zeros(nperm)\n nperm = nperm\n index = range(0, combined.shape[0])\n for i in range(nperm):\n # draw from combined data set without replacement\n #shuff = np.random.randint(combined.shape[0], size=combined.shape[0])\n shuff = np.random.permutation(index)\n ar = combined[shuff[0:len(g1)]]\n br = combined[shuff[len(g1):]]\n diffs[i] = np.mean(br) - np.mean(ar)\n pvalue = np.sum(np.abs(diffs) >= np.abs(diffobs)) / float(nperm)\n if dataLabel is not None:\n print ('\\n%s: Permutation Test (Nperm = %d)' % (dataLabel, nperm))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (Permutation test does not depend on distribution)')\n \n n = max([len(l) for l in k])\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[0].rjust(n), np.mean(g1), np.std(g1, ddof=1),\n len(g1), pc=decimals))\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[1].rjust(n), np.mean(g2), np.std(g2, ddof=1),\n len(g2), pc=decimals))\n summarizeData(data, decimals=decimals)\n # iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n # iqr2 = np.subtract(*np.percentile(g2, [75, 25]))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[0].rjust(n), np.median(g1), iqr1))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[1].rjust(n), np.median(g2), iqr2))\n print(u' Observed difference: {:8.4f}'.format(diffobs))\n print(u' p={:8.6f}, Nperm={:8d}\\n'.format(float(pvalue), int(nperm)))\n return(pvalue, nperm)",
"def entropy_permutation_test(ordered_pitch_types, single_pitch_pdf, conditional_joint_probabilities, total_transitions,\n n=1000):\n pitch_types, pitch_probabilities = zip(*single_pitch_pdf.items())\n permutation_entropies = []\n progress = progressbar.ProgressBar()\n\n for test_number in progress(xrange(n)):\n # create the new matrix\n permutation_counts = {}\n for first_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type] = {}\n for second_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type][second_pitch_type] = 0\n\n pitch_permutation = numpy.random.choice(pitch_types, total_transitions, p=pitch_probabilities)\n current_pitch = numpy.random.choice(pitch_types, p=pitch_probabilities)\n for next_pitch in pitch_permutation:\n permutation_counts[current_pitch][next_pitch] += 1\n current_pitch = next_pitch\n\n joint_probabilities, _, _ = joint_probabilities_from_transitions(ordered_pitch_types, permutation_counts)\n permutation_entropies.append(entropy_from_probability_matrix(joint_probabilities))\n\n joint_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n # print 'Mean', numpy.mean(permutation_entropies)\n # print 'Standard deviation', numpy.std(permutation_entropies)\n # tdof, tloc, tscale = stats.t.fit(permutation_entropies)\n # print 'DF', tdof, 'Loc (mean)', tloc, 'Scale (SD)', tscale\n # t_score = (joint_entropy - tloc) / tscale\n # print stats.t.cdf(joint_entropy, df=tdof, loc=tloc, scale=tscale)\n\n mean, stddev = stats.norm.fit(permutation_entropies)\n print 'Mean = {mean}\\t StdDev = {stddev}'.format(mean=mean, stddev=stddev)\n z_score = (joint_entropy - mean) / stddev\n p_value = stats.norm.cdf(joint_entropy, mean, stddev)\n print 'The joint entropy has a Z-score of {z_score} which gives a P-value of {p_value}'.format(z_score=z_score,\n p_value=p_value)\n return z_score, p_value",
"def test_all_pairs_t_test_no_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\tN/A\tN/A\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\tN/A\tN/A\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\tN/A\tN/A\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=0)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))",
"def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')",
"def posthoc_ttests(dataframe, var_='dVz'):\n posthocs = pg.pairwise_ttests(data=dataframe, dv=var_, within='block', subject='user', between='condition',\n alpha=0.05, within_first=False,\n padjust='fdr_by', marginal=True, return_desc=True, tail='one-sided', parametric=True)\n return posthocs",
"def ttest(self, data: ['SASdata', str] = None,\n by: str = None,\n cls: [str, list] = None,\n freq: str = None,\n paired: str = None,\n var: str = None,\n weight: str = None,\n procopts: str = None,\n stmtpassthrough: str = None,\n **kwargs: dict) -> SASresults:",
"def _perm_stat(self, index): # pragma: no cover\n\n permu = np.random.permutation(self.u)\n permv = np.random.permutation(self.v)\n\n # calculate permuted statics, store in null distribution\n perm_stat = self.indep_test._statistic(permu, permv)\n\n return perm_stat",
"def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T",
"def test_pairwise(self, test_type='t-test'):\n return pair_tests(self.evaluations, test_type, self.diff_var, self.dof)",
"def t_one_sample(x, mu, tails=2):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x = np.asarray(x)\n N = x.size\n df = N - 1\n t_obs = (x.mean() - mu) / (x.std() / np.sqrt(N))\n p_value = tails * st.t.sf(abs(t_obs), df)\n return TtestResults(t_obs, p_value)"
] | [
"0.66151273",
"0.6504498",
"0.60829633",
"0.6001584",
"0.5969407",
"0.5941336",
"0.5911625",
"0.5903978",
"0.5805787",
"0.5780182",
"0.57714087",
"0.56875885",
"0.56720537",
"0.56512654",
"0.5612089",
"0.55364573",
"0.5505601",
"0.55000603",
"0.5491501",
"0.54649895",
"0.5463243",
"0.54480636",
"0.5421607",
"0.5403361",
"0.53854585",
"0.53620905",
"0.5341939",
"0.5336638",
"0.5336576",
"0.531883"
] | 0.70762056 | 0 |
Get confidence intervals from nonparametric bootstrap. | def bootstrap_confidence_interval(
arr, ci=0.95, n_bootstraps=2000, stat_fun="mean", random_state=None
):
if stat_fun == "mean":
def stat_fun(x):
return x.mean(axis=0)
elif stat_fun == "median":
def stat_fun(x):
return np.median(x, axis=0)
elif not callable(stat_fun):
raise ValueError("stat_fun must be 'mean', 'median' or callable.")
n_trials = arr.shape[0]
indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too
rng = check_random_state(random_state)
boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices)))
stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])
ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)
ci_low, ci_up = np.percentile(stat, ci, axis=0)
return np.array([ci_low, ci_up]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range",
"def get_bootstrap_CI(self, alpha, num_samples):\n\n # set random number generator seed\n numpy.random.seed(1)\n\n # initialize delta array\n delta = numpy.zeros(num_samples)\n\n # obtain bootstrap samples\n for i in range(num_samples):\n sample_i = numpy.random.choice(self._data, size=self._n, replace=True)\n delta[i] = sample_i.mean() - self.get_mean()\n\n # return [l, u]\n return self.get_mean() - numpy.percentile(delta, [100*(1-alpha / 2.0), 100*alpha / 2.0])",
"def eeg_bootstrapCI(array,alpha):\t\n\t\n\tif len(array.shape) == 3:\n\t\tprint \"Only works on 2D bootstrapped data (ntpts x nboot)\"\n\t\tarray_low = []\n\t\tarray_high = []\n\telse:\n\t\tntpts, nboot = array.shape\n\t\t#sort along last (bootstrap) dimension\n\t\tarray_srt = np.sort(array,axis=1)\n\t\tarray_low = array_srt[:,np.round(nboot*alpha/2)-1]\n\t\tarray_high = array_srt[:,np.round(nboot*(1-alpha/2))-1]\n\t\treturn array_low,array_high",
"def bootstrap_ci(x, n=300, ci=0.95):\n\n low_per = 100 * (1 - ci) / 2\n high_per = 100 * ci + low_per\n x = removena_numpy(x)\n if not len(x):\n return (np.nan, np.nan)\n bootstrap_samples = choice(a=x, size=(\n len(x), n), replace = True).mean(axis = 0)\n return np.percentile(bootstrap_samples, [low_per, high_per])",
"def compute_boot_ci(window_means, n_boot=100):\n n_curves = window_means.shape[0]\n\n boot_vals = []\n for _ in range(n_boot):\n indices = np.random.randint(0, n_curves, n_curves)\n means = compute_means(window_means[indices, :])\n boot_vals.append(means)\n\n # crop the means sequences to same length\n shortest_len = min([len(means) for means in boot_vals])\n boot_vals = [means[:shortest_len] for means in boot_vals]\n boot_vals = np.array(boot_vals)\n\n ci_lower = np.percentile(boot_vals, 5, axis=0)\n ci_upper = np.percentile(boot_vals, 95, axis=0)\n return ci_lower, ci_upper",
"def _ci(arr, ci=0.95, method=\"bootstrap\", n_bootstraps=2000, random_state=None):\n if method == \"bootstrap\":\n return bootstrap_confidence_interval(\n arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state\n )\n else:\n from .parametric import _parametric_ci\n\n return _parametric_ci(arr, ci=ci)",
"def get_bootstrap_CI(self, alpha, num_samples):\n return None",
"def get_bootstrap_CI(self, alpha, num_samples):\n return None",
"def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]",
"def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment",
"def get_bootstrap_CI(self, alpha, num_samples):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def boot_conf_intervals(indep,\n dep,\n estimator,\n display_name=None,\n resample_cases=False,\n significance=0.05,\n num_sims=10000,\n verbose=True,\n seed=None,\n precision=4):\n if display_name is None:\n display_name = \"\"\n\n est_params = estimator.fit(indep, dep)\n est_params = np.array(est_params)\n\n params_arr = resampling.boot_param_dist(indep=indep,\n dep=dep,\n estimator=estimator,\n num_sims=num_sims,\n resample_cases=resample_cases,\n seed=seed,\n include_fixed_params=False,\n verbose=verbose)\n\n if estimator.has_restricted_params:\n est_params = est_params[estimator.estimated_params_indices]\n\n (bca_ci_df,\n percentile_ci_df,\n basic_ci_df) = _confidence_intervals(params_arr=params_arr,\n est_params=est_params,\n significance=significance,\n estimator=estimator,\n indep=indep,\n dep=dep)\n\n if verbose:\n def my_formatter(x):\n format_str = '.' + str(precision) + 'f'\n return format(x, format_str)\n\n formatters = [my_formatter for dummy in range(len(bca_ci_df.columns))]\n\n print()\n print(\"confidence level: \", 100.0 * (1.0 - significance), \"%\")\n print()\n print(\"bootstrap bca confidence intervals\")\n print()\n print(bca_ci_df.to_string(formatters=formatters))\n# if latex:\n# print(bca_ci_df.to_latex(escape=False, formatters=formatters))\n# else:\n print(\"bootstrap percentile confidence intervals\")\n print()\n print(percentile_ci_df.to_string(formatters=formatters))\n print()\n print(\"bootstrap basic confidence intervals\")\n print()\n print(basic_ci_df.to_string(formatters=formatters))\n print()\n\n return bca_ci_df, percentile_ci_df, basic_ci_df",
"def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm",
"def confidenceInterval(model, N = 30):\n predicted_accuracies = [0]*N\n predicted_roc = [0]*N\n for i in tqdm(range(N)):\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, random_state=i)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n model = model.fit(X_train, y_train)\n predicted_accuracies[i] = accuracy_score(model.predict(X_test), y_test)\n predicted_roc[i] = roc_auc_score(model.predict(X_test), y_test)\n r = np.mean(predicted_roc)\n m = np.mean(predicted_accuracies)\n\n variance_roc = np.var(predicted_roc)\n variance_acc = np.var(predicted_accuracies)\n sd_acc = np.sqrt(variance_acc)\n sd_roc = np.sqrt(variance_roc)\n CI_acc = 2*sd_acc\n CI_roc = 2*sd_roc\n return m, CI_acc, r, CI_roc",
"def test_conf_interval_normal_method_no_conditionals(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no ``conditional_cols``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=None,\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.05, 290.37, 292.42, 292.74), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.41, -5.08, -3.04, -2.72), (\n \"quantiles are incorrect\")",
"def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high",
"def bootstrap(series, func=statistics.mean, confidence=0.9):\n n = len(series)\n n_bootstrap = 250\n digests = []\n for j in range(n_bootstrap):\n bootstrap_sample = [\n random.choice(series)\n for _ in range(n)\n ]\n digest = func(bootstrap_sample)\n digests.append(digest)\n digests.sort()\n low, mid, high = (1.0-confidence)/2.0, 0.5, (1.0+confidence)/2.0\n low, mid, high = int(low*n_bootstrap), int(mid*n_bootstrap), int(high*n_bootstrap)\n return digests[low], digests[mid], digests[high]",
"def get_confidence_interval(self, scores, ci_method='bca', ci_size=0.95, replications=100000, seed_value=None):\n def score(x):\n return np.array([x.mean()])\n data = np.array([float(score) for score in scores])\n if min(data) == max(data):\n return tuple([min(data), max(data)])\n bs = IIDBootstrap(data)\n if seed_value is not None:\n bs.seed(seed_value)\n ci = bs.conf_int(score, replications, method=ci_method, size=ci_size, tail='two')\n return tuple([ci[0][0], ci[1][0]])",
"def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")",
"def get_bootstraps(self):\n col_range = range(self.response.shape[1])\n random_state = np.random.RandomState(seed=self.random_seed)\n return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()",
"def confidence(samples, confidence_level):\n mean = scipy.mean(samples)\n sdev = scipy.std(samples)\n n = len(samples)\n df = n - 1\n t = distributions.t.ppf((1+confidence_level)/2.0, df)\n interval = (interval_low, interval_high) = ( mean - t * sdev / math.sqrt(n) , mean + t * sdev / math.sqrt(n) )\n interval_size = interval_high - interval_low\n interval_percentage = interval_size / mean * 100.0\n return (interval, mean, sdev, interval_percentage)",
"def bootstrap_interval(data, percentiles=(2.5, 97.5), n_boots=100):\n # Create empty array to fill the results\n bootstrap_means = np.zeros([n_boots, data.shape[-1]])\n for ii in range(n_boots):\n # Generate random indices for data *with* replacement, then take the sample mean\n random_sample = resample(data)\n bootstrap_means[ii] = random_sample.mean(axis=0)\n\n # Compute the percentiles of choice for the bootstrapped means\n percentiles = np.percentile(bootstrap_means, percentiles, axis=0)\n return percentiles",
"def do_mean_cis_differ(mean1, ci1, mean2, ci2):\n\n assert ci1 >= 0.0 and ci2 >= 0.0, 'Found negative confidence interval from bootstrapping.'\n x1 = mean1 - ci1\n y1 = mean1 + ci1\n x2 = mean2 - ci2\n y2 = mean2 + ci2\n return do_intervals_differ((x1, y1), (x2, y2))",
"def plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None): \n if ax is None:\n ax = plt.gca()\n\n bootindex = sp.random.randint\n\n for _ in range(nboot):\n resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]\n # Make coeffs of for polys\n pc = np.polyfit(xs, ys + resamp_resid, 1) \n # Plot bootstrap cluster\n ax.plot(xs, np.polyval(pc, xs), \"r-\", linewidth=2, alpha=3.0 / float(nboot))\n\n return ax",
"def calc_bootstrap(fcs,obs,ref,func, bootstrap_range, L, B):\n \n from sklearn.utils import resample\n \n idxs = np.arange(len(fcs))\n results = []\n \n random_state = 0\n for smp in range(B):\n block_sample = np.array([]).astype(int)\n while(len(block_sample) < len(fcs)):\n random_state += 1\n rolls = resample(idxs, n_samples=1, random_state=random_state)[0]\n block = np.roll(idxs, rolls)[0:L]\n block_sample = np.append(block_sample, block)\n\n block_sample = block_sample[0:len(idxs)]\n results.append(func(fcs[block_sample],obs[block_sample],ref[block_sample]))\n \n try:\n out = [ np.percentile(results, bootstrap_range[0]), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, bootstrap_range[1])]\n except:\n out = [ np.percentile(results, 2.5), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, 97.5)]\n\n # For indicating the statistical significance \n # of the lower boundary:\n if(out[0]>0): \n out.append('*')\n else:\n out.append('')\n \n return out",
"def _get_uncertainty_regions(\n mus: np.array, stds: np.array, beta_sqrt: float\n) -> Union[np.array, np.array]:\n low_lims, high_lims = [], []\n\n for i in range(0, mus.shape[1]):\n low_lim, high_lim = _get_uncertainty_region(mus[:, i], stds[:, i], beta_sqrt)\n low_lims.append(low_lim.reshape(-1, 1))\n high_lims.append(high_lim.reshape(-1, 1))\n\n return np.hstack(low_lims), np.hstack(high_lims)",
"def bootstrap(data,func,nboot):\n\n n = len(data)\n resamples = np.array([[random.choice(data) for i in range(n)]\n for j in range(nboot)])\n return np.apply_along_axis(func, 1, resamples)",
"def confint(arr):\n res=[[],[],[]]\n #r=hpd(arr)\n r=(sap(arr,2.5),sap(arr,97.5))\n res[0]=r[0]\n res[1]=arr.mean(0)\n res[2]=r[1]\n return np.array(res)",
"def bootstrap(data, alpha=0.05, n_bootstrap = 2000, func=None, **func_args):\n\t\n\tassert data.ndim == 3, 'Data is not 3-dimensional. Function only works for 3-D data.' \n\t\n\t# Trials form the second dimension\n\tn_trials = data.shape[1]\n\t\n\t# generate randomised bootstrap resamples as random indices\n\tbootstrap_index = np.random.randint(0, n_trials, \n\t\t\t\t\t\t\t\t\t\t(n_trials, n_bootstrap) )\n\t\n\t# For each bin in the histogram, randomly samples from the results\n\t# of each trial and repeats, effectively, n_bootstrap times \n\ttrials_bootstrap = data[:, bootstrap_index, :]\n\t\n\t# dimension one is the trials, zero is the conditions; this averaging \n\t# goes across the trials creating a PSTH for each condition, and,\n\t# importantly, for each bootstrap resample\n\tavg_bootstrap = trials_bootstrap.mean(axis=1)\n\t\n\tif func:\n\t\tavg_bootstrap = func(avg_bootstrap, **func_args)\n\t\t\n\t# find percentile values for each bin along the bootstrap resamples,\n\t# which are on axis 1 \n\tCI_pos = np.percentile(avg_bootstrap, 100*(1 - (alpha/2.)), \n\t\t\t\t\t\t\t\taxis=1)\n\tCI_neg = np.percentile(avg_bootstrap, 100*(alpha/2.), \n\t\t\t\t\t\t\t\taxis=1)\n\n\n\treturn CI_pos, CI_neg",
"def confidenceInterval(start,end,confidence):\n\n\tmean = 0.5*(end+start)\n\tstddev = getStdDev(0.5*(end-start), confidence)\n\n\treturn (mean,stddev)"
] | [
"0.6679884",
"0.6648013",
"0.6620062",
"0.66174746",
"0.6499037",
"0.646875",
"0.6136825",
"0.6136825",
"0.61319476",
"0.6111559",
"0.60788226",
"0.6075902",
"0.6074073",
"0.5812202",
"0.5790033",
"0.57862234",
"0.57668763",
"0.57511514",
"0.574001",
"0.5700028",
"0.5675738",
"0.5670809",
"0.56043696",
"0.5597088",
"0.5589453",
"0.55657214",
"0.5554705",
"0.5529725",
"0.5524544",
"0.5507186"
] | 0.6944301 | 0 |
Check if word is provided in slot values. Send word to URLbuilder and return JSON data. Give user definition information. | def my_word_definition_handler(handler_input):
# type: (HandlerInput) -> Response
slots = handler_input.request_envelope.request.intent.slots
if word_slot in slots:
curr_word = slots[word_slot].value
handler_input.attributes_manager.session_attributes[
word_slot_key] = curr_word
try:
response = http_get(curr_word, False)
if response:
speech = ("The definition of {} with part of speech {} "
"is: {}".format(curr_word, response[0]['fl'], response[0]['shortdef'][0]))
reprompt = ("What word would you like me to look up?")
else:
speech = ("I am sorry I could not find the word {}").format(curr_word)
reprompt = ("What word would you like me to look up?")
except:
speech = ("I am sorry I could not find the word {}. "
"Can I look up another word?").format(curr_word)
reprompt = ("What word would you like me to look up?")
else:
speech = "I'm not sure what word to look up, please try again"
reprompt = ("I didn't catch that. What word would you like me "
"me to look up?")
handler_input.attributes_manager.session_attributes[previous_key] = speech
handler_input.response_builder.speak(speech).ask(reprompt)
return handler_input.response_builder.response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if example_slot in slots:\n curr_word = slots[example_slot].value\n handler_input.attributes_manager.session_attributes[\n example_slot_key] = curr_word\n\n try:\n response = http_get(curr_word, False)\n\n if response:\n example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]\n if example == \"vis\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n elif example == \"wsgram\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n except Exception as e:\n speech = (\"No example is available for {}. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = \"I'm not sure what word to look up, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if synonym_slot in slots:\n curr_word = slots[synonym_slot].value\n handler_input.attributes_manager.session_attributes[\n synonym_slot_key] = curr_word\n\n try:\n synonyms = http_get(curr_word, True)\n\n if type(synonyms[0]) == dict:\n speech = (\"A synonym for {} is {}\".format(curr_word,\n synonyms[0]['meta']['syns'][0][0]))\n synonym_list = synonyms[0]['meta']['syns'][0]\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n except:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = \"I'm not sure what word to find a synonym for, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up a synonym for?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def score_word():\n data = request.json\n user_word = data['word'].upper()\n game_id = data['game_id']\n current_game = games[game_id]\n\n print(\"checking word\", games)\n \n if not current_game.is_word_in_word_list(user_word): # true/false\n result = \"not-word\"\n elif not current_game.check_word_on_board(user_word): # true/false\n result = \"not-on-board\"\n else:\n result = \"ok\"\n \n\n return jsonify(result=result)",
"def guess():\n word = request.args[\"word\"]\n board = session[\"board\"]\n\n # create response by the response of the function if word is valid\n response = boggle_game.check_valid_word(board, word)\n\n return jsonify({'result': response})",
"def lookup_word(word):\n\n return API.get_response(word)",
"def validate_word(word: str) -> bool:\n if word:\n url = f'{OXFORD_DICT_BASE_URL}{OXFORD_DICT_ENTRY_URL}/en-us/{word.lower()}'\n headers = {\n 'app_id': settings.OXFORD_APP_ID,\n 'app_key': settings.OXFORD_API_KEY,\n }\n\n logger.info(f'validating {word} against oxford dictionary...')\n response = requests.get(\n url,\n headers=headers,\n )\n\n if response.status_code == status.HTTP_200_OK:\n return True\n else:\n return False\n\n return False",
"def __call__(self, word):\n return self.parse_request(self.request(f\"https://www.dictionaryapi.com/api/v3/references/collegiate/json/{word}?key={self.apikey}\"), word)",
"def definition(request, word_to_lookup):\n return render(request, 'definition.html')",
"def search_word(word : str = typer.Argument(..., help=\"Searches the trie if the word exists\")):\n response_url = url + \"/search/\" + word\n response = requests.get(response_url)\n typer.echo(response.json()[\"status\"])",
"def wordInfo(self, input_word):\n return self.app.get('/words/1.0/info/' + input_word, follow_redirects=True, headers=self.headers)",
"def get_response(self, word: str):\n lkup = self.get_lookup_segments(word)\n lkup_url = self.url + lkup\n\n res = requests.request(\"GET\", lkup_url, headers=self.headers)\n\n if res.status_code == 200:\n return res.json()['definitions']\n else:\n raise NetworkError()",
"def add_word(word : str = typer.Argument(..., help=\"Adds a word into the trie\")):\n response_url = url + \"/add-word/\" + word\n response = requests.post(response_url)\n # typer.echo(response.status_code)\n typer.echo(response.json()[\"status\"])",
"def distribute_actions(jo):\n\n # check if valid session\n\n # check login\n\n log.log_info(\"in distribute_actions\")\n\n rj = {}\n result = \"\"\n \n action = jo[\"action\"]\n\n log.log_info(\"action is: \" + str(action))\n\n if action == \"addOneWord\":\n\n session = jo[\"session\"]\n\n elif action == \"addText\": # todo: is this anywhere used ???\n\n text = jo[\"text\"]\n language = jo[\"language\"] # the input language\n\n elif action == \"adVocFromUrl\":\n\n log.log_info(\"in distribute_actions adVocFromUrl\")\n\n session = jo[\"session\"]\n user_id = dbs.get_user_id_from_session(session)\n\n time_stamp = int(time.time())\n\n dbac.add_one_word_txt(user_id, jo[\"language\"], jo[\"word\"], jo[\"translationLanguage\"], jo[\"translationWord\"], True, \"\", \"\", time_stamp)\n dbac.add_one_word_txt(user_id, jo[\"translationLanguage\"], jo[\"translationWord\"], jo[\"language\"], jo[\"word\"], False, \"\", \"\", time_stamp)\n\n# now test if it arrived\n log.log_info(\"in distribute_actions preparing response\")\n \n rj['action'] = \"adVocFromUrl\"\n rj['result'] = \"successfully inserted \"\n \n result = json.dumps(rj)\n\n elif action == \"loadWord\": # ATTENTION !!! this is probably not used anymore !!!\n\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\"===============================================================================\")\n log.log_logic(\"=========================== ROUTE loadWord ====================================\")\n log.log_logic(\"===============================================================================\")\n\n log.log_info(\"loading new word\")\n log.log_info(jo)\n\n wordId = jo[\"wordId\"]\n answer = jo[\"answer\"]\n session = jo[\"session\"]\n\n log.log_info(\"answer was \" + answer)\n log.log_info(\"wordId was \" + str(wordId))\n log.log_info(\"session was \" + str(session))\n\n user_id = dbs.get_user_id_from_session(session)\n\n log.log_info(\"user_id is \" + str(user_id))\n\n success, experiment, once_learned = dbl.process_answer(str(wordId), user_id, answer)\n\n log.log_info(\"process_answer done -------------------------------\")\n\n new_id = dbl.get_next_word_id(user_id, str(wordId))\n\n log.log_info(\"get_next_word_id done\")\n\n id, l1, w1, l2, w2 = dbl.get_word(new_id)\n\n #get a random word from the words already learned\n learned_id = dbl.get_learned_random(user_id)\n rnd_id, rnd_l1, rnd_w1, rnd_l2, rnd_w2 = dbl.get_word(learned_id)\n\n rj['action'] = action\n rj[\"wordId\"] = id\n rj[\"language1\"] = dbac.get_language_label(l1)\n rj[\"word1\"] = w1\n rj[\"language2\"] = dbac.get_language_label(l2)\n rj[\"word2\"] = w2\n rj['error'] = False\n rj['error_description'] = \"\"\n rj['success'] = success\n rj['experiment'] = experiment\n rj['once_learned'] = once_learned\n\n rj[\"rnd_wordId\"] = rnd_id\n rj[\"rnd_language1\"] = dbac.get_language_label(rnd_l1)\n rj[\"rnd_word1\"] = rnd_w1\n rj[\"rnd_language2\"] = dbac.get_language_label(rnd_l2)\n rj[\"rnd_word2\"] = rnd_w2\n rj[\"rnd_frequency\"] = 15 #todo: convert to algorithm depending on % learned and size of vocabulary\n\n\n result = json.dumps(rj)\n\n log.log_info(\"distribute_actions(jo) result for new word \" + result)\n\n elif action == \"loadWordArray\":\n\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\"===============================================================================\")\n log.log_logic(\"=========================== ROUTE loadWordArray ====================================\")\n 
log.log_logic(\"===============================================================================\")\n\n log.log_info(\"loading new word array\")\n log.log_info(jo)\n\n wordId = jo[\"wordId\"]\n answer = jo[\"answer\"]\n session = jo[\"session\"]\n\n log.log_info(\"answer was \" + answer)\n log.log_info(\"wordId was \" + str(wordId))\n log.log_info(\"session was \" + str(session))\n\n if len(str(wordId).strip()) > 0:\n\n xxxx, yyyy, w1, zzzz, w2 = dbl.get_word(wordId)\n\n log.log_logic(\"answer was \" + answer)\n log.log_logic(\"wordId was \" + str(wordId))\n log.log_logic(\"w1 was \" + str(w1))\n log.log_logic(\"w2 was \" + str(w2))\n\n user_id = dbs.get_user_id_from_session(session)\n\n log.log_info(\"user_id is \" + str(user_id))\n\n # January 2019 we change this logic now using a ordered list avoiding random\n #success, experiment, once_learned = dbl.process_answer(str(wordId), user_id, answer)\n success, experiment, once_learned = dbl.process_answer_with_sorted_array(str(wordId), user_id, answer)\n\n log.log_logic(\"was experiment? \" + str(experiment))\n log.log_logic(\"was success? \" + str(success))\n log.log_logic(\"once learned? \" + str(once_learned))\n log.log_logic(\"***** processing uf user answer done, now prepare response *****\")\n\n # January 2019 trying out a new algorithm using a logic that does not use random, but ordered by logic\n #new_id_array = dbl.get_next_word_id_array(user_id, str(wordId))\n\n new_id_array = dbl.get_next_word_id_array_ordered_position(user_id, str(wordId))\n\n word_arr = []\n # ToDo: this is here very inefficient code that creates a lot of traffic on database. Integrate in previous function call\n for new_id in new_id_array:\n\n row_j = {}\n id, l1, w1, l2, w2 = dbl.get_word(new_id[0])\n row_j[\"wordId\"] = id\n row_j[\"language1\"] = dbac.get_language_label(l1)\n row_j[\"word1\"] = w1\n row_j[\"language2\"] = dbac.get_language_label(l2)\n row_j[\"word2\"] = w2\n row_j[\"position\"] = new_id[1]\n\n log_str = str(row_j[\"wordId\"]) + \", \"\n log_str += str(row_j[\"position\"]) + \", \"\n log_str += str(row_j[\"word1\"]) + \", \"\n log_str += str(row_j[\"word2\"]) + \", \"\n\n log.log_logic(log_str)\n\n word_arr.append(row_j)\n\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n rj['success'] = success\n rj['bucket-sizes'] = 3\n rj['bucket-distribution'] = [0.6, 0.9]\n rj['experiment'] = experiment\n rj['once_learned'] = once_learned\n rj[\"words\"] = word_arr\n\n log.log_logic(\"sending to client success = \" + str(success))\n log.log_logic(\"sending to client experiment = \" + str(experiment))\n\n # get a random word from the words already learned\n # this is to repeat words and to create a better training set\n learned_id = dbl.get_learned_random(user_id)\n rnd_id, rnd_l1, rnd_w1, rnd_l2, rnd_w2 = dbl.get_word(learned_id)\n\n rj[\"rnd_wordId\"] = rnd_id\n rj[\"rnd_language1\"] = dbac.get_language_label(rnd_l1)\n rj[\"rnd_word1\"] = rnd_w1\n rj[\"rnd_language2\"] = dbac.get_language_label(rnd_l2)\n rj[\"rnd_word2\"] = rnd_w2\n rj[\"rnd_frequency\"] = 10 #todo: convert to algorithm depending on % learned and size of vocabulary\n\n log.log_logic(\"sending to client extra random word: \" + rnd_w1 + \" == \" + rnd_w2)\n\n result = json.dumps(rj)\n\n log.log_info(\"distribute_actions(jo) result for new word \" + result)\n\n elif action == \"editWord\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n\n fromWord = jo[\"fromWord\"]\n toWord = 
jo[\"toWord\"]\n word_id = jo[\"wordId\"]\n\n dbc.update_word_by_id(user_id, fromWord, toWord, word_id)\n\n log.log_info(\"update word done\")\n\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n\n result = json.dumps(rj)\n\n elif action == \"report\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n\n new_words, learned_words, ratio_learned = dbr.get_simple_report(user_id)\n c1, c2, c3, c4 = dbr.get_report_charts(user_id)\n\n log.log_info(\"c1 = \" + str(c1))\n log.log_info(\"c2 = \" + str(c2))\n log.log_info(\"c3 = \" + str(c3))\n log.log_info(\"c4 = \" + str(c4))\n\n log.log_info(\"done getting data for charts\")\n\n rj['action'] = action\n rj['newWords'] = new_words\n rj['learnedWords'] = learned_words\n rj['ratioLearned'] = ratio_learned\n rj['c1'] = c1\n rj['c2'] = c2\n rj['c3'] = c3\n rj['c4'] = c4\n rj['html'] = \"\"\n rj['error'] = False\n rj['error_description'] = \"\"\n\n log.log_info(\"converting to json\")\n\n try:\n result = json.dumps(rj)\n except Exception as ex:\n log.log_error(\"error in making report: \" + str(ex))\n rj = {}\n rj['action'] = action\n rj['error'] = True\n rj['error_description'] = \"error in making report: \" + str(ex)\n result = json.dumps(rj)\n\n log.log_info(\"distribute_actions(jo) result for report = \" + result)\n\n elif action == \"readerSaveText\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n # user_id, language, url, text\n rj['text_id'], err = db_reader.save_text(user_id, jo[\"language\"], jo[\"url\"], jo[\"text\"])\n rj['action'] = action\n if len(err) > 0:\n rj['error'] = True\n rj['error_description'] = err\n else:\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"readerLoadTextTitles\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n rj['titles'] = db_reader.get_text_titles(user_id)\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"readerLoadOneText\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n rj['text'], rj['text_id'] = db_reader.get_one_text(jo[\"id\"], user_id)\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"readerSetTextRead\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n db_reader.set_text_read(jo[\"id\"], user_id)\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"logIn\":\n\n # login and create session\n user = jo[\"user\"].strip()\n password = jo[\"password\"].strip()\n rj['action'] = \"logIn\"\n\n password = password.strip()\n password = password.replace(\" \", \"\")\n\n user = user.lower()\n user = user.strip()\n user = user.replace(\" \", \"\")\n\n if dbs.check_login(user, password) > 0:\n rj['success'] = True\n rj['result'] = \"success\"\n rj['session'] = dbs.make_save_session(user)\n\n # we need to register the session in the MASTER's database\n register_user_and_session_at_master(rj['session'], user)\n\n\n else:\n rj['success'] = False\n rj['result'] = \"failure\"\n rj['session'] = \"\"\n\n 
log.log_info(\"result - \" + str(rj))\n result = json.dumps(rj)\n\n elif action == \"logout\":\n\n # ToDo\n # logfiles out by destroying session and or cookie?\n\n session = jo[\"session\"]\n\n elif action == \"checkSession\":\n\n # check if session is valid\n session = jo[\"session\"]\n rj['action'] = \"checkSession\"\n\n if dbs.check_session(session) > 0:\n log.log_info(\"valid session \" + session)\n rj['sessionValid'] = True\n else:\n log.log_info(\"invalid session \" + session)\n rj['sessionValid'] = False\n\n result = json.dumps(rj)\n\n elif action == \"getLanguages\":\n\n rj['action'] = action\n\n rj['labels'] = [\"English\", \"German\", \"Russian\", \"Franch\", \"Italian\", \"Spanish\", \"Portuguese\"]\n rj['values'] = [\"english\", \"german\", \"russian\", \"franch\", \"italian\", \"spanish\", \"portuguese\"]\n\n rj['error'] = False\n rj['error_description'] = \"\"\n\n result = json.dumps(rj)\n\n elif action == \"resetPassword\":\n\n rj['action'] = action\n\n # ToDo\n # reset password and send new password to user by email\n\n user = jo[\"user\"]\n user = user.lower()\n user = user.strip()\n user = user.replace(\" \", \"\")\n\n if dbs.check_user(user) > 0:\n p = dbs.random_string_simple(6)\n dbs.update_password(user, p)\n # ToDo: put in a separate thread to prevent slow down of process\n # ToDo: make nice test in mail\n email_sender.send_mail(user, \"resetPassword\", \"Password: \" + p)\n rj['result'] = \"success\"\n rj['success'] = True\n log.log_info(\"success in resetting password for \" + user)\n else:\n rj['result'] = \"failure\"\n rj['success'] = False\n log.log_info(\"failure in resetting password because user not existing \" + user)\n\n result = json.dumps(rj)\n\n elif action == \"registerUser\":\n\n rj['action'] = action\n\n # ToDo\n # reset password and send new password to user by email\n\n user = jo[\"user\"]\n user = user.lower()\n user = user.strip()\n user = user.replace(\" \", \"\")\n\n if dbs.check_user(user) < 1:\n\n p = dbs.random_string_simple(4)\n dbs.register_user(user, p)\n\n # ToDo: put in a separate thread to prevent slow down of process\n # ToDo: make nice test in mail\n email_sender.send_mail(user, \"registerUser\", \"password: \" + p)\n\n # wwe need to inform the MASTER about the registration.\n register_user_and_session_at_master(\"\", user)\n\n log.log_info(\"registering user \" + user)\n\n rj['result'] = \"success\"\n rj['success'] = True\n else:\n\n log.log_info(\"user already exists: \" + user)\n\n rj['result'] = \"failure\"\n rj['success'] = False\n\n result = json.dumps(rj)\n\n elif action == \"getSettings\":\n\n session = jo[\"session\"]\n\n rj['action'] = action\n rj['settings'] = settings.get_settings(session)\n rj['result'] = \"success\"\n rj['success'] = True\n\n result = json.dumps(rj)\n\n elif action == \"setSettings\":\n\n session = jo[\"session\"]\n\n data = jo[\"settings\"]\n settings.set_settings(session, data)\n\n rj['action'] = action\n rj['result'] = \"success\"\n rj['success'] = True\n\n result = json.dumps(rj)\n\n elif action == \"bulkAddVoc\":\n\n table_text = jo[\"text\"]\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n\n dbac.add_words_bulk(user_id, table_text)\n\n rj['action'] = action\n rj['result'] = \"success\"\n rj['success'] = True\n\n result = json.dumps(rj)\n\n else:\n # then we have a problem because we do not know the request and we need to throw an error\n log.log_error(\"unknown method for processing JSON\")\n xxx = 111\n\n return 
result",
"def test_word_info_bad_request(self):\n word = \"defination of vitality \"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n \"code\": 400,\n \"message\": \"A Term must be only a single word\"\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 400)\n self.assertEquals(response_data[\"code\"], expected_output[\"code\"])\n self.assertEquals(response_data[\"message\"], expected_output[\"message\"])",
"def word():\n\n word = Word(random_word())\n # word = Word(\"arroyo\")\n\n word.speak()\n word.messup()\n l.debug(\"Displaying %s\", word.word)\n\n prons = sorted([word.word, word.word + \"-a\", word.word + \"-b\", word.word + \"-c\"], key=lambda x: random.random())\n\n return jsonify({\"word\": word.word, \"pron\": prons, \"correct\": prons.index(word.word)})",
"def test_word_info_bad_word(self):\n word = \"hdiasudhisuahdiasushdiaushdiaushdiasuhdisauh\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": None,\n \"defination\": None,\n \"antonyms\": None,\n \"examples\": None,\n \"pronounciation\": None,\n \"synonyms\": None\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])",
"def post(self):\n data = request.json\n return check_spelling(data)",
"def urban_dict(word):\n\n url = \"https://mashape-community-urban-dictionary.p.rapidapi.com/define\"\n\n querystring = {}\n\n querystring[\"term\"] = word\n\n headers = config.headers\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)",
"async def dict(self, ctx, *keywords):\n\n if not keywords:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Did you tried `{}help dict` yet?'.format(self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['error'])\n return\n if keywords:\n old_keyword = \" \".join(keywords)\n try:\n keywords = \"%20\".join(keywords)\n url = 'http://api.urbandictionary.com/v0/define?term={}'.format(keywords)\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n source = await response.json(encoding='utf8')\n\n source = json.dumps(source, indent=2)\n result = json.loads(str(source))\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***'.format(old_keyword),\n colour=0xf20006)\n embed.add_field(name='Word:', value='`{}`'.format(result['list'][0]['word']), inline=False)\n embed.add_field(name='Definition:', value='```{}```'.format(result['list'][0]['definition']), inline=False)\n embed.add_field(name='example:', value='```{}```'.format(result['list'][0]['example']), inline=True)\n embed.add_field(name='Author:', value='`{}`'.format(result['list'][0]['author']), inline=False)\n embed.add_field(name='Link:', value='{}'.format(result['list'][0]['permalink']), inline=False)\n embed.add_field(name='Likes:', value='\\U0001f44d `{}`'.format(result['list'][0]['thumbs_up']),\n inline=True)\n embed.add_field(name='Dislikes:', value='\\U0001f44e `{}`'.format(result['list'][0]['thumbs_down']),\n inline=True)\n\n\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['succes'])\n except Exception as e:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***\\n\\nNothing found :sailboat:'.format(old_keyword, self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['warning'])",
"def create_validation_function(name_of_slot):\n def validate_slot(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n \"\"\"Validate user input.\"\"\"\n\n if value.lower() in self.answers_db()[name_of_slot]:\n # validation succeeded, set the value of the slot to \n # user-provided value\n return {name_of_slot: value}\n else:\n # find the closest answer by some measure (edit distance?)\n choices = self.answers_db()[name_of_slot]\n answer = process.extractOne(value.lower(), choices)\n\n # check to see if distnace is greater than some threshold\n if answer[1] < 45:\n # if so, set slot to \"other\"\n return {name_of_slot: \"other\"}\n else:\n return {name_of_slot: answer[0]}\n \n return(validate_slot)",
"def submit_definition():\n if request.method == \"POST\":\n game = mongo.db.games.find_one(\n {\"game_name\": request.form.get(\"game_name\")})\n user = mongo.db.users.find_one({\"username\": session[\"user\"]})\n today = date.today()\n submission_date = today.strftime(\"%Y/%m/%d\")\n definition = {\n \"term_header\": request.form.get(\"term_header\").upper(),\n \"game_fk\": game['_id'],\n \"short_definition\": request.form.get(\"short_definition\"),\n \"long_description\": request.form.get(\"long_description\", False),\n \"youtube_link\": request.form.get(\"youtube_link\", False),\n \"submitted_by\": user[\"_id\"],\n \"submission_date\": submission_date,\n \"rating\": 1,\n \"upvoted_by\": [user[\"_id\"]],\n \"downvoted_by\": []\n }\n mongo.db.terms.insert_one(definition)\n updateUserRating(definition, 1)\n flash(f\"Thank you, {session['user']}, for your submission\",\n category=\"success\")\n return redirect(url_for(\"get_terms\"))\n try:\n # Ensure that user is logged in before displaying page\n if session[\"user\"]:\n games = mongo.db.games.find().sort(\"game_name\", 1)\n return render_template(\"add_term.html\", games=games)\n except KeyError:\n # Redirect user to homepage if not logged in\n flash(Markup(\"Please <a href='login'>\"\n \"login</a> or <a href='register'>\"\n \"register</a> to add a new definition\"), category=\"error\")\n return redirect(url_for(\"get_terms\"))",
"async def cmd_define(\n self,\n args: Args,\n src: Src,\n _language: str = None,\n _l: str = None,\n _etymology: int = None,\n _e: int = None,\n **_,\n ):\n if not args:\n return \"Wiktionary, the Free Dictionary\\nhttps://en.wiktionary.org/\"\n word = args[0]\n self.log.f(\"dict\", \"Query string: \" + word)\n\n async with src.channel.typing():\n which = _etymology or _e or 0\n\n ref = Define(word, _language or _l, which)\n url = \"https://en.wiktionary.org/wiki/\" + word\n if ref.valid:\n em = discord.Embed(color=0xF8F9FA)\n em.set_author(\n name=\"'{}' on Wiktionary ({} etymolog{} available)\".format(\n word, ref.alts, \"y\" if ref.alts == 1 else \"ies\"\n ),\n url=url,\n icon_url=\"https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png\",\n )\n em.add_field(name=\"Etymology\", value=ref.etymology, inline=False)\n for definition in ref.definitions:\n em.add_field(\n name=\"`{}` ({}):\".format(word, definition[\"partOfSpeech\"]),\n value=\"\\n- \".join(\n [\n text\n for text in definition[\"text\"]\n if not re.search(r\"^\\(.*vulgar.*\\)\", text.lower())\n ]\n ),\n inline=False,\n )\n\n return em\n else:\n raise CommandOperationError(\"No definition found.\")",
"def process_action(action, params, context):\n if action == 'define_word':\n word = params.get('word')\n if word is None:\n return make_simple_reply('I do not know this word')\n word_id = normalize_word(word)\n word_model = ndb.Key('Word', word_id).get()\n if word_model is not None:\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n return generate_definition_reply(word_model)\n \n word_model = Word()\n word_model.learned = False\n word_model.word = word\n word_model.key = ndb.Key('Word', word_id)\n if not get_word_definition(word_model):\n return make_simple_reply('I do not know this word')\n else:\n word_model.practice_count = 1\n word_model.put()\n return generate_definition_reply(word_model)\n \n elif action == 'practice':\n keys = Word.query().filter(Word.learned == False).fetch(keys_only=True)\n selected_word_key = random.sample(keys, 1)[0]\n reply = make_simple_reply(\n 'How about %s! Do you remember it?' % selected_word_key.get().word)\n reply['context'] = [{\n 'name': 'practice',\n 'lifespan': 2,\n 'parameters': {'word_id': selected_word_key.id()}\n }]\n return reply\n \n elif action == 'practice_known':\n # User knows this word. Mark it as learned\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model.learned = True\n word_model.put()\n reply = make_simple_reply('OK, I will not ask this word again')\n reply['context'] = reset_context\n return reply\n \n elif action == 'practice_unknown':\n # User does not know this word. Return its definition\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n reply = generate_definition_reply(word_model)\n reply['context'] = reset_context\n return reply\n \n return make_simple_reply('I did not get that')",
"async def ud(self,word):\r\n defs = ud.define(word)\r\n for d in defs:\r\n await self.bot.say(d)",
"def getWord(wordType):\n if (wordType == ADJECTIVE) or (wordType == ADJECTIVE):\n newWord = input('Enter an ' + wordType.lower() + \":\\n\")\n return newWord\n else:\n newWord = input('Enter a ' + wordType.lower() + \":\\n\")\n return newWord",
"def choose_word():\n pass",
"def define(update, context):\n word = update.message.text\n output = make_output(word)\n if output:\n response_message = output\n else:\n response_message = 'Sorry, I was unable to complete that request.'\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=response_message)",
"def spellcheck():\n text = request.args.get('text', '')\n words = {}\n for word in text.split():\n words[word] = Word(word).spellcheck()\n return jsonify(**words)",
"async def define(self, interaction: Interaction, args: str):\n baseurl = \"https://www.merriam-webster.com/dictionary/\"\n output = args\n await interaction.response.send_message(baseurl + output)",
"def is_validword(word, hand, word_list1):\n # TO DO ... <-- Remove this comment when you code this function\n word_list = []\n cnt_1 = 0\n for i in word:\n word_list += i.split(\",\")\n for i in word_list:\n if i in hand.keys():\n cnt_1 += 1\n if cnt_1 == len(word) and word in word_list1:\n score = get_word_score(word, n_num)\n update_hand(hand, word)\n else:\n print(\"Invalid Word\")"
] | [
"0.61216205",
"0.58270305",
"0.58179843",
"0.57400197",
"0.5649318",
"0.5606277",
"0.5591704",
"0.5455674",
"0.5431328",
"0.5341701",
"0.5329532",
"0.52859133",
"0.5281709",
"0.52787936",
"0.52630574",
"0.51918024",
"0.51817644",
"0.5147042",
"0.51228225",
"0.5112976",
"0.51122284",
"0.5095223",
"0.5091525",
"0.5090254",
"0.5044046",
"0.5017014",
"0.50117487",
"0.5011184",
"0.4999181",
"0.49991223"
] | 0.6205886 | 0 |
This function handles the example sentence intent | def my_word_example_handler(handler_input):
# type: (HandlerInput) -> Response
slots = handler_input.request_envelope.request.intent.slots
if example_slot in slots:
curr_word = slots[example_slot].value
handler_input.attributes_manager.session_attributes[
example_slot_key] = curr_word
try:
response = http_get(curr_word, False)
if response:
example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]
if example == "vis":
vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])
speech = ("An example with {} (part of speech {}) "
"is: {}".format(curr_word, response[0]['fl'],
vis))
elif example == "wsgram":
vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])
speech = ("An example with {} (part of speech {}) "
"is: {}".format(curr_word, response[0]['fl'],
vis))
else:
speech = ("No example is available for {}").format(curr_word)
reprompt = ("What word would you like me to look up?")
else:
speech = ("No example is available for {}").format(curr_word)
reprompt = ("What word would you like me to look up?")
except Exception as e:
speech = ("No example is available for {}. "
"Can I look up another word?").format(curr_word)
reprompt = ("What word would you like me to look up?")
else:
speech = "I'm not sure what word to look up, please try again"
reprompt = ("I didn't catch that. What word would you like me "
"me to look up?")
handler_input.attributes_manager.session_attributes[previous_key] = speech
handler_input.response_builder.speak(speech).ask(reprompt)
return handler_input.response_builder.response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sentence(self):",
"def onCurrentSentence(self, *_args):\n global instance\n log(str(_args))\n #if (instance.isSpeaking and len(_args[1])==0): instance.SpeakDone()\n return",
"def hook(self, sentence, words):\n pass",
"def handle_gui_example_three_intent(self, message):\n self.gui['sampleText'] = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Egestas sed tempus urna et pharetra pharetra massa massa ultricies. Aliquam sem et tortor consequat id porta nibh. Amet est placerat in egestas erat imperdiet sed. Ut ornare lectus sit amet est placerat in egestas erat. Iaculis eu non diam phasellus vestibulum lorem sed risus ultricies. Hac habitasse platea dictumst vestibulum rhoncus est pellentesque. Vulputate eu scelerisque felis imperdiet proin fermentum. Neque convallis a cras semper auctor neque. Pharetra magna ac placerat vestibulum lectus mauris ultrices eros in. Phasellus faucibus scelerisque eleifend donec pretium vulputate. Malesuada bibendum arcu vitae elementum curabitur vitae nunc. Tellus id interdum velit laoreet id donec. Diam donec adipiscing tristique risus nec. Nisi lacus sed viverra tellus in hac habitasse platea. Amet venenatis urna cursus eget nunc scelerisque viverra mauris in. Sit amet nisl suscipit adipiscing bibendum est ultricies. Nec ultrices dui sapien eget mi proin sed. Egestas dui id ornare arcu odio ut sem nulla. Rhoncus aenean vel elit scelerisque. Neque gravida in fermentum et sollicitudin. Pellentesque massa placerat duis ultricies lacus sed. Nunc id cursus metus aliquam eleifend mi. Eu feugiat pretium nibh ipsum consequat nisl. Aenean euismod elementum nisi quis eleifend quam adipiscing vitae. Est ante in nibh mauris cursus mattis. Sagittis eu volutpat odio facilisis mauris sit amet. At consectetur lorem donec massa sapien faucibus. Odio facilisis mauris sit amet. Quis ipsum suspendisse ultrices gravida dictum fusce. Sagittis nisl rhoncus mattis rhoncus urna neque viverra justo nec. Eget mi proin sed libero enim sed faucibus. Interdum velit euismod in pellentesque massa. Et netus et malesuada fames. Velit aliquet sagittis id consectetur purus. Condimentum lacinia quis vel eros donec ac odio tempor orci. Amet consectetur adipiscing elit pellentesque habitant. Eleifend mi in nulla posuere sollicitudin aliquam ultrices sagittis orci. Nisi porta lorem mollis aliquam ut porttitor leo a diam. Egestas integer eget aliquet nibh praesent tristique. Velit scelerisque in dictum non. Id volutpat lacus laoreet non curabitur gravida arcu ac. Suspendisse interdum consectetur libero id faucibus nisl tincidunt eget. Ipsum a arcu cursus vitae congue mauris. Duis at consectetur lorem donec massa. Orci sagittis eu volutpat odio facilisis mauris. Eget mauris pharetra et ultrices neque ornare. Commodo nulla facilisi nullam vehicula ipsum a. Arcu risus quis varius quam quisque. Gravida in fermentum et sollicitudin. Lacus laoreet non curabitur gravida arcu ac tortor dignissim. Netus et malesuada fames ac turpis. Ipsum dolor sit amet consectetur adipiscing. Tellus elementum sagittis vitae et leo duis ut diam quam. Vitae et leo duis ut diam quam nulla. Risus pretium quam vulputate dignissim. Justo laoreet sit amet cursus sit amet dictum sit. Blandit libero volutpat sed cras. Lacus sed viverra tellus in. Ornare lectus sit amet est placerat in egestas erat. Tortor dignissim convallis aenean et tortor at. Tempus quam pellentesque nec nam aliquam. Nisi scelerisque eu ultrices vitae auctor eu augue ut lectus. Consequat id porta nibh venenatis cras sed felis eget. Massa enim nec dui nunc mattis enim ut. Dignissim enim sit amet venenatis urna. Ac tincidunt vitae semper quis lectus nulla at. Sed felis eget velit aliquet sagittis. 
Vel turpis nunc eget lorem dolor sed viverra. Non consectetur a erat nam at lectus. Iaculis eu non diam phasellus vestibulum. Dolor sit amet consectetur adipiscing elit ut aliquam purus sit. Libero justo laoreet sit amet cursus sit. Tellus pellentesque eu tincidunt tortor. Maecenas volutpat blandit aliquam etiam erat velit scelerisque in. Semper risus in hendrerit gravida rutrum quisque non tellus orci. Diam in arcu cursus euismod quis viverra nibh cras pulvinar. Habitasse platea dictumst quisque sagittis purus sit amet volutpat consequat. Elit ut aliquam purus sit. Dui faucibus in ornare quam viverra orci sagittis eu. Purus ut faucibus pulvinar elementum integer. Condimentum lacinia quis vel eros donec ac odio tempor. At in tellus integer feugiat scelerisque varius morbi. Augue eget arcu dictum varius duis. Aliquam sem et tortor consequat id. Bibendum arcu vitae elementum curabitur vitae. Massa sed elementum tempus egestas sed sed. Suscipit adipiscing bibendum est ultricies. Etiam tempor orci eu lobortis.\"\n self.gui.show_page(\"paginationExample.qml\")",
"def test_make_sentences():\n long_comment = ['I think this movie was really good and will go and see it again. '\n 'This movie really sucked and I hated it']\n new_sentences = make_sentences(long_comment[0])\n print(new_sentences)",
"def getSentenceInfo(sentence):\n\tpass",
"def test_get_sentence_sentiments():\n long_comment = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n\n sentence_score_list = get_sentence_sentiments(long_comment[0])\n print(long_comment[0])\n print('per sentence sentiment:', sentence_score_list)\n print()",
"def nao_speech(possible_sentences):\n\n print(random.choice(possible_sentences))",
"def hey(self, sentence=\"\"):\n if sentence == \"\" or sentence.replace(\" \", \"\") == \"\":\n return \"Fine. Be that way!\"\n if sentence.isupper():\n return \"Woah, chill out!\"\n if sentence[-1] == \"?\":\n return \"Sure.\"\n return \"Whatever.\"",
"def main(words, s):\n if words:\n words = int(words)\n click.echo(lorem.words(words))\n\n # Returns a lorem ipsum sentence\n elif s:\n click.echo(lorem.sentence())\n\n # Returns a lorem ipsum paragraph by default\n else:\n click.echo(lorem.paragraph())",
"def example_single(args, model, word2idx):\n #在命令行中加载和分段<目标、(推特内容)>配对\n while True:\n target = raw_input(\"问题: \")\n tweet = raw_input(\"回答: \")\n targets = [str(target)]\n tweets = [str(tweet)]\n seged_tweets = yutils.seg_sentence(tweets, choice=\"list\", place=\"hpc\") # may use lexicon here\n seged_targets = yutils.seg_sentence(targets, choice=\"list\", place=\"hpc\")\n predictions = evaluate(args, model, word2idx, seged_tweets, seged_targets)\n print(\"预测结果: \", predictions)",
"def test_extend_to_sentence(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.5\", \"3.5\"),\n after_sel=(\"1.395\", \"3.142\"),\n command_name=\"extend-to-sentence\",\n )",
"def handle_gui_example_one_intent(self, message):\n self.gui.show_text(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec placerat varius turpis porta scelerisque. Nam feugiat, lectus a ultricies tempus, mi sem tempor felis, vitae laoreet nisi ipsum vitae mauris.\")",
"def response(sentence, model, user_id='123', context={}, show_details=False):\n # Load intents\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n with open(data_path) as json_data:\n intents = json.load(json_data)\n\n # Classify sentence\n results = classify(sentence, model)\n # if we have a classification then find the matching intent tag\n if results:\n # loop as long as there are matches to process\n while results:\n for i in intents['intents']:\n # find a tag matching the first result\n if i['tag'] == results[0][0]:\n # set context for this intent if necessary\n if 'context_set' in i:\n if show_details: print('context:', i['context_set'])\n context[user_id] = i['context_set']\n\n # check if this intent is contextual and applies to this user's conversation\n if not 'context_filter' in i or \\\n (user_id in context and 'context_filter' in i and i['context_filter'] == context[user_id]):\n if show_details: print ('tag:', i['tag'])\n # a random response from the intent\n if i[\"tag\"] == \"goodbye\":\n print(random.choice(i['responses']))\n sys.exit()\n else:\n return print(random.choice(i['responses']))\n\n results.pop(0)",
"def subject_info(intent, extra_info=[]):\n\n text = intent['inputTranscript'].lower()\n utterances = AS.load_file('sample_utterances.txt')\n\n # add \"book\" and \"books\" to every utterance\n for line in list(utterances):\n utterances.insert(0, line + \" book\")\n utterances.insert(0, line + \" books\")\n\n # tells how many characters needs to be dropped before the subject starts\n to_drop = 0\n\n for line in utterances:\n if text.startswith(line):\n to_drop = len(line)\n break\n\n # drops the characters and makes a list from the strings that are left\n text = text[to_drop:].strip()\n text_list = text.split(' ', len(text))\n\n subject_list = []\n keywords = [\"books\", \"book\", \"by\", \"published\", \"written\"]\n keyword = \"\"\n\n # Find out when the book name ends\n for word in text_list:\n if word not in keywords:\n subject_list.append(word)\n else:\n break\n\n subject = \" \".join(subject_list)\n\n # Get all the keywords in the middle, so they can be\n # all be dropped at once, eg written by, books by\n text_list = text_list[len(subject_list):]\n if text_list:\n word = text_list[0]\n while word in keywords:\n keyword += word + \" \"\n text_list = text_list[1:]\n if text_list:\n word = text_list[0]\n else:\n break\n\n # search for an author from the rest of the characters\n author_text = text[len(keyword):].strip()\n author = AS.search(author_text, False)\n if author is \"\":\n author = None\n\n # There might be old info in the extra_info (author), so \n # we need to clear it\n extra_info.clear()\n\n # add the author to extra info so it can be used in the Finna API call\n if author:\n extra_info += [\"author:\\\"\" + author + \"\\\"\"]\n elif intent['sessionAttributes'].get('author'):\n extra_info += [\n \"author:\\\"\" + intent['sessionAttributes']['author'] + \"\\\"\"\n ]\n\n # The Finna API call\n request = lookfor(term=subject, filter=extra_info)['json']\n\n return parse_subject(request, subject, {'author': author})",
"def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD",
"def substantiate():",
"def example(self, message, args):\n return \"Example\"",
"def test_example():\n example_text = ['''Mark and Jack welcome back to couch on crackerjacks today I'm gonna show you how to make a basic and delicious potato salad some people might call this a country style potato salad some people might refer to it as a deli style of potato salad either way it's got the perfect balance of sweet and tangy from the sugar and the vinegar and pickles and everything else that's in this it's just your basic homemade potato salad you can add any number of things to this to make it your own but I'm just going to show you how I like to make mine so without further ado let's get started so naturally I'm going to start out with my potatoes every potato salad starts with potatoes for this recipe and for my potato salad I prefer using just regular old russet potatoes they're the cheapest they're the best I've tried using Yukon Gold potatoes and red potatoes for this recipe I prefer hands down at the russet potatoes it just it makes the best potato salad for me you can use whatever kind of potatoes you like though and using a potato peeler I'm just going to peel these potatoes a little trick for you that little end on most potato peelers it's kind of rounded use that to dig out the eyes of your potato it's what I've always used it for so it's just the perfect little tool to dig out the eyes of a potato but what you want to do is just go ahead and peel your potatoes and you don't have to peel your potatoes if you don't want to if you like skin on potato salad by all means go ahead and leave the skin on it doesn't make any difference personal preference and as you're peeling your potatoes and you get one done go ahead and put them into a large pot this is going to be the same profit I cut these in that's filled up with water you want to make sure and keep your potatoes covered that will prevent your potatoes from oxidizing and turning that pinky brown color but you just want to go through and peel all of your potatoes and I am using three pounds of potatoes for this recipe now once you get all your potatoes peeled you want to go ahead and cut them up basically you want to cut these into about 3/4 inch square pieces so for these medium potatoes I cut them half I turn them 90 degrees cut them into three pea is if you will that way if it's a larger potato do four and then cut those into chunks basically like I said you want about three quarters of an inch by three quarters of an inch by three quarters of an inch pieces and then again throw your potatoes back into the water that you pulled the potatoes out of that way they do not oxidize on you now when you get all your potatoes cut up your water is going to be cloudy and it's gonna be murky and it's gonna be just full of all the starch coming off of those potatoes what you want to do is rinse your potatoes well you want to make sure that the water coming off of that is completely clear go ahead and rinse these a good three or four times and then drain them completely you want to make sure that all of that starch gets off of those potatoes then you want to go ahead and light your stove and take your pot and you want a large pot for this put it over a medium-high heat time actually even high heat or at this point take your drained potatoes and put those into your pot and you want to add enough cold water to this to come up about one inch over the top of the potatoes starting off with cool water your potatoes cook evenly as the water comes up to temperature your potatoes come up with them to temperature if you start out 
putting cold potatoes into boiling water the outside of the potato is gonna be mush before the inside is actually cooked and before this gets going too far I'm gonna take two large eggs and I'm gonna put those in the water with the potatoes this recipe uses hard-boiled eggs and since I'm boiling the potatoes anyway I might as well just boil the eggs right along with the potatoes so just go ahead and add two large eggs to the pot and you want to cover your pot and you want to bring this up to a boil now once your water is that a boy I'll go ahead and give your potatoes an egg a gentle stir you want to be careful with this because you don't do not want to break your eggs and you also don't want to break up the potatoes but once this comes up to a boil you want to boil this for exactly ten minutes and how to check to make sure that your potatoes are done you want to take a couple large pieces take them out put them on a spoon and using a fork you want to put the fork into the potato and you want just a little bit of give in your potatoes before they break apart if you can see there it's just the slightest little bit of give before the potato breaks up you don't want to cook these any longer than that because they they will finish cooking when you take them off heat but you want to go ahead and drain these in a colander and once they are drained well go ahead and pour your potatoes and eggs back into the pot that you cooked them in and here you can dig out your eggs and you want to put your eggs in a bowl of cold water you want to stop that cooking process as soon as possible because if you cook your eggs too long you're gonna get that dreaded green ring around the yolk go ahead and put those in a bowl of cold water to stop the cooking process immediately and then you want to keep your potatoes in the pot that you cook them in to cool and you want to cool them completely before you do anything else with them if you add a salad dressing to hot potatoes it's gonna break on you and you don't want that so just go ahead and let your potatoes steam off and cool and I'm gonna let these sit for about a half an hour before I even start making the dressing for my potato salad and while you're waiting for your potatoes to cool off you can go ahead and peel your eggs it helps to wait a little bit for your eggs to cool down before you peel them just go ahead and crack them on a countertop and then start peeling them if you peel them underneath water or running water they peel super easy so as you can see here's I mean it takes nothing to do it under water water gets under there and the shell just slips off I just go ahead and peel your egg eggs and set them off until later I'm gonna need a few vegetables for my dressing I went ahead and already cut up half of a yellow onion here off a video I thought I was recording when I wasn't you don't need to see me chopped onions anyway everybody knows how to do that I've also got two stalks of celery here I'm just going to cut the ends off as well as the tops if you want to save the tops they make a nice garnish you don't have to keep them and I'm not gonna keep them here the celery I'm going to cut these sticks or stalks into orders and then I'm going to chop those up because I don't like really big chunks of celery in my potato salad so I'm just gonna cut these into four slices and then turn them around and cut these into dices if you will and I'm just going to go ahead after I get that died and set those off to the side until I need them later now for our 
dressing in a large bowl and you want to make sure that you use a plenty large bowl for this because it does make a lot of potato salad I've got one and a half cups of mayonnaise this recipe really does not work with Miracle Whip so since we're gonna be adding sugar to this stick to the plain old mayonnaise I'm gonna throw my eggs in there and using the back of a fork I'm just gonna break up my eggs if you like big chunks of egg in your potato salad don't mash it up as much but I'm gonna mash this up pretty fine and then you want to add in a quarter of a cup of sugar as well as a teaspoon and a half of salt it seems like a lot of salt it really isn't because there are a lot of potatoes here two teaspoons of white vinegar just plain white distilled vinegar then you want to add two tablespoons of sweet pickle relish you could also use dill pickle relish if you wanted to I like sweet in mine and finally I'm gonna add in two teaspoons of prepared yellow mustard if you like a more mustardy potato salad you can add more mustard if you want to this perfectly acceptable and then using a spoon or a fork whatever just go ahead and mix this up well and then you want to add in your onions and celery and go ahead and get that mixed in and you want to make sure to mix all of your ingredients and get your dressing thoroughly mixed before you add the potatoes because you don't want to over mix this once you get your potatoes added so go ahead and take your cooled potatoes again make sure that they are at least room temperature you do not want them warm or hot at all but go ahead and add those into your bowl and then using a spatula I'm going to gently fold the dressing into my potatoes you want your potatoes to remain as in this large of chunks as possible so don't go crazy you know stirring it stirring stirring you want to gently fold this so your potatoes do stay as whole as possible and a little secret for you just to bind up the dressing just a little bit I'm going to add two tablespoons of instant mashed potato flakes into the finished mixture I'm just going to fold this in basically what those do the potato flakes they bind up the dressing and make the dressing firm it also helps it kind of stick to the potatoes a little bit better so you you know the dressing doesn't run off of the potatoes which can be a problem with some recipes so there you go you want to make sure that those potato flakes are evenly distributed in there and everything is well mixed together everything is combined perfectly go ahead and give this a taste make sure that the salt is ok for you if you need a little bit more salt go ahead and add it if you want to if you need more mustard or vinegar or eggs whatever now is the time to do it but you want to go ahead and cover this with a piece of cling wrap saran wrap and refrigerate this for at least four to six hours before you serve this the longer you let this sit the better it gets but there you go there's your basic all-around simple homemade deli style or country style potato salad definitely give this recipe a try if you do let me know how you like it down below in the comment section if you like this video be sure to give it a thumbs up I would greatly appreciate it subscribe for more deliciousness and to keep up to date on all my latest videos thanks so much for watching and we will see you next time''']\n\n return str(example_text)",
"def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if synonym_slot in slots:\n curr_word = slots[synonym_slot].value\n handler_input.attributes_manager.session_attributes[\n synonym_slot_key] = curr_word\n\n try:\n synonyms = http_get(curr_word, True)\n\n if type(synonyms[0]) == dict:\n speech = (\"A synonym for {} is {}\".format(curr_word,\n synonyms[0]['meta']['syns'][0][0]))\n synonym_list = synonyms[0]['meta']['syns'][0]\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n except:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = \"I'm not sure what word to find a synonym for, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up a synonym for?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def tt_entails(knowledge_base, sentence):\n return False",
"def handle_sentence_complex(self, sentence, ctxinfo): \n global vocab\n global START_THRESHOLD \n global lower_attr\n \n for w_i, w in enumerate(sentence):\n case_class = w.get_case_class()\n # Does nothing if it's already lowercase or if it's not alphabetic\n\n if case_class != \"lowercase\" and case_class != \"?\":\n low_key = getattr(w, lower_attr).lower()\n token_stats = vocab[ low_key ]\n percents = get_percents( ctxinfo, token_stats )\n pref_form = get_preferred_form( percents )\n\n if case_class == \"UPPERCASE\" or case_class == \"MiXeD\" :\n if pref_form :\n setattr( w, lower_attr, pref_form ) \n # If the word is UPPERCASE or MiXed and does not have a \n # preferred form, what do you expect me to do about it? \n # Nothing, I just ignore it, it's a freaky weird creature! \n\n elif case_class == \"Firstupper\" :\n occurs = token_stats[ getattr( w, lower_attr) ]\n if ( w_i == 0 or\n re.match( \"[:\\.\\?!;]\", sentence[ w_i - 1 ].surface ) ) and \\\n float(occurs[ 1 ]) / float(occurs[ 0 ]) >= START_THRESHOLD :\n setattr( w, lower_attr, getattr( w, lower_attr ).lower() ) \n elif pref_form :\n setattr( w, lower_attr, pref_form )\n # Else, don't modify case, since we cannot know whether it\n # is a proper noun, a sentence start, a title word, a spell \n # error, etc.\n\n self.chain.handle_sentence(sentence, ctxinfo)",
"def greeting(sentence):\n for word in sentence.split():\n if word.lower() in INPUTS:\n return random.choice(RESPONSES)",
"def hey(sentence):\n if not sentence.strip():\n answer = 'Fine. Be that way!'\n elif sentence.isupper():\n answer = 'Woah, chill out!'\n elif sentence.endswith(\"?\"):\n answer = 'Sure.'\n else:\n answer = 'Whatever.'\n return answer",
"def handle_sentence_simple(self, sentence, ctxinfo):\n global text_version\n global moses_version\n global lower_attr\n \n for w in sentence :\n setattr(w, lower_attr, getattr(w, lower_attr).lower())\n self.chain.handle_sentence(sentence, ctxinfo)",
"def read_sentence(self,data):\n self.addSource(data)\n if self.checkLegal(data):\n self.addTarget(data)\n return True\n else:\n return False",
"def score_sentence(self, sentence):\n\t\t\n\t\t# YOUR CODE HERE",
"def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)",
"def hasConstantForm(self, sentence):",
"def getConstantSentenceForms(self):"
] | [
"0.67601943",
"0.6399362",
"0.6366831",
"0.63550425",
"0.6288494",
"0.62700784",
"0.6220967",
"0.6183083",
"0.6126741",
"0.6104425",
"0.60613656",
"0.60591984",
"0.6058863",
"0.59571433",
"0.5951869",
"0.59310657",
"0.5929599",
"0.5913799",
"0.5897607",
"0.589439",
"0.58943844",
"0.5884642",
"0.5851857",
"0.58299387",
"0.5825041",
"0.58182675",
"0.58036625",
"0.57902575",
"0.578956",
"0.5780092"
] | 0.653233 | 1 |
AMAZON.FallbackIntent is only available in the en-US locale. This handler will not be triggered except in that locale, so it is safe to deploy in any locale. | def fallback_handler(handler_input):
# type: (HandlerInput) -> Response
speech = (
"The {} skill can't help you with that. "
"I can look up a word in the dictionary for you").format(skill_name)
reprompt = ("I can look up a word in the dictionary, "
"Just say any word in English")
handler_input.response_builder.speak(speech).ask(reprompt)
return handler_input.response_builder.response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fall_back_message():\r\n card_title = \"Fallback Message\"\r\n fallback_string = \"Sorry. I couldn't understood it. Please say again.\"\r\n should_end_session = False\r\n session_attributes = { \r\n \"speech_output\": fallback_string,\r\n \r\n \r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, fallback_string, \"Ask me to say hello...\", should_end_session))",
"def _fallback_range(self, utterances, lang, message, fb_range):\n msg = message.reply(\n 'mycroft.skills.fallback',\n data={'utterance': utterances[0][0],\n 'lang': lang,\n 'fallback_range': (fb_range.start, fb_range.stop)}\n )\n response = self.bus.wait_for_response(msg, timeout=10)\n if response and response.data['handled']:\n ret = IntentMatch('Fallback', None, {}, None)\n else:\n ret = None\n return ret",
"def get_fallback_response():\n\n speech_output = FALLBACK_MESSAGE\n return response(speech_response(speech_output, False))",
"def get_fallback_response():\n\n speech_output = FALLBACK_MESSAGE\n return response(speech_response(speech_output, False))",
"def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The Transit Time skill can't help you with that. \"\n \"You can ask when the next bus is coming!\")\n reprompt = \"You can ask when the next bus is arriving!\"\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def fallback(self):\n pass",
"def fallback(self):\n pass",
"def get_fallback_url(self, request):\n tail = self.fallback_url or \"/\"\n if not tail.startswith(\"/\"):\n tail = \"/\" + tail\n return \"/\" + get_best_culture(request, self.name) + tail",
"def fallback_handler(handler_input):\n speech_text = \"See you later! Enjoy the hackathon.\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n True)\n return handler_input.response_builder.response",
"def _load_transliterated_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(\\\n self._transliterate_text(self.lblFallback['text'][start:]))\n pass",
"def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The {} skill can't help you with that. \"\n \"You can tell me your favorite color by saying, \"\n \"my favorite color is red\").format(skill_name)\n reprompt = (\"You can tell me your favorite color by saying, \"\n \"my favorite color is red\")\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The {} skill can't help you with that. \"\n \"You can tell me your favorite color by saying, \"\n \"my favorite color is red\").format(skill_name)\n reprompt = (\"You can tell me your favorite color by saying, \"\n \"my favorite color is red\")\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def fallback(self, kw):\n print(self.fallback_text.format(kw))\n return self.ask()",
"def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t",
"def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n session_attr = handler_input.attributes_manager.session_attributes\n\n speech_text = (\n \"The {} skill can't help you with that.\".format(SKILL_NAME))\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response",
"def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)",
"def testAudioFallback(self):\n if self.audioFallback in tools.AUDIO_FALLBACKS:\n self.assertEqual(\n self.audioFallback,\n self.config.audioFallback\n )\n else:\n self.assertNotEqual(\n self.audioFallback,\n self.config.audioFallback\n )\n self.assertEqual(\n tools.AUDIO_FALLBACK_DEFAULT,\n self.config.audioFallback\n )",
"def use_en(self):\n pass",
"def _load_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(self.lblFallback['text'][start:])\n pass",
"def test_fallback_language_no_current(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n self.assertEqual(\n x.safe_translation_getter(\"tr_title\", language_code=self.other_lang1), \"TITLE_FALLBACK\"\n )",
"def setFontFallback(self,value):\n self.PDFreactorConfiguration.in1[\"fontFallback\"] = value",
"def adaptPythonToNeutral(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToNeutral(self, *args)",
"def on_intent(request, session):\n\n intent_name = request['intent']['name']\n\n # process the intents\n if intent_name == \"comenzar\":\n return get_fact_response()\n elif intent_name == \"otravez\":\n return get_fact_response()\n elif intent_name == \"AMAZON.YesIntent\":\n return get_fact_response()\n elif intent_name == \"AMAZON.NoIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n else:\n print(\"invalid Intent reply with help\")\n return get_help_response()",
"def on_intent(intent_request, session):\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"CountryStatusIntent\":\n return get_country_info(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_start_end_response(False)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return get_start_end_response(True)\n else:\n return get_start_end_response(False)",
"def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)",
"def _register_intent_services(bus):\n service = IntentService(bus)\n # Register handler to trigger fallback system\n bus.on(\n 'mycroft.skills.fallback',\n FallbackSkill.make_intent_failure_handler(bus)\n )\n return service",
"def unhandled_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n intent_name = get_intent_name(handler_input)\n if intent_name == 'ChallengeBossIntent':\n speech_text = 'You need to be in the boss room to challenge the boss. '\n elif intent_name == 'EnterMazeIntent':\n speech_text = 'You already have a maze in progress. Would you like to resume the maze or discard the maze? '\n elif intent_name == 'ResumeMazeIntent' or intent_name == 'DiscardMazeIntent':\n speech_text = 'You are already in a maze or you don\\'t have a maze in progress. Say enter the maze or discard the maze. '\n elif intent_name == 'LocationIntent':\n speech_text = 'You need to be in a maze to locate yourself. Say enter the maze or resume the maze. '\n elif intent_name == 'MoveIntent':\n speech_text = 'You need to be in a maze to take a move. Say enter the maze or resume the maze. '\n else:\n speech_text = 'I am not sure what you are saying. '\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response",
"def adaptNeutralToPython(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptNeutralToPython(self, *args)",
"def test_fallback_language(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n x.save()\n\n with translation.override(self.other_lang2):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"TITLE_FALLBACK\")",
"def handle_answer_request(intent, session):\n\n eins_list = [\"eins\", \"ein\", \"einer\", \"eine\", \"einen\", \"eines\", \"einem\"]\n \n if intent[\"name\"] == \"DontKnowIntent\":\n answer = \"weiß nicht\"\n elif \"Nummer\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Nummer\"]:\n answer = intent[\"slots\"][\"Nummer\"][\"value\"]\n elif \"Antworten\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Antworten\"]:\n answer = intent[\"slots\"][\"Antworten\"][\"value\"]\n else:\n answer = \"Fehler\"\n \n #Necessary to recognize \"1\":\n if answer in eins_list:\n answer = \"1\"\n elif answer == \"ein mal\":\n answer = \"einmal\"\n answer = answer.lower()\n\n print(\"handle_answer_request: \", intent, \"answer: \", answer)\n\n if \"attributes\" not in session:\n return start_game(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Gameon\":\n return check_answer(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Start\":\n return start_game(answer, session)\n\n return start_game(answer, session)"
] | [
"0.5877872",
"0.5590493",
"0.54645765",
"0.54645765",
"0.5408452",
"0.5357488",
"0.5357488",
"0.53020567",
"0.52696717",
"0.5181241",
"0.5174385",
"0.5174385",
"0.5167612",
"0.51544356",
"0.5071739",
"0.5060613",
"0.5031619",
"0.49786136",
"0.4932573",
"0.4868343",
"0.48520908",
"0.48400244",
"0.48286358",
"0.48147333",
"0.47970474",
"0.47958162",
"0.47594538",
"0.4735542",
"0.4662261",
"0.46455666"
] | 0.5607832 | 1 |
Return a list of snapshot_ids associated with the given image | def getSnapshotsOf(image):
snapshotIds = []
deviceMapping = image.block_device_mapping # dict of devices
devices = deviceMapping.keys()
for d in devices:
snapshotId = deviceMapping[d].snapshot_id
if snapshotId is not None:
snapshotIds.append(snapshotId.encode())
return snapshotIds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids",
"def getAmisOf(snapshot, images):\n amis = []\n for im in images:\n snapshotsOfThisIm = getSnapshotsOf(im)\n for soti in snapshotsOfThisIm:\n if soti == snapshot.id:\n amis.append(im)\n return amis",
"def get_image_ids(params: DownloadCommandParameters) -> List[str]:\n if params.retry:\n logger.info(f\"Attempting to download previously failed images.\")\n with open(recovery_file_name()) as fh:\n image_ids = json.load(fh)\n else:\n df = pd.read_csv(params.metadata_file)\n image_ids = df[df[\"dataset\"] == params.dataset][\"isic_id\"]\n\n return list(image_ids)",
"def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res",
"def cmd_account_image_ids(client, args):\n account_image_ids = client.get_account_image_ids(args.username, args.page)\n generate_output({'account_image_ids': account_image_ids})",
"def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts",
"def get_amis_of(snapshot_id):\n mes_amis = []\n # There has GOT to be a better way. Hmm... maybe not\n keys = Ims.spreadsheet.keys()\n for key in keys:\n if snapshot_id in Ims.spreadsheet[key]['associated_snapshots']:\n mes_amis.append(key)\n return mes_amis",
"def __get_image_id(self):\n return self.__get_multi_images_ids(1)",
"def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data",
"def __get_picture_id_list(new):\n id_list = []\n\n if new.image1:\n id_list.append(1)\n if new.image2:\n id_list.append(2)\n if new.image3:\n id_list.append(3)\n if new.image4:\n id_list.append(4)\n\n return id_list",
"def get_my_image_ids(self) -> Union[List[int], None]:\n if self.imported is not True:\n logging.error(f'File {self.file_path} has not been imported')\n return None\n else:\n q = self.conn.getQueryService()\n params = Parameters()\n path_query = self.make_substitutions()\n path_query = path_query.strip('/')\n params.map = {\"cpath\": rstring(path_query)}\n results = q.projection(\n \"SELECT i.id FROM Image i\"\n \" JOIN i.fileset fs\"\n \" JOIN fs.usedFiles u\"\n \" WHERE u.clientPath=:cpath\",\n params,\n self.conn.SERVICE_OPTS\n )\n self.image_ids = [r[0].val for r in results]\n return self.image_ids",
"def find_images(diag_pre_post):\n conn = sqlite3.connect(util.DB_PATH)\n conn.text_factory = str\n cursor = conn.execute('''SELECT pid from Patient where study_id = ? ''',\n (\"LGG_reseksjonsgrad\", ))\n ids = []\n k = 0\n for row in cursor:\n k += 1\n cursor2 = conn.execute('''SELECT id from Images where pid = ? and diag_pre_post = ?''',\n (row[0], diag_pre_post))\n for _id in cursor2:\n ids.append(_id[0])\n cursor2.close()\n\n cursor.close()\n conn.close()\n return ids",
"def _get_ids_from_name_private(self, name):\r\n results = self.list_private_images(name=name)\r\n return [result['id'] for result in results]",
"def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots",
"def jail_snapshot_list(jnid = ''):\n jname = jnid\n if 'BASE-' in jnid:\n jnid = '/BASE-RW/%s@' % jnid\n else:\n jnid = '/%s@' % jnid\n \n try:\n jsnap = subprocess.check_output(\"zfs list -t snapshot |grep \"+jnid, shell=True)\n except:\n msg = \" ERROR: No zfs snapshots found for '%s'\" % (jnid)\n log(msg)\n return False\n\n jsnap = jsnap.split('\\n')\n jsnapn = []\n for i in jsnap:\n i = i.split(' ')\n while True:\n try:\n i.remove(\"\")\n except ValueError:\n break\n jsnapn.append(i)\n\n lmen = ['Number', \"'%s' current snapshots\" % jname, 'Size']\n del jsnapn[-1]\n jsn = 0\n jsnn = []\n for i in jsnapn:\n jsnn.append([jsn, i[0], i[3]])\n jsn = jsn + 1\n\n return [jsnn, lmen]",
"def get_snap_list(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot list --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot list' on node %s. \"\n \"Hence failed to get the snapshot list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"list xml output.\")\n return None\n\n snap_list = []\n for snap in root.findall(\"snapList/snapshot\"):\n snap_list.append(snap.text)\n\n return snap_list",
"def snapshot_identification(snapshot):\n\t\treturn {\n\t\t\t'user_id': snapshot['user_id'],\n\t\t\t'timestamp': snapshot['timestamp'],\n\t\t\t'snapshot_id': snapshot['snapshot_id']}",
"def image_to_list(image):\r\n\r\n return list(image.getdata())",
"def get_ids(voc_path):\n ids = []\n print(\"voc\")\n\n files_images = glob.iglob(os.path.join(voc_path, \"*.JPEG\"))\n for x in files_images:\n name = os.path.splitext(os.path.basename(x))[0]\n ids.append(name)\n print(\"names: \", ids)\n return ids",
"def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return",
"def image_list(self):\n return self._image_list",
"def __get_multi_images_ids(self, num_images=0): \n availability_images = imageInstance()\n images = availability_images.get_images()\n images_ids = []\n for image in images:\n if image.type == 'machine':\n images_ids.append( image.id.encode(\"latin-1\") )\n if num_images>1:\n random.shuffle(images_ids)\n return images_ids[:num_images]\n return images_ids",
"def get_ids_detection(self, split):\n if split == 'test': # test set has no json file. Scrape ids from directory.\n file_names = tf.io.gfile.listdir(\n os.path.dirname(self._image_path_100k.format(split, '')))\n image_names = [f[:-4] for f in file_names if f.endswith('.jpg')]\n return set(image_names)\n\n if split not in self._data:\n self.process_json(split)\n return self._data[split].keys()",
"def _get_ids_from_name_public(self, name):\r\n results = self.list_public_images(name=name)\r\n return [result['id'] for result in results]",
"def get_images_since(self, image_id=None, timestamp=None,\n offset=10, limit=0):\n\n print '%s %s %s %s' % (image_id,timestamp,limit,offset)\n\n if image_id is not None:\n\n print 'got image id'\n\n # figure out what the current id is and than grab\n # our sorted set by index assuming that all ids\n # contain an image\n next_id = int(self.rc.get('images:next_id') or 0)\n\n # how far from the end is the id given\n d = next_id - image_id\n start = next_id - d\n end = next_id - d + limit - 1\n\n print 'getting between %s %s' % (start,end)\n\n # starting back where we think this image is to + limit\n ids = self.rc.zrange('images:ids:timestamps',start,end)\n\n print 'got ids: %s' % ids\n\n elif timestamp:\n\n print 'from timestamp: %s' % timestamp\n\n # get ids from our sorted set by it's weight (aka timestamp)\n # TODO: not use inf\n ids = self.rc.zrangebyscore('images:ids:timestamps',\n timestamp,'+inf')\n\n else:\n print 'could not find images'\n ids = []\n\n # page ids\n if offset < len(ids):\n ids = ids[offset:max(len(ids),limit)]\n else:\n ids = []\n\n print 'found ids: %s' % ids\n\n # return images for each ID\n images = map(self._get_from_redis,ids)\n\n # populate image data\n map(self._populate_image_data,images)\n\n return images",
"def get_legacy_image_ids(self, content_retriever):\n pass",
"def get_legacy_image_ids(self, content_retriever):\n pass",
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs",
"def get_image_tags(self):\n current_images = self.images()\n tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}\n return tags",
"def history(self) -> List[SnapshotLogEntry]:\n return self.metadata.snapshot_log"
] | [
"0.82660115",
"0.6894388",
"0.6246945",
"0.6215981",
"0.61946225",
"0.60906774",
"0.6078066",
"0.607749",
"0.5850587",
"0.5829291",
"0.5715038",
"0.57149994",
"0.5700483",
"0.56884575",
"0.5666648",
"0.5638562",
"0.56358933",
"0.56192404",
"0.55592096",
"0.55305934",
"0.5525555",
"0.5520752",
"0.5501659",
"0.54880667",
"0.5413136",
"0.5403517",
"0.5403517",
"0.53875864",
"0.5386721",
"0.5364545"
] | 0.8464934 | 0 |
Use dictionaries because we'll have to cross-reference to get the snapshots that go with the AMIs. Returns a list of dictionaries representing images from one region. | def getImagesD(region):
images = getImages(region)
imageDicts = []
for im in images:
imageDict = {"name": im.name,
"id": im.id,
"region": im.region.name,
"state": im.state,
"created": im.creationDate,
"type": im.type,
"KEEP": getKeepTag(im),
"name_tag": get_name_tag(im),
"snapshots": getSnapshotsOf(im),
"description": im.description,
"PROD": isProduction(im)
}
imageDicts.append(imageDict)
return imageDicts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts",
"def get_images():\n images = {}\n for k, v in DB.IMAGES.iteritems():\n images[k] = v.__dict__\n return images",
"def images(self) -> dict:\n raise NotImplementedError",
"def getimgs():",
"def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None",
"def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)",
"def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids",
"def getAmisOf(snapshot, images):\n amis = []\n for im in images:\n snapshotsOfThisIm = getSnapshotsOf(im)\n for soti in snapshotsOfThisIm:\n if soti == snapshot.id:\n amis.append(im)\n return amis",
"def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret",
"def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images",
"def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res",
"def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds",
"def images_list(self, kwargs=None):\n\n try:\n scode, images = Rest.get('Image')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n return\n\n n = 1\n e = {}\n for image in images:\n d = {}\n d['Ip'] = image['Ip']\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2) # Converting the size to GB\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))",
"def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []",
"def get_image_information(client):\n\n pipeline = [{\"$match\": {\"camera_views\": {\"$exists\": 1}}}, {\"$unwind\": {\"path\": \"$camera_views\"}}, {\"$addFields\": {\n \"camera_views.average_linear_distance\": {\n \"$divide\": [\n \"$camera_views.total_linear_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.average_angular_distance\": {\n \"$divide\": [\n \"$camera_views.total_angular_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.timestamp\": \"$timestamp\",\n \"camera_views._id\": \"$_id\",\n \"camera_views.database\": client.database.name,\n \"camera_views.collection\": client.name,\n 'camera_views.file_id':\"$camera_views.images.file_id\", #Add the Color image id for downloading and testing\n }}, {\"$replaceRoot\": {\"newRoot\": \"$camera_views\"}}, {\"$project\": {\n \"_id\": 1,\n \"num_entities\": 1,\n \"average_linear_distance\": 1,\n \"average_angular_distance\": 1,\n \"timestamp\": 1,\n \"duplicate\": 1,\n \"database\":1,\n \"collection\":1,\n \"file_id\":{\"$arrayElemAt\":[\"$images.file_id\",0]}, # Only keep the first file id (The Color image)\n }}]\n pprint.pprint(pipeline)\n result = list(client.aggregate(pipeline))\n return result",
"def get_image_collection(images_directory, variant, dimensions):\n \n stains = variant.get(\"values\", [])\n primary_stain = variant.get(\"primary_value\", None) \n\n image_data = []\n\n images_directory_glob = images_directory\n if images_directory_glob[-1] != os.sep:\n images_directory_glob = images_directory_glob + os.sep\n images_directory_glob = images_directory_glob + \"*\"\n\n for image_file in glob.glob(images_directory_glob):\n file_name = os.path.basename(image_file)\n\n image_stain = None\n image_root = file_name\n for stain in stains:\n image_root = image_root.replace(stain, \"\")\n if stain in file_name:\n image_stain = stain\n\n image_dimensions = {}\n\n for key, value in dimensions.iteritems():\n if value[\"type\"] == \"String Match\":\n image_dimensions[key] = \\\n get_dimension_string_match(file_name, value.get(\"data\", []))\n elif value[\"type\"] == \"Date Parse\":\n image_dimensions[key] = get_dimension_date_parse(file_name)\n\n experiment_name = experiment.get_experiment_name(file_name)\n experiment_date = experiment.get_experiment_date(file_name)\n\n seed_source_image = file_name\n if primary_stain not in seed_source_image and primary_stain is not None:\n for stain in stains:\n seed_source_image = seed_source_image.replace(stain, primary_stain)\n \n image_data.append({\n \"file_name\": file_name,\n \"file_root\": image_root,\n \"stain\": image_stain, # TODO: Deprecate\n \"experiment_name\": experiment_name, # TODO: Deprecate\n \"experiment_date\": experiment_date, # TODO: Deprecate\n \"seed_source_image\": seed_source_image,\n \"variant\": image_stain,\n \"dimensions\": image_dimensions\n })\n\n return image_data",
"def list_images():\n return json_response(list_manifests())",
"def get_image_set_for_uid(uid: str):\n images = get_all_image_structs(uid, Config.study_size, is_accumulating=False)\n res = {'images': images}\n return res",
"def detail(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n }\n\n if 'marker' in req.str_params:\n params['marker'] = self._get_marker(req)\n\n images = db_api.image_get_all_public(None, **params)\n\n image_dicts = [make_image_dict(i) for i in images]\n return dict(images=image_dicts)",
"def get_image_list(im, dic):\n lst = list(im.getdata())\n tiles = []\n for i in range(len(lst)):\n #print find_similar(lst[i], dic)[random.randrange(10)][1]\n tiles.append(find_similar(lst[i], dic)[random.randrange(10)][1])\n return tiles",
"def get_objects(self, image_np: np.array,\n image: Image) -> Tuple[Dict, object]:\n pass",
"def get_additional_images_downsample(widget) -> Dict[str, str]:\n images = {}\n for layer in widget.viewer.value.layers.selection:\n if layer._source.path is not None:\n images[layer._name] = str(layer._source.path)\n return images",
"def getImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n in_dict = {}\n in_dict[\"name\"] = img.name\n in_dict[\"b64str\"] = img.b64str\n in_dict[\"imgsize\"] = img.imgsize\n in_dict[\"processed\"] = img.processed\n in_dict[\"timestamp\"] = img.timestamp\n return in_dict",
"def getAllForImages(self):\n imageDict = {}\n for id, name in self.getAll().items():\n imageDict[id] = {}\n imageDict[id][\"name\"] = name\n imageDict[id][\"filename\"] = \"The_Steamer_Great_Western_small.jpg\"\n\n return imageDict",
"def loadImagesAvatar(self): \n dictionary = {}\n dictionary[\"body\"] = None\n dictionary[\"shoes\"] = None\n dictionary[\"shirt\"] = None\n dictionary[\"trousers\"] = None\n dictionary[\"skirt\"] = None\n dictionary[\"head\"] = None\n dictionary[\"hair\"] = None\n dictionary[\"mask\"] = None\n return dictionary",
"def test_get_imagelist_inmutable(self):\n images1 = self.mock_master.get_imagelist(self.region1)\n images2 = self.mock_master.get_imagelist(self.region1)\n r2dict = dict((i.id, i) for i in images2)\n self.assertEquals(images1, images2)\n self.assertNotEquals(id(images1), id(images2))\n for image in images1:\n self.assertIn(image.id, r2dict)\n image2 = r2dict[image.id]\n self.assertEquals(image, image2)\n self.assertNotEquals(id(image), id(image2))\n self.assertNotEquals(id(image.user_properties),\n id(image2.user_properties))",
"def get_images(self):\r\n if self.images is None:\r\n self.images = {}\r\n for name, img_num in self.images.iteritems():\r\n if isinstance(img_num, int):\r\n yield (name, img_num)",
"def getPictures (self, list) :\n\n result = []\n for event in list :\n eventEntry = {}\n eventEntry ['id'] = link = event.answer.id\n eventEntry ['time'] = event.timeOf\n eventEntry ['comments'] = event.answer.comments\n eventEntry ['location'] = self.where (event.answer)\n eventEntry ['problem'] = event.answer.survey ['problem_type']\n eventEntry ['pictures'] = self.pic (Picture.objects.filter (answer__id = link))\n result.append (eventEntry)\n \n return result",
"def get_imgid_dict(ann):\n return {item[1][\"file_name\"]: item[0] for item in ann.imgs.items()}",
"def detail(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n images = self._image_service.detail(context, filters=filters)\n images = common.limited(images, req)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=True) for image in images])"
] | [
"0.74375165",
"0.669343",
"0.6320971",
"0.6303299",
"0.6287237",
"0.6220304",
"0.6202331",
"0.6175909",
"0.60951483",
"0.6012867",
"0.60106635",
"0.59991914",
"0.59578496",
"0.5940436",
"0.5936577",
"0.5930096",
"0.5909181",
"0.5908506",
"0.5903435",
"0.58975405",
"0.5881977",
"0.58542645",
"0.5824696",
"0.5811506",
"0.58090603",
"0.5804399",
"0.5748864",
"0.5746013",
"0.5739648",
"0.57352364"
] | 0.7583085 | 0 |
Return a list of dictionaries representing snapshots from one region | def getSnapshotsD(region):
# Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
snapshots = getSnapshots(region)
snapshotsDicts = []
ims = getImages(region)
for s in snapshots:
amis = getAmisOf(s, ims)
amiIds = []
amiKeeps = []
if len(amis) == 1:
amiIds = amis[0].id.encode()
amiKeeps = getKeepTag(amis[0])
elif len(amis) == 0:
amiIds = "-------no-AMI-found"
amiKeeps = "-------no-AMI-found"
else:
for a in amis:
amiIds.append(a.id.encode())
amiKeeps.append(getKeepTag(a))
snapshotsDict = {"id": s.id,
"status": s.status,
"region": s.region.name,
"progress": s.progress,
"start_time": s.start_time,
"volume_id": s.volume_id,
"volume_size": s.volume_size,
"KEEP-tag": getKeepTag(s),
"Name": get_name_tag(s),
"AMI(s)": amiIds,
"AMI_KEEP-tags": amiKeeps,
"PROD": isProduction(s),
"Description": s.description
}
snapshotsDicts.append(snapshotsDict)
return snapshotsDicts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots",
"def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []",
"def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )",
"def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts",
"def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()",
"def get_snapshots(self) -> SnapshotListing:\n return self.snapshots",
"def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return",
"def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps",
"def generateInfoSnapshots(regions):\n print \"Writing snapshots info to output file %s\" % snapshots_data_output_file\n snapshots = []\n for r in regions:\n snapshots += getSnapshotsD(r)\n print \".\" # feedback for the user\n with open(snapshots_data_output_file, 'w') as f2:\n f2.write(\"SNAPSHOTS\\n\")\n f2.write(\n \"Name\\tsnapshot_id\\tKEEP-tag_of_snapshot\\tKEEP-tag_of_AMI\\tproduction?\\tassociated_AMI\\tstart_time\\tstatus\"\n \"\\tregion\\tprogress\\tassociated_volume\\tvolume_size\\tdescription\\n\\n\")\n for s in snapshots:\n f2.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],\n s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))",
"def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info",
"def list_snapshots(args):\n html_doc = document.Document(get_code(args.file))\n edition, region, snapshots = html_doc.list(date=args.edition, region=args.region)\n print('Snapshots for {:s} {:%B %d, %Y}'.format(region.capitalize(), edition))\n for i in range(len(snapshots)):\n print('({:2d}) {!r:} -'.format(i, snapshots[i][1]) +\n ' {0:%B} {0.day:2}, {0:%Y %l:%M:%S.%f %p}'.format(snapshots[i][0]))",
"def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots",
"def populate_snapshots(self):\n print \"Populating snapshots info...\"\n snapshots = self.get_all_snapshots()\n\n for i in snapshots:\n\n # find the ami id(s) for this snapshot. API allows for multiple even though I don't think there would be\n associated_ami_ids = self.get_amis_of(i.id)\n\n ami_keep_tags = [Ims.spreadsheet[ami_id]['KEEP_tag'] for ami_id in associated_ami_ids]\n\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n ami_KEEP_tag=ami_keep_tags, associated_ami_ids=associated_ami_ids,\n PROD_tag=self.is_production(i), start_time=i.start_time,\n region=i.region.name, associated_volume=i.volume_id,\n volume_size=i.volume_size, description=i.description)",
"def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids",
"def list_snapshots(project):\n data = {constants.PROJECT_PARAMETER: project}\n res = requests.post(_url + \"list_snapshots/\", data=data,\n auth=(_username, _password))\n if res.status_code == 200:\n snapshots = json.loads(res.content)\n table = PrettyTable(field_names=[\"Snapshot\", \"Parent\"])\n for snapshot in snapshots:\n table.add_row(snapshot)\n click.echo(table.get_string())\n else:\n click.echo(res.content)",
"def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data",
"def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds",
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs",
"def get_content(self):\r\n content = []\r\n for regiongroup in self.region_groups:\r\n for region in regiongroup.get_content():\r\n # Add date, unique_name and project to the metadata\r\n region[0]['date'] = self.extracted_date\r\n region[0]['unique_name'] = self.unique_name\r\n try:\r\n project = os.path.split(\r\n os.path.split(self.unique_name)[0]\r\n )[1]\r\n except IndexError:\r\n project = ''\r\n region[0]['project'] = project\r\n content.append(region)\r\n return content",
"def list_snapshots(self, detail=False, **params):\n url = 'snapshots'\n list_schema = schema.list_snapshots_no_detail\n if detail:\n url += '/detail'\n list_schema = schema.list_snapshots_with_detail\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(list_schema, resp, body)\n return rest_client.ResponseBody(resp, body)",
"def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine",
"def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)",
"def get_snapshots(FIELDS='all'):\n snapinfostr = fork_and_get_output(\"zfs list -t snapshot -H -o {0}\".format(FIELDS).split())\n header = get_zfs_snap_header()\n snapinfo = snapinfostr.splitlines()\n snapobjs = []\n for snapstr in snapinfo:\n snapobjs.append(DataZFS(snapstr, header, 'snapshot'))\n return snapobjs",
"def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name",
"def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }",
"def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots",
"def list(self, detailed=True, search_opts=None, marker=None, limit=None,\n sort=None):\n resource_type = \"snapshots\"\n url = self._build_list_url(resource_type, detailed=detailed,\n search_opts=search_opts, marker=marker,\n limit=limit, sort=sort)\n return self._list(url, resource_type, limit=limit)",
"def test_aws_service_api_snapshots_get(self):\n pass",
"def perform_snapshot(context, region, installed_region='us-east-1'):\n LOG.info('Reviewing snapshots in region %s', region)\n\n # fetch these, in case we need to figure out what applies to an instance\n configurations = dynamo.list_configurations(context, installed_region)\n LOG.debug('Fetched all possible configuration rules from DynamoDB')\n\n # build a list of any IDs (anywhere) that we should ignore\n ignore_ids = utils.build_ignore_list(configurations)\n\n # setup some lookup tables\n cache_data = utils.build_cache_maps(context, configurations, region, installed_region)\n all_instances = cache_data['instance_id_to_data']\n instance_configs = cache_data['instance_id_to_config']\n volume_snap_recent = cache_data['volume_id_to_most_recent_snapshot_date']\n\n for instance_id in set(all_instances.keys()):\n # before we go do some work\n if timeout_check(context, 'perform_snapshot'):\n break\n\n if instance_id in ignore_ids:\n continue\n\n snapshot_settings = instance_configs[instance_id]\n\n # parse out snapshot settings\n retention, frequency = utils.parse_snapshot_settings(snapshot_settings)\n\n # grab the data about this instance id, if we don't already have it\n instance_data = all_instances[instance_id]\n\n ami_id = instance_data['ImageId']\n LOG.info('Reviewing snapshots in region %s on instance %s', region, instance_id)\n\n for dev in instance_data.get('BlockDeviceMappings', []):\n # before we go make a bunch more API calls\n if timeout_check(context, 'perform_snapshot'):\n break\n\n # we probably should have been using volume keys from one of the\n # caches here, but since we're not, we're going to have to check here too\n LOG.debug('Considering device %s', dev)\n volume_id = dev['Ebs']['VolumeId']\n\n if volume_id in ignore_ids:\n continue\n\n # find snapshots\n recent = volume_snap_recent.get(volume_id)\n now = datetime.datetime.now(dateutil.tz.tzutc())\n\n # snapshot due?\n if should_perform_snapshot(frequency, now, volume_id, recent):\n LOG.debug('Performing snapshot for %s, calculating tags', volume_id)\n else:\n LOG.debug('NOT Performing snapshot for %s', volume_id)\n continue\n\n # perform actual snapshot and create tag: retention + now() as a Y-M-D\n delete_on_dt = now + retention\n delete_on = delete_on_dt.strftime('%Y-%m-%d')\n\n volume_data = utils.get_volume(volume_id, region=region)\n expected_tags = utils.calculate_relevant_tags(\n instance_data.get('Tags', None),\n volume_data.get('Tags', None))\n\n utils.snapshot_and_tag(\n instance_id,\n ami_id,\n volume_id,\n delete_on,\n region,\n additional_tags=expected_tags)",
"def list_snapshots(session, verbose):\n # type: (Session, bool) -> Union[List[str], List[Dict[str,str]]]\n if not session.network:\n raise ValueError(\"Network must be set to list snapshots\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_SNAPSHOTS\n )\n return _get_list(session, url_tail, {CoordConstsV2.QP_VERBOSE: verbose})"
] | [
"0.71616983",
"0.68013346",
"0.64756656",
"0.6455287",
"0.6392508",
"0.6385133",
"0.63725513",
"0.6275416",
"0.6235013",
"0.62241733",
"0.6165991",
"0.6158216",
"0.6134359",
"0.605335",
"0.6047018",
"0.6043569",
"0.6014061",
"0.5964335",
"0.5959861",
"0.59526664",
"0.5890748",
"0.58749694",
"0.58680886",
"0.58511454",
"0.58156496",
"0.58014673",
"0.5779456",
"0.57731444",
"0.5749603",
"0.57409245"
] | 0.78463835 | 0 |
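The getSnapshotsD document above relies on a getSnapshots(region) helper that is not defined in this excerpt; a minimal sketch of what it could look like with boto 2, assuming credentials are already configured in the environment:

import boto.ec2

def getSnapshots(region):
    # hypothetical helper: only the snapshots owned by this account, in one region
    conn = boto.ec2.connect_to_region(region)
    return conn.get_all_snapshots(owner='self')
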
return a list of dictionaries representing volumes from one region | def getVolumesD(region):
volumes = getVolumes(region)
instances = getInstancesD(region)
volumesDicts = []
    for v in volumes:
volumesDict = {"id": v.id,
"KEEP-tag": getKeepTag(v),
"instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
"instance": v.attach_data.instance_id,
"status": v.status,
"size": v.size,
"create-time": v.create_time,
"region": v.region.name,
"zone": v.zone,
"snapshot_id": v.snapshot_id,
"PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes",
"def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret",
"def volumes(self) -> dict:\n return self.data[\"volumes\"]",
"def volumes(self):",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))",
"def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()",
"def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})",
"def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)",
"def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs",
"def generateInfoVolumes(regions):\n print \"\\nWriting volumes info to output file %s\" % volumes_data_output_file\n with open(volumes_data_output_file, 'w') as f1:\n f1.write(\"VOLUMES\\n\")\n f1.write(\n \"Name\\tvolume_ID\\tKEEP-tag_of_volume\\tKEEP-tag_of_instance\\tproduction?\\tvolume_attachment_state\\tassociated_instance\\tinstance_state\\tsize\\tcreate_time\\tregion\\tzone\\tassociated_snapshot\\n\\n\")\n for r in regions:\n volumes = getVolumes(r)\n print \".\" # give some feedback to the user\n for v in volumes:\n f1.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,\n v.create_time, v.region.name, v.zone, v.snapshot_id))",
"def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts",
"def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts",
"def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols",
"def data():\n return volumes_fetchers.get_json_data()",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def get_volume(vol_dir):\n volume = []\n # Retrieve all the dicom filepaths\n files = get_filepaths(vol_dir)\n \n for slice_nr, dicom_path in enumerate(files):\n ds = pydicom.dcmread(dicom_path)\n img = ds.pixel_array\n \n if slice_nr == 0:\n # Get this on the first slice only\n spacing = ds.PixelSpacing\n spacing.append(ds.SliceThickness)\n spacing = np.asarray(spacing)\n \n # Note: In our case, sequence name contains venc and direction info\n sequence_name = ds.SequenceName\n # print(sequence_name)\n\n volume.append(img)\n volume = np.asarray(volume)\n return volume, spacing, sequence_name",
"def calculateVolumes(data):\n print \"Calculating volumes...\"\n results = {}\n for dataLine in data:\n name = dataLine['name']\n r1 = dataLine['r1']\n r2 = dataLine['r2']\n r3 = dataLine['r3']\n r4 = dataLine['r4']\n t1 = dataLine['t1']\n t2 = dataLine['t2']\n t3 = dataLine['t3']\n volCup = (math.pi/3.0) * t1 * ((r1**2) + (r4**2) - (r1*r4))\n volPeanut = math.pi * (t1 - t2 - t3) * ((r2**2) + (r3**2) - (r2*r3)) / 3.0\n volChoc = volCup - volPeanut\n ratio = volChoc/volPeanut\n print \"Ratio for \" + name + \" is \" + str(ratio)\n results[name] = [r1, volChoc, volPeanut, volCup, ratio]\n return results",
"def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in data]\n return volumes",
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs",
"def get_complete_volume_info_all():\n\n return_list = []\n try:\n vl, err = get_basic_volume_info_all()\n if err:\n raise Exception(err)\n # print 'vl is', vl\n\n if vl:\n for vol_info_dict in vl:\n\n rd, err = get_complete_volume_info(\n vol_info_dict['name'], vol_info_dict)\n if err:\n raise Exception(err)\n\n return_list.append(rd)\n\n except Exception, e:\n return None, 'Error getting complete volume information for all volumes: %s' % str(e)\n else:\n return return_list, None",
"def volumes(self):\n return self._volumes",
"def volumes(self, details=True):\n if details:\n vol = _volume.Volume\n else:\n vol = _volume.VolumeDetail\n\n return list(self._list(vol, paginated=False))",
"def get_surfaces_per_volume(my_core, entityset_ranges):\n\n s_p_v = {}\n for volumeset in entityset_ranges['Volumes']:\n s_p_v[volumeset] = my_core.get_child_meshsets(volumeset).size()\n return s_p_v",
"def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)",
"def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []",
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots",
"def getVolumePoints(minRes, rRes, region):\n\n # when every resolution has the same bndry buffer\n maxDx = (1. + 1.e-8) * lx / float(minRes)\n dr = pecRad / float(rRes)\n\n # shell distances inside dielectric\n rmin = 0.5 * math.sqrt(3.0) * maxDx\n rmax = epsRad - 3.0 * maxDx\n rIn = numpy.arange(rmin, rmax, dr)\n\n # shell distances outside dielectric\n rmin = epsRad + 3.0 * maxDx\n rmax = pecRad - 3.0 * maxDx\n rOut = numpy.arange(rmin, rmax, dr)\n\n if region == \"in\":\n rs = rIn\n elif region == \"out\":\n rs = rOut\n else:\n rs = numpy.concatenate([rIn, rOut])\n\n points = []\n for r in rs:\n dTheta = math.acos(1.0 - 0.5 * (dr / r)**2)\n thetaMin = math.asin(maxDx / r / math.sqrt(2.0))\n thetaMax = math.acos(0.5 * maxDx / r)\n for theta in numpy.arange(thetaMin, thetaMax, dTheta):\n sinTh = math.sin(theta)\n dPhi = dTheta / sinTh\n phiMin = math.asin(0.5 * maxDx / (r * sinTh))\n phiMax = math.acos(0.5 * maxDx / (r * sinTh))\n for phi in numpy.arange(phiMin, phiMax, dPhi):\n points.append([r * math.sin(theta) * math.cos(phi),\n r * math.sin(theta) * math.sin(phi),\n r * math.cos(theta)])\n return points",
"def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list"
] | [
"0.70386356",
"0.68808",
"0.6829185",
"0.68042403",
"0.6804185",
"0.66357",
"0.65796185",
"0.6541075",
"0.64998627",
"0.6476049",
"0.6467089",
"0.6443325",
"0.641429",
"0.64000684",
"0.62908834",
"0.62458056",
"0.61916316",
"0.6191406",
"0.611974",
"0.6119484",
"0.61047727",
"0.6095844",
"0.6070277",
"0.60485876",
"0.60419893",
"0.6039224",
"0.6025237",
"0.59674895",
"0.59635985",
"0.5837586"
] | 0.77420294 | 0 |
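getVolumesD above leans on getVolumes(region) and getInstanceOf(volume), neither of which is shown in this excerpt; one plausible boto 2 sketch (both helper names and the lookup strategy are assumptions):

import boto.ec2

def getVolumes(region):
    # hypothetical helper: every EBS volume visible to the account in one region
    conn = boto.ec2.connect_to_region(region)
    return conn.get_all_volumes()

def getInstanceOf(volume):
    # hypothetical helper: the instance a volume is attached to, or None if unattached
    # (callers such as getKeepTag would need to guard against the None case)
    instance_id = volume.attach_data.instance_id
    if instance_id is None:
        return None
    conn = boto.ec2.connect_to_region(volume.region.name)
    reservations = conn.get_all_reservations(instance_ids=[instance_id])
    return reservations[0].instances[0] if reservations else None
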
return a list of dictionaries representing instances for one region, will help with volumeinstanceKEEPtag lookup. Maybe. | def getInstancesD(region):
instances = getInstances(region)
    instancesDicts = []
    for i in instances:
        instancesDict = {"id": i.id,
                         "KEEP-tag": getKeepTag(i),
                         "instance_type": i.instance_type,
                         "state": i.state,
                         "launch_time": i.launch_time,
                         "security_groups": getGroups(i),
                         "region": i.region.name,
                         "PROD": isProduction(i)
                         }
        instancesDicts.append(instancesDict)
    return instancesDicts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv",
"def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name",
"def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))",
"def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict",
"def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts",
"def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer",
"def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances",
"def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))",
"def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)",
"def populate_instances(self):\n print \"Populating instances info...\"\n instances = self.get_all_instances()\n for i in instances:\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n PROD_tag=self.is_production(i), instance_type=i.instance_type,\n state=i.state, launched=i.launch_time, region=i.region.name)",
"def getXeprInstances():\n apilib = _loadapilib()\n instances = _findInst(apilib)\n return dict([(p, t) for p, t in instances])",
"def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }",
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations",
"def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def ow_instances(ow, ow_stack):\n log.info(\"ow_instances( %s )\", ow_stack)\n try:\n instances = ow.describe_instances(StackId=ow_stack)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n ow_launch_data = {}\n for instance in instances['Instances']:\n created_at = datetime.datetime.strptime(\n instance['CreatedAt'], '%Y-%m-%dT%H:%M:%S+00:00').strftime('%s')\n ow_launch_data[instance['InstanceId']] = created_at\n log.info(\"instance %s, created %s\", instance, created_at)\n return ow_launch_data",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)",
"def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))",
"def instances(self):\n return self.get('instances')",
"def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances",
"def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)",
"def ListInstances(self,\n resource_group_name: Optional[str] = None\n ) -> Dict[str, 'AZComputeVirtualMachine']:\n instances = {} # type: Dict[str, AZComputeVirtualMachine]\n az_vm_client = self.compute_client.virtual_machines\n if not resource_group_name:\n responses = common.ExecuteRequest(az_vm_client, 'list_all')\n else:\n responses = common.ExecuteRequest(\n az_vm_client,\n 'list',\n {'resource_group_name': resource_group_name})\n for response in responses:\n for instance in response:\n instances[instance.name] = AZComputeVirtualMachine(\n self.az_account,\n instance.id,\n instance.name,\n instance.location,\n zones=instance.zones)\n return instances",
"def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)",
"def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)"
] | [
"0.72672004",
"0.71622515",
"0.68902445",
"0.68787754",
"0.68357",
"0.6603997",
"0.6595504",
"0.6591907",
"0.6584421",
"0.6475704",
"0.6437322",
"0.64160633",
"0.63718474",
"0.63532615",
"0.63506424",
"0.63015544",
"0.62685406",
"0.6161752",
"0.6136149",
"0.61351824",
"0.6126755",
"0.60812336",
"0.60655797",
"0.6022264",
"0.60089386",
"0.6004393",
"0.59942245",
"0.59898496",
"0.589123",
"0.5883361"
] | 0.7885678 | 0 |
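getInstancesD above assumes getInstances(region) and getGroups(instance) helpers; a short sketch under the same boto 2 assumptions, flattening reservations into a plain list of instance objects:

import boto.ec2

def getInstances(region):
    # hypothetical helper: flatten reservations into a list of instance objects
    conn = boto.ec2.connect_to_region(region)
    instances = []
    for reservation in conn.get_all_reservations():
        instances.extend(reservation.instances)
    return instances

def getGroups(instance):
    # hypothetical helper: names of the security groups attached to an instance
    return [group.name for group in instance.groups]
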
retrieve list of AMIs that refer to a given snapshot | def getAmisOf(snapshot, images):
amis = []
for im in images:
snapshotsOfThisIm = getSnapshotsOf(im)
for soti in snapshotsOfThisIm:
if soti == snapshot.id:
amis.append(im)
return amis | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_amis_of(snapshot_id):\n mes_amis = []\n # There has GOT to be a better way. Hmm... maybe not\n keys = Ims.spreadsheet.keys()\n for key in keys:\n if snapshot_id in Ims.spreadsheet[key]['associated_snapshots']:\n mes_amis.append(key)\n return mes_amis",
"def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds",
"def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids",
"def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return",
"def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving snapshots\", \"/snapshots\")",
"def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts",
"def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )",
"def get_snap_list(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot list --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot list' on node %s. \"\n \"Hence failed to get the snapshot list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"list xml output.\")\n return None\n\n snap_list = []\n for snap in root.findall(\"snapList/snapshot\"):\n snap_list.append(snap.text)\n\n return snap_list",
"def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine",
"def get_snapshot_list(self, base, snappref=\"SPECTRA_\"):\n #print('Looking for spectra in', base)\n powerspectra = FluxPower(maxk=self.max_k)\n for snap in range(30):\n snapdir = os.path.join(base,snappref+str(snap).rjust(3,'0'))\n #We ran out of snapshots\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base,\"PART_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base, \"snap_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n continue\n #We have all we need\n if powerspectra.len() == np.size(self.zout):\n break\n try:\n ss = self._get_spectra_snap(snap, base)\n# print('Found spectra in', ss)\n if ss is not None:\n powerspectra.add_snapshot(snap,ss)\n except IOError:\n print(\"Didn't find any spectra because of IOError\")\n continue\n #Make sure we have enough outputs\n if powerspectra.len() != np.size(self.zout):\n raise ValueError(\"Found only\",powerspectra.len(),\"of\",np.size(self.zout),\"from snaps:\",powerspectra.snaps)\n return powerspectra",
"def jail_snapshot_list(jnid = ''):\n jname = jnid\n if 'BASE-' in jnid:\n jnid = '/BASE-RW/%s@' % jnid\n else:\n jnid = '/%s@' % jnid\n \n try:\n jsnap = subprocess.check_output(\"zfs list -t snapshot |grep \"+jnid, shell=True)\n except:\n msg = \" ERROR: No zfs snapshots found for '%s'\" % (jnid)\n log(msg)\n return False\n\n jsnap = jsnap.split('\\n')\n jsnapn = []\n for i in jsnap:\n i = i.split(' ')\n while True:\n try:\n i.remove(\"\")\n except ValueError:\n break\n jsnapn.append(i)\n\n lmen = ['Number', \"'%s' current snapshots\" % jname, 'Size']\n del jsnapn[-1]\n jsn = 0\n jsnn = []\n for i in jsnapn:\n jsnn.append([jsn, i[0], i[3]])\n jsn = jsn + 1\n\n return [jsnn, lmen]",
"def getami(ec2, glob):\n\treturn [\n\t\ti for i in ec2.images.filter(\n\t\t\tFilters=[{'Name': 'name', 'Values': [glob]}]\n\t\t)\n\t]",
"def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots",
"def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res",
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs",
"def RetrieveACISA():\n\tdb = DBConnector()\n\tcur = db.cursor()\n\n\tSQLcmd = \"SELECT * FROM snaps.SNAPsLocation\"\n\tcur.execute(SQLcmd)\n\treturnList = []\n\tcount = 0\n\tfor item in cur.fetchall():\n\t\tcount += 1\n\t\ttmplist = [item[1], item[2], count, str(item[0])]\n\t\treturnList.append(tmplist)\n\treturn returnList",
"def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []",
"def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))",
"def get_ami_by_id ( ec2_conn, ami_id ) :\n amis = ec2_conn.get_all_images( image_ids = [ ami_id ] )\n for ami in amis :\n return ami",
"def snapshot_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"snapshot_arns\")",
"def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data",
"def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots",
"def get_snapshots(self) -> SnapshotListing:\n return self.snapshots",
"def list_snapshots(session, verbose):\n # type: (Session, bool) -> Union[List[str], List[Dict[str,str]]]\n if not session.network:\n raise ValueError(\"Network must be set to list snapshots\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_SNAPSHOTS\n )\n return _get_list(session, url_tail, {CoordConstsV2.QP_VERBOSE: verbose})",
"def share_snapshot_access_get_all_for_snapshot_instance(\n context, snapshot_instance_id, filters=None,\n with_snapshot_access_data=True, session=None):\n session = session or get_session()\n filters = copy.deepcopy(filters) if filters else {}\n filters.update({'share_snapshot_instance_id': snapshot_instance_id})\n\n query = _share_snapshot_instance_access_get_query(context, session)\n\n legal_filter_keys = (\n 'id', 'share_snapshot_instance_id', 'access_id', 'state')\n\n query = exact_filter(\n query, models.ShareSnapshotInstanceAccessMapping, filters,\n legal_filter_keys)\n\n instance_accesses = query.all()\n\n if with_snapshot_access_data:\n instance_accesses = _set_instances_snapshot_access_data(\n context, instance_accesses, session)\n\n return instance_accesses",
"def snapshot_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"snapshot_arns\")",
"def snapshot_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"snapshot_arns\")",
"def list_amis(self):\n images = self._driver.list_images(ex_owner=self.account_id)\n return images",
"def get_snapshot_children(self, snapshot):\n LOG.debug('get_snapshot_children starts.')\n pool_name = self.configuration.rbd_pool\n volume_name = \\\n 'volume-%s' % encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n children = list()\n children_on_snap = \\\n self._get_snapshot_children(pool_name, volume_name, snap_name)\n if children_on_snap is not None:\n for child in children_on_snap:\n item = dict()\n if len(child) == 2:\n item[\"pool_name\"] = child[0]\n item[\"volume_name\"] = child[1]\n if child[1].startswith(\"volume-\"):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1][len(\"volume-\"):]\n elif uuidutils.is_uuid_like(child[1]):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n children.append(item)\n\n LOG.debug('snapshot children: %s', children)\n LOG.debug('get_snapshot_children finished.')\n return children",
"def get_ami_by_name ( ec2_conn, ami_name ) :\n amis = ec2_conn.get_all_images( filters = { \"name\": [ ami_name ] } )\n for ami in amis :\n return ami"
] | [
"0.72130966",
"0.6595902",
"0.6541823",
"0.6166669",
"0.61240566",
"0.60049653",
"0.59619623",
"0.5923196",
"0.5922994",
"0.5888076",
"0.5858373",
"0.5827235",
"0.58137566",
"0.580514",
"0.57657516",
"0.57389045",
"0.5632397",
"0.56060064",
"0.5601814",
"0.56017476",
"0.5551599",
"0.5520943",
"0.55089486",
"0.54938936",
"0.5476389",
"0.54459274",
"0.54459274",
"0.541808",
"0.54120713",
"0.54077"
] | 0.77030766 | 0 |
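A small usage sketch tying getAmisOf back to the snapshot audit above (getImages, getSnapshots and getKeepTag are the helpers assumed elsewhere in this document):

def reportOrphanSnapshots(region):
    # hypothetical report: snapshots in a region that no AMI refers to
    images = getImages(region)
    for snapshot in getSnapshots(region):
        if not getAmisOf(snapshot, images):
            print("%s (KEEP=%s) is not referenced by any AMI" % (snapshot.id, getKeepTag(snapshot)))
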
If tag with key='KEEP' exists, return its value (can be an empty string), else it's 'notag' | def getKeepTag(obj):
if 'KEEP' in obj.tags:
return obj.tags['KEEP']
else:
return "-------no-tag"
# try:
# tag = obj.tags['KEEP']
# except:
# # Note: some with empty KEEP-tags, through web console they look the same as those untagged
# return "-----"
# return tag | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_keep_tag(obj):\n if 'KEEP' in obj.tags and len(obj.tags['KEEP'].strip()) != 0:\n return obj.tags['KEEP']\n else:\n return \"-------no-tag\"",
"def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return max(self.word_tags[w], key=self.word_tags[w].get)",
"def gettag(query, lemmatag=False):\n if lemmatag:\n return lemmatag\n\n tagdict = {'N': 'n',\n 'J': 'a',\n 'V': 'v',\n 'A': 'r',\n 'None': False,\n '': False,\n 'Off': False}\n\n # in case someone compiles the tregex query\n try:\n query = query.pattern\n except AttributeError:\n query = query\n \n\n qr = query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', '')\n firstletter = next((c for c in qr if c.isalpha()), 'n')\n return tagdict.get(firstletter.upper(), 'n')",
"def getStrNo(self, key):\n value = self.getConf(key);\n if value == \"no\":\n return None\n else:\n return value",
"def check_tag(obj, tag_name):\n rfctag = None\n if obj.get('Tags'):\n for tag in obj.get('Tags'):\n if tag.get('Key') == tag_name:\n tag_value = tag.get('Value')\n tag_value = re.sub('[,]', '/', tag_value)\n return tag_value\n continue\n if not rfctag:\n return str(\"no-record\")",
"def salvage_tag_data(tag_text):\n data = process_start_tag(tag_text)\n tag = data[0]\n attributes = data[1]\n # Jloggraph applet data\n if tag == \"param\" and \"name\" in attributes:\n if attributes[\"name\"] == \"table\" and \"value\" in attributes:\n return attributes[\"value\"]\n # Spacegroup\n if tag_is_spacegroup(tag_text):\n return tag_text\n\n # Return an empty string by default\n return \"\"",
"def _issingleton(self, tagname):\n return self.shortempty",
"def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)",
"def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"",
"def tag_to_wordnet(tag):\n if (tag == 'ADJ'): return('a')\n elif (tag == 'ADV'): return('r')\n elif (tag == 'NOUN'): return('n')\n elif (tag == 'VERB'): return('v')\n else: return None",
"def cypher_unknownTag_keyword(self, variable_tagUnknown=\"tag_unknown\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagUnknown}{self.label}'+ \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"",
"def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute', 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict",
"def get_tag_value_or_none(node, element_name):\n tag_value = node.tags.get(element_name, 'n/a')\n\n if 'n/a' == tag_value:\n return None\n\n return tag_value",
"def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute',\n 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict",
"def getOptionalTag(node, tag, option=\"\"):\n try:\n return getTag(node, tag)\n except TagError:\n return option",
"def tag_word(self, w):\n return self._default_tag",
"def tag_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tag_value\")",
"def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"",
"def cypher_naTag_keyword(self, variable_tagNA=\"na_tag\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagNA}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\"{self.keyword}\"' + \"})\"",
"def gettag(query, lemmatag = False):\n import re\n if lemmatag is False:\n tag = 'n' # same default as wordnet\n # attempt to find tag from tregex query\n tagfinder = re.compile(r'^[^A-Za-z]*([A-Za-z]*)')\n tagchecker = re.compile(r'^[A-Z]{1,4}$')\n treebank_tag = re.findall(tagfinder, query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', ''))\n if re.match(tagchecker, treebank_tag[0]):\n if treebank_tag[0].startswith('J'):\n tag = 'a'\n elif treebank_tag[0].startswith('V') or treebank_tag[0].startswith('M'):\n tag = 'v'\n elif treebank_tag[0].startswith('N'):\n tag = 'n'\n elif treebank_tag[0].startswith('R'):\n tag = 'r'\n elif lemmatag:\n tag = lemmatag\n tagchecker = re.compile(r'^[avrn]$')\n while not re.match(tagchecker, lemmatag):\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: WordNet POS tag \"%s\" not recognised.\\n It must be:\\n\\n ' \\\n ' a: (adjective)' \\\n ' n: (noun)' \\\n ' r: (adverb)' \\\n ' v: (verb)\\n\\nYour selection: ' % (time, lemmatag))\n lemmatag = selection\n return tag",
"def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"stitch\":\n return \"NewStitch\"\n return key",
"def keep(tag):\n if tag.name != 'span':\n return\n if tag.parent.has_attr('class'):\n for c in tag.parent['class']:\n if 'example' in c:\n return\n\n if tag.has_attr('class'):\n if 'types' in tag['class']:\n if 'customTracks' not in tag['class']:\n return True",
"def _tag_of(entry: _LexiconEntry) -> str:\n return entry[\"tag\"].upper()",
"def get_value(soup, tag, cond, default=None):\r\n ele = soup.find(tag, cond)\r\n if ele:\r\n return ele.text.strip()\r\n return default",
"def __getitem__(self, tag):\n return self.__tags.get(tag.lower(), 0)",
"def word_tag(self, word):\n if word[1] in (\"NN\", \"NNS\", \"NNP\", \"NNPS\"):\n return _wordnet.NOUN\n if word[1] in (\"JJ\", \"JJR\", \"JJS\"):\n return _wordnet.ADJ\n if word[1] in (\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"):\n return _wordnet.VERB\n if word[1] in (\"RB\", \"RBR\", \"RBS\"):\n return _wordnet.ADV\n\n return None",
"def mapping(tag):\n\n return gvars.METAINFO['tag_map'][tag.replace('1', '')]",
"def _get_xml_tag(doc):\n tag = type(doc).type_key.split('.')[3]\n tag = convert.str_to_camel_case(tag)\n\n return tag",
"def get_tag_value(\n service: str,\n tags: List[Any],\n tag_key: str,\n) -> str:\n capitalize = capitalize_tag_kv(service)\n matches = [\n t[f\"{'V' if capitalize else 'v'}alue\"]\n for t in tags\n if t[f\"{'K' if capitalize else 'k'}ey\"] == tag_key\n ]\n if len(matches) != 1:\n log_error(\n f\"Oops it looks like we're unable to find a match for tag {tag_key}.\"\n \"Please open an issue to help us get this fixed!\",\n )\n raise Abort()\n\n return matches[0]",
"def tag(self,name):\n return self._tags.get(name,None)"
] | [
"0.80788904",
"0.5658676",
"0.550251",
"0.54886025",
"0.53662133",
"0.53030056",
"0.52832705",
"0.5201634",
"0.51524514",
"0.5135101",
"0.51192683",
"0.5106311",
"0.5085387",
"0.50634223",
"0.50538707",
"0.5035368",
"0.49847323",
"0.4977521",
"0.49762967",
"0.49733838",
"0.49426636",
"0.49417284",
"0.4923227",
"0.49155444",
"0.4910193",
"0.49023953",
"0.48983893",
"0.48889503",
"0.4886947",
"0.48731405"
] | 0.8014554 | 1 |
Returns True if the object (instance, volume, snapshot, AMI) has a tag with the key 'PROD' | def isProduction(obj):
return 'PROD' in obj.tags  # 'in' replaces the deprecated obj.tags.has_key('PROD')
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_key_exists(self, key):\n return key in self.map",
"def hastag(obj, key):\n key = TAG_PREFIX + key\n if not isinstance(obj, unittest.TestCase):\n return hasattr(obj, key)\n tc_method = getattr(obj, obj._testMethodName)\n return hasattr(tc_method, key) or hasattr(obj, key)",
"def is_attribute(tag, kmip_version=None):\n kmip_1_0_attribute_tags = [\n Tags.UNIQUE_IDENTIFIER,\n Tags.NAME,\n Tags.OBJECT_TYPE,\n Tags.CRYPTOGRAPHIC_ALGORITHM,\n Tags.CRYPTOGRAPHIC_LENGTH,\n Tags.CRYPTOGRAPHIC_PARAMETERS,\n Tags.CRYPTOGRAPHIC_DOMAIN_PARAMETERS,\n Tags.CERTIFICATE_TYPE,\n Tags.CERTIFICATE_IDENTIFIER,\n Tags.CERTIFICATE_SUBJECT,\n Tags.CERTIFICATE_ISSUER,\n Tags.DIGEST,\n Tags.OPERATION_POLICY_NAME,\n Tags.CRYPTOGRAPHIC_USAGE_MASK,\n Tags.LEASE_TIME,\n Tags.USAGE_LIMITS,\n Tags.STATE,\n Tags.INITIAL_DATE,\n Tags.ACTIVATION_DATE,\n Tags.PROCESS_START_DATE,\n Tags.PROTECT_STOP_DATE,\n Tags.DEACTIVATION_DATE,\n Tags.DESTROY_DATE,\n Tags.COMPROMISE_OCCURRENCE_DATE,\n Tags.COMPROMISE_DATE,\n Tags.REVOCATION_REASON,\n Tags.ARCHIVE_DATE,\n Tags.OBJECT_GROUP,\n Tags.LINK,\n Tags.APPLICATION_SPECIFIC_INFORMATION,\n Tags.CONTACT_INFORMATION,\n Tags.LAST_CHANGE_DATE,\n Tags.CUSTOM_ATTRIBUTE\n ]\n kmip_1_1_attribute_tags = copy.deepcopy(kmip_1_0_attribute_tags) + [\n Tags.CERTIFICATE_LENGTH,\n Tags.X_509_CERTIFICATE_IDENTIFIER,\n Tags.X_509_CERTIFICATE_SUBJECT,\n Tags.X_509_CERTIFICATE_ISSUER,\n Tags.DIGITAL_SIGNATURE_ALGORITHM,\n Tags.FRESH\n ]\n kmip_1_2_attribute_tags = copy.deepcopy(kmip_1_1_attribute_tags) + [\n Tags.ALTERNATIVE_NAME,\n Tags.KEY_VALUE_PRESENT,\n Tags.KEY_VALUE_LOCATION,\n Tags.ORIGINAL_CREATION_DATE\n ]\n kmip_1_3_attribute_tags = copy.deepcopy(kmip_1_2_attribute_tags) + [\n Tags.RANDOM_NUMBER_GENERATOR\n ]\n kmip_1_4_attribute_tags = copy.deepcopy(kmip_1_3_attribute_tags) + [\n Tags.PKCS12_FRIENDLY_NAME,\n Tags.DESCRIPTION,\n Tags.COMMENT,\n Tags.SENSITIVE,\n Tags.ALWAYS_SENSITIVE,\n Tags.EXTRACTABLE,\n Tags.NEVER_EXTRACTABLE\n ]\n kmip_2_0_attribute_tags = copy.deepcopy(kmip_1_4_attribute_tags) + [\n Tags.CERTIFICATE_SUBJECT_CN,\n Tags.CERTIFICATE_SUBJECT_O,\n Tags.CERTIFICATE_SUBJECT_OU,\n Tags.CERTIFICATE_SUBJECT_EMAIL,\n Tags.CERTIFICATE_SUBJECT_C,\n Tags.CERTIFICATE_SUBJECT_ST,\n Tags.CERTIFICATE_SUBJECT_L,\n Tags.CERTIFICATE_SUBJECT_UID,\n Tags.CERTIFICATE_SUBJECT_SERIAL_NUMBER,\n Tags.CERTIFICATE_SUBJECT_TITLE,\n Tags.CERTIFICATE_SUBJECT_DC,\n Tags.CERTIFICATE_SUBJECT_DN_QUALIFIER,\n Tags.CERTIFICATE_ISSUER_CN,\n Tags.CERTIFICATE_ISSUER_O,\n Tags.CERTIFICATE_ISSUER_OU,\n Tags.CERTIFICATE_ISSUER_EMAIL,\n Tags.CERTIFICATE_ISSUER_C,\n Tags.CERTIFICATE_ISSUER_ST,\n Tags.CERTIFICATE_ISSUER_L,\n Tags.CERTIFICATE_ISSUER_UID,\n Tags.CERTIFICATE_ISSUER_SERIAL_NUMBER,\n Tags.CERTIFICATE_ISSUER_TITLE,\n Tags.CERTIFICATE_ISSUER_DC,\n Tags.CERTIFICATE_ISSUER_DN_QUALIFIER,\n Tags.KEY_FORMAT_TYPE,\n Tags.NIST_KEY_TYPE,\n Tags.OPAQUE_DATA_TYPE,\n Tags.PROTECTION_LEVEL,\n Tags.PROTECTION_PERIOD,\n Tags.PROTECTION_STORAGE_MASK,\n Tags.QUANTUM_SAFE,\n Tags.SHORT_UNIQUE_IDENTIFIER,\n Tags.ATTRIBUTE\n ]\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_IDENTIFIER)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_SUBJECT)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_ISSUER)\n kmip_2_0_attribute_tags.remove(Tags.OPERATION_POLICY_NAME)\n kmip_2_0_attribute_tags.remove(Tags.CUSTOM_ATTRIBUTE)\n\n if kmip_version == KMIPVersion.KMIP_1_0:\n return tag in kmip_1_0_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_1:\n return tag in kmip_1_1_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_2:\n return tag in kmip_1_2_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_3:\n return tag in kmip_1_3_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_4:\n return tag in kmip_1_4_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_2_0:\n return tag in 
kmip_2_0_attribute_tags\n else:\n all_attribute_tags = set(\n kmip_1_0_attribute_tags +\n kmip_1_1_attribute_tags +\n kmip_1_2_attribute_tags +\n kmip_1_3_attribute_tags +\n kmip_1_4_attribute_tags +\n kmip_2_0_attribute_tags\n )\n return tag in all_attribute_tags",
"def tag_dict_contains (self,\r\n tag):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('TAGDICT CONTAINS')\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT rowid \"\r\n +\"FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(tag) in self.tag_dict",
"def _is_incex_key(self, key, value):\n key_out = ((self.included_attributes and\n (key not in self.included_attributes)) or\n (key in self.excluded_attributes))\n value_out = True\n if isinstance(value, dict):\n for change_key in value:\n if isinstance(value[change_key], dict):\n for key in value[change_key]:\n if ((self.included_attributes and\n (key in self.included_attributes)) or\n (key not in self.excluded_attributes)):\n value_out = False\n return key_out and value_out",
"def prod(environment):\n return environment == 'live' or environment == 'debug' or environment == 'prod'",
"def can_tag(self):\n try:\n self.cork.require(role='beta-archivist')\n return True\n except Exception:\n return False",
"def is_tagged(self, instance_id, tag_name):\n tag_value = self.get_tag_for_instance(instance_id, tag_name)\n if tag_value is not None and tag_value == 'true':\n return True\n else:\n return False",
"def has_attribute(self, key):\n return key in self.__dict",
"def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True",
"def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False",
"def __contains__(self, key):\n return key in self._tagged_values_dict and self._is_visible(key)",
"def __contains__(self, key):\n return key in self._group._opts",
"def has_tag(self, tag):\n return tag in self.tags",
"def has_tag(self, tag):\n return tag in self.tags",
"def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])",
"def has_attr(self, key):\n return key in self.attrs",
"def has(self, key):",
"def __contains__(self, key):\n return key in self._get_storage()",
"def is_tag_available(self, tag):\n return tag in self.available_tags",
"def check_if_app_engine_job(tagkey, tagvalue):\n\n if (tagkey == '@app_engine_flex') and (':' in tagvalue):\n return True\n else:\n return False",
"def IsTagExists(self, ResourceId, TagName):\n\n try:\n if self.Service == 'ec2':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 's3':\n response = self.GetBucketTagging(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagSet']])):\n return True\n elif self.Service == 'lambda':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'logs':\n response = self.ListTagsLogGroup(ResourceId)\n if TagName in [x for x in response['tags']]:\n return True\n elif self.Service == 'rds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'es':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'emr':\n response = self.DescribeCluster(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [Tag for Tag in response['Cluster']['Tags']])):\n return True\n elif self.Service == 'dynamodb':\n response = self.ListTagsOfResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'firehose':\n response = self.ListTagsForDeliveryStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'glacier':\n response = self.ListTagsForVault(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'kms':\n response = self.ListResourceTags(ResourceId)\n if TagName in list(map(lambda x: x['TagKey'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'apigateway':\n print('No api to list tags')\n return False\n elif self.Service == 'kinesis':\n response = self.ListTagsForStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudtrail':\n response = self.ListTags(ResourceId)\n TagsList = map(lambda RTL: RTL['TagsList'], [RTL for RTL in response['ResourceTagList']])\n for Tags in TagsList:\n for Tag in Tags:\n if Tag['Key'] == 'Channel':\n return True\n elif self.Service == 'sqs':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'secretsmanager':\n response = self.DescribeSecret(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudfront':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'efs':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'sagemaker':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'redshift':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'elasticache':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif 
self.Service == 'workspaces':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'ds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'dax':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'route53':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'directconnect':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'datapipeline':\n response = self.DescribePipelines(ResourceId)\n Tags = list(map(lambda x: x['tags'], [tags for tags in response['pipelineDescriptionList']]))\n for i in Tags:\n for j in i:\n if j['key'] == 'Channel':\n return True\n else:\n raise TagNotSupportedError(self.Service)\n except Exception as e:\n raise e\n\n return False",
"def is_filter_at_key(self, key):\n\n if self.has_key(key):\n attribute_status = getattr(self, key)\n if isinstance(attribute_status, self.__class__):\n return True\n\n return False",
"def has_key(self, key):\n return key in self",
"def _is_env_per_bucket():\n\n buckets = _get_buckets()\n if isinstance(buckets, dict):\n return True\n elif isinstance(buckets, list):\n return False\n else:\n raise ValueError(\"Incorrect s3.buckets type given in config\")",
"def __contains__(self, key):\n if not isinstance(key, pm.general.Attribute):\n return super(ChannelBox, self).__contains__(key)\n else:\n for attr in self:\n if key.longName() == attr.longName():\n return True\n return False",
"def dexists(self, name, key):\n return key in self.db[name]",
"def has_key(self, key):\n return self.__dict__.has_key(key)",
"def has(self, key: str) -> Any:\n return key in self.variables",
"def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in self._tags:\n if tag == desired_tag:\n return True\n\n return False"
] | [
"0.63550603",
"0.60692143",
"0.59001714",
"0.586084",
"0.5793219",
"0.57157636",
"0.56831205",
"0.56796306",
"0.566162",
"0.5619104",
"0.5589269",
"0.5552662",
"0.55072737",
"0.5455847",
"0.5455847",
"0.544153",
"0.5431722",
"0.54038215",
"0.5395981",
"0.5394134",
"0.5378807",
"0.53780603",
"0.5374979",
"0.5373266",
"0.53722644",
"0.53570455",
"0.5352436",
"0.53460425",
"0.5342837",
"0.5340622"
] | 0.7877264 | 0 |
Write volumes to file | def generateInfoVolumes(regions):
print "\nWriting volumes info to output file %s" % volumes_data_output_file
with open(volumes_data_output_file, 'w') as f1:
f1.write("VOLUMES\n")
f1.write(
"Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
for r in regions:
volumes = getVolumes(r)
print "." # give some feedback to the user
for v in volumes:
f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,
v.create_time, v.region.name, v.zone, v.snapshot_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_inventory_file(inventory_item):\n try:\n with open('inventory', 'w') as file:\n file.write(inventory_item)\n except OSError:\n pass",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def add_writable_file_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if host_outdir_tgt:\n # shortcut, just copy to the output directory\n # which is already going to be mounted\n log.debug('shutil.copy({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copy(volume.resolved, host_outdir_tgt)\n else:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n file_copy = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copy({}, {})'.format(volume.resolved, file_copy))\n shutil.copy(volume.resolved, file_copy)\n self._add_volume_binding(file_copy, volume.target, writable=True)\n ensure_writable(host_outdir_tgt or file_copy)",
"def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. Check out \\\"output.txt\\\" file\")\n f_i.close()",
"def write_to_file(self, filename: str) -> None:",
"def setupVolumes(volumes: Volumes) -> None:\n volumesList = readProcessJson(\n [\"podman\", \"volume\", \"ls\", \"--format\", \"json\"])\n existingVolumes: Set[str] = set()\n if volumesList:\n for volume in volumesList:\n existingVolumes.add(volume['name'])\n for volume in volumes.values():\n if volume.name not in existingVolumes:\n log.info(f\"Creating volume {volume.name}\")\n execute([\"podman\", \"volume\", \"create\", volume.name])\n if volume.files:\n for file in volume.files:\n path = Path(\"~/.local/share/containers/storage/volumes/\"\n f\"{volume.name}/_data/{file.name}\").expanduser()\n if not path.exists():\n log.info(f\"Writting {path}\")\n path.write_text(file.content)",
"def write_to_file(inventory):\n env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\n output_template = env.get_template('output.j2')\n # create and clean an 'outputs' folder\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)\n for node, node_data in inventory.items():\n if 'outputs' in node_data:\n os.mkdir(node)\n for command, output in node_data['outputs'].items():\n # when creating filenames based on command, swap 'spaces' with 'underscores':\n command = re.sub(r\"\\s\", r\"_\", command)\n open(f\"{node}/{command}.txt\", 'a').write(\n output_template.render(node=node, data=output))\n print(\"\\n\" + f\"Job complete. If data gathering was successful, see 'outputs' directory.\")\n return inventory",
"def volumes(self):",
"def add_volume_info(self, vi):\n vol_num = vi.volume_number\n self.volume_info_dict[vol_num] = vi\n if self.fh:\n self.fh.write(vi.to_string() + \"\\n\")",
"def write_to_disk(self):\n\n\t\t# print \"--------------------------------------------------------WRITING PIECE %r TO DISK\" %self.index\n\t\ttry:\n\t\t\tos.makedirs(PATH)\n\t\texcept:\n\t\t\tpass\n\t\tself.piece_file_name = os.path.join(PATH, self.torrent.name+'.'+'00'+str(self.index))\n\t\t# print \"Saving piece to file name: \", self.piece_file_name\n\t\tpiece_file = open(self.piece_file_name, 'w')\n\t\tpiece_file.write(self.data)\n\t\tpiece_file.close()",
"def write (self, file):\n\t\tfile.write (self.pack ())",
"def write(self, filename, data):\n owner_rw = 0600\n fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)\n # In case file existed already with wrong permissions, fix them.\n os.chmod(filename, owner_rw)\n os.write(fd, data)\n os.close(fd)",
"def save(self, filepath: str | Path) -> None:\n extension = Path(filepath).suffix\n if extension.lower() in VIDEO_TYPES:\n video_writer = imageio.get_writer(filepath, macro_block_size=None)\n for slice in self.volume:\n slice = slice.astype(\"uint8\")\n video_writer.append_data(slice)\n video_writer.close()\n elif extension.lower() in IMAGE_TYPES:\n base = Path(filepath).stem\n print(\n \"Saving OCT as sequential slices {}_[1..{}]{}\".format(\n base, len(self.volume), extension\n )\n )\n full_base = Path(filepath).with_suffix(\"\")\n self.volume = np.array(self.volume).astype(\"float64\")\n self.volume *= 255.0 / self.volume.max()\n for index, slice in enumerate(self.volume):\n filename = \"{}_{}{}\".format(full_base, index, extension)\n cv2.imwrite(filename, slice)\n elif extension.lower() == \".npy\":\n np.save(filepath, self.volume)\n else:\n raise NotImplementedError(\n \"Saving with file extension {} not supported\".format(extension)\n )",
"def _write(fdesc, data):\n while data:\n count = os.write(fdesc, data)\n data = data[count:]",
"def write(self, fname):\n pass",
"def add_file_or_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt # type: Optional[Text]\n ):\n if not volume.resolved.startswith(\"_:\"):\n self._add_volume_binding(volume.resolved, volume.target) # this one defaults to read_only",
"def update_volumes():\n print 'do something useful here'",
"def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)",
"def write_vectors(self, filename):\n svu.write_realvectors(self,filename)",
"def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)",
"def _save(self, name, content):\n full_path = self.path(name)\n with caches['default'].lock('{}_{}'.format(full_path, 'reader')):\n with caches['default'].lock('{}_{}'.format(full_path, 'writer')):\n if cache.islocked(full_path) is False:\n with cache.lock(full_path):\n cache.set(full_path, 'storage')\n try:\n directory = os.path.dirname(full_path)\n\n # Create any intermediate directories that do not exist.\n if self.__volume.exists(directory) is False:\n try:\n if self.directory_permissions_mode is not None:\n # os.makedirs applies the global umask, so we reset it,\n # for consistency with file_permissions_mode behavior.\n self.volume.makedirs(directory, self.directory_permissions_mode)\n else:\n self.volume.makedirs(directory)\n except FileNotFoundError:\n # There's a race between os.path.exists() and os.makedirs().\n # If os.makedirs() fails with FileNotFoundError, the directory\n # was created concurrently.\n pass\n if not os.path.isdir(directory):\n raise IOError(\"%s exists and is not a directory.\" % directory)\n\n # There's a potential race condition between get_available_name and\n # saving the file; it's possible that two threads might return the\n # same name, at which point all sorts of fun happens. So we need to\n # try to create the file, but if it already exists we have to go back\n # to get_available_name() and try again.\n\n while True:\n try:\n # This file has a file path that we can move.\n if hasattr(content, 'temporary_file_path'):\n file_move_safe(content.temporary_file_path(), full_path)\n\n # This is a normal uploadedfile that we can stream.\n else:\n # The current umask value is masked out by os.open!\n fd = self.__volume.open(full_path, self.OS_OPEN_FLAGS, 0o666)\n _file = None\n try:\n for chunk in content.chunks():\n if _file is None:\n _file = fd.dup()\n _file.write(chunk)\n finally:\n if _file is not None:\n _file.close()\n fd.close()\n except FileExistsError:\n # A new name is needed if the file exists.\n name = self.get_available_name(name)\n full_path = self.path(name)\n else:\n # OK, the file save worked. Break out of the loop.\n break\n\n if self.file_permissions_mode is not None:\n self.__volume.chmod(full_path, self.file_permissions_mode)\n finally:\n cache.delete(full_path)\n # Store filenames with forward slashes, even on Windows.\n return (True, name.replace('\\\\', '/'))\n return (False, cache.get(full_path))",
"def write_data(filepath, container, ind=\"\\t\", is_sui=False, create_dirs=False, print_on_success=True):\n\n if create_dirs:\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n with open(filepath, mode=\"w\", encoding=\"utf8\", newline=\"\\r\\n\") as f:\n\n if not is_sui:\n f.write(\"SiiNunit\\n\")\n f.write(\"{\\n\")\n\n count = len(container)\n for i, unit in enumerate(container):\n _write_unit(f, unit, ind)\n\n if i < count - 1:\n f.write(\"\\n\")\n\n if not is_sui:\n f.write(\"}\\n\")\n\n if print_on_success:\n file_type = \"SUI\" if is_sui else \"SII\"\n lprint(\"I WRITTING %s FILE to: %r\", (file_type, filepath))\n\n return True",
"def flow_write(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n TAG_CHAR = b'PIEH'\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()",
"def write(self, filename):\n bvh_string = self.generate_bvh_string()\n if filename[-4:] == '.bvh':\n filename = filename\n else:\n filename = filename + '.bvh'\n with open(filename, 'w') as outfile:\n outfile.write(bvh_string)",
"def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)",
"def write(self, filename, data):\n raise NotImplementedError",
"def flow_write(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()",
"def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)",
"def writable(path):"
] | [
"0.62202305",
"0.59282184",
"0.59282184",
"0.5797909",
"0.57411486",
"0.570655",
"0.5696261",
"0.5676645",
"0.5656309",
"0.56229556",
"0.5577266",
"0.5544393",
"0.55409503",
"0.5499961",
"0.5476858",
"0.5472212",
"0.54446274",
"0.5439978",
"0.54305",
"0.54144007",
"0.53972524",
"0.53766423",
"0.53749436",
"0.536948",
"0.53655744",
"0.53638554",
"0.53552955",
"0.5350401",
"0.5343024",
"0.5334971"
] | 0.59575117 | 1 |
Returns the application directory. | def get_appdir():
return APP_PATH | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_dir(self):\n return self._app_dir",
"def appdata_dir(self) -> str:\n return os.path.join(self._project_dir, 'appdata')",
"def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)",
"def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)",
"def get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))",
"def root_dir():\r\n return Path(__file__).parent.parent",
"def GetPackageDirectory():\n return os.path.dirname(__file__)",
"def get_app_root():\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n return sys._MEIPASS\n except AttributeError:\n if 'pytest' in sys.modules:\n for arg in reversed(sys.argv):\n path = os.path.realpath(arg.split('::')[0])\n if os.path.exists(path):\n return path if os.path.isdir(path) else os.path.dirname(path)\n else:\n return os.path.dirname(os.path.realpath(sys.argv[0]))",
"def config_directory(self):\n\n return self.get_raw(\"config_directory\")",
"def _get_vispy_app_dir():\n # Define default user directory\n user_dir = os.path.expanduser('~')\n\n # Get system app data dir\n path = None\n if sys.platform.startswith('win'):\n path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')\n path = path1 or path2\n elif sys.platform.startswith('darwin'):\n path = os.path.join(user_dir, 'Library', 'Application Support')\n # On Linux and as fallback\n if not (path and os.path.isdir(path)):\n path = user_dir\n\n # Maybe we should store things local to the executable (in case of a\n # portable distro or a frozen application that wants to be portable)\n prefix = sys.prefix\n if getattr(sys, 'frozen', None): # See application_dir() function\n prefix = os.path.abspath(os.path.dirname(sys.path[0]))\n for reldir in ('settings', '../settings'):\n localpath = os.path.abspath(os.path.join(prefix, reldir))\n if os.path.isdir(localpath):\n try:\n open(os.path.join(localpath, 'test.write'), 'wb').close()\n os.remove(os.path.join(localpath, 'test.write'))\n except IOError:\n pass # We cannot write in this directory\n else:\n path = localpath\n break\n\n # Get path specific for this app\n appname = '.vispy' if path == user_dir else 'vispy'\n path = os.path.join(path, appname)\n return path",
"def get_directory() -> str:\n return directory",
"def root_dir():\n return dirname(dirname(__file__))",
"def get_root_dir():\n return os.path.dirname(os.path.dirname(__file__))",
"def AppPath(self):\n\t\treturn self.acad.Path",
"def rootdir():\n return util.path(__file__).parent.parent.abspath()",
"def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path",
"def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))",
"def acquire_package_directory():\n top_plugin_dir = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n expected_package_dir = '/extras/MockApp'\n app_dir = top_plugin_dir + expected_package_dir\n return app_dir",
"def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))",
"def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))",
"def get_config_dir() -> str:\n # Get the system app configuration standard location\n if 'APPDATA' in os.environ:\n return os.environ['APPDATA']\n elif 'XDG_CONFIG_HOME' in os.environ:\n return os.environ['XDG_CONFIG_HOME']\n else:\n return os.path.join(os.environ['HOME'], '.config')",
"def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')",
"def dirname(self):\n module_system = str(self.conf.module.__name__).split(\".\")[-1]\n return root_path(module_system, self.conf.name)",
"def program_dir():\n if (Win32() and (hasattr(sys, 'frozen') or imp.is_frozen('__main__'))):\n # running from exe generated by py2exe\n return os.path.dirname(sys.executable)\n else:\n return sys.path[0]\n # return os.path.dirname(os.path.abspath(sys.argv[0]))",
"def configPath(self):\n return os.path.dirname(__file__)",
"def app_package_path(self) -> str:\n return self._app_package_path",
"def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory",
"def app_config_home(self) -> str:\n if self.app_config_has(\"app_config_home_directory\"):\n return self.app_config()[\"app_config_home_directory\"]\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator')",
"def get_main_dir():\n return os.path.dirname(os.getcwd())",
"def config_dir(self) -> str:\n if not self._config_dir:\n self._config_dir = self._detect_config_dir()\n return self._config_dir"
] | [
"0.8366667",
"0.7430168",
"0.73666745",
"0.7327246",
"0.731593",
"0.73079205",
"0.7295801",
"0.7282597",
"0.7222026",
"0.7218878",
"0.72007245",
"0.7196958",
"0.71940666",
"0.7193182",
"0.71729344",
"0.7168441",
"0.7165291",
"0.7127867",
"0.70903516",
"0.70903516",
"0.708375",
"0.7078099",
"0.7051507",
"0.7051216",
"0.7017435",
"0.7017429",
"0.7015702",
"0.6992589",
"0.69919926",
"0.6990434"
] | 0.8947229 | 0 |
Return the TSV file corresponding to the current annotation level. | def tsv_name():
if PAR['level'] == 1:
return 'col.tsv'
else:
return 'myc.tsv' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tsv_value(self):\n return self.tsv_file.getvalue()",
"def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile, dialect='excel-tab')\n else:\n raise ExportAborted('Export cancelled by user.')",
"def get_ap_file(self):\n with open(self.trendfile, 'r') as readfile:\n data = json.load(readfile)\n return data['trendtable']",
"def _current_vlog_fn(level):\n return getattr(vlog.Vlog, _LOG_MAPPING[level].__name__)",
"def create_tsv(df, filename=None):\n table = df.to_string()\n lines = table.splitlines()\n index_name = lines.pop(1).strip()\n lines[0] = index_name + lines[0][len(index_name):]\n table = '\\n'.join(lines)\n if filename is not None:\n with open(filename, 'w') as f:\n f.write(table)\n else:\n return table",
"def get_time_trace_file(root_dir, exp_name, plane_num):\n exp_dir = os.path.join(root_dir, exp_name)\n time_trace_dir = os.path.join(exp_dir, 'time_trace')\n plane_string = 'plane{0:02d}'.format(plane_num+1)\n plane_dir = os.path.join(time_trace_dir, plane_string)\n trace_file = os.path.join(plane_dir, 'timetrace_roi.mat')\n return trace_file",
"def gencode_gtf(self):\n return op.join(self.root_dir, \"gencode.annotation.gtf\")",
"def tsv_header(self):\n return self.tsv_lines[0]",
"def _get_ann_file(self):\n prefix = 'instances' if 'test' not in self.image_set else 'image_info'\n return os.path.join(self.data_path, 'annotations',\n prefix + '_' + self.image_set + '.json')",
"def write_files_tsv(md, import_path, target_path, import_type='flat'):\n\n target_path = pathlib.Path(target_path)\n import_path = pathlib.Path(import_path)\n\n if import_type == 'flat':\n md = md # If we implement other import types (e.g., plates), filter md\n else:\n raise ValueError('Currently only \\'flat\\' import types implemented')\n\n target_project = \"Project:name:\" + md.project.astype(str) + \"/\"\n target_dataset = \"Dataset:name:\" + md.dataset.astype(str)\n tsv_target = target_project + target_dataset\n filepath = str(target_path) + \"/\" + md.filename\n df = pd.DataFrame({\"target\": tsv_target, \"path\": filepath})\n files_tsv = df.to_csv(sep='\\t', header=False,\n index=False, quoting=csv.QUOTE_NONE)\n files_tsv_path = import_path / 'files.tsv'\n with open(files_tsv_path, 'w') as f:\n f.write(files_tsv)\n return files_tsv_path",
"def _cmd_export_jtv(args):\n sample_ids = list(map(core.fbase, args.filenames))\n table = export.merge_samples(args.filenames)\n formatter = export.EXPORT_FORMATS[\"jtv\"]\n outheader, outrows = formatter(sample_ids, table)\n write_tsv(args.output, outrows, colnames=outheader)",
"def outputLevelCsv(self):\n # extract level information from result info\n extract_level = []\n extract_level = [item for item in self._result_info if self._result_info[2][0:5]=='LEVEL']\n if extract_level == []:\n print('No Result of LEVEL')\n return None\n # copy need information\n for i, item in enumerate(extract_level):\n self._level_csv_list[i][0] = item[1]\n self._level_csv_list[i][1] = item[2].split('-')[1]\n self._level_csv_list[i][2] = item[2].split('-')[2]\n self._level_csv_list[i][3] = item[4]\n # set csv file name\n csv_file_name = self._filename.rsplit('.', 1)[1] + '.csv'\n # write csv\n with open(csv_file_name, 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(self._level_csv_list)",
"def write_tsv(self, filename):\n f = open(filename,'wb')\n wr = csv.writer(f,delimiter='\\t',quoting=csv.QUOTE_ALL)\n colrow = []\n for col in self.cols:\n colrow.append('<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape'))\n wr.writerow(colrow)\n for row in self.data:\n strrow = []\n for cell in row:\n strrow.append('' if cell is None else unicode(cell).encode('unicode-escape'))\n wr.writerow(strrow)\n f.close()",
"def to_tsv(obj: ConfiguredBaseModel, file: str) -> str:\n\n # Extract headers and rows from object\n if isinstance(obj, Entity):\n headers = obj.dict().keys()\n rows = [list(obj.dict().values())]\n elif isinstance(obj, (AssociationCountList, HistoPheno, Results)):\n if not obj.items:\n headers = get_headers_from_obj(obj)\n rows = []\n else:\n headers = obj.items[0].dict().keys()\n rows = [list(item.dict().values()) for item in obj.items]\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n fh = open(file, \"w\") if file else sys.stdout\n writer = csv.writer(fh, delimiter=\"\\t\")\n writer.writerow(headers)\n for row in rows:\n writer.writerow(list(row))\n if file:\n fh.close()\n console.print(f\"\\nOutput written to {file}\\n\")\n\n return",
"def annotate_tsv_freq(in_tsv_gz,annotation_tsv):\n sys.stderr.write(\"Reading TSV file ...\\n\")\n nicollo = pd.read_csv(BOLLI, sep=\"\\t\")\n nicollo = nicollo.iloc[:,[1,2,4,5,23]]\n nicollo_counts = nicollo.groupby(['CHR','START'])['MT'].count()\n nol_var = nicollo.drop(['WT','MT'], axis = 1) \n nol_var = nol_var.set_index(['CHR', 'START'])\n\n #nicollo_counts = nicollo.groupby([\"CHR\",\"START\",\"WT\",\"MT\"]).size().reset_index(name=\"count\")\n #nicollo_counts = nicollo_counts[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n\n mmrf = pd.read_csv('/ifs/res/leukgen/home/yellapav/MMRF/MMRF_CoMMpass_IA9_All_Canonical_Variants.txt', sep=\"\\t\")\n mmrf=mmrf.iloc[:,[0,1,2,4,5,19,23]]\n mmrf=mmrf.drop_duplicates()\n\n mmrfM=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].median()\n mmrfC=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].count()\n mmrfQ25=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.25)\n mmrfQ75=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.75)\n \n\n #anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\")\n anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\", low_memory=False)\n #anno_tsv[anno_tsv['FILTER'] == \"PASS\"]\n counts_tsv=anno_tsv.groupby([\"CHR\",\"START\",\"REF\",\"ALT\"]).size().reset_index(name=\"count\")\n counts_tsv=counts_tsv[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n counts_median=anno_tsv.groupby(['CHR','START'])['TARGET_VAF'].median()\n\n\n\n inFile = gzip.open(in_tsv_gz,'r')\n \n sys.stderr.write(\"Annotating ...\\n\")\n for record in inFile:\n record=record.decode(\"utf-8\")\n record=record.rstrip()\n recArr=record.split(\"\\t\")\n \n cl = [] \n freq = [] \n medVAF = [] \n Q25 = [] \n Q75 = [] \n positions = [] \n normal = \"0\" \n normalVAF = \"0\" \n bolli_cl = [] \n bolli_freq = [] \n bolli_positions = [] \n bolli_anno = [] \n flag = 0\n bolli_flag = 0\n if record.startswith(\"#\"):\n continue\n\n if recArr[0] == \"ID_VARIANT\":\n cl = \"MMRF_Class\"\n freq = \"MMRF_Frequency\"\n medVAF = \"MMRF_VAF\"\n Q25 = \"MMRF_Q25\"\n Q75 = \"MMRF_Q75\"\n positions = \"MMRF_Positions\"\n normal = \"Normals_Frequency\"\n normalVAF = \"Normals_median_VAF\"\n bolli_cl = \"Bolli_Class\"\n bolli_freq = \"Bolli_Frequency\"\n bolli_positions = \"Bolli_Positions\"\n bolli_anno = \"Bolli_Annotation\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions, bolli_cl, bolli_freq, bolli_anno, bolli_positions, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n continue\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in mmrfC.index:\n cl = \"genomic_exact\"\n freq = str(mmrfC.loc[(chrom,pos)])\n medVAF = str(mmrfM.loc[(chrom,pos)])\n Q25 = str(mmrfQ25.loc[(chrom,pos)])\n Q75 = str(mmrfQ75.loc[(chrom,pos)])\n positions = str(pos)\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n flag = 1\n if flag == 0:\n mmrfCsub=mmrfC.loc[chrom]\n if not mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].empty:\n for i in mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].index.values:\n cl = \"genomic_close\"\n freq.append(str(mmrfC.loc[(chrom,i)]))\n medVAF.append(str(mmrfM.loc[(chrom,i)]))\n Q25.append(str(mmrfQ25.loc[(chrom,i)]))\n Q75.append(str(mmrfQ75.loc[(chrom,i)]))\n positions.append(str(i))\n freq = (\":\".join(freq))\n medVAF = (\":\".join(medVAF))\n Q25 = (\":\".join(Q25))\n Q75 = (\":\".join(Q75))\n positions = (\":\".join(positions))\n 
record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n else:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in nicollo_counts.index:\n bolli_cl = \"genomic_exact\"\n bolli_freq = str(nicollo_counts.loc[(chrom,pos)]) \n bolli_positions = str(pos)\n bolli_anno = str(nol_var.loc[chrom, pos]['Variant_class'].values[0])\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n bolli_flag = 1\n\n\n if bolli_flag == 0: \n nicollo_counts_sub=nicollo_counts.loc[chrom]\n if not nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].empty:\n for i in nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].index.values:\n #if not nicollo_counts_sub.ix[start:end].empty:\n # for i in nicollo_counts_sub.ix[start:end].index.values:\n #print(\"XXXXXXX\",i, nicollo_counts_sub.loc[(chrom,i)], start, end)\n bolli_cl = \"genomic_close\"\n bolli_freq.append(str(nicollo_counts.loc[(chrom,i)]))\n bolli_anno.append(str(nol_var.loc[(chrom,i)]['Variant_class'].values[0]))\n bolli_positions.append(str(i))\n bolli_freq = (\":\".join(bolli_freq))\n bolli_positions = (\":\".join(bolli_positions))\n bolli_anno = (\":\".join(bolli_anno))\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n else:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_positions = \"NA\"\n bolli_anno = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_anno = \"NA\"\n bolli_positions = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n normal = \"0\"\n normalVAF = \"0\"\n try:\n chrom=str(recArr[3])\n pos=int(recArr[4])\n normal = counts_tsv.loc[(chrom,pos),\"count\"]\n normal = normal.ix[0]\n normal = str(normal)\n\n normalVAF = str(counts_median.loc[(chrom,pos)])\n\n record = [ record, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n\n except:\n normal = \"0\"\n normalVAF = \"0\"\n record = [ record, str(normal), str(normalVAF) ]\n record = (\"\\t\".join(record))\n print(record)",
"def get_filepath(self, **kwargs) -> str:\n return f'dag_{self.dag_id}.py'",
"def exportVTK(self, fname):\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n sWrite = vtk.vtkXMLStructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vts\")\r\n sWrite.Write()\r\n elif self.GridType == \"vtkUnstructuredGrid\":\r\n sWrite = vtk.vtkXMLUnstructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vtu\")\r\n sWrite.Write()\r\n else:\r\n print(\"Grid type is not recognized\")",
"def collapsed_to_sirv_abundance(self):\n return op.join(self.collapse_to_sirv_dir, \"touse.abundance.txt\")",
"def _original_vlog_fn(level):\n return _LOG_MAPPING[level]",
"def read_tsv(path):\n return pd.read_csv(path, sep=\"\\t\", index_col=0)",
"def write_tsv(df, path):\n df.to_csv(path, sep=\"\\t\", compression=\"gzip\")",
"def get_level(self, level):\n return",
"def get_tag(level: int) -> str:\n return LEVEL_TAGS[level]",
"def to_tsv(self, out_dir, sep=\"\\t\", prefix=None, **kwargs):\n os.makedirs(out_dir, exist_ok=True) # create dirs if non-existent\n prefix = f\"{prefix}_\" if prefix else \"\"\n fpaths = [\n os.path.join(out_dir, f\"{prefix}{suf}.tsv\")\n for suf in [\"data\", \"sample_meta\"]\n ]\n self.data.to_csv(fpaths[0], sep=\"\\t\", **kwargs)\n self.sample_meta.to_csv(fpaths[1], sep=\"\\t\", **kwargs)",
"def print_tsv(data, filename):\n with open(filename, 'wt') as fout:\n writefile = partial(print, sep='\\t', file=fout)\n writefile('Sample', *expected_header)\n for sample in data:\n for entry in data[sample]:\n writefile(sample, *(entry[field] for field in expected_header))",
"def convert_tracefilename(self, filepath):\n filename, extension = os.path.splitext(os.path.basename(filepath))\n return filename + '.csv'",
"def kmer_vector2tsv_file(filename, kmer_vector, min_length, max_length,\n enable_gzip=False):\n try:\n fh = gzip.open if enable_gzip else open\n with fh(filename, 'wt') as out:\n for index, count in enumerate(kmer_vector):\n seq = number2multisize_patten(index, min_length, max_length)\n out.write('{seq}\\t{count}\\n'.format(seq=seq,\n count=count))\n return filename\n except Exception:\n print('Not able to create [%s]\\n' % filename)\n raise",
"def _tf(file_path):\n return os.path.join(test_pipeline_dir, file_path)",
"def save_tsv_file(parsed_data):\n result_file.write('\\t'.join(parsed_data) + '\\n')",
"def getSceneFile(self):\n logger.debug(\"Func: getSceneFile\")\n return -1"
] | [
"0.5939053",
"0.5729971",
"0.54536223",
"0.51073116",
"0.5047787",
"0.5043582",
"0.50271034",
"0.5015402",
"0.48891845",
"0.48566693",
"0.48362452",
"0.48115683",
"0.4806834",
"0.4803698",
"0.48016763",
"0.47835353",
"0.47271547",
"0.47254357",
"0.47245374",
"0.47214988",
"0.46874157",
"0.46772584",
"0.46699622",
"0.46611837",
"0.4651956",
"0.46019983",
"0.45954007",
"0.45713425",
"0.45518386",
"0.45490912"
] | 0.6666001 | 0 |
Indicate whether the current level is level 1 (colonization). | def colonization():
return get('level') == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_single_level(self):\n return self.fragments_tree.height <= 2",
"def is_flat(self):\n if self.master:\n return self.master.is_flat\n\n return len(self.levels) == 1",
"def is_top_level(self) -> bool:\n return self._indent == ''",
"def top_left_dot(self) -> bool:\n return bool(self._getindicator(1))",
"def master(self):\n return self.depth == 0",
"def unnecessary_colon(self):\n if self.line.endswith(':'):\n return True",
"def first_level_text_is_displayed(self):\n first_level_text = self.driver.find_element_by_name(self.FIRST_LEVEL_TEXT_NAME)\n return first_level_text.is_displayed()",
"def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)",
"def is_1DNN(self):\n if not self.dims == 1:\n return False\n if not set(self.__m__.keys()) <= set(((0,),(1,),(-1,))):\n return False\n\n return True",
"def node_leaf(self):\r\n return self.zero_son is None and self.one_son is None",
"def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"",
"def isSetInitialLevel(self):\n return _libsbml.QualitativeSpecies_isSetInitialLevel(self)",
"def isAutoLevel(self):\n return self.getAutoLevelFunction() is not None",
"def path_is_base(self, path):\n\n return path is not None and len(path) == len(self.levels)",
"def is_core(self):\n #core_stems = (\n # 'Algebra','Geometry','Precalculus','Calculus',\n # 'Biology','Chemistry','Physics','Living Environment','Global Environment','Scientific Literacy',\n # 'History','Economics',\n # 'Literature','Language','Writing','AP','Sem',\n # 'Korean',\n # )\n #core = False\n #for stem in core_stems:\n # if stem in self.title:\n # core = True\n \n return self.level>0",
"def is_up(self):\n \n return self.is_level('up')",
"def is_leaf(self) -> bool:\n return self.data_bytes[0] == ProofPath._KeyPrefix.LEAF",
"def is_lattice(self):\n return hasattr(self,\"uc\") and len(self.maximals())==1 and type(self.get_meet())!=str and type(self.get_join())!=str",
"def is_diagonal(self):\n return self.is_upper() and self.is_lower()",
"def not_known_depth_header(pair):\n _, parent = pair\n return (not parent.title or\n not title_label_pair(\n parent.title, self.appendix_letter, self.part))",
"def bottom_left_dot(self) -> bool:\n return bool(self._getindicator(2))",
"def is_root(self):\n return self.root in [-1, self]",
"def is_leaf(self):\n return len(self.blocks) == 0",
"def is_leaf(self):\r\n return self.num_children() == 0",
"def tree_has_single_path(self, node):\n num_children = len(node.children)\n if num_children > 1:\n return False\n elif num_children == 0:\n return True\n else:\n return True and self.tree_has_single_path(node.children[0])",
"def is_root(self):\n return self.parent == None",
"def is_root(self):\n return self.parent_id is None",
"def is_leaf(self):\n return len(self.child_list) == 0",
"def is_root(self):\n return \"/\" == self.h5_path",
"def has_parent_key(self):\n if self.is_root():\n return False\n try:\n self.parent_key()\n return True\n except ParseException:\n return False"
] | [
"0.6308522",
"0.60663605",
"0.592682",
"0.57595533",
"0.5757749",
"0.56357706",
"0.56121695",
"0.55404365",
"0.5528773",
"0.55099",
"0.55035543",
"0.54934734",
"0.5432446",
"0.5344056",
"0.53366804",
"0.53177035",
"0.5317035",
"0.5309982",
"0.52847475",
"0.52829605",
"0.52761316",
"0.5269463",
"0.5268106",
"0.5261028",
"0.5255746",
"0.5245973",
"0.52141136",
"0.5199503",
"0.51957625",
"0.5194776"
] | 0.7897029 | 0 |
Indicate whether the current level is level 2 (AM fungal structures). | def intra_struct():
return get('level') == 2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_subgroup(self, right):\n if right.level() == 1:\n return True\n if is_Gamma0(right):\n return self.level() % right.level() == 0\n if is_Gamma1(right):\n if right.level() >= 3:\n return False\n elif right.level() == 2:\n return self.level() == 2\n # case level 1 dealt with above\n else:\n return GammaH_class.is_subgroup(self, right)",
"def is_single_level(self):\n return self.fragments_tree.height <= 2",
"def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)",
"def is_2d(self) -> bool:\n return self.layers == 1 and self.times == 1",
"def _contains_sl2(self, a,b,c,d):\n return (c % self.level() == 0)",
"def even(self):\n return self._ % 2 == 0",
"def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False",
"def cat_l2_supported():\n return common.CAT_L2_CAP in SYSTEM_CAPS",
"def colonization():\n\n return get('level') == 1",
"def has_action2(self, feature):\n return feature in self._action2",
"def check_lighting_state_room2():\n if timer_lights_on_off_room2() == room2_lux():\n pass\n else:\n light_room2(timer_lights_on_off_room1())",
"def level(self):\n return self.init_v[2]",
"def is_depth(self):\n return self._is_depth",
"def isSecond(self):\n return _libsbml.Unit_isSecond(self)",
"def test_build19_level2_additions():\n f = Level2File(get_test_data('Level2_KDDC_20200823_204121.ar2v'))\n assert f.vcp_info.vcp_version == 1\n assert f.sweeps[0][0].header.az_spacing == 0.5",
"def hasTwoSons(self):\n \n return self._leftSon is not None and self._rightSon is not None",
"def isSetBindingSite2(self):\n return _libsbml.InSpeciesTypeBond_isSetBindingSite2(self)",
"def checkL3v2Compatibility(self):\n return _libsbml.SBMLDocument_checkL3v2Compatibility(self)",
"def level_unlocked(self) -> bool:\r\n return self.player_profile.is_level_unlocked(self.level_num)",
"def requires_2sa(self):\n return (\n self.data.get(\"hsaChallengeRequired\", False)\n and self.data[\"dsInfo\"].get(\"hsaVersion\", 0) >= 1\n )\n # FIXME: Implement 2FA for hsaVersion == 2 # pylint: disable=fixme",
"def isAutoLevel(self):\n return self.getAutoLevelFunction() is not None",
"def is_even(self):\n return True",
"def is_crossing_len2(self, gp: GriddedPerm) -> bool:\n return (\n len(gp) == 2\n and gp.occupies(self.first_cell)\n and gp.occupies(self.second_cell)\n )",
"def is_level(self, state):\n \n logging.info('checking state '+state+' against self '+str(self.state))\n result = False\n if('up' == state):\n result = (self.state == 255)\n elif('down' == state):\n result = (self.state == 0)\n elif(state.isdigit()):\n state = int(state)\n result = (abs(self.state - int(255*state/100)) < 2)\n return result",
"def has_other_half(self) -> bool:\n return self.layout in (\n \"flip\",\n \"split\",\n \"transform\",\n \"meld\",\n \"aftermath\",\n \"adventure\",\n \"modal_dfc\",\n )",
"def has_stereo(gra):\n return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))",
"def check_version_2(dataset):\n\n if float(dataset.get('version')) >= 2.0 \\\n if dataset.get('version') else False:\n return True\n else:\n return False",
"def is_i2s_enabled(self):\n return ((self.get_control() & CONTROL_ENABLE) > 0)",
"def isPower2(num):\n\treturn ((num & (num - 1)) == 0) and num > 0",
"def isPower2(num):\n\treturn ((num & (num - 1)) == 0) and num > 0"
] | [
"0.6321547",
"0.62854135",
"0.62632716",
"0.6087753",
"0.59138495",
"0.5752362",
"0.5700526",
"0.56212634",
"0.55640566",
"0.55273724",
"0.5408885",
"0.5408016",
"0.53746355",
"0.53725296",
"0.5317222",
"0.5312037",
"0.5294672",
"0.52933645",
"0.52883124",
"0.5273141",
"0.5252779",
"0.5240626",
"0.5239407",
"0.5237722",
"0.5227749",
"0.52274907",
"0.5226634",
"0.52169967",
"0.5210894",
"0.5210894"
] | 0.66111106 | 0 |
Builds the AMFinder command-line parser. | def build_arg_parser():
main = ArgumentParser(description='AMFinder command-line arguments.',
allow_abbrev=False,
formatter_class=RawTextHelpFormatter)
subparsers = main.add_subparsers(dest='run_mode', required=True,
help='action to be performed.')
_ = training_subparser(subparsers)
_ = prediction_subparser(subparsers)
_ = diagnostic_subparser(subparsers)
return main | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_parser(self, parser: ArgumentParser) -> None:",
"def create_parser() -> configargparse.ArgParser:\n parser = configargparse.ArgParser(default_config_files=[\n \"/etc/lookout/analyzer.conf\", \"~/.config/lookout/analyzer.conf\"],\n formatter_class=ArgumentDefaultsHelpFormatterNoNone,\n auto_env_var_prefix=\"lookout_\")\n slogging.add_logging_args(parser)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n\n def add_parser(name, help):\n return subparsers.add_parser(\n name, help=help, formatter_class=ArgumentDefaultsHelpFormatterNoNone)\n\n list_parser = add_parser(\"list\", \"Print globally available analyzers.\")\n list_parser.set_defaults(handler=list_analyzers)\n\n run_parser = add_parser(\n \"run\", \"Launch a new service with the specified (one or more) analyzers.\")\n run_parser.set_defaults(handler=run_analyzers)\n add_analyzer_arg(run_parser)\n run_parser.add(\"-c\", \"--config\", is_config_file=True,\n help=\"Path to the configuration file with option defaults.\")\n run_parser.add(\"-s\", \"--server\", required=True,\n help=\"Lookout server address, e.g. localhost:1234.\")\n run_parser.add(\"-w\", \"--workers\", type=int, default=1,\n help=\"Number of threads which process Lookout events.\")\n add_model_repository_args(run_parser)\n run_parser.add_argument(\"--request-server\", default=\"auto\",\n help=\"Address of the data retrieval service. \\\"same\\\" means --server.\")\n\n init_parser = add_parser(\"init\", \"Initialize the model repository.\")\n init_parser.set_defaults(handler=init_repo)\n add_model_repository_args(init_parser)\n\n tool_parser = add_parser(\"tool\", \"Invoke the tooling of a given analyzer.\")\n tool_parser.set_defaults(handler=run_analyzer_tool)\n tool_parser.add(\"analyzer\", help=\"Fully qualified package name with an analyzer.\")\n tool_parser.add(\"args\", nargs=argparse.REMAINDER)\n\n package_parser = add_parser(\n \"package\",\n \"Package several analyzers to a Docker container and write a sample Docker Compose config \"\n \"for Lookout.\")\n package_parser.set_defaults(handler=package_cmdline_entry)\n add_analyzer_arg(package_parser)\n package_parser.add(\"-w\", \"--workdir\", help=\"Generate files in this directory.\",\n default=tempfile.mkdtemp(prefix=\"lookout_package_\"))\n package_parser.add(\"--requirements\", help=\"Path to a custom requirements.txt\")\n package_parser.add(\"-r\", \"--repo\", help=\"GitHub repository name to watch. \"\n \"Example: \\\"src-d/lookout\\\".\",\n required=True)\n package_parser.add(\"-u\", \"--user\", help=\"GitHub user name which will send review comments.\",\n required=True)\n paturl = \"https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/\" # noqa\n package_parser.add(\"-t\", \"--token\", help=\"GitHub token for -u/--user. See \" + paturl,\n required=True)\n package_parser.add(\"-y\", \"--yes\", help=\"Run the commands in the end.\",\n action=\"store_true\")\n package_parser.add(\"-n\", \"--no\", help=\"Do not run the commands in the end.\",\n action=\"store_true\")\n return parser",
"def build_parser(self):\n parser = argparse.ArgumentParser(\n description=\"Run Crystal Matching algorithm attempting to translate co-ordinates \"\n \"on an input image to the coordinate-space of an output image while \"\n \"accounting for possible movement of crystals in the sample.\")\n\n if sys.version_info[0] < 3:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=file,\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n else:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=argparse.FileType('r'),\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n parser.add_argument('beamline_stack_path',\n metavar=\"beamline_stack_path\",\n help=\"A path pointing at a directory which stores images to be stacked or a path to a stacked image.\")\n parser.add_argument('selected_points',\n metavar=\"x,y\",\n nargs='*',\n help=\"Comma-separated co-ordinates of selected points to be translated from the marked image \"\n \"to the target image.\")\n parser.add_argument('-o','--output',\n metavar=\"focused_image_path\",\n help=\"Specify directory for the stacked image. \"\n \"A file called 'processed.tif' will be created in the directory.\"\n \"'processed.tif' will be created in log directory if this is not set.\")\n parser.add_argument('--config',\n metavar=\"path\",\n action=ReadableConfigDir,\n default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME),\n help=\"Sets the configuration directory.\")\n parser.add_argument('--scale',\n metavar=\"scale\",\n help=\"The scale between the Formulatrix and beamline image given as the resolution of each \"\n \"image separated by a colon. Note this is relative (1:2 is the same as 2:4) and a value \"\n \"must be specified for each image using the format \"\n \"'[Formulatrix_image_resolution]:[beamline_image_resolution]'.\")\n parser.add_argument('-j', '--job',\n metavar=\"job_id\",\n help=\"Specify a job_id - this will be reported in the output to help identify this run.\")\n parser.add_argument('--to_json',\n action='store_true',\n help=\"Output a JSON object.\")\n parser.add_argument('--version',\n action='version',\n version=VersionHandler.version_string())\n parser.add_argument('--log',\n metavar=\"path\",\n help=\"Write log files to the directory specified by path.\")\n self.parser = parser",
"def make_parser():\n parser_ = argparse.ArgumentParser(\n description=\"\"\"\n A tool to retrieve history from\n (almost) any browser on (almost) any platform\n\n██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██╗ ██╗██╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗\n██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝██╔════╝██╔══██╗ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝\n██████╔╝██████╔╝██║ ██║██║ █╗ ██║███████╗█████╗ ██████╔╝█████╗███████║██║███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝\n██╔══██╗██╔══██╗██║ ██║██║███╗██║╚════██║██╔══╝ ██╔══██╗╚════╝██╔══██║██║╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝\n██████╔╝██║ ██║╚██████╔╝╚███╔███╔╝███████║███████╗██║ ██║ ██║ ██║██║███████║ ██║ ╚██████╔╝██║ ██║ ██║\n╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝\n \"\"\", # noqa: E501\n epilog=\"\"\"\n Checkout the GitHub repo\n https://github.com/pesos/browser-history\n if you have any issues or want to help contribute\"\"\",\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser_.add_argument(\n \"-t\",\n \"--type\",\n default=\"history\",\n help=f\"\"\"\n argument to decide whether to retrieve history or bookmarks.\n Should be one of {AVAILABLE_TYPES}.\n Default is history.\"\"\",\n )\n parser_.add_argument(\n \"-b\",\n \"--browser\",\n default=\"all\",\n help=f\"\"\"\n browser to retrieve history or bookmarks from. Should be one\n of all, default, {AVAILABLE_BROWSERS}.\n Default is all (gets history or bookmarks from all browsers).\n \"\"\",\n )\n\n parser_.add_argument(\n \"-f\",\n \"--format\",\n default=\"infer\",\n help=f\"\"\"\n Format to be used in output. Should be one of {AVAILABLE_FORMATS}.\n Default is infer (format is inferred from the output file's\n extension. If no output file (-o) is specified, it defaults to csv)\"\"\",\n )\n\n parser_.add_argument(\n \"-o\",\n \"--output\",\n default=None,\n help=\"\"\"\n File where history output or bookmark output is to be written.\n If not provided, standard output is used.\"\"\",\n )\n\n parser_.add_argument(\n \"-p\",\n \"--profile\",\n default=None,\n help=\"\"\"\n Specify the profile from which to fetch history or bookmarks. If\n not provided all profiles are fetched\n \"\"\",\n )\n\n parser_.add_argument(\n \"--show-profiles\",\n default=None,\n metavar=\"BROWSER\",\n help=f\"\"\"\n List all available profiles for a given browser where browser\n can be one of default, {AVAILABLE_BROWSERS}. The browser\n must always be provided.\n \"\"\",\n )\n\n parser_.add_argument(\n \"-v\", \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n return parser_",
"def build_parser():\n def commaSplitter(str):\n \"\"\"\n Argparse a comm-seperated list\n \"\"\"\n # leave this here as a reminder of what I should do to make the argument parsing more robust\n\n # if sqrt != int(sqrt):\n # msg = \"%r is not a perfect square\" % string\n # raise argparse.ArgumentTypeError(msg)\n # return value\n return str.split(',')\n\n def existing_file(fname):\n \"\"\"\n Argparse type for an existing file\n \"\"\"\n if not os.path.isfile(fname):\n raise ValueError(\"Invalid file: \" + str(fname))\n return fname\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('-p', '--prefix', help='dont really know what this does...',\n action='store', default='patient', dest='prefix')\n parser.add_argument('-d', '--date', help='dont really know what this does...',\n action='store', default='', dest='sampledate')\n parser.add_argument('template', type=argparse.FileType('r'), help='BEAST config template file')\n parser.add_argument('fasta', type=argparse.FileType('r'), help='file of sequences (in FASTA format)')\n\n return parser",
"def build_parser ():\n\n parser = argparse.ArgumentParser (description = __doc__)\n\n parser.add_argument (\n '-v', '--verbose', dest='verbose', action='count',\n help='increase output verbosity', default=0\n )\n parser.add_argument (\n '-l', '--live', dest='get_live_data', action='store_true',\n help='get live data from OSM database',\n )\n parser.add_argument (\n '-e', '--edit', action='store_true',\n help='edit the OSM database',\n )\n parser.add_argument (\n '-u', '--user', dest='my_edits', action='store_true',\n help='only report about my edits',\n )\n parser.add_argument (\n '--min-length', dest=\"min_length\", type=float, default=1000.0,\n help='way must be longer than this to get a ref (in m) (default=1000)',\n )\n parser.add_argument (\n '--batch-size', dest=\"batch_size\", type=int, default=10,\n help='apply OSM edits in changesets of this size (default=10)',\n )\n return parser",
"def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? (full path please)')\n\n return parser",
"def buildParser():\n\n parser = argparse.ArgumentParser(\n description='Script to parse bagfile to json file')\n parser.add_argument('-b', '--bag', help='Bag file to read',\n required=True, type=str)\n parser.add_argument('-i', '--include',\n help='list or regex for topics to include',\n required=False, nargs='*')\n parser.add_argument('-e', '--exclude',\n help='list or regex for topics to exclude',\n required=False, nargs='*')\n parser.add_argument('-o', '--output',\n help='name of the output file',\n required=True)\n return parser",
"def build_argparser(self):\n firstletters = ''\n for name, (categ, rest) in self.data.items():\n firstletters += name[0]\n\n self.argparser = argparse.ArgumentParser(\n usage='m3 x {} [arguments]'.format(self.name))\n\n for name, (categ, rest) in self.data.items():\n argargs = {}\n if rest.get('help'):\n argargs['help'] = rest['help']\n if rest.get('type') == 'flag':\n argargs['action'] = 'store_true'\n argargs['required'] = False\n elif 'default' not in rest:\n argargs['required'] = True\n if firstletters.count(name[0]) == 1:\n self.argparser.add_argument('-' + name[0],\n '--' + name, **argargs) # noqa: T484\n else:\n self.argparser.add_argument('--' + name, **argargs) # noqa:T484",
"def _CreateParser():\n parser = commandline.ArgumentParser(description=__doc__, caching=True)\n\n # TODO(rcui): Have this use the UI-V2 format of having source and target\n # device be specified as positional arguments.\n parser.add_argument('--force', action='store_true', default=False,\n help='Skip all prompts (i.e., for disabling of rootfs '\n 'verification). This may result in the target '\n 'machine being rebooted.')\n sdk_board_env = os.environ.get(cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV)\n parser.add_argument('--board', default=sdk_board_env,\n help=\"The board the Chrome build is targeted for. When \"\n \"in a 'cros chrome-sdk' shell, defaults to the SDK \"\n \"board.\")\n parser.add_argument('--build-dir', type='path',\n help='The directory with Chrome build artifacts to '\n 'deploy from. Typically of format '\n '<chrome_root>/out/Debug. When this option is used, '\n 'the GYP_DEFINES environment variable must be set.')\n parser.add_argument('--target-dir', type='path',\n default=None,\n help='Target directory on device to deploy Chrome into.')\n parser.add_argument('-g', '--gs-path', type='gs_path',\n help='GS path that contains the chrome to deploy.')\n parser.add_argument('--nostartui', action='store_false', dest='startui',\n default=True,\n help=\"Don't restart the ui daemon after deployment.\")\n parser.add_argument('--nostrip', action='store_false', dest='dostrip',\n default=True,\n help=\"Don't strip binaries during deployment. Warning: \"\n 'the resulting binaries will be very large!')\n parser.add_argument('-p', '--port', type=int, default=remote.DEFAULT_SSH_PORT,\n help='Port of the target device to connect to.')\n parser.add_argument('-t', '--to',\n help='The IP address of the CrOS device to deploy to.')\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Show more debug output.')\n parser.add_argument('--mount-dir', type='path', default=None,\n help='Deploy Chrome in target directory and bind it '\n 'to the directory specified by this flag.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n parser.add_argument('--mount', action='store_true', default=False,\n help='Deploy Chrome to default target directory and bind '\n 'it to the default mount directory.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n\n group = parser.add_argument_group('Advanced Options')\n group.add_argument('-l', '--local-pkg-path', type='path',\n help='Path to local chrome prebuilt package to deploy.')\n group.add_argument('--sloppy', action='store_true', default=False,\n help='Ignore when mandatory artifacts are missing.')\n group.add_argument('--staging-flags', default=None, type=ValidateGypDefines,\n help=('Extra flags to control staging. Valid flags are - '\n '%s' % ', '.join(chrome_util.STAGING_FLAGS)))\n # TODO(stevenjb): Remove --strict entirely once removed from the ebuild.\n group.add_argument('--strict', action='store_true', default=False,\n help='Deprecated. Default behavior is \"strict\". Use '\n '--sloppy to omit warnings for missing optional '\n 'files.')\n group.add_argument('--strip-flags', default=None,\n help=\"Flags to call the 'strip' binutil tool with. \"\n \"Overrides the default arguments.\")\n group.add_argument('--ping', action='store_true', default=False,\n help='Ping the device before connection attempt.')\n group.add_argument('--mash', action='store_true', default=False,\n help='Copy additional files for mus+ash. 
Will not fit in '\n 'the default target-dir.')\n\n group = parser.add_argument_group(\n 'Metadata Overrides (Advanced)',\n description='Provide all of these overrides in order to remove '\n 'dependencies on metadata.json existence.')\n group.add_argument('--target-tc', action='store', default=None,\n help='Override target toolchain name, e.g. '\n 'x86_64-cros-linux-gnu')\n group.add_argument('--toolchain-url', action='store', default=None,\n help='Override toolchain url format pattern, e.g. '\n '2014/04/%%(target)s-2014.04.23.220740.tar.xz')\n\n # GYP_DEFINES that Chrome was built with. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GYP_DEFINES\n # enviroment variable. WILL BE DEPRECATED.\n parser.add_argument('--gyp-defines', default=None, type=ValidateGypDefines,\n help=argparse.SUPPRESS)\n\n # GN_ARGS (args.gn) used to build Chrome. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GN_ARGS env variable.\n # CURRENLY IGNORED, ADDED FOR FORWARD COMPATABILITY.\n parser.add_argument('--gn-args', default=None, type=ValidateGnArgs,\n help=argparse.SUPPRESS)\n\n # Path of an empty directory to stage chrome artifacts to. Defaults to a\n # temporary directory that is removed when the script finishes. If the path\n # is specified, then it will not be removed.\n parser.add_argument('--staging-dir', type='path', default=None,\n help=argparse.SUPPRESS)\n # Only prepare the staging directory, and skip deploying to the device.\n parser.add_argument('--staging-only', action='store_true', default=False,\n help=argparse.SUPPRESS)\n # Path to a binutil 'strip' tool to strip binaries with. The passed-in path\n # is used as-is, and not normalized. Used by the Chrome ebuild to skip\n # fetching the SDK toolchain.\n parser.add_argument('--strip-bin', default=None, help=argparse.SUPPRESS)\n return parser",
"def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser",
"def build_parser() -> ArgumentParser:\n parser = ArgumentParser(prog=\"bartender\")\n parser.add_argument(\"--version\", action=\"version\", version=version(\"bartender\"))\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n serve_sp = subparsers.add_parser(\"serve\", help=\"Serve web service\")\n serve_sp.set_defaults(func=serve)\n\n perform_sp = subparsers.add_parser(\"perform\", help=\"Async Redis queue job worker\")\n perform_sp.add_argument(\n \"--config\",\n default=Path(\"config.yaml\"),\n type=Path,\n help=\"Configuration with schedulers that need arq workers\",\n )\n perform_sp.add_argument(\n \"--destination\",\n nargs=\"+\",\n help=\"\"\"Name of destinations to run workers for.\n Each destination must have `scheduler.type:arq`.\n By default runs workers for all destinations with `scheduler.type:arq`.\"\"\",\n dest=\"destination_names\",\n )\n perform_sp.set_defaults(func=perform)\n\n add_generate_token_subcommand(subparsers)\n\n return parser",
"def cmdline_parser():\n\n # http://docs.python.org/dev/howto/argparse.html\n parser = argparse.ArgumentParser(description=__doc__)\n \n parser.add_argument(\"--verbose\",\n action=\"store_true\",\n help=\"Be verbose\")\n parser.add_argument(\"--debug\",\n action=\"store_true\",\n help=\"Enable debugging\")\n parser.add_argument(\"-b\", \"--bam\",\n required=True,\n help=\"Input BAM file matching vcf\")\n parser.add_argument(\"-i\", \"--vcf\",\n help=\"Input VCF file containing variants to analyze\"\n \" (clashes with --var)\")\n parser.add_argument(\"-v\", \"--var\",\n help=\"Report reads for this variant only. Format: chr:pos:ref-alt\"\n \" (clashes with --vcf)\")\n default = 0\n parser.add_argument(\"--mq-filter\",\n dest=\"min_mq\",\n type=int,\n default=default,\n help=\"Ignore reads with mapping quality below this value (default=%d)\" % default)\n default = 5\n parser.add_argument(\"--bq-filter\",\n dest=\"min_bq\",\n type=int,\n default=default,\n help=\"Ignore reads with bases below this value (default=%d)\" % default)\n parser.add_argument(\"-a\", \"--use-orphan\",\n action=\"store_true\",\n help=\"Don't ignore orphan-reads / anomalous read-pairs\")\n\n return parser",
"def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser",
"def build_parser(self, add_help=True):\n self.parser = argparse.ArgumentParser(\n description=self.description, add_help=add_help\n )\n self.parser.prog = f\"python -m {self.package}.{self.module_name}\"\n self.parser.add_argument(\n \"config_file\", help=\"Path/name of YAML configuration file for NEMO nowcast.\"\n )",
"def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser(\n description='CLI for SMS',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add subcommands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # Downlink Unitdata\n downlink_unitdata_parser = subparsers.add_parser(\n 'DU', help=\"Send downlink unitdata to SMSOrc8rGW service\",\n )\n downlink_unitdata_parser.add_argument('imsi', help='e.g. 001010000090122 (no prefix required)')\n downlink_unitdata_parser.add_argument('data', help='Data as a hex string e.g. 1fc13a00')\n downlink_unitdata_parser.set_defaults(func=send_downlink_unitdata)\n\n return parser",
"def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args",
"def get_parser():\n parser = ArgumentParser(description=\"Script used to generate Freeplane \"\n + \"mindmap files\")\n\n # This is use when people in Linaro aren't using their email address.\n parser.add_argument('--disable-altname', required=False,\n action=\"store_true\", default=False,\n help=\"Use alternative names (from cfg.yaml) to the tree\")\n\n parser.add_argument('--assignee', required=False,\n action=\"store_true\", default=False,\n help=\"Add assignees (from cfg.yaml) to the tree\")\n\n parser.add_argument('-a', '--author', required=False,\n action=\"store_true\", default=False,\n help=\"If set, git statistic only count the commit \"\n + \"from the author\")\n\n parser.add_argument('-p', '--path', required=False, action=\"store\",\n default=\"/home/jyx/devel/optee_projects/reference/linux\",\n help='Full path to the kernel tree')\n\n parser.add_argument('-s', '--since', required=False, action=\"store\",\n default=None,\n help='Used with the git log --since command')\n\n parser.add_argument('-o', '--output', required=False, action=\"store\",\n default=\"linux-kernel.mm\",\n help='Output filename')\n\n parser.add_argument('-v', required=False, action=\"store_true\",\n default=False,\n help='Output some verbose debugging info')\n\n return parser",
"def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager",
"def command_line():\n version = ' '.join([__version__, __build__])\n parser = ArgumentParser(\n prog='moniker',\n description='Simple batch file renaming tool.',\n )\n parser.add_argument(\n '-v', '--version', action='version',\n version=\"%s v%s\" % (basename(sys.argv[0]), version)\n )\n parser.add_argument(\n '--depth',\n type=int,\n default=0,\n metavar='depth',\n help='Tiers of file heiarcy explored',\n )\n parser.add_argument(\n '--replace',\n nargs=2,\n default=('', ''),\n metavar='replace',\n help='glob pattern to match'\n )\n parser.add_argument(\n 'directory',\n default='.',\n help='target directory root',\n )\n return parser",
"def makeParser():\n parser = argparse.ArgumentParser(\n description=(\n \"Print a JSON object containing reference to read \"\n \"distances extracted from a SAM file.\"\n )\n )\n\n parser.add_argument(\n \"--samFile\",\n action=\"append\",\n required=True,\n help=\"The SAM file(s) to load. May be repeated.\",\n )\n\n parser.add_argument(\n \"--minMatchingReads\",\n type=int,\n help=(\n \"The minimum number of reads that must match a reference for it \"\n \"to be included.\"\n ),\n )\n\n parser.add_argument(\n \"--scoreTag\",\n help=(\n \"The score tag to use for the alignment score. If not given, \"\n \"1 will be used to indicate that a read matched a reference \"\n \"(non-matches are not included). The default is no score tag, \"\n 'which is not that useful. A good choice is \"AS\", for the '\n \"alignment score, but that has to be present in the SAM file, \"\n \"which means that the aligner (bowtie2, bwa, etc. has to have \"\n \"produced such a tag.\"\n ),\n )\n\n parser.add_argument(\n \"--verbose\", action=\"store_true\", help=\"Print extra information.\"\n )\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser",
"def build_parser():\n # Inherit package arguments\n parents = sisr.bin.build_parser(),\n\n parser = argparse.ArgumentParser(\n description=\"Test SiSR super-resolution network\",\n parents=parents)\n\n return parser",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def build_args():\n parser = argparse.ArgumentParser(description='Validates, edits, or creates a 22 XML file')\n subparsers = parser.add_subparsers(help='sub-command help')\n \n add_branch_parser(subparsers)\n add_edit_parser(subparsers)\n add_finalize_parser(subparsers)\n add_grade_parser(subparsers)\n add_new_parser(subparsers)\n add_validate_parser(subparsers)\n add_validate_document_parser(subparsers)\n \n return parser.parse_args()",
"def generate_main_parser() -> ArgumentParser:\n # Create parser\n parser = ArgumentParser(\n description=\"Command line interface tool for iic2343.\",\n )\n\n # Add version flag\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"iic2343 version {iic2343.__version__}\",\n )\n\n # Create subparsers\n subparsers = parser.add_subparsers(help=\"Action to be executed.\")\n\n # Serial ports subparser\n generate_serial_ports_subparser(subparsers)\n\n return parser",
"def build_parser():\n parser = argparse.ArgumentParser(description='Bag reader')\n parser.add_argument('-b', '--bag',\n help='Bag files to read',\n required=True,\n nargs='+',\n type=str)\n parser.add_argument('-i', '--info',\n help='List topics and fields within topics',\n required=False,\n action='store_true')\n parser.add_argument('-s', '--stats',\n help='Display how many messages were published on each topic',\n required=False,\n action='store_true')\n parser.add_argument('-t', '--topic',\n help='Topics to write to csv file',\n required=False,\n action='store',\n nargs='+',\n type=str)\n parser.add_argument('-o', '--output_file',\n help='Output file name',\n required=False,\n action='store',\n nargs='+',\n dest='out_file',\n type=str)\n\n return parser",
"def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser",
"def get_parser(self):\n parser = ArgumentParser()\n parser.add_argument(\n \"-c\", default='', dest='cmd',\n help=(\"just like python -c or sh -c (pass in a command)\"))\n parser.add_argument(\n \"-e\", \"--exec\", default='', dest='execfile',\n help='a filename to execute')\n parser.add_argument(\n \"-v\", '--version', default=False, dest='version',\n action='store_true',\n help=(\"show version information\"))\n parser.add_argument(\"--shell\", dest=\"shell\",\n default=False, help=\"application shell\",\n action='store_true')\n parser.add_argument(\"--config\", dest='config',\n default=\"\",\n help=\"use config file\")\n return parser"
] | [
"0.69869363",
"0.6829675",
"0.6746405",
"0.6673381",
"0.66188794",
"0.658462",
"0.6580858",
"0.6543717",
"0.65387374",
"0.6531024",
"0.6523515",
"0.6503992",
"0.6499173",
"0.64667153",
"0.6449182",
"0.6419928",
"0.6393331",
"0.6363311",
"0.6360073",
"0.6353206",
"0.63527614",
"0.63351756",
"0.63256425",
"0.63177186",
"0.630223",
"0.6298224",
"0.6294251",
"0.62868905",
"0.6282018",
"0.62447035"
] | 0.7635935 | 0 |
Returns absolute paths to input files. | def abspath(files):
files = sum([glob.glob(x) for x in files], [])
return [os.path.abspath(x) for x in files] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inputpathabs(self):\n return os.path.abspath(self.inputpath)",
"def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths",
"def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)",
"def get_input_files(dir_path):\n return [os.path.join(dir_path,f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path,f))]",
"def input_path(self, filename):\n\n return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))",
"def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]",
"def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list",
"def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files",
"def get_image_path(raw_input_dir: str) -> list:\n result = []\n for root, dirs, files in os.walk(raw_input_dir):\n for file in files:\n result.append(os.path.join(root, file))\n return result",
"def get_paths(file_path):\n return glob(path.join(file_path, '*'))",
"def input_path(self, filename):\n\n return self.filename_path_join(self.input_dir, filename)",
"def filepaths(self):\n pass",
"def getpaths_fromfile(input_prefix_, file_handle_):\n\n input_paths = []\n\n for line in file_handle_:\n line = line.strip()\n if line != \"\":\n dirname = line\n path = os.path.join(input_prefix_, \"%s*\" % dirname)\n input_paths.append(tuple([dirname, path]))\n\n return input_paths",
"def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths",
"def get_input_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return self.path_tpl.format(infix=\"\", ext=\".bam\")",
"def inputFiles(self):\n return (self.matrixFile,)",
"def test_get_output_filepaths(self):\r\n\r\n output_dir = \".\"\r\n\r\n fasta_fp = \"seqs.fna\"\r\n\r\n qual_fp = \"seqs.qual\"\r\n\r\n expected_fasta_fp = \"./seqs_filtered.fasta\"\r\n expected_qual_fp = \"./seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)\r\n\r\n # Test for relative paths\r\n output_dir = \"test/\"\r\n\r\n fasta_fp = \"../seqs.fna\"\r\n\r\n qual_fp = \"quality_scores/seqs.qual\"\r\n\r\n expected_fasta_fp = \"test/seqs_filtered.fasta\"\r\n expected_qual_fp = \"test/seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)",
"def find_all_infilepaths(in_dir):\n workdir = os.getcwd()\n os.chdir(in_dir)\n\n infiles_paths = dict()\n for infilename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n pos = infilename.split('_')\n pos[-1] = pos[-1].split('.')[0]\n pos = tuple(list(map(lambda s: int(s), pos)))\n num_pos = _3d_to_numeric\n infiles_paths[num_pos] = os.path.join(in_dir, infilename)\n\n os.chdir(workdir)\n return infiles_paths",
"def detectFiles(self, input):\n output = []\n if os.path.isfile(input):\n output.append(input)\n else:\n input = os.path.join(input, '*') if os.path.isdir(input) else input\n for file in glob.glob(input):\n output.append(file)\n return output",
"def inputFiles(self):\n inputfiles = set()\n for f in self:\n [ inputfiles.add(x) for x in f['input']]\n return list(inputfiles)",
"def get_paths(input_folder: str) -> list[str]:\n\n return [f for f in os.listdir(input_folder) if f[-4:] == '.txt' and f[:3] != 'top']",
"def collect_input_files(input_directory_path: Path) -> Generator[Path, None, None]:\n return input_directory_path.glob('**/*')",
"def input_files_from_path(path):\n import glob\n input_files = None\n if type(path) is list:\n input_files = []\n for p in path:\n if '*' in p:\n input_files.extend(glob.glob(p))\n else: # neither wildcard nor comma separated list\n input_files.append(p)\n else:\n if ',' in path:\n input_files = path.split(',')\n elif '*' in path:\n input_files = glob.glob(path)\n else: # neither wildcard nor comma separated list\n input_files = [path]\n input_files = [os.path.abspath(f) for f in input_files]\n return [f for f in input_files if os.path.exists(f) or f.startswith('/store')]",
"def full_path(startPath,files):\n\n files = list_strings(files)\n base = os.path.split(startPath)[0]\n return [ os.path.join(base,f) for f in files ]",
"def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]",
"def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths",
"def get_files_paths(self):\n return self.__files_paths",
"def resolve_file_paths(local_path):\n local_path = os.path.abspath(local_path)\n files = []\n if local_path.find('*') > -1:\n # Supplied path is a pattern - relative directory will be the\n # path up to the first wildcard\n ref_dir_str = local_path.split('*')[0].rstrip('/\\\\')\n if not os.path.isdir(ref_dir_str):\n ref_dir_str = os.path.dirname(ref_dir_str)\n ref_dir = pathlib.Path(ref_dir_str)\n pattern = local_path[len(ref_dir_str + os.pathsep):]\n files = [str(f) for f in ref_dir.glob(pattern) if f.is_file()]\n local_path = ref_dir_str\n else:\n if os.path.isdir(local_path):\n # Supplied path is a directory\n files = [os.path.join(local_path, f) for f in os.listdir(local_path)\n if os.path.isfile(os.path.join(local_path, f))]\n elif os.path.isfile(local_path):\n # Supplied path is a file\n files.append(local_path)\n local_path = os.path.dirname(local_path)\n return local_path, files",
"def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files",
"def get_file_list(input_list):\n if not isinstance(input_list, Iterable)\\\n or isinstance(input_list, str):\n raise BirdVoxClassifyError('input_list must be a non-string iterable')\n file_list = []\n for item in input_list:\n if os.path.isfile(item):\n file_list.append(os.path.abspath(item))\n elif os.path.isdir(item):\n for fname in os.listdir(item):\n path = os.path.join(item, fname)\n if os.path.isfile(path):\n file_list.append(path)\n else:\n raise BirdVoxClassifyError(\n 'Could not find input at path {}'.format(item))\n\n return file_list"
] | [
"0.6970691",
"0.6879517",
"0.6868738",
"0.6868052",
"0.67055565",
"0.6700424",
"0.65872943",
"0.657331",
"0.6540866",
"0.651123",
"0.65038157",
"0.65028954",
"0.6485323",
"0.6448438",
"0.63885504",
"0.6380545",
"0.63705975",
"0.63195634",
"0.6316908",
"0.63139904",
"0.62974036",
"0.6260034",
"0.62238383",
"0.6191406",
"0.61830497",
"0.617022",
"0.6137567",
"0.6127645",
"0.6106573",
"0.6082255"
] | 0.71486807 | 0 |
Creates a custom time entry; at minimum, an hour duration and a project parameter are required | def createTimeEntry(self, hourduration, description=None, projectid=None, projectname=None,
taskid=None, clientname=None, year=None, month=None, day=None, hour=None,
billable=False, hourdiff=-2):
data = {
"time_entry": {}
}
if not projectid:
if projectname and clientname:
projectid = (self.getClientProject(clientname, projectname))['data']['id']
elif projectname:
projectid = (self.searchClientProject(projectname))['data']['id']
else:
print('Too many missing parameters for query')
exit(1)
if description:
data['time_entry']['description'] = description
if taskid:
data['time_entry']['tid'] = taskid
year = datetime.now().year if not year else year
month = datetime.now().month if not month else month
day = datetime.now().day if not day else day
hour = datetime.now().hour if not hour else hour
timestruct = datetime(year, month, day, hour + hourdiff).isoformat() + '.000Z'
data['time_entry']['start'] = timestruct
data['time_entry']['duration'] = hourduration * 3600
data['time_entry']['pid'] = projectid
data['time_entry']['created_with'] = 'NAME'
data['time_entry']['billable'] = billable
response = self.postRequest(Endpoints.TIME_ENTRIES, parameters=data)
return self.decodeJSON(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def running_custom_hour(arg):\n pass",
"def __init__(self, hour, minute=0, second=0, microsecond=0, tzinfo=None):",
"def timeField(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dragCommand:\n Script=None, dropCallback: Script=None, editable: bool=True, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, enterCommand:\n Script=None, exists: bool=True, fullPathName: bool=True, height: Union[int,\n bool]=0, highlightColor: Union[List[float, float, float], bool]=None, isObscured:\n bool=True, manage: bool=True, noBackground: bool=True, numberOfPopupMenus:\n bool=True, parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, precision:\n Union[int, bool]=0, preventOverride: bool=True, receiveFocusCommand: Script=None,\n statusBarMessage: AnyStr=\"\", step: Union[time, bool]=None, useTemplate: AnyStr=\"\",\n value: Union[time, bool]=None, visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def test_issue_add_time(self):\n pass",
"def __init__(__self__, *,\n duration_hours: pulumi.Input[int],\n schedule: pulumi.Input['ScheduleArgs'],\n start_time: pulumi.Input[str],\n not_allowed_dates: Optional[pulumi.Input[Sequence[pulumi.Input['DateSpanArgs']]]] = None,\n start_date: Optional[pulumi.Input[str]] = None,\n utc_offset: Optional[pulumi.Input[str]] = None):\n if duration_hours is None:\n duration_hours = 24\n pulumi.set(__self__, \"duration_hours\", duration_hours)\n pulumi.set(__self__, \"schedule\", schedule)\n pulumi.set(__self__, \"start_time\", start_time)\n if not_allowed_dates is not None:\n pulumi.set(__self__, \"not_allowed_dates\", not_allowed_dates)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if utc_offset is not None:\n pulumi.set(__self__, \"utc_offset\", utc_offset)",
"def __init__(self, hour=0, minute=0, second=0):\n self.hour = hour\n self.minute = minute\n self.second = second",
"def _setup_volunteer_hours(\n volunteer,\n npf_admin,\n org,\n project,\n datetime_start,\n datetime_end,\n description=\"Manually tracked time \",\n event_type=\"MN\",\n is_verified=False,\n action_type='req'\n):\n event = Event.objects.create(\n project=project,\n is_public=True,\n description=\"finished event\",\n location=\"test_location\",\n coordinator=npf_admin,\n event_type=event_type,\n datetime_start=datetime_start,\n datetime_end=datetime_end\n )\n\n volunteer_timelog = UserTimeLog.objects.create(\n user=volunteer,\n event=event,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n is_verified=is_verified\n )\n\n actiontimelog = AdminActionUserTime.objects.create(\n user=npf_admin,\n usertimelog=volunteer_timelog,\n action_type=action_type\n )\n\n return volunteer_timelog, actiontimelog, event",
"def constructTimeLineItem(self):\n\t\treturn",
"def __init__(self,\n label=None,\n validators=None,\n format='%I:%M%p', # 1:45PM\n **kwargs):\n super(TimeField, self).__init__(label, validators, **kwargs)\n self.format = format",
"def __init__(self, offset_hours: int) -> None:\r\n self.offset = datetime.timedelta(hours=offset_hours)",
"def __init__(self, name=\"\", time=None):\n super().__init__(\"time\", name)\n self.time = time",
"def create_base_entry(vin=\"INVALID\", time_unix=None):\n\t\treturn LogEntry(vin=vin, app_id=\"INVALID\", time_unix=time_unix)",
"def create_time(given_time: Any | None) -> str | None:\n if not given_time:\n return None\n if datetime_time := arg_to_datetime(given_time):\n return datetime_time.strftime(DATE_FORMAT)\n else:\n raise DemistoException(\"Time parameter supplied in invalid, make sure to supply a valid argument\")",
"def __init__(self, negative: bool, hours: int, minutes: int):\n self.negative = negative\n self.hours = hours\n self.minutes = minutes",
"def make_entries(self, user=None, projects=None, dates=None,\n hours=1, minutes=0):\n if not user:\n user = self.user\n if not projects:\n projects = self.default_projects\n if not dates:\n dates = self.default_dates\n for project in projects:\n for day in dates:\n self.log_time(project=project, start=day,\n delta=(hours, minutes), user=user)",
"def __init__(self, day, hour, minute):\n self.day = day\n self.hour = hour\n self.minute = minute",
"def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]",
"def __init__(self, dt=60*60*24):\n pass",
"def __init__(self, time, metadata):\n self.time = time\n self.metadata = metadata",
"def setSubmitTime(t):",
"def test_time_entry_creation(self):\n #customer data\n customer_name = u'RFCCustomer'\n #project data\n project_name = u'A new project'\n project_id = 'a-new-project'\n #entry data\n entry_date = datetime.date(2011, 05, 26)\n entry_hours = '2:30'\n entry_location = u'RedTurtle Technology'\n entry_description = u'Trying to create ticket for API tests'\n entry_ticket = '45'\n\n #Start to create customer, project and time entry for project\n session = DBSession()\n project = Project(name=project_name, id=project_id)\n customer = Customer(name=customer_name)\n customer.add_project(project)\n session.add(customer)\n transaction.commit()\n\n #Try to get errors\n resp = self.proxy.create_new_simple_time_entry(1, entry_date,\n entry_hours, entry_description,\n entry_location, project_id)\n self.assertEqual(resp['message'], u\"'int' object has no attribute 'decode'\")\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n u'9000',\n entry_description,\n entry_location,\n project_id)\n\n self.assertEqual(resp['message'], u'Cannot parse time (must be HH:MM)')\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n u'19:40',\n entry_description,\n entry_location,\n project_id)\n\n self.assertEqual(resp['message'], u'Time value too big (must be <= 16:00)')\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n entry_hours,\n entry_description,\n entry_location,\n 100)\n self.assertEqual(resp['message'], u'Not able to get the project with id 100')\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n '2011 01 01',\n entry_hours,\n entry_description,\n entry_location,\n 100)\n self.assertEqual(resp['message'], u\"time data '2011 01 01' does not match format '%Y-%m-%d'\")\n\n #Let's try to create a simple time entry\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n entry_hours,\n entry_description,\n entry_location,\n project_id)\n\n self.assertRegexpMatches(resp['message'], u'Correctly added time entry \\d+ for %s ticket #%s' %(project_id, entry_ticket))\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n entry_hours,\n '',\n entry_location,\n project_id)\n self.assertEqual(resp['message'], u\"Description is required.\")\n\n #Now try to create a more complex time entry\n entry_start = datetime.datetime(2011, 01, 01, 15, 30)\n entry_end = datetime.datetime(2011, 01, 01, 17, 30)\n entry_ticket = '#99'\n\n resp = self.proxy.create_new_advanced_time_entry(99,\n entry_start,\n entry_end,\n entry_description,\n entry_location,\n 10)\n self.assertEqual(resp['message'], u\"'int' object has no attribute 'decode'\")\n\n resp = self.proxy.create_new_advanced_time_entry(entry_ticket,\n entry_start,\n entry_end,\n entry_description,\n entry_location,\n 100)\n self.assertEqual(resp['message'], u'Not able to get the project with id 100')\n\n resp = self.proxy.create_new_advanced_time_entry(entry_ticket,\n '2011 08 24',\n entry_end,\n entry_description,\n entry_location,\n 10)\n self.assertEqual(resp['message'], u\"time data '2011 08 24' does not match format '%Y-%m-%d %H:%M:%S'\")\n\n resp = self.proxy.create_new_advanced_time_entry(entry_ticket,\n entry_start,\n entry_end,\n entry_description,\n entry_location,\n project_id)\n self.assertRegexpMatches(resp['message'], u'Correctly added time entry \\d+ for %s ticket #%s' %(project_id, entry_ticket))",
"def __timeRestriction():\n restriction = {\"M\": [\"7:00\", \"9:30\"],\n \"A\": [\"16:00\", \"19:30\"]}\n return restriction",
"def __add__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___add__(self, *args, **kwargs)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Take the local timezone into account when calculating datetimes\n self.local_tz = pytz.timezone(settings.TIME_ZONE)\n\n self.end_time = self.start_time + timedelta(minutes=self.duration)",
"def add_time_to_title( self, title ):\n begin = self.begin; end = self.end\n if 'span' in self.metadata:\n interval = self.metadata['span']\n elif 'given_kw' in self.metadata and 'span' in self.metadata['given_kw']:\n interval = self.metadata['given_kw']['span']\n else:\n interval = self.time_interval( )\n formatting_interval = self.time_interval()\n if formatting_interval == 600:\n format_str = '%H:%M:%S'\n elif formatting_interval == 3600:\n format_str = '%Y-%m-%d %H:%M'\n elif formatting_interval == 86400:\n format_str = '%Y-%m-%d'\n elif formatting_interval == 86400*7:\n format_str = 'Week %U of %Y'\n\n if interval < 600:\n format_name = 'Seconds'\n time_slice = 1\n elif interval < 3600 and interval >= 600:\n format_name = 'Minutes'\n time_slice = 60\n elif interval >= 3600 and interval < 86400:\n format_name = 'Hours'\n time_slice = 3600\n elif interval >= 86400 and interval < 86400*7:\n format_name = 'Days'\n time_slice = 86400\n elif interval >= 86400*7:\n format_name = 'Weeks'\n time_slice = 86400*7\n else:\n format_str = '%x %X'\n format_name = 'Seconds'\n time_slice = 1\n\n begin_tuple = time.gmtime(begin); end_tuple = time.gmtime(end)\n added_title = '\\n%i %s from ' % (int((end-begin)/time_slice), format_name)\n added_title += time.strftime('%s to' % format_str, begin_tuple)\n if time_slice < 86400:\n add_utc = ' UTC'\n else:\n add_utc = ''\n added_title += time.strftime(' %s%s' % (format_str, add_utc), end_tuple)\n return title + added_title",
"def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 0 )",
"def test_format_optional_time_field(self):\n formatted_time = jiratimereport.format_optional_time_field(99960, \"\")\n expected_result = \"27:46:00\"\n self.assertEqual(expected_result, formatted_time)",
"def __init__(self, hrs=0, mins=0, secs=0):\n #Calculate total seconds to represent\n totalsecs = hrs*3600 + mins*60 + secs\n self.hours = totalsecs //3600\n leftoversecs = totalsecs % 3600\n self.minutes = leftoversecs // 60\n self.seconds = leftoversecs % 60",
"def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):",
"async def test_process_set_custom_time(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n broadcast_type=\"TIME\",\n localtime=False,\n )\n assert self.datetime.remote_value.value is None\n\n test_time = time.strptime(\"9:13:14\", \"%H:%M:%S\")\n await self.datetime.set(test_time)\n telegram = xknx.telegrams.get_nowait()\n assert telegram == Telegram(\n destination_address=GroupAddress(\"1/2/3\"),\n payload=GroupValueWrite(DPTArray((0x9, 0xD, 0xE))),\n )\n await self.datetime.process(telegram)\n assert self.datetime.remote_value.value == test_time"
] | [
"0.63917255",
"0.63114077",
"0.5988722",
"0.5984777",
"0.59670573",
"0.59660786",
"0.59379464",
"0.5916419",
"0.58417237",
"0.58348185",
"0.5833585",
"0.5783263",
"0.5764132",
"0.5757329",
"0.5752414",
"0.57444865",
"0.57442397",
"0.5735035",
"0.57336044",
"0.5732334",
"0.5727091",
"0.5717379",
"0.57122076",
"0.5709428",
"0.5699456",
"0.56896895",
"0.56881404",
"0.56756496",
"0.56464684",
"0.5635764"
] | 0.68871313 | 0 |
Fast project lookup given the Client's name and Project's name | def getClientProject(self, clientName, projectName):
for client in self.getClients():
if client['name'] == clientName:
cid = client['id']
if not cid:
print('Could not find such client name')
return None
for projct in self.getClientProjects(cid):
if projct['name'] == projectName:
pid = projct['id']
if not pid:
print('Could not find such project name')
return None
return self.getProject(pid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def searchClientProject(self, name):\n for client in self.getClients():\n try:\n for project in self.getClientProjects(client['id']):\n if project['name'] == name:\n return project\n except Exception:\n continue\n\n print('Could not find client by the name')\n return None",
"def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]",
"def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)",
"def project_by_name(self,project_name=''):\n logger.debug(f'project_by_name project_name={project_name}')\n return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))",
"def run(self, query, project=\"odyssey-193217\"):\n\t\tfrom google.cloud import bigquery\n\t\tjob_config = bigquery.QueryJobConfig()\n\t\tclient = bigquery.Client(project=project)\n\t\tresult = client.query(query,job_config=job_config)\n\t\tjob_config.allowLargeResults = True\n\t\tresult.__done_timeout = 99999999\n\t\treturn list(result)",
"def test_list_project_request(self):\n pass",
"def search_key_for_project(project):\n elements = []\n elements.append(project['name'])\n elements.append(project['client'])\n elements.append(project['project_state'])\n elements.append(str(project['project_code']))\n return u' '.join(elements)",
"def _to_client_query(self, client):\n ancestor_client_key = None\n if self.ancestor is not None:\n ancestor_client_key = self.ancestor.to_client_key()\n\n # Resolve ValueProvider arguments.\n self.filters = self._set_runtime_filters()\n if isinstance(self.namespace, ValueProvider):\n self.namespace = self.namespace.get()\n\n return query.Query(\n client,\n kind=self.kind,\n project=self.project,\n namespace=self.namespace,\n ancestor=ancestor_client_key,\n filters=self.filters,\n projection=self.projection,\n order=self.order,\n distinct_on=self.distinct_on)",
"def get_projects():\n return Project.query.all()",
"def get_one_project_by_name(ctx, project_name):\n pprint(ctx.obj.groups.byName[project_name].get().data)",
"def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)",
"def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]",
"def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name",
"def request_project_by_key(cfg, project_key):\n\n url = cjm.request.make_cj_url(cfg, \"project\", project_key)\n response = cjm.request.make_cj_request(cfg, url)\n return response.json()",
"def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df",
"def test_list_project(self):\n pass",
"def project_search_json():\n q = request.args.get('q')\n if q is None or len(q) < 3:\n return jsonify(projects=[])\n limit = request.args.get('limit') or 10\n q = \"%%%s%%\" % q\n projects = Project.query.filter(or_(\n Project.name.like(q),\n Project.summary.like(q),\n Project.longtext.like(q),\n Project.autotext.like(q),\n )).limit(limit).all()\n projects = expand_project_urls(\n [p.data for p in projects],\n request.host_url\n )\n return jsonify(projects=projects)",
"def find(self, **kwargs):\n return super(ClientsTable, self).records('clients', **kwargs)",
"def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)",
"def get_project_ids(self, node=None, name=None):\n project_ids = []\n queries = []\n # Return all project_ids in the data commons if no node is provided or if node is program but no name provided\n if name == None and ((node == None) or (node == \"program\")):\n print(\"Getting all project_ids you have access to in the data commons.\")\n if node == \"program\":\n print(\n \"Specify a list of program names (name = ['myprogram1','myprogram2']) to get only project_ids in particular programs.\"\n )\n queries.append(\"\"\"{project (first:0){project_id}}\"\"\")\n elif name != None and node == \"program\":\n if isinstance(name, list):\n print(\n \"Getting all project_ids in the programs '\" + \",\".join(name) + \"'\"\n )\n for program_name in name:\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (program_name)\n )\n elif isinstance(name, str):\n print(\"Getting all project_ids in the program '\" + name + \"'\")\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (name)\n )\n elif isinstance(node, str) and isinstance(name, str):\n print(\n \"Getting all project_ids for projects with a path to record '\"\n + name\n + \"' in node '\"\n + node\n + \"'\"\n )\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"%s\",submitter_id:\"%s\"}){project_id}}\"\"\"\n % (node, name)\n )\n elif isinstance(node, str) and name == None:\n print(\n \"Getting all project_ids for projects with at least one record in the node '\"\n + node\n + \"'\"\n )\n query = \"\"\"{node (first:0,of_type:\"%s\"){project_id}}\"\"\" % (node)\n df = pd.json_normalize(self.sub.query(query)[\"data\"][\"node\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n if len(queries) > 0:\n for query in queries:\n res = self.sub.query(query)\n df = pd.json_normalize(res[\"data\"][\"project\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n my_ids = sorted(project_ids, key=str.lower)\n print(my_ids)\n return my_ids",
"def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)",
"def project_show(ctx, args):\n for project_id in args:\n data = ctx.obj.get_project_by_project_id(project_id)\n output_json_data(data)",
"def list_(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, backend=backend)\n projects = sorted(projects, key=lambda project: project.name.lower())\n ctx.obj['view'].search_results(projects)",
"def query_client(self, client_id):\n try:\n return self.client_model.objects.get(client_id=client_id)\n except self.client_model.DoesNotExist:\n return None",
"def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')",
"def get_user_projects(username):\n\n tx = cypher_transaction()\n query = \"\"\"\n MATCH (p:project)-[:OWNED_BY]->(u:user {username:{uname}})\n RETURN p\n \"\"\"\n tx.append(query, parameters={'uname': username})\n results = _first(tx.commit())\n projects = []\n for r in results:\n proj, = r.values\n print(\"* {0}\".format(proj['name']))\n projects.append(proj)\n return projects",
"def test_get_projects(self):\n pass",
"def get_project(name):\n tx = cypher_transaction()\n query = \"\"\"MATCH (n:project) WHERE n.name={project_name} RETURN n\"\"\"\n tx.append(query, parameters={'project_name': name})\n result = tx.commit()\n\n # Returns a result of the form [[\n # Record(\n # columns=('n',),\n # values=(Node('http://localhost:7474/db/data/node/233'),)\n # )\n # ]]\n return _first(result)[0].values[0]",
"def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]",
"def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)"
] | [
"0.68639714",
"0.61412024",
"0.5989478",
"0.5867039",
"0.5781275",
"0.5674645",
"0.566956",
"0.56622523",
"0.5642613",
"0.5634638",
"0.55459017",
"0.5535563",
"0.5528021",
"0.5521917",
"0.5521528",
"0.54804444",
"0.5467839",
"0.5415431",
"0.5400909",
"0.53992367",
"0.53678286",
"0.53645825",
"0.53416485",
"0.5323191",
"0.5322763",
"0.5321236",
"0.5310382",
"0.53022796",
"0.5294313",
"0.52851266"
] | 0.6326702 | 1 |
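
For reference, the two-stage name-to-id lookup used in the record above can be exercised on its own; the sketch below mimics it with in-memory data rather than the wrapped API (the `clients`/`projects_by_client` structures and the `find_project` helper are illustrative assumptions, not part of the source library).

```python
# Standalone sketch of the client-name -> client-id -> project lookup pattern.
clients = [{'id': 1, 'name': 'ACME'}, {'id': 2, 'name': 'Globex'}]
projects_by_client = {
    1: [{'id': 10, 'name': 'Website'}, {'id': 11, 'name': 'Mobile App'}],
    2: [{'id': 20, 'name': 'Rebranding'}],
}

def find_project(client_name, project_name):
    # Resolve the client id first, then search only that client's projects.
    cid = next((c['id'] for c in clients if c['name'] == client_name), None)
    if cid is None:
        return None
    return next((p for p in projects_by_client.get(cid, [])
                 if p['name'] == project_name), None)

print(find_project('ACME', 'Mobile App'))  # {'id': 11, 'name': 'Mobile App'}
print(find_project('ACME', 'Missing'))     # None
```
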
Update data for an existing client. If the name or notes parameter is not supplied, the existing data on the Toggl server will not be changed. | def updateClient(self, id, name=None, notes=None):
    data = {}
    data['client'] = {}
    # Only send fields that were supplied, so omitted fields keep their
    # existing values on the Toggl server (as the docstring promises)
    if name is not None:
        data['client']['name'] = name
    if notes is not None:
        data['client']['notes'] = notes
    response = self.postRequest(Endpoints.CLIENTS + '/{0}'.format(id), parameters=data, method='PUT')
    return self.decodeJSON(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_client(client_name, updated_client_name): # Operacion modificar\n global clients\n\n if client_name in clients:\n index = clients.index(client_name)\n clients[index] = updated_client_name\n else:\n print(\"Client isn\\'t in the client list\")",
"def test_update_client(self):\n url = '/api/v1/pediatras/{}/'.format(self.app_client.id)\n\n data = {\n \"name\": \"Ernesto\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def update_client(\n body: ClientmodelClientUpdateRequest,\n client_id: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = UpdateClient.create(\n body=body,\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def do_PUT(self):\n note_details = NoteDetails\n if self.path == '/note/api/update':\n response_data=note_details.update_data(self)\n Response(self).jsonResponse(status=200, data=response_data)",
"def set(self, client):\n if not client:\n raise SurvoxAPIMissingParameter('client')\n c = self.get()\n if not c:\n raise SurvoxAPIRuntime('No client available named: {name}'.format(name=self.name))\n return self.api_put(endpoint=self.url, data=client)",
"def update_client(self, display_name, player=PLAYER_IDENTIFIER):\n self.state.update_client(display_name, player)",
"def update(self, klient):\n try:\n # pobierz z bazy klienta\n inv_oryg = self.getById(klient.id)\n if inv_oryg != None:\n # klient jest w bazie: usuń go\n self.delete(klient)\n self.add(klient)\n\n except Exception as e:\n #print \"klient update error:\", e\n raise RepositoryException('error updating klient %s' % str(klient))",
"async def update_client_async(\n body: ClientmodelClientUpdateRequest,\n client_id: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = UpdateClient.create(\n body=body,\n client_id=client_id,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )",
"def test_update_client(self):\n pass",
"def test_client_update(self):\n pass",
"def test_04_update_client(self):\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.modify(client)\n client = ClientsUnitTest._client_dao.get_client(client.user_id)\n self.assertEqual(client.user_id, int(test_str[0]))\n self.assertEqual(client.host_name, test_str[1])\n self.assertEqual(client.user_name, test_str[2])\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n self.assertTrue(ClientsUnitTest._client_dao.modify(client))\n\n except ClientNotFoundException as error:\n print(error.get_message())\n\n except DBException as error:\n print(error.get_message())",
"def createClient(self, name, wid, notes=None):\n\n data = {}\n data['client'] = {}\n data['client']['name'] = name\n data['client']['wid'] = wid\n data['client']['notes'] = notes\n\n response = self.postRequest(Endpoints.CLIENTS, parameters=data)\n return self.decodeJSON(response)",
"def put(self, id_cliente):\n data = request.json\n cliente = update_cliente(id_cliente, data)\n if not cliente:\n api.abort(404)\n else:\n return cliente",
"def update_a_note(self, note_id, data):\n return self.client._put(\"/notes/{}\".format(note_id), json=data)",
"def patch(cls, username, cad_model_name):\n client = ClientModel.find_user_by_username(username)\n jwt_id = get_jwt_identity()\n if client is None:\n return {'msg': gettext('client_profile_client_does_not_exist')}, 400\n if jwt_id != client.id:\n # Unauthorized delete\n return {'msg': gettext('cad_model_unauthorized_to_update')}, 403\n # Validate form entries\n spec_schema = CADSpecificationSchema(only=cad_spec_keys, partial=True)\n update_specs = spec_schema.load(request.form)\n # Set up a counter to check property update counts\n count = 0\n if cad_model_name != update_specs['cad_model_name']:\n return {'msg': gettext('cad_model_name_mismatch')}, 400\n\n if update_specs == {}:\n return {'msg': gettext('cad_model_update_info_empty')}\n # Defensive: Ensure a CAD model name is always submitted\n if update_specs['cad_model_name'] == '':\n return {'msg': gettext('cad_model_name_cannot_be_empty')}\n\n client_folder = f'client_{jwt_id}'\n current_cad = CADModel.find_cad_model_by_name(cad_model_name)\n\n if current_cad is None:\n return {'msg': gettext('cad_model_does_not_exist')}, 400\n\n object_key = f'{client_folder}/{update_specs[\"cad_model_name\"]}'\n # Get a presigned POST url\n ps_data = create_presigned_post_url(\n s3_client, bucket_name, object_key)\n url, fields = ps_data['url'], ps_data['fields']\n\n # Update cad_model_key\n if 'cad_model_length' in update_specs:\n current_cad.cad_model_length = update_specs['cad_model_length']\n count += 1\n\n if 'cad_model_height' in update_specs:\n current_cad.cad_model_height = update_specs['cad_model_height']\n count += 1\n\n if 'cad_model_width' in update_specs:\n current_cad.cad_model_width = update_specs['cad_model_width']\n count += 1\n\n if 'cad_model_material' in update_specs:\n current_cad.cad_model_material = update_specs['cad_model_material']\n count += 1\n\n if 'cad_model_visibility' in update_specs:\n current_cad.cad_model_visibility = update_specs['cad_model_visibility']\n count += 1\n\n if 'cad_model_mesh_percent' in update_specs:\n current_cad.cad_model_mesh_percent = update_specs['cad_model_mesh_percent']\n count += 1\n\n if count > 0:\n current_cad.save_cad_model_to_db()\n return {\n 'url': url,\n 'fields': fields\n }, 200\n # return {'msg': gettext('cad_model_update_info_empty')}, 400",
"def upsert_client_rate():\n print(request)\n new_client_dict = request.json\n new_client_dict_keys = new_client_dict.keys()\n new_client_dict_values = new_client_dict.values()\n # We want to update if the client exist in the client_rate.json data\n for i in range(1, len(new_client_dict)+1):\n if new_client_dict_keys[i] is None or new_client_dict_values[i] is None:\n continue\n else:\n update_client_rates(new_client_dict_keys[i], new_client_dict_values[i])\n # Or insert a new client-rate pair into client_rate.json data\n # After getting post request - how to update json file?\n return request.get_json()",
"def put(self):\n client_data = self.data\n comment_id = client_data['comment_id']\n\n try:\n comment = self.event_comment_table.get_item(CommentID=comment_id)\n except:\n self.write_json_with_status(400,{\n 'result' : 'fail',\n 'reason' : 'invalid comment id'\n })\n\n if self.current_userid != comment[\"CreatorID\"]:\n self.write_json_with_status(403,{\n 'result' : 'fail',\n 'reason' : 'Anthantication failed'\n })\n\n comment['Coentent'] = client_data['data']\n comment['Timestamp'] = str(time.time())\n comment.partial_save();\n\n self.write_json({\n 'comment_id' : comment_id,\n 'Timestamp' : comment['Timestamp']\n })",
"def test_update_pacient(self):\n url = '/api/v1/pacientes/{}/'.format(self.app_client.id)\n\n data = {\n \"name\": \"Ernesto\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def update(self, an_id: id = None, where_key: str = None, name: str = None, data=None, notes: str = None,\n modified_by: str = None, created_by: str = None, my_conn: Optional[dict] = None,\n t_log: Optional[TimeLogger] = None, verbose: bool = None):\n\n if my_conn is None:\n my_conn = self.my_conn\n else:\n self.my_conn = my_conn\n\n if verbose is True and t_log is None:\n t_log = TimeLogger()\n\n my_conn = my_connect(my_conn=my_conn, t_log=t_log, verbose=verbose)\n conn = my_conn['conn']\n db_params = my_conn['db_params']\n\n if where_key is None:\n where_key = self.id_name()\n\n if an_id is None:\n warn(\"No Record ID Specified\", NoRecordIDError)\n else:\n if data is None:\n data = {}\n\n data.update(add_field('name', name))\n data.update(add_field('notes', notes))\n data.update(add_field('created_by', created_by))\n\n # If there is no data, then skip. Of course one could still change modified by:\n if len(data) > 0 or modified_by is not None:\n\n # Always require a modified by and because one can change data without specifying a modifer,\n # this is necessary. We don't check it before the previous if, because we don't want to create\n # a modified_by if not data was set and no modified_by was set.\n if modified_by is None:\n modified_by = db_params['user']\n\n data.update(modified_by=modified_by)\n\n fields = data.keys()\n\n sql = \"UPDATE {table} SET {fields} WHERE {pkey} = {a_value}\"\n\n if verbose:\n print('Data:\\n', data)\n print('\\nFields:\\n', fields)\n\n query = SQL(sql).format(\n table=Identifier(self.table_name),\n fields=SQL(', ').join(\n Composed([Identifier(k), SQL(' = '), Placeholder(k)]) for k in fields\n ),\n pkey=Identifier(where_key),\n a_value=Placeholder('where_key')\n )\n\n data.update(where_key=an_id)\n\n cur = conn.cursor(cursor_factory=NamedTupleCursor)\n\n if verbose:\n print(query.as_string(conn))\n print(cur.mogrify(query, data))\n\n try:\n cur.execute(query, data)\n except OperationalError as error:\n print(error)\n\n conn.commit()\n\n cur.close()\n\n self.pull_data()",
"def update(self):\n self._client.patch(self)",
"def update_client(self, old_client=None, new_client=None):\n old_is_client = type(old_client) is Client\n new_is_client = type(new_client) is Client\n\n # cancel if these are no clients\n if not old_is_client and not new_is_client:\n return False\n\n # try to change the id (and its files) first\n old_index = self.get_client_index(old_client)\n id_available = self.set_client_id(\n client=old_client,\n client_id=new_client.client_id\n )\n\n # only go on, if the ID is possible\n if id_available:\n self.client_list[old_index] = new_client\n return True\n else:\n return False",
"def ClientUserInfoChanged(self, clientnum):\n cl = Client(clientnum)\n \n current_name = cl[\"name\"]\n new_name = current_name[::-1] #reverse the string\n cl[\"name\"] = new_name #update userinfo (effective in game)\n #short version : cl[\"name\"] = cl_[\"name\"][::-1]",
"def fusion_api_update_client_certificate(self, aliasname, body, api=None, headers=None):\n return self.client_certificate.put(aliasname, body, api, headers)",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update_data():\n pass",
"def update(self, identifier, data):\n self.client.request_with_method(Methods.UPDATE % (self.name, identifier,),\n data=data)",
"def update(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n\n existing = client.read(path)\n if existing is None:\n existing = {}\n else:\n existing = existing[\"data\"]\n\n existing.update(kwargs)\n\n client.write(path, **existing)"
] | [
"0.67937523",
"0.65493184",
"0.6435495",
"0.62011623",
"0.6047726",
"0.5984437",
"0.5956502",
"0.5945212",
"0.5844981",
"0.57898957",
"0.5773103",
"0.57370985",
"0.5677678",
"0.55478555",
"0.5545997",
"0.55407304",
"0.55150896",
"0.55113846",
"0.5479551",
"0.5476245",
"0.5462785",
"0.54450554",
"0.54415727",
"0.5432323",
"0.5432323",
"0.5432323",
"0.5432323",
"0.5420038",
"0.54002213",
"0.539261"
] | 0.7835998 | 0 |
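
The partial-update behaviour described in the record above (unsupplied fields are left unchanged) comes down to how the request payload is built. Below is a minimal sketch of that payload construction with no HTTP call; the helper name and example values are purely illustrative assumptions.

```python
import json

def build_client_update(name=None, notes=None):
    # Include only the fields that were actually supplied so that omitted
    # fields are not overwritten on the server side.
    client = {}
    if name is not None:
        client['name'] = name
    if notes is not None:
        client['notes'] = notes
    return {'client': client}

print(json.dumps(build_client_update(name='New Co')))
# {"client": {"name": "New Co"}}  <- notes omitted, so it stays unchanged
```
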
This method runs all the episodes with an epsilon-greedy strategy | def run_epsilon(env, num_of_bandits, iterations, episodes):
    # Running average of per-iteration rewards, accumulated across episodes
    epsilon_rewards = np.zeros(iterations)
    for i in range(episodes):
        print(f"Running Epsilon episode:{i}")
        # Start counts at zero so the first incremental-mean update divides by 1
        n = 0
        action_count_per_bandit = np.zeros(num_of_bandits)
mean_reward = 0
total_rewards = np.zeros(iterations)
mean_reward_per_bandit = np.zeros(num_of_bandits)
env.reset()
epsilon = 0.5
for j in range(iterations):
a = get_epsilon_action(epsilon, env, mean_reward_per_bandit)
observation, reward, done, info = env.step(a)
# Update counts
n += 1
action_count_per_bandit[a] += 1
# Update mean rewards
mean_reward = mean_reward + (
reward - mean_reward) / n
# Update mean rewards per bandit
mean_reward_per_bandit[a] = mean_reward_per_bandit[a] + (
reward - mean_reward_per_bandit[a]) / action_count_per_bandit[a]
# Capture mean rewards per iteration
total_rewards[j] = mean_reward
# Update mean episode rewards once all the iterations of the episode are done
epsilon_rewards = epsilon_rewards + (total_rewards - epsilon_rewards) / (i + 1)
return epsilon_rewards | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)",
"def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex",
"def run_episode(self, mode=0, eps=0.):\n if mode==0:\n eps = 0.\n done = False\n score = 0 \n \n while not done:\n state = self.env_info.vector_observations[0] # get the current state\n action = self.agent.act(state, eps=eps) # get an action using epsilon greedy policy\n self.env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.env_info.vector_observations[0] # get the next state\n reward = self.env_info.rewards[0] # get the reward\n done = self.env_info.local_done[0] # see if episode has finished\n \n if mode == 1:\n self.agent.step(state, action, reward, next_state, done)\n \n score += reward\n \n self.reset_env() # reset the environment\n \n return score",
"def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]",
"def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]",
"def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_eploit_tradeoff = np.random.uniform()\n\n if explore_eploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - self.min_eps)",
"def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # complete random exploration 500 episodes, \n # then decrase exploration till epsilon less than epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is a array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats",
"def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores",
"def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)",
"def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q",
"def episodes(self, num_episodes, num_steps_per_episode):\n for ep in range(num_episodes):\n self.start_episode()\n for step in range(num_steps_per_episode):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n break",
"def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n g_losses = []\n g_losses_window = deque(maxlen=100)\n s_losses = []\n s_losses_window = deque(maxlen=100)\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)\n score = 0\n ball_reward_val = 0.0\n \n g_states = env_info[g_brain_name].vector_observations # get initial state (goalies)\n s_states = env_info[s_brain_name].vector_observations # get initial state (strikers)\n# s2_states = env_info[s2_brain_name].vector_observations # get initial state (strikers)\n\n g_scores = np.zeros(num_g_agents) # initialize the score (goalies)\n s_scores = np.zeros(num_s_agents) # initialize the score (strikers) \n# s2_scores = np.zeros(num_s2_agents) # initialize the score (strikers) \n \n #for t in range(max_t):\n while True:\n action_g_0 = g_agent.act(g_states[0], eps) # always pick state index 0\n action_s_0 = s_agent.act(s_states[0], eps)\n action_s_2 = s_agent.act(s_states[2], eps)\n# action_s2_0 = s2_agent.act(s2_states[0], eps) \n# action_s2_0 = np.asarray( [np.random.choice(s2_action_size)] )\n \n # Set other team to random\n action_g_1 = np.asarray( [np.random.choice(g_action_size)] ) \n action_s_1 = np.asarray( [np.random.choice(s_action_size)] )\n action_s_3 = np.asarray( [np.random.choice(s_action_size)] )\n# action_s2_1 = np.asarray( [np.random.choice(s2_action_size)] )\n \n # Train simultaneously\n #action_g_1 = g_agent.act(g_states[1], eps) # always pick state index 1\n #action_s_1 = s_agent.act(s_states[1], eps) \n \n # Combine actions\n actions_g = np.array( (action_g_0, action_g_1) ) \n actions_s = np.array( (action_s_0, action_s_1, action_s_2, action_s_3 ) )\n# actions_s2 = np.array( (action_s2_0, action_s2_1) )\n# actions = dict( zip( [g_brain_name, s_brain_name, s2_brain_name], [actions_g, actions_s, actions_s2] ) )\n actions = dict( zip( [g_brain_name, s_brain_name], [actions_g, actions_s] ) )\n \n env_info = env.step(actions) \n # get next states\n g_next_states = env_info[g_brain_name].vector_observations \n s_next_states = env_info[s_brain_name].vector_observations\n# s2_next_states = env_info[s2_brain_name].vector_observations\n \n # check if episode finished\n done = np.any(env_info[g_brain_name].local_done)\n \n # get reward and update scores\n g_rewards = env_info[g_brain_name].rewards\n s_rewards = env_info[s_brain_name].rewards\n# s2_rewards = env_info[s2_brain_name].rewards\n \n # Modify RED striker reward -Only when goal is scored\n if done:\n new_s_reward = modify_reward(s_rewards[0])\n s_rewards[0] = new_s_reward\n new_s_reward = modify_reward(s_rewards[2])\n s_rewards[2] = new_s_reward\n# new_s2_reward = modify_reward(s2_rewards[0])\n# s2_rewards[0] = new_s2_reward\n \n # Update scores\n g_scores += g_rewards\n s_scores += s_rewards\n# s2_scores += s2_rewards\n \n # Add in ball reward for striker\n ball_reward_val += ball_reward(s_states[0])\n \n # store experiences\n g_agent.step(g_states[0], action_g_0, g_rewards[0], \n g_next_states[0], done)\n s_agent.step(s_states[0], action_s_0, s_rewards[0] + ball_reward(s_states[0]), # adding ball reward\n s_next_states[0], done)\n s_agent.step(s_states[2], action_s_2, s_rewards[2] + ball_reward(s_states[2]), # adding ball reward\n s_next_states[2], done)\n# s2_agent.step(s2_states[0], action_s2_0, s2_rewards[0] + ball_reward(s2_states[0]), # adding ball reward\n# 
s2_next_states[0], done)\n\n if done:\n break\n \n g_states = g_next_states\n s_states = s_next_states\n# s2_states = s2_next_states\n \n # learn\n if len(g_agent.memory) > 64: #check memory to batch size\n goalie_loss = g_agent.learn(g_agent.memory.sample(), 0.99) # discount = 0.99\n striker_loss = s_agent.learn(s_agent.memory.sample(), 0.99) # discount = 0.99 \n# _ = s2_agent.learn(s2_agent.memory.sample(), 0.99) # discount = 0.99 \n \n g_losses.append(goalie_loss.item())\n g_losses_window.append(goalie_loss.item())\n #print(goalie_loss.item())\n s_losses.append(striker_loss.item())\n s_losses_window.append(striker_loss.item())\n \n score = g_scores[0] + s_scores[0] #+ s2_scores[0]\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n \n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\t Goalie Loss:' \\\n '{:.5f}\\t Striker Loss: {:.5f}' \\\n '\\t Ball Reward: {:.2f}'.format(i_episode, \\\n np.mean(scores_window), \\\n np.mean(g_losses_window), \\\n np.mean(s_losses_window), \\\n ball_reward_val), end=\"\")\n #print(s_states[0][0:56])\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\t Goalie Loss:' \\\n '{:.5f}\\t Striker Loss: {:.5f}\\n' \\\n '\\t Ball Reward: {:.2f}'.format(i_episode, \\\n np.mean(scores_window), \\\n np.mean(g_losses_window), \\\n np.mean(s_losses_window), \\\n ball_reward_val))\n \n # TODO: ---------- CHANGE OUTPUT FILE NAMES ----------\n torch.save(g_agent.qnetwork_local.state_dict(), 'goalie3_dqn_V1_mod.pth')\n torch.save(s_agent.qnetwork_local.state_dict(), 'striker3_dqn_V1_mod.pth')\n return scores",
"def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))",
"def test(self):\n total_steps = 0\n running_scores = np.zeros(len(self.agents))\n\n for e in range(self.run_settings.test_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = np.array(rewards)\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n if self.run_settings.verbose:\n self.print_action(env_actions)\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores += np.array(rewards)\n\n if done:\n running_scores += scores\n\n if len(scores) == 1:\n scores = scores[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}\"\n .format(e+1, step, scores))\n if self.run_settings.verbose:\n print(\"Average game scores: {}\".format(running_scores / self.run_settings.test_episodes))",
"def run_multiple_episodes(self, episodes_no, policy):\n for _ in range(episodes_no):\n self.q_learning_episode(policy)\n policy.update_epsilon()\n\n policy.reset()\n return self.q_values",
"def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward",
"def train_dqn(self, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n self.scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n for t in range(max_t):\n action = self.agent.act(state, eps)\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n self.agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n self.scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n # we use 15.0 just to be sure\n if np.mean(scores_window)>=self.threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return self.scores",
"def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')",
"def executeEpisode(self, mcts, game, args):\n trainExamples = []\n board = game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n state_counter = Counter()\n\n moves = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = game.getCanonicalForm(board, curPlayer)\n temp = int(episodeStep < self.args['tempThreshold'])\n\n pi = mcts.getActionProb(canonicalBoard, temp=temp)\n sym = game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n state_counter.update(game.stringRepresentation(board)) #count the visit to the board\n\n board, curPlayer = game.getNextState(board, curPlayer, action)\n\n r = game.getGameEnded(board, curPlayer)\n \n moves += 1\n\n if moves >= self.args['maxMoves']:\n r = 1e-4\n\n if r != 0:\n return ([(x[0], x[2], r * ((-1) ** (x[1] != curPlayer))) for x in trainExamples], state_counter)",
"def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # train our network\n pi_v_losses = policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. 
Writing to empty file.')\n old_counts = {game.__class__.__name__: Counter() for game in self.games}\n else: ## load the counts from the file\n log.info('Loading counts...')\n f.seek(0)\n str_counts = f.read()\n # print('STRING OF JSON:', type(str_counts), str_counts)\n old_counts = json.loads(str_counts)\n old_counts = {game: Counter(v) for game, v in old_counts.items()}\n master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n # countiung logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n log.info('Writing...')\n f.truncate(0) #clear file\n json.dump(master_counts, f)\n log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])",
"def run(self, num_episodes):\n for _ in xrange(num_episodes):\n self._env.reset()\n curr_state = self._env.state\n while not self._env.is_terminal(curr_state):\n reward = self._policy.take_action_and_get_reward()\n next_state = self._env.state\n self._update_parameters(curr_state, reward, next_state)\n curr_state = next_state\n # Estimate the TD-fixpoint.\n self.theta = np.dot(np.linalg.pinv(self._A), self._b)\n # Calculate current MSVE.\n self._calc_msve()",
"def run_all_episodes(self, episode_count):\n # Holds final result\n step_arr = []\n reward_arr = []\n new_abstr = {}\n detached_states = []\n\n if 'abstraction_type' in self.params.keys() and self.params['abstraction_type'] == 'discretization':\n self.agent.make_abstraction()\n\n while self.episode_count < episode_count:\n # Run episode, record results\n steps, reward = self.run_episode()\n step_arr.append(steps)\n reward_arr.append(reward)\n self.episode_count += 1\n if self.episode_count % 1 == 0:\n print('Episode {} finished with step count {}'.format(self.episode_count, steps))\n\n # Create temporal abstraction if applicable\n if 'make_abstraction' in self.params.keys() and self.episode_count in self.params['make_abstraction']:\n self.agent.make_abstraction()\n new_abstr = self.agent.params['s_a'].abstr_dict\n\n # Detach states if applicable\n if 'refine_abstraction' in self.params.keys() and self.episode_count in self.params['refine_abstraction']:\n newly_detached = self.agent.refine_abstraction()\n detached_states.extend(newly_detached)\n print('final abstraction')\n for i in range(len(self.agent.params['s_a'].cell_to_abstract_cell)):\n for key, value in self.agent.params['s_a'].cell_to_abstract_cell[i].items():\n print(key, value)\n\n return step_arr, reward_arr, new_abstr, detached_states",
"def evolve(self, env, num_generations, num_episodes, num_frames):\n for gen in range(num_generations):\n\n if Trainer.VERBOSE:\n print(\"Generation:\", gen)\n\n # Generate new root Teams\n self.generation()\n\n # Evaluate current agents\n self.evaluation(env, num_episodes, num_frames)\n\n # Perform selection\n self.selection()\n\n # Return to top-performing agent. Typically not used, but nice to have\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n return ranked_agents[0]",
"def train_by_episode(self):\n # only REINFORCE and REINFORCE with baseline\n # use the ff code\n # convert the rewards to returns\n rewards = []\n gamma = 0.99\n for item in self.memory:\n [_, _, _, reward, _] = item\n rewards.append(reward)\n # rewards = np.array(self.memory)[:,3].tolist()\n\n # compute return per step\n # return is the sum of rewards from t til end of episode\n # return replaces reward in the list\n for i in range(len(rewards)):\n reward = rewards[i:]\n horizon = len(reward)\n discount = [math.pow(gamma, t) for t in range(horizon)]\n return_ = np.dot(reward, discount)\n self.memory[i][3] = return_\n\n # train every step\n for item in self.memory:\n self.train(item, gamma=gamma)",
"def train(self, episodes, epsilon_initial, epsilon_min, epsilon_stop_episode,\n network_update_freq, gamma, memory_capacity, batch_size):\n\n memory = ReplayMemory(memory_capacity)\n\n tot_steps = 0\n running_loss = 0\n\n depsilon = (epsilon_initial-epsilon_min)/epsilon_stop_episode\n\n for episode in range(episodes):\n\n if epsilon_initial > epsilon_min:\n epsilon_initial -= depsilon\n\n if episode % network_update_freq == 0:\n # Update target network\n self.NN_target.load_state_dict(self.NN.state_dict())\n\n if (episode + 1) % 10 == 0:\n print(f'Episode {episode + 1}/{episodes} completed!')\n print(f'Average steps per episode: {tot_steps / 10}')\n writer.add_scalar('training loss', running_loss / tot_steps, episode)\n self.plotValue()\n tot_steps = 0\n running_loss = 0\n\n state, done = self.env.reset()\n\n\n while not done:\n tot_steps += 1\n\n action = self.chooseAction(epsilon_initial, state)\n\n reward, next_state, done= self.env.transitionState(state, action)\n\n #score += reward\n reward = torch.tensor([[reward]], device=device)\n done = torch.tensor([[done]], device=device)\n\n # Saves the transition\n memory.push(self.RBF[state], self.RBF[next_state], reward, done)\n\n # Perform one step of batch gradient descent\n running_loss += self.optimizeModel(memory, batch_size, gamma)\n\n state = next_state\n\n writer.close()",
"def terminal_test(self):\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n # Nombre de passages dans la boucle principale\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Choix au hasard entre :\n if np.random.random() > self.epsilon:\n # Action à partir de la q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Action random\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # On effectue une action avec le serpent\n new_state, reward, done = self.env.step(action)\n\n # Ajout d'un exemple dans la mémoire\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Entrainement éventuel\n self.agent.train()\n\n cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)",
"def dqn(self, n_episodes, checkpoint, eps_start=1., eps_end=0.1, eps_decay=0.995, alg=\"ddqn\"):\n\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes + 1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n\n state = self.get_state(env_info.visual_observations[0], 0) # get the current state\n score = 0\n for t in range(300):\n action = self.agent.act(state, eps).astype(np.int32) # select an action\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.get_state(env_info.visual_observations[0], t)\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n # get the next state\n self.agent.step(state, action, reward, next_state, done, alg)\n score += reward # update the score\n state = next_state # roll over the state to next time step\n if done: # exit loop if episode finished\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format\n (i_episode - 100, np.mean(scores_window)))\n torch.save(self.agent.q_network_local.state_dict(), checkpoint)\n break\n return scores",
"def run(agent, env, num_episodes = 20000, mode = 'train'):\n\t scores=[]\n\t max_avg_score=-np.inf\n\t for i_episode in range(1, num_episodes + 1):\n\t # Initialize episode\n\t state=env.reset()\n\t action=agent.reset_episode(state)\n\t total_reward=0\n\t done=False\n\n\t # Roll out steps until done\n\t while not done:\n\t state, reward, done, info=env.step(action)\n\t total_reward += reward\n\t action=agent.act(state, reward, done, mode)\n\n\t # Save final score\n\t scores.append(total_reward)\n\n\t # Print episode stats\n\t if mode == 'train':\n\t if len(scores) > 100:\n\t avg_score=np.mean(scores[-100:])\n\t if avg_score > max_avg_score:\n\t max_avg_score=avg_score\n\n\t if i_episode % 100 == 0:\n\t print(\"\\rEpisode {}/{} | Max Average Score: {}\".format(i_episode,\n\t num_episodes, max_avg_score), end = \"\")\n\t sys.stdout.flush()\n\n\t return scores\n\n\tscores=run(q_agent, env)\n\n\t# Plot scores obtained per episode\n\tplt.plot(scores); plt.title(\"Scores\")\n\n\tdef plot_scores(scores, rolling_window = 100):\n\t\t\"\"\"Plot scores and optional rolling mean using specified window.\"\"\"\n\t\tplt.plot(scores); plt.title(\"Scores\");\n\t\trolling_mean=pd.Series(scores).rolling(rolling_window).mean()\n\t\tplt.plot(rolling_mean);\n\t\treturn rolling_mean\n\n\trolling_mean=plot_scores(scores)\n\n\t# Run in test mode and analyze socres obtained\n\ttest_scores=run(q_agent, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores, rolling_window = 10)\n\n\n\tdef plot_q_table(q_table):\n \"\"\"Visualize max Q-value for each state and corresponding action.\"\"\"\n\t q_image=np.max(q_table, axis = 2) # max Q-value for each state\n\t q_actions=np.argmax(q_table, axis = 2) # best action for each state\n\n\t fig, ax=plt.subplots(figsize = (10, 10))\n\t cax=ax.imshow(q_image, cmap = 'jet');\n\t cbar=fig.colorbar(cax)\n\t for x in range(q_image.shape[0]):\n\t for y in range(q_image.shape[1]):\n\t ax.text(x, y, q_actions[x, y], color = 'white',\n\t horizontalalignment = 'center', verticalalignment = 'center')\n\t ax.grid(False)\n\t ax.set_title(\"Q-table, size: {}\".format(q_table.shape))\n\t ax.set_xlabel('position')\n\t ax.set_ylabel('velocity')\n\n\n\tplot_q_table(q_agent.q_table)\n\n\n\tstate_grid_new=create_uniform_grid(\n\t env.observation_space.low, env.observation_space.high, bins = (20, 20))\n\tq_agent_new=QLearningAgent(env, state_grid_new)\n\tq_agent_new.scores=[]\n\n\n\tq_agent_new.scores += run(q_agent_new, env,\n\t num_episodes = 50000) # accumulate scores\n\trolling_mean_new=plot_scores(q_agent_new.scores)\n\n\ttest_scores= run(q_agent_new, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores)\n\n\tplot_q_table(q_agent_new.q_table)\n\n\tstate=env.reset()\n\tscore=0\n\timg=plt.imshow(env.render(mode='rgb_array'))\n\tfor t in range(1000):\n\t\taction=q_agent_new.act(state, mode = 'test')\n\t\timg.set_data(env.render(mode='rgb_array'))\n\t\tplt.axis('off')\n\t\tdisplay.display(plt.gcf())\n\t\tdisplay.clear_output(wait = True)\n\t\tstate, reward, done, _=env.step(action)\n\t\tsocre += reward\n\t\tif done:\n\t\t\tprint('Score: ', socre)\n\t\t\tbreak\n\tenv.close()",
"def ddpg(n_episodes=1000, print_every=50):\r\n \r\n scores = []\r\n scores_deque = deque(maxlen=print_every)\r\n log = open(\"log.txt\",\"w+\")\r\n for i_episode in range(1, n_episodes+1):\r\n env_info = env.reset(train_mode=True)[BRAIN_NAME]\r\n agent.reset()\r\n state = env_info.vector_observations # get the current state\r\n score = np.zeros(NUM_AGENTS)[:, None] \r\n \r\n while True:\r\n action = agent.act(state) # select an action\r\n env_info = env.step(action)[BRAIN_NAME] # send the action to the environment\r\n next_state = env_info.vector_observations # get the next state\r\n reward = np.array(env_info.rewards)[:, None] # get the reward\r\n done = np.array(env_info.local_done )[:, None] # see if episode has finished\r\n agent.step(state, action, reward, next_state, done) # take step with agent (including learning)\r\n score += reward # update the score\r\n state = next_state # roll over the state to next time step\r\n if np.any(done): \r\n break\r\n scores_deque.append(score.mean()) \r\n scores.append(score.mean()) \r\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end=\"\")\r\n if i_episode % print_every == 0:\r\n string ='\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))\r\n print(string)\r\n log.write(string)\r\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\r\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\r\n \r\n if np.mean(scores_deque)>=30.0 and i_episode>=100:\r\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\r\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\r\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\r\n break\r\n log.close()\r\n return scores",
"def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history"
] | [
"0.7450819",
"0.7309439",
"0.71770984",
"0.7124558",
"0.7124558",
"0.7051895",
"0.703537",
"0.69566625",
"0.684824",
"0.68205166",
"0.6802857",
"0.68013686",
"0.67991036",
"0.6790752",
"0.675303",
"0.67489773",
"0.67405975",
"0.6739792",
"0.67348915",
"0.6732355",
"0.6706088",
"0.6702963",
"0.6692553",
"0.6661335",
"0.6645239",
"0.6628636",
"0.6566244",
"0.6561998",
"0.6550673",
"0.6542252"
] | 0.74749076 | 0 |
Train agent over given number of iterations. Each iteration consists of self play over n_episodes and then a learn step where agent updates network based on random sample from replay buffer | def train(self, iters, n_episodes):
for i in range(iters):
self.self_play(n_episodes)
self.learn() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()",
"def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)",
"def train(self, num_episodes=10000):\n\n self.game.restart()\n\n self.exp_states = defaultdict(int)\n\n for i in tqdm(range(num_episodes)):\n\n self.game.deal_cards()\n\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='explore')\n\n # Bookkeep visited states (?)\n player_state_str = np.array2string(player_state)\n self.exp_states[player_state_str] += 1\n\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n self.game.set_player_action(player_action)\\\n .set_opponent_action(opponent_action)\n\n player_score, opponent_score = self.game.get_scores()\n\n reward = self._get_reward(player_score, opponent_score)\n self.player.learn(player_state,\n player_action,\n reward)\n self.player.learn(opponent_state,\n opponent_action,\n -reward)\n \n print(\"Training done!\")",
"def train(\n self, num_episodes, max_episode_length, reward_network=None,\n ):\n\n for _ in range(num_episodes):\n self.train_episode(max_episode_length)\n\n if self.training_i % self.play_interval == 0:\n self.play(\n max_episode_length,\n self.render,\n reward_network=reward_network,\n )",
"def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak",
"def train_experience_replay(self, epochs, batch_size, iterations_per_epoch, capacity, n_obs, **kwargs):\n\n # Initialize losses dictionary and memory replay buffer\n losses = dict()\n mem = MemoryReplayBuffer(capacity)\n\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n\n for it in range(1, iterations_per_epoch+1):\n \n # Determine n_obs and generate data on-the-fly\n if type(n_obs) is int:\n n_obs_it = n_obs\n else:\n n_obs_it = n_obs()\n # Simulate and add to buffer\n params, sim_data = self._forward_inference(batch_size, n_obs_it, **kwargs)\n mem.store(params, sim_data)\n\n # Sample from buffer\n params, sim_data = mem.sample()\n\n # One step backprop\n loss = self._train_step(params, sim_data)\n \n # Store loss into dictionary\n losses[ep].append(loss)\n\n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses",
"def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, 
os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)",
"def self_play(self, n_episodes): \n eps = self.eps(self.agent.learning_iters)\n experiences = self_play_episodes(self.mdp, self.agent, n_episodes, eps) \n for state, action, reward, next_state, done in experiences:\n self.agent.replay_buffer.push(state, action, reward, next_state, done)",
"def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break",
"def train(self,env, iter_n=2000):\n\n\t\tfor i in range(iter_n):\n\t\t\tif i > 50:\n\t\t\t\tif all(reward > 195 for reward in self.step_count[-10:]):\n\t\t\t\t\tprint('solved at episode {}'.format(i))\n\t\t\t\t\tbreak\n\t\t\tstate = self.env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\n\t\t\tepisode_complete = False\n\t\t\tstep = 0\n\t\t\twhile not episode_complete and (step < self.max_steps):\n\t\t\t\taction = self.define_action(state)\n\t\t\t\tnew_state, reward, episode_complete, info = env.step(action)\n\t\t\t\tnew_state = np.reshape(new_state, [1, self.state_size])\n\n\t\t\t\tself.memory.append((state, action, reward, new_state, episode_complete))\n\t\t\t\tself.round_reward += reward\n\t\t\t\tstate = new_state\n\t\t\t\tstep += 1\n\t\t\t\tif episode_complete:\n\t\t\t\t\tself.round_reward += -10\n\t\t\t\t\tself.update_target_model()\n\t\t\t\t\tself.print_results(i, iter_n, step)\n\t\t\t\t\tif i != 0: # Update totals in memory if not the first run\n\t\t\t\t\t\tself.update_totals(i, step)\n\t\t\t\tif len(self.memory) > self.training_iter:\n\t\t\t\t\tself.replay()\n\t\t\tif self.epsilon > self.epsilon_min:\n\t\t\t\tself.epsilon *= self.epsilon_decay\n\n\t\treturn self.all_iterations, self.all_rewards, self.step_count",
"def train(self, n_steps=5000):\n all_rewards = []\n losses = []\n epsilons = []\n episode_reward = 0\n\n state = self.env.reset()\n for frame_idx in range(1, n_steps + 1):\n\n epsilon = self.epsilon_schedule(frame_idx)\n epsilons.append(epsilon)\n action = self.act(state, epsilon)\n next_state, reward, done, _ = self.env.step(action)\n episode_reward += reward\n self.replay_buffer.append(state, action, reward, next_state, done)\n\n if len(self.replay_buffer) >= self.learn_start:\n loss = self._compute_loss()\n self._update_parameters(loss)\n losses.append(loss.item())\n\n if done:\n state = self.env.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n\n if frame_idx % self.target_update_rate == 0:\n self._update_target()\n\n state = next_state\n\n self._plot(all_rewards, losses, epsilons)",
"def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores",
"def train(Game, agent, episodes=1000):\n a = agent\n # eps_start = a.epsilon\n # eps_end = a.epsilon_min\n # eps_dec = np.exp(1/episodes * np.log(eps_end/eps_start))\n # a.epsilon_decrement = eps_dec\n times_taken = np.zeros(episodes)\n print(\"Training starting\")\n for n in range(episodes):\n start_time = time.time()\n g = Game()\n print(\"EPISODE\", n+1)\n while not g.success:\n state = 1.0*g.get_state()\n action = a.action(state)\n reward = g.play(action)\n # print(g.success)\n # print(\"reward: \", reward)\n # print(state)\n # print(action)\n # print(g.get_state())\n a.train(state, action, reward, g.get_state(), g.success)\n end_time = time.time()\n times_taken[n] = end_time - start_time\n print(\"Training complete ({} episodes)\".format(episodes))\n return times_taken",
"def train(self,\n num_episodes = 100,\n num_steps = 500000,\n max_steps_per_episode = 10000,\n target_interval = 10000,\n learning_interval = 4,\n frame_skip = 1,\n warmup_steps = None,\n pretrain_steps = None,\n output_freq = 50,\n save_freq = 5, \n store_memory = False):\n \n # prefill memory with random transitions if requested\n if warmup_steps is not None:\n self._random_warmup(warmup_steps)\n \n # pretrain the agent on its on own memory\n if pretrain_steps is not None:\n self._pretrain(pretrain_steps, target_interval)\n \n # logging initialization\n self._score, self._q_values, self._losses = 0., [], []\n raw_frames = np.zeros(shape = (max_steps_per_episode, *self.env._unprocessed_frame.shape), dtype = np.uint8)\n\n episode_idx = 0\n while episode_idx < num_episodes or self._step_counter < num_steps:\n # reset environment and get first state\n self._start_episode()\n \n for i in range(max_steps_per_episode):\n \n #-------------------------------------------------------------------------------#\n #####################\n # Interactive Phase #\n #####################\n \n # choose an action, observe reactions of the environment and\n # add this experience to the agent's memory \n if self._step_counter % frame_skip == 0: \n action = self._make_decision()\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n # update current state\n self._current_state[0, :(self.num_stacked_frames-1)] = self._current_state[0, 1:]\n self._current_state[0, self.num_stacked_frames-1] = new_frame\n #-------------------------------------------------------------------------------#\n \n \n #-------------------------------------------------------------------------------#\n ##################\n # Learning Phase #\n ##################\n \n # perform a parameter update of the current policy model\n if self._step_counter % learning_interval == 0:\n self._batch_update()\n \n # update the target model\n if self._step_counter % target_interval == 0:\n self._update_target_model()\n #-------------------------------------------------------------------------------#\n \n # logging\n self._score += self.env._unprocessed_reward\n raw_frames[i] = self.env._unprocessed_frame\n \n \n self._step_counter += 1\n \n if self.env.was_real_done:\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n break\n \n if done:\n self.env.reset()\n \n \n if not self.env.was_real_done:\n self.memory.add_experience(action, reward, new_frame, 1, True)\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n \n if episode_idx%(num_episodes/output_freq)==0:\n validation_score, validation_frames = self.test(record = True, max_steps_per_episode = max_steps_per_episode)\n #validation_score, validation_frames = 0, []\n lower_idx = int(clip(episode_idx-(num_episodes/output_freq)+1, 0, num_episodes-1))\n self.logger.show_progress(lower_idx, episode_idx, validation_score, validation_frames, self.policy_network.model)\n \n if episode_idx%(num_episodes/save_freq)==0:\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)\n \n \n\n episode_idx += 1 \n print('==========================\\ntraining session completed\\n==========================\\n\\n\\n=======\\nSummary\\n======='\n )\n self.logger.show_progress(0, 
num_episodes, summary = True)\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)",
"def train(\n env: DiscreteEnvironment[TState, TAction],\n agent: DiscreteAgent[TState, TAction],\n n_episodes: int,\n on_action: Callable[[TState, TAction, float, int], None] = None,\n on_episode_end: Callable[[int], None] = None,\n) -> None:\n for ep in range(n_episodes):\n t = 0\n while not env.terminated:\n s, a, r = agent.act_and_train(t) # returns (S_t, A_t, R_t)\n if on_action:\n on_action(s, a, r, t)\n t += 1\n agent.episode_end()\n if on_episode_end:\n on_episode_end(t)\n env.reset()",
"def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)",
"def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')",
"def train(self, n_episodes):\n for episode in trange(n_episodes):\n policy_loss, entropy, episode_reward = self.train_step()\n self.writer.add_scalar('policy_loss', policy_loss, episode)\n self.writer.add_scalar('entropy', entropy, episode)\n self.writer.add_scalar('episode_reward', episode_reward, episode)",
"def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))",
"def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)",
"def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass",
"def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()",
"def train_agent(iterations, modeldir, logdir, policydir):\n\n # TODO: add code to instantiate the training and evaluation environments\n\n\n # TODO: add code to create a reinforcement learning agent that is going to be trained\n\n\n tf_agent.initialize()\n\n eval_policy = tf_agent.policy\n collect_policy = tf_agent.collect_policy\n\n tf_policy_saver = policy_saver.PolicySaver(collect_policy)\n\n # Use reverb as replay buffer\n replay_buffer_signature = tensor_spec.from_spec(tf_agent.collect_data_spec)\n replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)\n table = reverb.Table(\n REPLAY_BUFFER_TABLE_NAME,\n max_size=REPLAY_BUFFER_CAPACITY,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=replay_buffer_signature,\n ) # specify signature here for validation at insertion time\n\n reverb_server = reverb.Server([table])\n\n replay_buffer = reverb_replay_buffer.ReverbReplayBuffer(\n tf_agent.collect_data_spec,\n sequence_length=None,\n table_name=REPLAY_BUFFER_TABLE_NAME,\n local_server=reverb_server,\n )\n\n replay_buffer_observer = reverb_utils.ReverbAddEpisodeObserver(\n replay_buffer.py_client, REPLAY_BUFFER_TABLE_NAME, REPLAY_BUFFER_CAPACITY\n )\n\n # Optimize by wrapping some of the code in a graph using TF function.\n tf_agent.train = common.function(tf_agent.train)\n\n # Evaluate the agent's policy once before training.\n avg_return = compute_avg_return_and_steps(\n eval_env, tf_agent.policy, NUM_EVAL_EPISODES\n )\n\n summary_writer = tf.summary.create_file_writer(logdir)\n\n for i in range(iterations):\n # TODO: add code to collect game episodes and train the agent\n\n\n logger = tf.get_logger()\n if i % EVAL_INTERVAL == 0:\n avg_return, avg_episode_length = compute_avg_return_and_steps(\n eval_env, eval_policy, NUM_EVAL_EPISODES\n )\n with summary_writer.as_default():\n tf.summary.scalar(\"Average return\", avg_return, step=i)\n tf.summary.scalar(\"Average episode length\", avg_episode_length, step=i)\n summary_writer.flush()\n logger.info(\n \"iteration = {0}: Average Return = {1}, Average Episode Length = {2}\".format(\n i, avg_return, avg_episode_length\n )\n )\n\n summary_writer.close()\n\n tf_policy_saver.save(policydir)",
"def train(env, agents, data_log, n_episodes=10000, n_steps=None, generate_val_data=False, record_env=None, trainer=None):\n # Setup logging and start code\n logger = logging.getLogger('root')\n step_tot = 0\n logger.info(env.observation_space[0].high)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n ep_generator = range(n_episodes) if n_episodes else itertools.count()\n # Start training\n for i in ep_generator:\n # Do some logging\n logger.info(\"episode:\" + str(i))\n data_log.set_episode(i)\n\n # Periodically store networks\n if i % 250 == 0: #was 25\n store_networks(trainer, agents, data_log)\n\n # Run a single episode\n score, step, extra_data = run_episode(env, agents, render=False, store_data=True, trainer=trainer)\n\n # Do more logging\n logger.info(\"Score: \" + str(score))\n step_tot += step\n data_log.set_step(step_tot)\n data_log.log_var(\"score\", score)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n # Break training loop\n if n_steps and step_tot > n_steps:\n break\n\n #Periodically save logs\n if i % 50 == 0: #was 5\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n\n # Save logs one last time\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n return",
"def train_network(self, batch, episode_nr):\n global eps, eps_min, eps_decay\n for exp in batch:\n S = exp[0]\n S = process_state(S)\n action_number = exp[1]\n r = exp[2]\n S_new = exp[3]\n S_new = process_state(S_new)\n terminal = exp[4]\n\n if not terminal: # If agent is not at its final destination\n target = (r + gamma*np.amax(self.target.predict(S_new)[0]))\n else:\n target = r\n target_f = self.policy.predict(S)\n\n target_f[0][action_number] = target # Update something???\n self.policy.fit(S, target_f, epochs=1, verbose=0) # Train network # Verbose - makes training line?\n if self.epsilon > self.eps_min and episode_nr > 10:\n self.epsilon *= self.eps_decay # Decrease exploration rate",
"def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # complete random exploration 500 episodes, \n # then decrase exploration till epsilon less than epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is a array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats",
"def train_episode(self, max_episode_length):\n\n # Populate the buffer\n self.populate_buffer(max_episode_length)\n\n # weight updates\n replay_samples = self.replay_buffer.sample(self.buffer_sample_size)\n state_batch = torch.from_numpy(replay_samples[0]).to(DEVICE)\n action_batch = torch.from_numpy(replay_samples[1]).to(DEVICE)\n reward_batch = (\n torch.from_numpy(replay_samples[2]).to(DEVICE).unsqueeze(1)\n )\n next_state_batch = torch.from_numpy(replay_samples[3]).to(DEVICE)\n dones = (\n torch.from_numpy(replay_samples[4])\n .type(torch.long)\n .to(DEVICE)\n .unsqueeze(1)\n )\n\n # alpha must be clamped with a minumum of zero, so use exponential.\n alpha = self.log_alpha.exp().detach()\n\n with torch.no_grad():\n # Figure out value function\n next_actions, log_next_actions, _ = self.policy.sample(\n next_state_batch\n )\n target_q1, target_q2 = self.avg_q_net(\n next_state_batch, next_actions\n )\n target_q = torch.min(target_q1, target_q2)\n next_state_values = target_q - alpha * log_next_actions\n\n # Calculate Q network target\n done_floats = dones.type(torch.float)\n q_target = reward_batch.clone()\n q_target += self.gamma * done_floats * next_state_values\n\n # Q net outputs values for all actions, so we index specific actions\n q1, q2 = self.q_net(state_batch, action_batch)\n q1_loss = F.mse_loss(q1, q_target)\n q2_loss = F.mse_loss(q2, q_target)\n\n # policy loss\n actions_pi, log_probs_pi, action_dist = self.policy.sample(state_batch)\n q1_pi, q2_pi = self.q_net(state_batch, actions_pi)\n q_pi = torch.min(q1_pi, q2_pi)\n policy_loss = ((alpha * log_probs_pi) - q_pi).mean()\n\n # update parameters\n self.q_optim.zero_grad()\n q1_loss.backward()\n self.q_optim.step()\n\n self.q_optim.zero_grad()\n q2_loss.backward()\n self.q_optim.step()\n\n self.policy_optim.zero_grad()\n policy_loss.backward()\n self.policy_optim.step()\n\n # automatic entropy tuning\n alpha_loss = (\n self.log_alpha * (log_probs_pi + self.entropy_target).detach()\n )\n alpha_loss = -alpha_loss.mean()\n\n if self.entropy_tuning:\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n\n # Step average Q net\n move_average(self.q_net, self.avg_q_net, self.tau)\n\n # logging\n self.tbx_logger(\n {\n \"loss/q1 loss\": q1_loss.item(),\n \"loss/q2 loss\": q2_loss.item(),\n \"loss/pi loss\": policy_loss.item(),\n \"loss/alpha loss\": alpha_loss.item(),\n \"Q/avg_q_target\": q_target.mean().item(),\n \"Q/avg_q1\": q1.mean().item(),\n \"Q/avg_q2\": q2.mean().item(),\n \"Q/avg_reward\": reward_batch.mean().item(),\n \"Q/avg_V\": next_state_values.mean().item(),\n \"H/alpha\": alpha.item(),\n \"H/pi_entropy\": action_dist.entropy().mean(),\n \"H/pi_log_pi\": log_probs_pi.mean(),\n },\n self.training_i,\n )\n\n self.training_i += 1\n self.checkpointer.increment_counter()",
"def train(self):\n if len(self.experience) < self.minibatch_size:\n return\n\n # sample a minibatch_size of random episode with a number of transitions >= unrollings_num\n random_episodes_indecies = np.random.choice(len(self.experience), self.minibatch_size)\n random_episodes = []\n for index in random_episodes_indecies:\n episode = self.experience[index]\n\n # 0:random_transitions_space is the range from which a random transition\n # can be picked up while having unrollings_num - 1 transitions after it\n random_transitions_space = len(episode) - self.unrollings_num\n random_start = np.random.choice(random_transitions_space, 1)\n\n random_episodes.append(episode[random_start:random_start + self.unrollings_num])\n\n state_shape = tuple([self.minibatch_size, self.unrollings_num] + self.state_shape)\n\n # prepare the training data\n states = np.empty(state_shape, dtype=np.float32)\n next_states = np.empty(state_shape, dtype=np.float32)\n rewards = np.empty((self.minibatch_size, self.unrollings_num, ), dtype=np.float32)\n transition_action_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n next_legal_actions_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n\n for i, episode in enumerate(random_episodes):\n for j, transition in enumerate(episode):\n state, action, reward, nextstate, next_legal_actions = transition\n\n states[i,j], rewards[i,j], next_states[i,j] = state, reward, nextstate\n transition_action_filters[i,j][action] = 1.0\n next_legal_actions_filters[i,j][next_legal_actions] = 1.0\n\n self.prediction_nn.clearLSTMS(self.session)\n self.target_nn.clearLSTMS(self.session)\n\n loss,_ = self.session.run([self.loss, self.finalize], {\n self.states: states,\n self.next_states: next_states,\n self.rewards: np.reshape(rewards, (self.minibatch_size * self.unrollings_num, )),\n self.transition_action_filters: np.reshape(transition_action_filters, (self.minibatch_size * self.unrollings_num, self.actions_count)),\n self.next_legal_actions_filters: np.reshape(next_legal_actions_filters, (self.minibatch_size * self.unrollings_num, self.actions_count))\n })\n\n if self.iteration != 0 and self.iteration % self.freeze_period == 0:\n self.target_nn.assign_to(self.prediction_nn, self.session)\n\n self.iteration += 1\n\n return loss, self.iteration",
"def train(self, batch_size=64, n_episodes=100, max_episode_length=3000, save_path=\"last_save.h5\",\n load_path=None):\n\n self.explore = True # Explore if needed\n\n self._play_through(n_episodes=n_episodes, max_episode_length=max_episode_length, save_path=save_path,\n callbacks=self._train_callbacks_factory())",
"def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=1,\n max_episodes=1000,\n center_returns=True,\n render=True,\n ):\n\n agent = self.create_agent(env)\n\n for episode in range(1, max_episodes + 1):\n obs = env.reset()\n done = False\n\n episode_return = 0.0\n while not done:\n action = agent.act(obs, deterministic=False)\n next_obs, reward, done, _ = env.step(action)\n episode_return += reward\n agent.store_step(obs, action, reward, next_obs, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if episode % train_every == 0:\n agent.perform_training(\n gamma=self.gamma, center_returns=center_returns\n )\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n print(\"Episode {} -- return={}\".format(episode, episode_return))\n return agent"
] | [
"0.7785768",
"0.76625186",
"0.76138806",
"0.7555034",
"0.7430718",
"0.7399803",
"0.73953015",
"0.73881173",
"0.7361592",
"0.72675776",
"0.72441524",
"0.7235535",
"0.72085416",
"0.71934515",
"0.71493644",
"0.71422166",
"0.7102493",
"0.7057307",
"0.70422393",
"0.70264834",
"0.70260596",
"0.697359",
"0.69708925",
"0.6915815",
"0.6906336",
"0.6871906",
"0.6855419",
"0.68539417",
"0.6850543",
"0.6828136"
] | 0.81745845 | 0 |
generate pair message between node Hv and Hw. since the cat operation, msgs from hv > hw and hw > hv are different | def __init__(self, dim_hv, dim_hw, msg_dim):
super(PairMessageGenerator, self).__init__()
self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048
self.mlp = nn.Sequential(
nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity
nn.Linear(self.in_dim, self.msg_dim),
nn.LeakyReLU(0.2)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_HHH(self, msg):\n self.prev_head = self.head\n self.head = msg.headx, msg.heady, msg.headz\n # self.torso = msg.torsox, msg.torsoy, msg.torsoz\n # self.Rhand = msg.Rhandx, msg.Rhandy, msg.Rhandz\n # self.Lhand = msg.Lhandx, msg.Lhandy, msg.Lhandz\n\n # if the distance to Kinect is 0, then that means it's not seeing anything\n if self.head[2] == 0:\n self.person = None\n else:\n xpos, ypos, zpos = self.kinect_transform(self.head[0], self.head[1], self.head[2])\n\n #either sets up a new presence or updates a person's presence\n if self.person is None:\n self.person = Coordinates(1, xpos, ypos, zpos)\n else:\n self.person.set_Coordinates(xpos, ypos, zpos)",
"def differentiate(hp):\n if hp.force_skip:\n G.add_edge(\"input\", \"output\")\n for node in G.nodes(data=True):\n node_id, node_data = node\n log(\"differentiate\", node_id, node_data)\n node_data[\"output\"] = None\n node_data[\"op\"] = None\n if node_data[\"shape\"] is \"square\" or \"output\" in node_id:\n if node_id == \"output\":\n d_out = node_data[\"output_shape\"][-1]\n node_type = hp.last_layer\n activation = \"tanh\"\n else:\n node_type = str(np.random.choice(['sepconv1d', 'transformer',\n 'k_conv1', 'k_conv2', 'k_conv3',\n \"deep\", \"wide_deep\"],\n 1, p=hp.layer_distribution).item(0))\n activation = str(np.random.choice([ 'tanh', 'linear', 'relu', 'selu',\n 'elu', 'sigmoid', 'hard_sigmoid', 'exponential', 'softmax',\n 'softplus', 'softsign', 'gaussian', 'sin', 'cos', 'swish'],\n 1, p=hp.activation_distribution).item(0))\n d_out = None\n node_data[\"force_residual\"] = random.random() < hp.p_force_residual\n node_data[\"activation\"] = clean_activation(activation)\n node_data[\"node_type\"] = node_type\n node_data['style'] = \"\"\n if node_type == 'sepconv1d':\n if d_out is None:\n d_out = safe_sample(hp.min_filters, hp.max_filters)\n node_data[\"filters\"] = d_out\n node_data[\"kernel_size\"] = 1\n if node_type == \"transformer\":\n if d_out is None:\n d_out = safe_sample(hp.min_units, hp.max_units) * hp.attn_heads\n node_data[\"d_model\"] = d_out\n node_data[\"n_heads\"] = 2 if d_out % 2 == 0 else 1\n if \"k_conv\" in node_type or node_type in [\"deep\", \"wide_deep\"]:\n layers = design_layers(hp, d_out, activation)\n if d_out is None:\n d_out = layers[-1][0]\n node_data[\"stddev\"] = hp.stddev\n node_data['layers'] = layers\n node_data[\"d_out\"] = d_out\n if node_type in [\"deep\", \"wide_deep\"]:\n node_data['kernel'] = node_type\n else:\n node_data['kernel'] = \"wide_deep\" if random.random() < hp.p_wide_deep else \"deep\"\n label = f\"{node_type}\"\n log(f\"set {node_id} to {label}\")\n node_data[\"label\"] = label\n node_data[\"color\"] = \"green\"\n # we handle recurrent shapes:\n try:\n feedback_node_id = f\"{node_id}_feedback\"\n input_shape = (None, d_out)\n log(f\"attempt to set input_shape for {feedback_node_id} to {input_shape}\")\n feedback_node = G.node[feedback_node_id]\n feedback_node[\"input_shape\"] = input_shape\n node_data[\"gives_feedback\"] = True\n except Exception as e:\n log(\"ERROR HANDLING FEEDBACK SHAPE:\", e)",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(hw_msg, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n if self.temp_1_curr is None:\n self.temp_1_curr = 0\n if self.temp_1_min is None:\n self.temp_1_min = 0\n if self.temp_1_max is None:\n self.temp_1_max = 0\n if self.temp_2_curr is None:\n self.temp_2_curr = 0\n if self.temp_2_min is None:\n self.temp_2_min = 0\n if self.temp_2_max is None:\n self.temp_2_max = 0\n if self.temp_3_curr is None:\n self.temp_3_curr = 0\n if self.temp_3_min is None:\n self.temp_3_min = 0\n if self.temp_3_max is None:\n self.temp_3_max = 0\n if self.temp_4_curr is None:\n self.temp_4_curr = 0\n if self.temp_4_min is None:\n self.temp_4_min = 0\n if self.temp_4_max is None:\n self.temp_4_max = 0\n if self.temp_5_curr is None:\n self.temp_5_curr = 0\n if self.temp_5_min is None:\n self.temp_5_min = 0\n if self.temp_5_max is None:\n self.temp_5_max = 0\n if self.temp_6_curr is None:\n self.temp_6_curr = 0\n if self.temp_6_min is None:\n self.temp_6_min = 0\n if self.temp_6_max is None:\n self.temp_6_max = 0\n if self.akku_voltage_curr is None:\n self.akku_voltage_curr = 0\n if self.akku_voltage_min is None:\n self.akku_voltage_min = 0\n if self.akku_voltage_max is None:\n self.akku_voltage_max = 0\n if self.hals_motor_voltage_curr is None:\n self.hals_motor_voltage_curr = 0\n if self.hals_motor_voltage_min is None:\n self.hals_motor_voltage_min = 0\n if self.hals_motor_voltage_max is None:\n self.hals_motor_voltage_max = 0\n if self.hals_logik_voltage_curr is None:\n self.hals_logik_voltage_curr = 0\n if self.hals_logik_voltage_min is None:\n self.hals_logik_voltage_min = 0\n if self.hals_logik_voltage_max is None:\n self.hals_logik_voltage_max = 0\n if self.tablett_logik_voltage_curr is None:\n self.tablett_logik_voltage_curr = 0\n if self.tablett_logik_voltage_min is None:\n self.tablett_logik_voltage_min = 0\n if self.tablett_logik_voltage_max is None:\n self.tablett_logik_voltage_max = 0\n if self.arm_logik_voltage_curr is None:\n self.arm_logik_voltage_curr = 0\n if self.arm_logik_voltage_min is None:\n self.arm_logik_voltage_min = 0\n if self.arm_logik_voltage_max is None:\n self.arm_logik_voltage_max = 0\n if self.tablett_motor_voltage_curr is None:\n self.tablett_motor_voltage_curr = 0\n if self.tablett_motor_voltage_min is None:\n self.tablett_motor_voltage_min = 0\n if self.tablett_motor_voltage_max is None:\n self.tablett_motor_voltage_max = 0\n if self.hals_motor_current_curr is None:\n self.hals_motor_current_curr = 0\n if self.hals_motor_current_min is None:\n self.hals_motor_current_min = 0\n if self.hals_motor_current_max is None:\n self.hals_motor_current_max = 0\n if self.hals_logik_current_curr is None:\n self.hals_logik_current_curr = 0\n if self.hals_logik_current_min is None:\n self.hals_logik_current_min = 0\n if self.hals_logik_current_max is None:\n self.hals_logik_current_max = 0\n if self.tablett_logik_current_curr is None:\n self.tablett_logik_current_curr = 0\n if self.tablett_logik_current_min is None:\n self.tablett_logik_current_min = 0\n if self.tablett_logik_current_max is None:\n self.tablett_logik_current_max = 0\n if self.arm_logik_current_curr is None:\n self.arm_logik_current_curr = 0\n if self.arm_logik_current_min is None:\n self.arm_logik_current_min = 0\n if self.arm_logik_current_max is None:\n self.arm_logik_current_max = 0\n if self.tablett_motor_current_curr is None:\n 
self.tablett_motor_current_curr = 0\n if self.tablett_motor_current_min is None:\n self.tablett_motor_current_min = 0\n if self.tablett_motor_current_max is None:\n self.tablett_motor_current_max = 0\n else:\n self.header = std_msgs.msg._Header.Header()\n self.temp_1_curr = 0\n self.temp_1_min = 0\n self.temp_1_max = 0\n self.temp_2_curr = 0\n self.temp_2_min = 0\n self.temp_2_max = 0\n self.temp_3_curr = 0\n self.temp_3_min = 0\n self.temp_3_max = 0\n self.temp_4_curr = 0\n self.temp_4_min = 0\n self.temp_4_max = 0\n self.temp_5_curr = 0\n self.temp_5_min = 0\n self.temp_5_max = 0\n self.temp_6_curr = 0\n self.temp_6_min = 0\n self.temp_6_max = 0\n self.akku_voltage_curr = 0\n self.akku_voltage_min = 0\n self.akku_voltage_max = 0\n self.hals_motor_voltage_curr = 0\n self.hals_motor_voltage_min = 0\n self.hals_motor_voltage_max = 0\n self.hals_logik_voltage_curr = 0\n self.hals_logik_voltage_min = 0\n self.hals_logik_voltage_max = 0\n self.tablett_logik_voltage_curr = 0\n self.tablett_logik_voltage_min = 0\n self.tablett_logik_voltage_max = 0\n self.arm_logik_voltage_curr = 0\n self.arm_logik_voltage_min = 0\n self.arm_logik_voltage_max = 0\n self.tablett_motor_voltage_curr = 0\n self.tablett_motor_voltage_min = 0\n self.tablett_motor_voltage_max = 0\n self.hals_motor_current_curr = 0\n self.hals_motor_current_min = 0\n self.hals_motor_current_max = 0\n self.hals_logik_current_curr = 0\n self.hals_logik_current_min = 0\n self.hals_logik_current_max = 0\n self.tablett_logik_current_curr = 0\n self.tablett_logik_current_min = 0\n self.tablett_logik_current_max = 0\n self.arm_logik_current_curr = 0\n self.arm_logik_current_min = 0\n self.arm_logik_current_max = 0\n self.tablett_motor_current_curr = 0\n self.tablett_motor_current_min = 0\n self.tablett_motor_current_max = 0",
"def make_nap_visual_msg( i_curr, i_prev, str_curr, str_prev ):\n nap_visual_edge_msg = NapVisualEdgeMsg()\n nap_visual_edge_msg.c_timestamp = S_timestamp[i_curr]\n nap_visual_edge_msg.prev_timestamp = S_timestamp[i_prev]\n nap_visual_edge_msg.goodness = sim_scores_logistic[i_prev]\n nap_visual_edge_msg.curr_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_curr].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.prev_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_prev].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.curr_label = str_curr #str(i_curr) #+ '::%d,%d' %(nInliers,nMatches)\n nap_visual_edge_msg.prev_label = str_prev #str(i_prev)\n\n return nap_visual_edge_msg",
"def make_nap_visual_msg( i_curr, i_prev, str_curr, str_prev ):\n nap_visual_edge_msg = NapVisualEdgeMsg()\n nap_visual_edge_msg.c_timestamp = S_timestamp[i_curr]\n nap_visual_edge_msg.prev_timestamp = S_timestamp[i_prev]\n nap_visual_edge_msg.goodness = sim_scores_logistic[i_prev]\n nap_visual_edge_msg.curr_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_curr].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.prev_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_prev].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.curr_label = str_curr #str(i_curr) #+ '::%d,%d' %(nInliers,nMatches)\n nap_visual_edge_msg.prev_label = str_prev #str(i_prev)\n\n return nap_visual_edge_msg",
"def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)",
"def homothick():\n return se2hmt(binary([[1,1,1],\n [0,0,0],\n [0,0,0]]),\n binary([[0,0,0],\n [0,1,0],\n [1,1,1]]))",
"def create_ising_wishbone(h, w, **kwargs):\n assert h == 2 # Only works for 2 branches\n G = nx.empty_graph(h * w)\n n = w\n G.add_edges_from([(v, v+1) for v in range(n-1)])\n G.add_edges_from([(v, v+1) for v in range(n,2*n-1)])\n G.add_edges_from([(v, v+n) for v in range(n // 2)]) # Connect first half of nodes\n return nx.to_numpy_matrix(G)",
"def _cat_directions(self, h):\n if self.cfg.bidirectional:\n new_h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n else:\n new_h = h\n lsize, bsize, hsize = new_h.size()\n new_h = new_h.view(lsize, bsize, hsize, 1, 1)\n new_h = new_h.expand(lsize, bsize, hsize, self.cfg.grid_size[1], self.cfg.grid_size[0])\n return new_h",
"def generate_huawei_2g_node_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL.\n # p_mo for primary MO\n cell_level_join = \"\"\" INNER JOIN {0}.BSCBASIC p_mo ON p_mo.neid = t_mo.neid \n AND p_mo.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.baseline_node_parameters \n (node, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value \n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1\n LEFT JOIN network_audit.baseline_node_parameters TT2 on TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.node is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.baseline_node_parameters TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as 
nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.node IS NULL\n )\n DELETE FROM network_audit.baseline_node_parameters t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.baseline_node_parameters TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.baseline_node_parameters AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)",
"def generate_message(self):\r\n intent = torch.zeros(10)\r\n rand = 0\r\n if self.action_base == C:\r\n rand = torch.normal(mean=self.TRUTH_MEAN,std=self.TRUTH_STD,generator = self.generator)\r\n else: #self.action_base == D:\r\n rand = torch.normal(mean=self.DECEIVE_MEAN,std=self.DECEIVE_STD,generator = self.generator)\r\n if rand < 0.1: intent[0] = 1\r\n elif rand < 0.2: intent[1] = 1\r\n elif rand < 0.3: intent[2] = 1\r\n elif rand < 0.4: intent[3] = 1\r\n elif rand < 0.5: intent[4] = 1\r\n elif rand < 0.6: intent[5] = 1\r\n elif rand < 0.7: intent[6] = 1\r\n elif rand < 0.8: intent[7] = 1\r\n elif rand < 0.9: intent[8] = 1\r\n elif rand > 0.9: intent[9] = 1 #the truth is more likely anyways.\r\n return intent",
"def _cat_directions(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h",
"def _cat_directions(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h",
"def _cat_directions(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h",
"def _generate_G_from_H(H, variable_weight=False):\n n_edge = H.shape[1]\n # the weight of the hyperedge\n W_vector = np.ones(n_edge)\n W = sp.diags(W_vector, format=\"csr\")\n # W = np.ones(n_edge)\n # the degree of the node\n DV = H\n DV = DV.sum(axis=1)\n #DV = np.sum(H * W, axis=1)\n # the degree of the hyperedge\n #DE = np.sum(H, axis=0)\n DE = H.sum(axis=0)\n invDE = np.squeeze(np.asarray(np.power(DE, -1)))\n invDE = sp.diags(invDE, format=\"csr\")\n DV2 = sp.diags(np.squeeze(np.asarray(np.power(DV, -0.5))), format=\"csr\")\n\n # invDE = np.mat(np.diag(np.power(DE, -1)))\n # DV2 = np.mat(np.diag(np.power(DV, -0.5)))\n HT = H.T\n\n if variable_weight:\n DV2_H = DV2 * H\n invDE_HT_DV2 = invDE * HT * DV2\n #invDE_HT = invDE * HT\n return DV2_H.toarray(), W_vector, invDE_HT_DV2.toarray()\n else:\n W = sp.diags(W_vector, format=\"csr\")\n G = DV2 * H * W * invDE * HT * DV2\n return G",
"def create_output(self, messages):",
"def lhco_line(self):\n if not self.check_def(['eta','phi','pt','mass','pid']): \n sys.exit('Particle error: some attribute not defined')\n\n jet=[1,2,3,4,5,6,21]\n inv_list=[12,14,16,18,1000022,1000023,1000024,1000025,1000035]\n\n #define pid-> type\n pid_to_type={11:1,-11:1,13:2,-13:2,15:3,-15:3,22:0}\n for data in jet:\n pid_to_type[data]=4\n pid_to_type[-data]=4\n for data in inv_list:\n pid_to_type[data]=6\n pid_to_type[-data]=6\n\n\n \n type=''\n for key in pid_to_type.keys():\n if self.pid==key:\n type=pid_to_type[key]\n break\n \n if type=='':\n print 'Warning unknown type'\n return ''\n\n text =' '+str(type) #type LHCO\n text+=' '+str(self.eta) #ETA\n text+=' '+str(self.phi) #PHI\n text+=' '+str(self.pt) #PT\n text+=' '+str(self.mass) #JMASS\n if self.pid in [11,13]: #NTRK\n text+=' -1' \n else:\n text+=' 1'\n if self.pid in [-5,5]: #BTAG\n text+=' 2'\n else:\n text+=' 0'\n text+=' 0' #HAD/EM\n text+=' 0' #DUMMY 1\n text+=' 0' #DUMMY 2\n \n return text",
"def generate_huawei_2g_cell_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 1\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL\n cell_level_join = \"\"\" INNER JOIN {0}.GCELL gcell ON gcell.\"CELLID\" = t_mo.\"CELLID\" AND gcell.neid = t_mo.neid \n AND gcell.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.network_baseline \n (node, site, cellname, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1\n LEFT JOIN network_audit.network_baseline TT2 on TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.cellname is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete 
old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.network_baseline TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.cellname IS NULL\n )\n DELETE FROM network_audit.network_baseline t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.network_baseline TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.network_baseline AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)",
"def make_nap_msg( i_curr, i_prev, edge_color=None):\n nap_msg = NapMsg() #edge msg\n nap_msg.c_timestamp = S_timestamp[i_curr]\n nap_msg.prev_timestamp = S_timestamp[i_prev]\n nap_msg.goodness = sim_scores_logistic[i_prev]\n\n if edge_color is None:\n edge_color = (0,1.0,0)\n\n if len(edge_color) != 3:\n edge_color = (0,1.0,0)\n\n nap_msg.color_r = edge_color[0] #default color is green\n nap_msg.color_g = edge_color[1]\n nap_msg.color_b = edge_color[2]\n return nap_msg",
"def make_nap_msg( i_curr, i_prev, edge_color=None):\n nap_msg = NapMsg() #edge msg\n nap_msg.c_timestamp = S_timestamp[i_curr]\n nap_msg.prev_timestamp = S_timestamp[i_prev]\n nap_msg.goodness = sim_scores_logistic[i_prev]\n\n if edge_color is None:\n edge_color = (0,1.0,0)\n\n if len(edge_color) != 3:\n edge_color = (0,1.0,0)\n\n nap_msg.color_r = edge_color[0] #default color is green\n nap_msg.color_g = edge_color[1]\n nap_msg.color_b = edge_color[2]\n return nap_msg",
"def morphology(seed=425, th=120):\n \n # impact parameters\n M = 1e8*u.Msun\n B = 19.85*u.kpc\n V = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xr = 20*u.kpc + np.random.randn(Nstar)*0.02*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n\n plt.close()\n fig, ax = plt.subplots(3,1,figsize=(12,8), sharex=True)\n \n c_init = mpl.cm.Blues_r(1)\n c_fin0 = mpl.cm.Blues_r(0.5)\n c_fin = mpl.cm.Blues_r(0.2)\n \n eta = coord.Angle(np.arctan2(np.sqrt(stream['x'][0].to(u.kpc).value**2 + stream['x'][1].to(u.kpc).value**2),xr.to(u.kpc).value)*u.rad)\n xi = np.arctan2(stream['x'][1].to(u.kpc).value, stream['x'][0].to(u.kpc).value)\n xi = coord.Angle((xi - np.median(xi))*u.rad)\n \n vlabel = ['x', 'y', 'z']\n \n for i in range(3):\n plt.sca(ax[i])\n im = plt.scatter(xi.deg, eta.deg, c=stream['v'][i].value, s=20)\n \n plt.xlim(-60, 50)\n plt.ylim(55, 35)\n plt.gca().set_aspect('equal')\n \n if i==2:\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n \n divider = make_axes_locatable(plt.gca())\n cax = divider.append_axes(\"right\", size=\"3%\", pad=0.1)\n plt.colorbar(im, cax=cax)\n plt.ylabel('$V_{{{}}}$ [km s$^{{-1}}$]'.format(vlabel[i]))\n \n plt.tight_layout()",
"def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt",
"def output_loss_and_grads(self, h, V, c, y):\n\n loss, dh, dV, dc = 0.0, [], np.zeros_like(self.V), np.zeros_like(self.c)\n # calculate the output (o) - unnormalized log probabilities of classes\n # calculate yhat - softmax of the output\n # calculate the cross-entropy loss\n # calculate the derivative of the cross-entropy softmax loss with respect to the output (o)\n # calculate the gradients with respect to the output parameters V and c\n # calculate the gradients with respect to the hidden layer h\n for t in range(self.sequence_length):\n hp = h[:, t, :] # BS x H\n #o = self.output(hp, V, c) # leng x BS\n o = self.output(hp, V, c) # BS x leng\n #exp = np.exp(o) # leng x BS\n exp = np.exp(o) # BS x leng\n #s = exp / np.sum(exp, axis=0, keepdims=True) # leng x BS\n s = exp / np.sum(exp, axis=1, keepdims=True) # BS x leng\n yp = y[:, t, :]\n #dO = s - yp # leng x BS\n dO = s - yp # BS x leng\n #dV += np.dot(dO, hp.T) # ( leng x BS ) * ( H x BS ).T = leng x H\n dV += np.dot(hp.T, dO) # ( BS x H ).T * ( BS x leng ) = H x leng\n #dc += np.sum(dO, axis=1).reshape([-1, 1]) #\n dc += np.sum(dO, axis=0).reshape([1, -1]) #\n #dh.append(np.dot(self.V.T, dO)) # ( leng x H ).T * ( leng x BS ) = ( BS x H )\n dh.append(np.dot(dO, self.V.T)) # ( BS x leng ) * ( H x leng ).T = ( BS x H )\n loss += -np.sum(np.log(s)*yp)\n return loss, np.array(dh), dV, dc",
"def connectInfo(self,compInfo, node, nodeDic, numNodesSub,subcktName):\n connInfo = []\n print \"compinfo-------->\",compInfo\n sourcesInfo = self.separateSource(compInfo)\n for eachline in compInfo:\n words = eachline.split()\n print \"eachline----->\",eachline\n print \"eachline[0]------->\",eachline[0]\n if eachline[0]=='r' or eachline[0]=='R' or eachline[0]=='c' or eachline[0]=='C' or eachline[0]=='d' or eachline[0]=='D' \\\n or eachline[0]=='l' or eachline[0]=='L' or eachline[0]=='v' or eachline[0]=='V':\n conn = 'connect(' + words[0] + '.p,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='q' or eachline[0]=='Q':\n print \"Inside Transistor--->\"\n print \"Node Dict------>\",nodeDic\n conn = 'connect(' + words[0] + '.C,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.B,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.E,' + nodeDic[words[3]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='m' or eachline[0]=='M':\n conn = 'connect(' + words[0] + '.D,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.G,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.S,' + nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.B,' + nodeDic[words[4]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['f','h','F','H']:\n vsource = words[3]\n sourceNodes = sourcesInfo[vsource]\n sourceNodes = sourceNodes.split()\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[sourceNodes[0]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[sourceNodes[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['g','e','G','E']:\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['g','e','G','E']:\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='x' or eachline[0]=='X':\n templine = eachline.split()\n temp = templine[0].split('x')\n index = temp[1]\n for i in range(0,len(templine),1):\n if templine[i] in subcktName: #Ask Manas Added subcktName in function Call\n subname = templine[i]\n nodeNumInfo = self.getSubInterface(subname, numNodesSub)\n for i in range(0, numNodesSub[subname], 1):\n #conn = 'connect(' + subname + '_instance' + index + '.' + nodeDic[nodeNumInfo[i]] + ',' + nodeDic[words[i+1]] + ');'\n conn = 'connect(' + subname + '_instance' + index + '.' + 'n'+ nodeNumInfo[i] + ',' + nodeDic[words[i+1]] + ');'\n connInfo.append(conn)\n else:\n continue\n if '0' or 'gnd' in node:\n conn = 'connect(g.p,n0);'\n connInfo.append(conn)\n \n return connInfo",
"def _create_msg(self, tr_id, i_triples, i_type, r_triples, r_type, confirm):\n params = SSAP_UPDATE_PARAM_TEMPLATE % (str(i_type).upper(),\n str(i_triples),\n str(r_type).upper(),\n str(r_triples),\n str(confirm).upper())\n tmp = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(self.targetSS),\n self.tr_type, str(tr_id), params)\n return tmp",
"def h(self,node):\n return 0",
"def womcom(hop,num,weights):\n import logging\n import matplotlib.pyplot as plt\n import numpy as np\n from tmath.wombat.inputter import inputter\n from tmath.wombat import HOPSIZE\n hopnum=[0]\n weight=[0]\n while (hopnum[0] < 1) or (hopnum[0] > HOPSIZE):\n hopnum[0]=inputter('Enter first hopper: ','int',False)\n if (weights):\n weight[0]=inputter('Enter weight for first hopper: ','float',False)\n for i in range(1,num):\n hoploop=0\n weightloop=0\n if (num > 3):\n print('Use hopper 99 to end')\n while (hoploop < 1) or (hoploop > HOPSIZE):\n hoploop=inputter('Enter next hopper: ','int',False)\n if (hoploop == 99):\n break\n if (hoploop == 99):\n break\n if (hop[hopnum[0]].wave[0] != hop[hoploop].wave[0]) or \\\n (hop[hopnum[0]].wave[1] != hop[hoploop].wave[1]) or \\\n (hop[hopnum[0]].wave[-1] != hop[hoploop].wave[-1]):\n print('Hoppers to not have the same wavelength scale!')\n return hop\n hopnum.append(hoploop)\n if (weights):\n weightloop=inputter('Enter next weight: ','float',False)\n weight.append(weightloop)\n if (weights) and (abs(sum(weight)-1.0) > 0.00001):\n print('Weights do not add to 1.0')\n return hop\n if (not weights):\n weight=[1./len(hopnum)]*len(hopnum)\n newflux=np.zeros(len(hop[hopnum[0]].flux))\n logging.debug('Combining spectra:')\n \n for i in range(0,len(hopnum)):\n newflux=newflux+hop[hopnum[i]].flux*weight[i]\n logging.debug('Combining {} with weight {}'.format(hop[hopnum[i]].obname,\\\n weight[i]))\n hopout=0\n while (hopout < 1) or (hopout > HOPSIZE):\n hopout=inputter('Enter hopper to store combined spectrum: ','int',False)\n hop[hopout].wave=hop[hopnum[0]].wave.copy()\n hop[hopout].flux=newflux.copy()\n hop[hopout].obname=hop[hopnum[0]].obname\n hop[hopout].header=hop[hopnum[0]].header\n hop[hopout].var=hop[hopnum[0]].var.copy()\n plt.cla()\n plt.plot(hop[hopout].wave,hop[hopout].flux,drawstyle='steps-mid',color='k')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.title(hop[hopout].obname)\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n print('\\nPlotting combined spectrum in black, components in color\\n')\n for i in range(0,len(hopnum)):\n plt.plot(hop[hopnum[i]].wave,hop[hopnum[i]].flux,drawstyle='steps-mid')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n \n #FIX var\n return hop",
"def CryoFan(m_dot, p_in, T_in, p_out, T_out):\r\n\r\n def Q_Boehmwind(vol_flow, Rho):\r\n # Efficiency of the Boehmwind CryoFan in -.\r\n # Fit function of measured data for rpm = 22000.\r\n # Parameter: Volume flow needs to be in m³/h.\r\n efficiency_Boehmwind = 0.01 *(1.5962e-3*vol_flow**4 - 1.0414e-2*vol_flow**3 - 2.8084*vol_flow**2 + 2.3715e1*vol_flow + 9.1550) #-\r\n # Dynamic loss of the Boehmwind CryoFan in W/rho.\r\n # Fit function of measured data for rpm = 22000.\r\n # Parameter: Volume flow needs to be in m³/h.\r\n dynLoss_Boehmwind = -3.1011e-4*vol_flow**4 - 3.0597e-3*vol_flow**3 + 1.6961e-2*vol_flow**2 + 2.9853e-1*vol_flow + 4.6333e-2 #W/rho\r\n\r\n # Friction loss\r\n Q_friction = dynLoss_Boehmwind * Rho #W\r\n # Dynamic heat load\r\n Q_dynamic = Q_friction/efficiency_Boehmwind - Q_friction #W\r\n # Static heat load\r\n # Using the given value for operation at 30 K and 20 bara\r\n Q_static = 7.0 #W\r\n\r\n return Q_friction + Q_dynamic + Q_static\r\n\r\n # Calculation of a mean rho\r\n Rho_in = hp.HeCalc(3, 0, 1, p_in, 2, T_in, 1) #kg/m³\r\n Rho_out = hp.HeCalc(3, 0, 1, p_out, 2, T_out, 1) #kg/m³\r\n Rho = 0.5 * (Rho_in + Rho_out) #kg/m³\r\n # Calculation of a mean cp\r\n Cp_in = hp.HeCalc(14, 0, 1, p_in, 2, T_out, 1) #J/(kgK)\r\n Cp_out = hp.HeCalc(14, 0, 1, p_out, 2, T_out, 1) #J/(kgK)\r\n Cp = 0.5 * (Cp_in + Cp_out) #J/(kgK)\r\n # Mean volume flow\r\n vol_flow = m_dot / Rho * 3600 #m³/h\r\n\r\n ## Heat loads\r\n # Estimating the different heat loads that are applied on the system by the cryofan\r\n # Static heat load and the heat load from the fan efficiency will be dissipated across the fan.\r\n # Friction losses will occur in the piping of system and application.\r\n # Since the friction losses are small in the respective application it is assumed that all friction loss occurs at the CryoFan aswell!\r\n # ->Tested the friction loss in a remote cooling application and it was negligible\r\n # Boehmwind CryoFan\r\n # Call of the function for the Boehmwind CryoFan\r\n Q_CryoFan = Q_Boehmwind(vol_flow, Rho)\r\n\r\n # New temperature due to the heat load of the Cryofan\r\n T_out = T_in + Q_CryoFan/(Cp * m_dot)\r\n\r\n # Prepare the output of the results\r\n h_out = hp.HeCalc(9, 0, 1, p_out, 2, T_out, 1) #J/kg\r\n\r\n print(\"Cryofan heat load: \", Q_CryoFan)\r\n\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n return state_out",
"def test_weight_hh(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n gene1, gene2 = get_gru_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n for value in gene3.weight_hh:\n for v in value:\n if v == 0:\n p1 = True\n elif v == 1:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(np.linalg.norm(gene3.weight_hh - gene1.weight_hh), 0)\n self.assertNotEqual(np.linalg.norm(gene3.weight_hh - gene2.weight_hh), 0)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertNotEqual(np.linalg.norm(gene3.weight_hh - gene1.weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene3.weight_hh - gene2.weight_hh), 0)",
"def pair (cls):\n a_to_b = MessageChannel()\n b_to_a = MessageChannel()\n a = cls(a_to_b, b_to_a)\n b = cls(b_to_a, a_to_b)\n return (a,b)"
] | [
"0.54044586",
"0.5386834",
"0.5290445",
"0.51632214",
"0.51632214",
"0.51127476",
"0.51023054",
"0.50814384",
"0.50715846",
"0.50035816",
"0.499038",
"0.4964276",
"0.4964276",
"0.4964276",
"0.49613354",
"0.49603233",
"0.49329245",
"0.48807725",
"0.48755825",
"0.48755825",
"0.4848897",
"0.48343876",
"0.48325709",
"0.4828111",
"0.48240665",
"0.48137748",
"0.48127112",
"0.48031512",
"0.47873393",
"0.4786016"
] | 0.58693963 | 0 |
Import all model data using the loader. | def import_data(self):
self.models = []
for o in self.loader.load():
klass = self.type_for(o)
if hasattr(klass, "from_api"):
self.models.append(klass.from_api(o))
else:
self.models.append(klass(o))
return self.models | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_all():\n\n # count the number of files loaded\n count = 0\n\n # get model name\n model_name_list = [model for data_models in settings.OBJECT_DATA_MODELS\n for model in data_models]\n\n model_name_list += [model for model in settings.OTHER_DATA_MODELS]\n\n # import models one by one\n for model_name in model_name_list:\n import_model(model_name)\n\n # import localized strings\n import_localized_strings(settings.LANGUAGE_CODE)",
"def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")",
"def load_data(self):\n\n self._load_train_data()\n self._load_test_data()",
"def load(self):\n if self.verbosity:\n self.header(\"Loading data files\")\n\n model_list = [\n x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())\n ]\n\n if self.resume_mode:\n # get finished load command logs of last update\n prev_loaded = [\n x.file_name\n for x in self.log_record.called.filter(\n command='loadcalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} models already loaded.\".format(len(prev_loaded)))\n # remove these from model_list\n model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]\n\n if self.verbosity:\n model_list = progress.bar(model_list)\n for model in model_list:\n call_command(\n \"loadcalaccessrawfile\",\n model.__name__,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n app_name=self.app_name,\n )",
"def load(self, *args, **kw):\n if self._loaded:\n return\n args = args or self._loader[1]\n kw = kw or self._loader[2]\n loaded_models = self._loader[0](*args, **kw)\n for m in loaded_models:\n if isinstance(m, Model):\n self.add(m)\n else:\n self.add(self.model_class(**m))\n self._loaded = True",
"def loadParts(self):\n for i in range(15):\n self.model_parts[i] = loadModel(\"ato_{}.pkl\".format(str(i)))",
"def __init__(self, loader):\n self.loader = loader\n self.models = []",
"def _load_training_data(self):\n self._save_training_data()",
"def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")",
"def import_all_model_modules():\r\n import brokerage.model\r\n # ensure that these imports don't get auto-deleted! they have side effects.\r\n brokerage.model",
"def load_model(self):\n pass",
"def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return",
"def __load(self, model_name):\n\n print(\"Loading model.\")\n tstart = datetime.now()\n\n # Temporary directory to extract the zipped information\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Unzip the directory that contains the saved model(s)\n with zipfile.ZipFile(model_name + \".zip\", \"r\") as zip_ref:\n zip_ref.extractall(dirpath)\n\n # Load metadata\n metadata = pickle.load(open(dirpath + \"/metadata.pickle\", \"rb\"))\n\n # Re-load metadata\n self.__dict__.update(metadata)\n\n # Load all sub-models\n try:\n self.__mol_to_latent_model = load_model(\n dirpath + \"/mol_to_latent_model.h5\"\n )\n except:\n print(\"'mol_to_latent_model' not found, setting to None.\")\n self.__mol_to_latent_model = None\n\n self.__latent_to_states_model = load_model(\n dirpath + \"/latent_to_states_model.h5\"\n )\n self.__batch_model = load_model(dirpath + \"/batch_model.h5\")\n \n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n self.__build_sample_model(\n batch_input_length=256 # could also be self.batch_size\n ) # Multi-output model\n\n print(\"Loading finished in %i seconds.\" % ((datetime.now() - tstart).seconds))",
"def load(\n self,\n modelLoadPath\n ):\n pass",
"def load_model(self):\n self._logger.debug(f\"Loading Spacy Data Model : {self._model}... Could take time.\")\n self._nlp = spacy.load(self._model)\n self._logger.debug(\"Successfully loaded Spacy Data !\")\n\n # === Load entities ===\n if PIPE_ENTITY not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_ENTITY, last=True)\n\n entity_pipe = self._nlp.get_pipe(PIPE_ENTITY)\n for entity in self._entities:\n entity_pipe.add_label(entity)\n\n # === Load categories ===\n if PIPE_INTENT not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_INTENT, last=True)\n\n intent_pipe = self._nlp.get_pipe(PIPE_INTENT)\n for intent in self._intents:\n intent_pipe.add_label(intent)",
"def load(self, dataset, model_dir):\n raise NotImplementedError",
"def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()",
"def import_model(model_name, clear=True):\n file_name = os.path.join(settings.GAME_DIR, settings.WORLD_DATA_FOLDER, model_name)\n import_file(file_name, model_name, widecard=True, clear=clear)",
"def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')",
"def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True",
"def load_model(self):\n Thread(target=self.__load_model).start()",
"def load_model(self) -> Any:",
"def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()",
"def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)",
"def load_bundles(self):\n path = os.path.join(self.user_directory, \"bundles\")\n for name in os.listdir(path):\n if not name.startswith(\"__\") and os.path.isdir(path + \"/\" + name):\n bundle = Bundle(self, name)\n self.bundles[name] = bundle\n for bundle in self.bundles.values():\n bundle.setup(self, self.loader)\n\n for model in self.models:\n type(model).extend(model)\n for model in self.models:\n self.data_connector.repository_manager.add_model(model)",
"def load_all(self, file):\n self.model = load_model(file + \"_model.h5\")",
"def loadAll(self, path):\n self.model = keras.models.load_model(path+\"/model\")\n with open(path + \"/modelConfig.json\") as f:\n config = json.load(f)\n firstLayerConfig = config['config']['layers'][0]['config']\n lastLayerConfig = config['config']['layers'][-1]['config']\n self.lookBack = firstLayerConfig['batch_input_shape'][-1]\n self.forecast = lastLayerConfig['units']",
"def load(path_to_model):\n pass",
"def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )",
"def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )"
] | [
"0.7716706",
"0.6954782",
"0.6742698",
"0.67201203",
"0.6513403",
"0.64677274",
"0.64014745",
"0.6369573",
"0.6368797",
"0.63448185",
"0.633244",
"0.6321181",
"0.62659895",
"0.6258032",
"0.6243625",
"0.6167777",
"0.6158548",
"0.6115253",
"0.608663",
"0.608182",
"0.6074489",
"0.6064093",
"0.6046113",
"0.6038184",
"0.602964",
"0.6023165",
"0.59820455",
"0.59791785",
"0.5966759",
"0.5966759"
] | 0.7674908 | 1 |
Test GenBank parsing invalid product line raises ValueError | def test_invalid_product_line_raises_value_error(self):
def parse_invalid_product_line():
rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'),
'genbank')
self.assertRaises(ValueError, parse_invalid_product_line) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )",
"def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)",
"def test_invalid_regref(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float q0 = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float array q4 =\\n\\t-0.1, 0.2\")",
"def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))",
"def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))",
"def test_step_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot STEP \"hello there\" X temperature_mid\n \"\"\"\n\n # TODO make exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))",
"def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )",
"def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')",
"def test_validate_input_valid(self):\n final_config = self.dtm1.validate_input('00001111')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'xxxxyyyy.\\')')",
"def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])",
"def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)",
"def test_invalid_format(self):\n input_file = self.copy_and_mark_for_cleanup(\"Medline/pubmed_result1.txt\")\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")",
"def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)",
"def test_addr_zip_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_zip(val))",
"def test_team_reg_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_reg(val))",
"def test_gender_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_gender(val))",
"def test_release_tag_for_invalid_version(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version foo.bar.ba\"):\n release_tag()",
"def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')",
"def test_invalid_aggregation():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date BY YEAR Y temperature_mid ARGMIN\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")",
"def test_no_specification_error():\n try:\n bad_arm = survey.get_spiral_slice()\n except SyntaxError:\n assert True\n else:\n assert False",
"def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'",
"def test_lsusb_parse_error_generic(self):\n self.assertRaises(ParseError, jc.parsers.lsusb.parse, self.generic_lsusb_t, quiet=True)",
"def test_bins_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot X temperature_mid BINS hi\n \"\"\"\n\n # TODO Make exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_parse_invalid_version(self):\n version = VersionNumberScaleMeasurement.parse_version(\"This is not a version number\")\n self.assertEqual(Version(\"0\"), version)",
"def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)",
"def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def test_constructor_short_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.short_line))",
"def test_constructor_short_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.short_line))"
] | [
"0.6412361",
"0.63490635",
"0.6333902",
"0.6294638",
"0.62427443",
"0.6210081",
"0.6208923",
"0.6203845",
"0.6167732",
"0.61114794",
"0.6096026",
"0.60889727",
"0.6073056",
"0.60688126",
"0.6063661",
"0.60588723",
"0.6050436",
"0.6005442",
"0.5976354",
"0.5972584",
"0.59714663",
"0.59529483",
"0.59517145",
"0.5950122",
"0.59473485",
"0.5932474",
"0.59285265",
"0.5907993",
"0.5878064",
"0.5860889"
] | 0.8058622 | 0 |
\b Lists all your published apps. $ 21 publish list Results from the list command are paginated. Use 'n' to move to the next page and 'p' to move to the previous page. You can view detailed admin information about an app by specifying its id at the prompt. | def list(ctx):
# pylint: disable=redefined-builtin
_list_apps(ctx.obj['config'], ctx.obj['client']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return",
"def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))",
"def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()",
"def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list",
"def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"",
"def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)",
"def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')",
"def app_list(request):\n return render(request, 'mdm/app_list.html', {})",
"def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)",
"def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()",
"def listapps(parser):\n\n print('Function List')\n subparsers_actions = [\n # pylint: disable=protected-access\n action for action in parser._actions\n # pylint: disable=W0212\n if isinstance(action, argparse._SubParsersAction)]\n # there will probably only be one subparser_action,\n # but better safe than sorry\n for subparsers_action in subparsers_actions:\n # get all subparsers and print help\n for choice, subparser in subparsers_action.choices.items():\n print(\"Function: '{}'\".format(choice))\n print(subparser.format_help())\n # print(parser.format_help())",
"def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))",
"def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def view(args):\n print(\"List of all available phonebooks:\")\n for file in glob.glob(\"*.ph\"):\n print(file)",
"def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)",
"async def getList(author, page):\n availableCommands = await _generateList(author, False)\n availableCommands.sort(key=lambda x: x['name'])\n totalPages = math.floor(len(availableCommands)/10) + 1\n if page == 100:\n page = totalPages\n if page > totalPages or page < 1:\n return False\n availableCommands = availableCommands[(page-1)*10:(page)*10]\n return assembleEmbed(\n title=f\"List of Commands for `{author}` (Page {page}/{totalPages})\",\n desc=\"\\n\".join([f\"`{c['name']}` - {c['description']}\" for c in availableCommands])\n )",
"def get_search_results(config, client, page):\n resp = client.get_published_apps(config.username, page)\n resp_json = resp.json()\n search_results = resp_json[\"results\"]\n if search_results is None or len(search_results) == 0:\n logger.info(\n click.style(\"You haven't published any apps to the marketplace yet. Use \", fg=\"blue\") +\n click.style(\"21 publish submit {PATH_TO_MANIFEST_FILE}\", bold=True, fg=\"blue\") +\n click.style(\" to publish your apps to the marketplace.\", fg=\"blue\"), fg=\"blue\")\n return 0\n\n total_pages = resp_json[\"total_pages\"]\n logger.info(\"\\nPage {}/{}\".format(page + 1, total_pages), fg=\"green\")\n headers = [\"id\", \"Title\", \"Url\", \"Rating\", \"Is up\", \"Is healthy\", \"Average Uptime\",\n \"Last Update\"]\n rows = []\n for r in search_results:\n rating = \"Not yet Rated\"\n if r[\"rating_count\"] > 0:\n rating = \"{:.1f} ({} rating\".format(r[\"average_rating\"],\n int(r[\"rating_count\"]))\n if r[\"rating_count\"] > 1:\n rating += \"s\"\n rating += \")\"\n rows.append([r[\"id\"],\n r[\"title\"],\n r[\"app_url\"],\n rating,\n str(r[\"is_up\"]),\n str(r[\"is_healthy\"]),\n \"{:.2f}%\".format(r[\"average_uptime\"] * 100),\n util.format_date(r[\"last_update\"])])\n\n logger.info(tabulate(rows, headers, tablefmt=\"simple\"))\n\n return total_pages",
"def get_apps(self, limit, offset=None):\n params = {'v': WIT_API_VERSION}\n if limit:\n params['limit'] = limit\n if offset:\n params['offset'] = offset\n return req(self.logger, self.access_token, 'GET', '/apps', params)",
"def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")",
"def list(default_view):\n ListCommandExecutor(default_view).list()",
"def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')",
"def show_applications_toc():\n if not cache.get(APPLICATIONS_TOC_CACHE_KEY):\n from django.utils.importlib import import_module\n from sveedocuments.models import Page\n \n apps_infos = []\n for appname, apptitle, appdesc, appkwargs in settings.PUBLISHED_APPS:\n title = apptitle or appname\n desc = appdesc\n doc_link = appkwargs.get('doc_link', None)\n demo_link = appkwargs.get('demo_link', None)\n download_link = appkwargs.get('download_link', None)\n github_link = None\n \n # Links can be tuple, that is assumed to be passed by a reverse url with first \n # element as url name and second argument as args list\n if doc_link and not isinstance(doc_link, basestring):\n doc_link = reverse(doc_link[0], args=doc_link[1])\n \n if demo_link and not isinstance(demo_link, basestring):\n demo_link = reverse(demo_link[0], args=demo_link[1])\n \n if download_link and not isinstance(download_link, basestring):\n download_link = reverse(download_link[0], args=download_link[1])\n \n # Determine some optionnals urls from a schema where we insert the appname\n if not download_link and appkwargs.get('pypi', False):\n download_link = \"http://pypi.python.org/pypi/{0}\".format(appname)\n \n if appkwargs.get('github', False):\n github_link = \"https://github.com/sveetch/{0}\".format(appname)\n if not download_link:\n download_link = \"{0}/tags\".format(github_link)\n \n # Try to get introduction from the module __doc__ attribute\n if not desc:\n try:\n mod = import_module(appname)\n except ImportError:\n pass\n else:\n if mod.__doc__.strip():\n desc = mod.__doc__.strip()\n \n # Try to get some informations from the document Page if it exists\n try:\n page_instance = Page.objects.get(slug=appname)\n except Page.DoesNotExist:\n pass\n else:\n title = page_instance.title\n doc_link = page_instance.get_absolute_url() or doc_link\n \n apps_infos.append({\n 'title': title,\n 'desc': desc,\n 'doc_link': doc_link,\n 'demo_link': demo_link,\n 'download_link': download_link,\n 'github_link': github_link,\n })\n \n cache.set(APPLICATIONS_TOC_CACHE_KEY, {'application_toc': tuple(apps_infos)})\n \n return cache.get(APPLICATIONS_TOC_CACHE_KEY)",
"def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )",
"def get_all_apps(self):\n return list(self.apps.values())",
"def list():\n index = config.index\n output_format = \"%-7s %-20s %s\"\n click.secho(output_format % (\"ID\", \"CREATED\", \"BACKENDS\"), fg=\"cyan\")\n for archive in sorted(index.archives(), key=lambda x: x[\"id\"]):\n # Print it out\n click.echo(\n output_format\n % (\n archive[\"id\"],\n datetime.datetime.fromtimestamp(archive[\"created\"]).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \", \".join(sorted(archive[\"backend_names\"])),\n )\n )",
"def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')",
"def app_list(self, third_only=False):\n return self.adb.app_list(third_only)"
] | [
"0.71768415",
"0.6380013",
"0.6237213",
"0.61973405",
"0.6124043",
"0.60990864",
"0.60854894",
"0.5944503",
"0.5898133",
"0.58913547",
"0.5877315",
"0.5836518",
"0.5767014",
"0.5754838",
"0.5688991",
"0.56420964",
"0.56412876",
"0.55870926",
"0.5555698",
"0.5543384",
"0.5500558",
"0.5498956",
"0.54931974",
"0.5486188",
"0.5478611",
"0.54709584",
"0.5469451",
"0.5467937",
"0.54616815",
"0.5453046"
] | 0.64636713 | 1 |
\b Removes a published app from the Marketplace. $ 21 publish remove [yes] {app_id} \b Removes all published apps from the Marketplace. $ 21 publish remove [yes] all \b | def remove(ctx, app_id, all, assume_yes):
if all and not app_id:
for _app_id in _get_all_app_ids(ctx.obj['config'], ctx.obj['client']):
_delete_app(ctx.obj['config'], ctx.obj['client'], _app_id, assume_yes)
elif app_id and not all:
_delete_app(ctx.obj['config'], ctx.obj['client'], app_id, assume_yes)
else:
logger.info(ctx.command.get_help(ctx))
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))",
"def remove_app(self):\n \n pass",
"def delete_app(AppId=None):\n pass",
"def delete_app(short_name):\r\n delete_memoized(get_app, short_name)",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def delete_app(self, name):\n raise NotImplementedError",
"def remove_hero(apps, schema_editor):\n pass",
"def delete(self, application_id):",
"def remove_app(self, app_name):\n self.remove_list_setting('applications', 'installed_apps',\n app_name)",
"def _delete_app(config, client, app_id, assume_yes):\n title = client.get_app_full_info(config.username, app_id).json()['app_info']['title']\n if assume_yes or click.confirm(\n \"Are you sure that you want to delete App '{} ({})'?\".format(app_id, title)):\n try:\n resp = client.delete_app(config.username, app_id)\n resp_json = resp.json()\n deleted_title = resp_json[\"deleted_title\"]\n logger.info(\"App {} ({}) was successfully removed from the marketplace.\".format(app_id, deleted_title))\n except ServerRequestError as e:\n if e.status_code == 404:\n logger.info(\"The app with id '{}' does not exist in the marketplace.\".format(app_id), fg=\"red\")\n elif e.status_code == 403:\n logger.info(\n \"You don't have permission to delete the app with id '{}'. You \"\n \"can only delete apps that you have published.\".format(app_id), fg=\"red\")",
"def remove(self, package):\n self.driver.remove_app(package)",
"def remove(name):\n if name==\"autopy\":\n print(\"\\n\\tUNINSTALLING WORKING MODULE WILL CAUSE ERRORS AND MAKE YOUR CODE UNUSABLE\\n\")\n choice=input(f\"Are you sure to remove {name}?\\nEnter YES,PROCEED to continue:\")\n if choice == 'YES,PROCEED':os.system(f'python -m pip uninstall {name}')\n else:print(\"Operetion Cancelled\")",
"def delete_app(self,*app_names):\n\n for app in app_names:\n shutil.rmtree(os.path.join(self._main,app))\n \n self._remove_extra_css_apps()\n self._remove_extra_templates_apps()\n self._update_delete_app_or_page()",
"def _uninstall(package_name, remove_all, app_id, cli, app):\n\n package_manager = _get_package_manager()\n err = package.uninstall(\n package_manager, package_name, remove_all, app_id, cli, app)\n if err is not None:\n emitter.publish(err)\n return 1\n\n return 0",
"def remove_app(self, app):\n try:\n membership = self.membership_class.objects.get(obj=self, app=app)\n except self.membership_class.DoesNotExist:\n return False\n else:\n membership.delete()\n index_webapps.delay([app.pk])\n return True",
"def remove_tag(args):",
"def uninstall_app(self, package, keepdata=False):\n return self.adb.uninstall(package, keepdata)",
"def rm(args):\n args.delete = True\n return remove(args)",
"def clear_app(package):\n G.DEVICE.clear_app(package)",
"def delete_application(self, method=\"POST\", short_name=\"sampleapp\"):\r\n if method == \"POST\":\r\n return self.app.post(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)\r\n else:\r\n return self.app.get(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)",
"def clear_app(self, package):\n return self.adb.clear_app(package)",
"def removeItem(*args):",
"def removeItem(*args):",
"def remove_apps(self):\n self.membership_class.objects.filter(obj=self).delete()",
"def remove_compiled_app():\r\n app = get_app()\r\n remove_compiled_application(apath(app, r=request))\r\n session.flash = T('compiled application removed')\r\n redirect(URL('site'))",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def cli(env, dry_run):\n\n tag_manager = TagManager(env.client)\n empty_tags = tag_manager.get_unattached_tags()\n\n for tag in empty_tags:\n if dry_run:\n click.secho(f\"(Dry Run) Removing {tag.get('name')}\", fg='yellow')\n else:\n result = tag_manager.delete_tag(tag.get('name'))\n color = 'green' if result else 'red'\n click.secho(f\"Removing {tag.get('name')}\", fg=color)",
"def _installed_apps_remove(self):\n config.remove_plugin(self.module_path)",
"def deleteApp(appName):\n logger.debug('[FLASKWEB /delete/app/<appName>] Request to delete App `%s`', appName)\n applist = [a['name'] for a in db.getAllApps()]\n if appName not in applist:\n return returnError(\"Application %s does not exist\" % appName, 404)\n\n logger.info(\"[FLASKWEB] DELETING all versions of app, `%s`\")\n db.deleteAllApps(appName)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(app=appName, status='DELETED, files remain on server')), 200\n else:\n applist = db.getAllApps()\n versions = {a['name']: db.getVersions(a['name'], limit=5) for a in applist}\n return render_template('apps.html', applist=applist, versions=versions)"
] | [
"0.6542366",
"0.63983655",
"0.635603",
"0.62508076",
"0.6105173",
"0.60880226",
"0.5995787",
"0.59228504",
"0.5885536",
"0.58168525",
"0.57712317",
"0.5742939",
"0.56851757",
"0.56586546",
"0.5657139",
"0.5622254",
"0.5595414",
"0.5593344",
"0.55384624",
"0.55296105",
"0.55245054",
"0.5517181",
"0.5510308",
"0.5510308",
"0.55049574",
"0.5477803",
"0.5466714",
"0.54657143",
"0.54256463",
"0.54005426"
] | 0.67598 | 0 |
\b Publishes an app to 21 Marketplace. $ 21 publish submit path_to_manifest/manifest.yaml The contents of the manifest file should follow the guidelines specified at | def submit(ctx, manifest_path, marketplace, skip, parameters):
if parameters is not None:
try:
parameters = _parse_parameters(parameters)
except:
logger.error(
"Manifest parameter overrides should be in the form 'key1=\"value1\" "
"key2=\"value2\".",
fg="red")
return
_publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _publish(client, manifest_path, marketplace, skip, overrides):\n try:\n manifest_json = check_app_manifest(manifest_path, overrides, marketplace)\n app_url = \"{}://{}\".format(manifest_json[\"schemes\"][0], manifest_json[\"host\"])\n app_ip = urlparse(app_url).hostname\n\n if not skip:\n address = get_zerotier_address(marketplace)\n\n if address != app_ip:\n wrong_ip = click.style(\"It seems that the IP address that you put in your manifest file (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\") is different than your current 21market IP (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\")\\nAre you sure you want to continue publishing with \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\"?\")\n if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):\n switch_host = click.style(\"Please edit \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" and replace \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" with \") +\\\n click.style(\"[{}].\", bold=True)\n logger.info(switch_host.format(manifest_path, app_ip, address))\n return\n\n except exceptions.ValidationError as ex:\n # catches and re-raises the same exception to enhance the error message\n publish_docs_url = click.style(\"https://21.co/learn/21-publish/\", bold=True)\n publish_instructions = \"For instructions on publishing your app, please refer to {}\".format(publish_docs_url)\n raise exceptions.ValidationError(\n \"The following error occurred while reading your manifest file at {}:\\n{}\\n\\n{}\"\n .format(manifest_path, ex.args[0], publish_instructions),\n json=ex.json)\n\n app_name = manifest_json[\"info\"][\"title\"]\n app_endpoint = \"{}://{}{}\".format(manifest_json[\"schemes\"][0],\n manifest_json[\"host\"],\n manifest_json[\"basePath\"])\n\n logger.info(\n (click.style(\"Publishing {} at \") + click.style(\"{}\", bold=True) + click.style(\" to {}.\"))\n .format(app_name, app_endpoint, marketplace))\n payload = {\"manifest\": manifest_json, \"marketplace\": marketplace}\n try:\n response = client.publish(payload)\n except ServerRequestError as e:\n if e.status_code == 403 and e.data.get(\"error\") == \"TO600\":\n logger.info(\n \"The endpoint {} specified in your manifest has already been registered in \"\n \"the marketplace by another user.\\nPlease check your manifest file and make \"\n \"sure your 'host' field is correct.\\nIf the problem persists please contact \"\n \"[email protected].\".format(app_endpoint), fg=\"red\")\n return\n else:\n raise e\n\n if response.status_code == 201:\n response_data = response.json()\n mkt_url = response_data['mkt_url']\n permalink = response_data['permalink']\n logger.info(\n click.style(\n \"\\n\"\n \"You have successfully published {} to {}. \"\n \"You should be able to view the listing within a few minutes at {}\\n\\n\"\n \"Users will be able to purchase it, using 21 buy, at {} \",\n fg=\"magenta\")\n .format(app_name, marketplace, permalink, mkt_url)\n )",
"def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()",
"def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)",
"def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)",
"def publish():\n pass",
"def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)",
"def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)",
"def deploy():\n local('appcfg.py --no_cookies [email protected] update .',\n capture=False)",
"def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)",
"def test_publish_deployment_run(self):\n pass",
"def finish_publish(hash, metadata, engine_id=None, username=USER):\n identity = \"%s@%s\" % (username, get_config('domain'))\n library = Library.objects.get(identity=identity)\n library.add_item(\n engine_id=engine_id,\n origin=identity,\n metadata=metadata\n )\n return \"OK\"",
"def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))",
"def main(pkg_dir, years):\n pkgname = os.path.basename(pkg_dir)\n identifier = clean_name('archlinux_pkg_' + pkgname)\n metadata = {\n #'collection': ['test_collection', 'open_source_software'],\n #'collection': ['open_source_software'],\n 'collection': ['archlinuxarchive'],\n 'mediatype': 'software',\n 'publisher': 'Arch Linux',\n 'creator': 'Arch Linux',\n 'subject': ['archlinux', 'archlinux package'],\n }\n metadata['title'] = pkgname + \" package archive from Arch Linux\"\n metadata['subject'].append(pkgname)\n upload_pkg(identifier, pkgname, metadata, pkg_dir, years)",
"def upload():\n sh('python setup.py register sdist upload')",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error",
"def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)",
"def publish_files():\n print(\"Publishing files to the internet...\", end=\"\", flush=True)\n import subprocess\n try:\n subprocess.run(\"./upload.sh\", timeout=120.0)\n print(\"done.\\n\")\n except:\n print(\"failed.\\n\")",
"def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)",
"def deploy_api(dist_file, apt_req_file):\n _set_credentials()\n provision()\n _deploy_apt_requirements(apt_req_file)\n _deploy_python_package(dist_file)\n _sighup_api()\n _verify_api_heartbeat()\n send_build_stat(PROJECT_NAME, env.stage)",
"def run_post_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name, publisher_type=POST_PUBLISHER_TYPE)\n # do not forget to clean up the staging area\n staging.clear()",
"def deploy_app(self, app_info):\n raise NotImplementedError",
"def publish(self,toolname):\n\n self.logger.info(\"publishing '%s'\" % (toolname))\n\n po = self.catalog.load_pageobject('ToolsStatusApprovedAdminPage',toolname)\n po.goto_page()\n\n # click the publish link\n publish_status,output = po.do_publish()\n\n # wait for the output success / failure block to appear\n if publish_status is False:\n raise RuntimeError(\"finalizetool failed: %s\" % (output))\n\n # mark project as created\n self.flip_tool_status('ToolsStatusApprovedAdminPage',toolname,'Published')\n\n # check that the tool is in the published state\n tool_state = po.get_tool_state()\n if tool_state.lower() != 'Published'.lower():\n raise Exception('Incorrect tool state: %s, expected \"Published\"'\\\n % tool_state)",
"def deploy_application(target_environment, config_file, branch, force): # noqa\n # read in and parse configuration\n app = config.AppConfiguration.load(\n config_file or\n os.path.join(settings.app_conf_dir, '%s.conf' % target_environment)\n )\n app_name = app.app_name\n branch = branch or app.default_branch or git.get_current_branch()\n\n # get the contents of the proposed deployment\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n remote_hash = release.commit\n if app.use_pipeline:\n # if we are using pipelines, then the commit we need is not the\n # local one, but the latest version on the upstream app, as this\n # is the one that will be deployed.\n upstream_release = heroku.HerokuRelease.get_latest_deployment(app.upstream_app) # noqa\n local_hash = upstream_release.commit\n else:\n local_hash = git.get_branch_head(branch)\n\n if local_hash == remote_hash:\n click.echo(u\"Heroku application is up-to-date, aborting deployment.\")\n return\n\n files = git.get_files(remote_hash, local_hash)\n commits = git.get_commits(remote_hash, local_hash)\n\n post_deploy_tasks = app.post_deploy_tasks\n\n click.echo(\"\")\n click.echo(\"Comparing %s..%s\" % (remote_hash, local_hash))\n click.echo(\"\")\n click.echo(\"The following files have changed since the last deployment:\\n\") # noqa\n if len(files) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" * %s\\n\" % f for f in files]))\n click.echo(\"\")\n click.echo(\"The following commits will be included in this deployment:\\n\") # noqa\n if len(commits) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" [%s] %s\\n\" % (c[0], c[1]) for c in commits]))\n\n # ============== summarise actions ==========================\n click.echo(\"\")\n click.echo(\"Summary of deployment options:\") # noqa\n click.echo(\"\")\n click.echo(\" ----- Deployment SETTINGS -----------\")\n click.echo(\"\")\n click.echo(\" Git branch: %s\" % branch)\n click.echo(\" Target env: %s (%s)\" % (target_environment, app_name))\n click.echo(\" Force push: %s\" % force)\n # pipeline promotion - buildpack won't run\n click.echo(\" Pipeline: %s\" % app.use_pipeline)\n if app.use_pipeline:\n click.echo(\" Promote: %s\" % app.upstream_app)\n click.echo(\" Release tag: %s\" % app.add_tag)\n click.echo(\"\")\n click.echo(\" ----- Post-deployment commands ------\")\n click.echo(\"\")\n\n if not post_deploy_tasks:\n click.echo(\" (None specified)\")\n else:\n [click.echo(\" %s\" % x) for x in post_deploy_tasks]\n\n click.echo(\"\")\n # ============== / summarise actions ========================\n\n # put up the maintenance page if required\n maintenance = utils.prompt_for_action(\n u\"Do you want to put up the maintenance page?\",\n False\n )\n\n if not utils.prompt_for_pin(\"\"):\n exit(0)\n\n if maintenance:\n click.echo(\"Putting up maintenance page\")\n heroku.toggle_maintenance(app_name, True)\n\n if app.use_pipeline:\n click.echo(\"Promoting upstream app: %s\" % app.upstream_app)\n heroku.promote_app(app.upstream_app)\n else:\n click.echo(\"Pushing to git remote\")\n git.push(\n remote=git.get_remote_url(app_name),\n local_branch=branch,\n remote_branch='master',\n force=force\n )\n\n if post_deploy_tasks:\n click.echo(\"Running post-deployment tasks:\")\n run_post_deployment_tasks(post_deploy_tasks)\n\n if maintenance:\n click.echo(\"Pulling down maintenance page\")\n heroku.toggle_maintenance(app_name, False)\n\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n if app.add_tag:\n click.echo(\"Applying 
git tag\")\n message = \"Deployed to %s by %s\" % (app_name, release.deployed_by)\n git.apply_tag(commit=local_hash, tag=release.version, message=message)\n\n click.echo(release)",
"def PublishIt(name, path, comments, task=os.getenv('TASK'), status=\"WORK IN PROGRESS\"):\n\n db = get_connection()\n\n PubCollections = db['submissions']\n\n # creation of the dailies submission entry\n publishDict = dict()\n publishDict['date'] = now\n publishDict['type'] = \"publish\"\n publishDict['user_name'] = main_user\n publishDict['task'] = task\n publishDict['status'] = status\n publishDict['asset'] = name\n publishDict['path'] = path\n publishDict['comment'] = comments\n PubCollections.save(publishDict)\n notifications.push_notifications({\"name\": main_user, \"email\": os.getenv('USER_EMAIL')}, users_list, \"publish\", shot, now)",
"def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package",
"def deploy(fingerengine, fingerprint):\n\n\tcfm_path = abspath(fingerengine.options.deploy)\n\tcfm_file = parse_war_path(cfm_path, True)\n\tdip = fingerengine.options.ip\n\n\tcookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]\n\tif not cookie:\n\t\tutility.Msg(\"Could not get auth\", LOG.ERROR)\n\t\treturn\n\n\tutility.Msg(\"Preparing to deploy {0}...\".format(cfm_file))\n\tutility.Msg(\"Fetching web root...\", LOG.DEBUG)\n\n\troot = fetch_webroot(dip, fingerprint, cookie)\n\tif not root:\n\t\tutility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n\t\treturn\n\t\n\t# create the scheduled task\n\tutility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n\tutility.Msg(\"Creating scheduled task...\")\n\n\tif not create_task(dip, fingerprint, cfm_file, root, cookie):\n\t\treturn\n\n\t# invoke the task\n\tutility.Msg(\"Task %s created, invoking...\" % cfm_file)\n\trun_task(dip, fingerprint, cfm_path, cookie)\n\n\t# cleanup\n\tutility.Msg(\"Cleaning up...\")\n\tif not delete_task(dip, fingerprint, cfm_file, cookie):\n\t\tutility.Msg(\"Failed to remove task. May require manual removal.\", LOG.ERROR)",
"def installApp(dev, apkFile=None, appPackage=None, outFile=None, local=False):\n certFile = scriptRoot + '/certs/localtest.me.pem'\n with ServerContext(LocalMarketServer(certFile, config.officialServer)) as server:\n if apkFile:\n server.setApk(apkFile.read())\n elif appPackage:\n print('Downloading apk')\n apps = listApps(True)\n if appPackage not in apps:\n raise Exception('Unknown app: %s' % appPackage)\n server.setApk(apps[appPackage].release.asset)\n\n print('Starting task')\n xpdData = server.getXpd()\n\n print('Starting communication')\n # Point the camera to the web api\n result = installer.install(dev, server.host, server.port, xpdData, printStatus)\n if result.code != 0:\n raise Exception('Communication error %d: %s' % (result.code, result.message))\n\n result = server.getResult()\n\n if not local:\n try:\n RemoteAppStore(config.appengineServer).sendStats(result)\n except:\n pass\n\n print('Task completed successfully')\n\n if outFile:\n print('Writing to output file')\n json.dump(result, outFile, indent=2)\n\n return result",
"def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)",
"def test_publish(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.publish(TOOLNAME)"
] | [
"0.7300059",
"0.67660546",
"0.6331592",
"0.6170584",
"0.60555214",
"0.6053848",
"0.5974832",
"0.58902156",
"0.5870017",
"0.58022076",
"0.56858075",
"0.56083417",
"0.5599532",
"0.5585206",
"0.5573604",
"0.5570114",
"0.5532284",
"0.5491696",
"0.5456021",
"0.5449518",
"0.5443404",
"0.54393977",
"0.54283106",
"0.54259217",
"0.54234827",
"0.5418377",
"0.5414494",
"0.5389343",
"0.53819203",
"0.537537"
] | 0.69711465 | 1 |
Parses parameters string and returns a dict of overrides. This function assumes that parameters string is in the form of '"key1="value1" key2="value2"'. Use of single quotes is optional but is helpful for strings that contain spaces. | def _parse_parameters(parameters):
if not re.match(r'^(\w+)="([^=]+)"(\s{1}(\w+)="([^=]+)")*$', parameters):
raise ValueError
# first we add tokens that separate key/value pairs.
# in case of key='ss sss ss', we skip tokenizing when we se the first single quote
# and resume when we see the second
replace_space = True
tokenized = ""
for c in parameters:
if c == '\"':
replace_space = not replace_space
elif c == ' ' and replace_space:
tokenized += "$$"
else:
tokenized += c
# now get the tokens
tokens = tokenized.split('$$')
result = {}
for token in tokens:
# separate key/values
key_value = token.split("=")
result[key_value[0]] = key_value[1]
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_params( self ):\n paramDic={}\n # Parameters are on the 3rd arg passed to the script\n paramStr=sys.argv[2]\n print paramStr\n if len(paramStr)>1:\n paramStr = paramStr.replace('?','')\n \n # Ignore last char if it is a '/'\n if (paramStr[len(paramStr)-1]=='/'):\n paramStr=paramStr[0:len(paramStr)-2]\n \n # Processing each parameter splited on '&' \n for param in paramStr.split(\"&\"):\n try:\n # Spliting couple key/value\n key,value=param.split(\"=\")\n except:\n key=param\n value=\"\"\n \n key = urllib.unquote_plus(key)\n value = urllib.unquote_plus(value)\n \n # Filling dictionnary\n paramDic[key]=value\n print paramDic\n return paramDic",
"def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")",
"def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict",
"def parse_from_str(self, config_str):\n if not config_str:\n return {}\n config_dict = {}\n try:\n for kv_pair in config_str.split(','):\n if not kv_pair: # skip empty string\n continue\n k, v = kv_pair.split('=')\n config_dict[k.strip()] = eval_str_fn(v.strip())\n return config_dict\n except ValueError:\n raise ValueError('Invalid config_str: {}'.format(config_str))",
"def update_drum_params(input_args, default_params):\n try:\n as_dict = ast.literal_eval(str(input_args))\n except ValueError:\n base_params = get_base_params(0, 0, 0, 0)\n print(f'The input string: `{input_args}` is not in the right format.')\n print('The input and each key should be enclosed in quotes.')\n print('Heres an example:')\n example = \"\"\" -kick \"{'div':2}\" \"\"\"\n print('\\t', example)\n print('Poissible parameters are: ')\n [print('\\t', k) for k in base_params.keys()]\n except Exception as e:\n print(e)\n\n for k, v in as_dict.items():\n if k in default_params:\n default_params[k] = v\n return default_params",
"def urllib_unquote_parameters(inputstring):\r\n\r\n if type(inputstring) is not str:\r\n raise TypeError(\"urllib_unquote_parameters' inputstring parameter must be a string, not '\"+str(type(inputstring))+\"'\")\r\n\r\n keyvalpairs = inputstring.split(\"&\")\r\n res = {}\r\n\r\n for quotedkeyval in keyvalpairs:\r\n # Throw ValueError if there is more or less than one '='.\r\n quotedkey, quotedval = quotedkeyval.split(\"=\")\r\n key = urllib_unquote_plus(quotedkey)\r\n val = urllib_unquote_plus(quotedval)\r\n res[key] = val\r\n\r\n return res",
"def parse_params(params):\n pairs = params.split(' ')\n content = dict()\n for key, value in [pair.split('=') for pair in pairs]:\n content[key] = int(value)\n return content",
"def __parse_options_dict(options_str):\n # type: (str) -> Dict[str, str]\n opts = options_str.split('&') # type: List[str]\n res = {} # Type: Dict\n\n for opt in opts:\n key, value = opt.split('=') # type: List[str, str]\n res[key] = value # type: str\n\n return res",
"def parse_request_arg_dict(arg, exception_class=Exception):\n arg_dict = {}\n arg_pairs = arg.split(';')\n for arg_pair in arg_pairs:\n try:\n arg_name, arg_value = arg_pair.split('=', 1)\n except Exception as error:\n logging.exception(error)\n raise exception_class(\n 'there is no `=` in %s' % arg_pair\n )\n arg_dict[arg_name] = arg_value\n return arg_dict",
"def _parse_params(params):\n for key, value in params.items():\n if value.lower() in ('none', 'null', ''):\n params[key] = None\n elif value.lower() == 'true':\n params[key] = True\n elif value.lower() == 'false':\n params[key] = False\n elif value.isdigit() or (value[0] == '-' and value[1:].isdigit()):\n params[key] = int(value)\n elif ',' in value:\n params[key] = list(map(lambda x: x.strip(), value.split(',')))\n else:\n try:\n params[key] = float(value)\n except:\n pass\n return params",
"def parse_function_params(params: Text) -> Dict:\n function_meta = {\"args\": [], \"kwargs\": {}}\n\n params_str = params.strip()\n if params_str == \"\":\n return function_meta\n\n args_list = params_str.split(\",\")\n for arg in args_list:\n arg = arg.strip()\n if \"=\" in arg:\n key, value = arg.split(\"=\")\n function_meta[\"kwargs\"][key.strip()] = parse_string_value(value.strip())\n else:\n function_meta[\"args\"].append(parse_string_value(arg))\n\n return function_meta",
"def split_params(param_string):\n\t#TODO: check for negatives i.e. alpha--1\n\tparts = param_string.split('_')\n\tparams = {}\n\n\tfor i in range(len(parts)):\n\t\tparam = split_items(parts[i])\n\t\tif len(param) < 2:\n\t\t\ttry:\n\t\t\t\tparts[i+1] = parts[i] + \"_\" + parts[i+1]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\telif len(param) == 2:\n\t\t\tparams[param[0]] = param[1]\n\t\telif len(param) == 3 and len(param[1]) == 0:\n\t\t\tparams[param[0]] = -param[2]\n\t\telse:\n\t\t\tparams[param[0]] = param[1:]\n\treturn params",
"def str2dict(string):\n res_dict = {}\n for keyvalue in string.split(','):\n (key, value) = keyvalue.split('=', 1)\n res_dict[key] = value\n return res_dict",
"def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict",
"def parseCommandLine(argv):\n parameters = {}\n for p in argv[1:]: # skip 0th element (module name)\n pair = split(p, '=', 1)\n if (2 != len(pair)):\n print 'bad parameter: %s (had no equals sign for pairing)' % p\n sys.exit()\n else:\n parameters[pair[0]] = pair[1]\n return parameters",
"def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val\n\n return results",
"def parse_config_string(config_string, issue_warnings=True):\n config_dict = {}\n my_splitter = shlex.shlex(config_string, posix=True)\n my_splitter.whitespace = ','\n my_splitter.whitespace_split = True\n for kv_pair in my_splitter:\n kv_pair = kv_pair.strip()\n if not kv_pair:\n continue\n kv_tuple = kv_pair.split('=', 1)\n if len(kv_tuple) == 1:\n if issue_warnings:\n MsafConfigWarning.warn(\n (\"Config key '%s' has no value, ignoring it\" %\n kv_tuple[0]), stacklevel=1)\n else:\n k, v = kv_tuple\n # subsequent values for k will override earlier ones\n config_dict[k] = v\n return config_dict",
"def _parse_parameter_overrides(self, **parameter_overrides):\n\n par = self.parameter_set.parameters.copy()\n\n array_len = None\n array_keys = []\n\n for key, val in parameter_overrides.items():\n if key not in par.keys(): # don't add invalid keys\n msg = '{} is not a valid parameter, ignoring'\n warnings.warn(msg.format(key))\n else:\n if np.isscalar(val):\n par[key] = float(val)\n else: # is an array\n if array_len is None:\n array_len = len(val)\n if len(val) != array_len:\n msg = ('All array valued parameters must have the '\n 'same length.')\n raise ValueError(msg)\n array_keys.append(key)\n par[key] = val\n\n return par, array_keys, array_len",
"def handle_log_output(original_parameters_string: Optional[Any]) -> Dict[str, Any]:\n if original_parameters_string is None:\n return {}\n\n if isinstance(original_parameters_string, bytes):\n mystr = original_parameters_string.decode(\"utf-8\")\n elif isinstance(original_parameters_string, str):\n mystr = original_parameters_string\n else:\n mystr = str(original_parameters_string)\n\n if mystr.strip() == \"\":\n return {}\n\n urlencoded = False\n try:\n parameters = orjson.loads(mystr)\n except orjson.JSONDecodeError:\n try:\n parameters = urllib.parse.parse_qs(mystr)\n urlencoded = True\n except Exception: # pragma: no cover\n return original_parameters_string\n\n return obfuscate_dict(parameters, urlencoded=urlencoded)",
"def override(self, config_dict_or_str, allow_new_keys=False):\r\n if isinstance(config_dict_or_str, str):\r\n if not config_dict_or_str:\r\n return\r\n elif '=' in config_dict_or_str:\r\n config_dict = self.parse_from_str(config_dict_or_str)\r\n elif config_dict_or_str.endswith('.yaml'):\r\n config_dict = self.parse_from_yaml(config_dict_or_str)\r\n else:\r\n raise ValueError(\r\n 'Invalid string {}, must end with .yaml or contains \"=\".'.format(\r\n config_dict_or_str))\r\n elif isinstance(config_dict_or_str, dict):\r\n config_dict = config_dict_or_str\r\n else:\r\n raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\r\n\r\n self._update(config_dict, allow_new_keys)",
"def input_arguments(lines, lower = False):\n\n var_dict = {}\n if len(lines) == 0: return var_dict\n if lines[-1] != '\\n': lines += '\\n'\n lines = re.sub('#.*\\n', '#', lines) # convert all comments into _delimiters\n for block in lines.split('#'):\n name = None\n # look for assignment\n for item in block.split('='):\n value = None\n new_name = item\n # if value is string\n for s in _quote:\n item_str = item.split(s)\n if len(item_str) > 2: # found quotation marks\n value = item_str[1] # the string in the first _quote\n new_name = item_str[-1].strip() # last term\n # value not a string\n if value is None:\n value = item\n for s in _delimiter:\n try:\n value = list(filter(None, value.split(s)))[0] # always take the first meaningful string\n except IndexError:\n value = ''\n break\n for s in _delimiter:\n try:\n new_name = list(filter(None, new_name.split(s)))[-1] # always take the last meaningful string\n except IndexError:\n new_name = ''\n break\n if is_valid_variable_name(name) and value is not None:\n if lower: name = name.lower()\n var_dict.update({name : value})\n name = new_name\n return var_dict",
"def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val if val.lower() != 'null' else None\n\n return results",
"def _parseOptions(self, optionsString):\n\n options = dict()\n pairs = optionsString.split(\";\")\n for pair in pairs:\n if not pair or \"=\" not in pair:\n continue\n\n key, value = pair.split(\"=\")\n options[key] = int(value)\n\n return options",
"def parse_kwargs(kwargs_list: List[str]) -> Dict[str, Any]:\n\n kwargs_dict = {}\n\n for kwarg in kwargs_list:\n key = kwarg[2:].split('=')[0]\n value = '='.join(kwarg.split('=')[1:])\n\n try:\n if re.match(r'^(-)?[0-9]+$', value):\n value = int(value)\n\n elif re.match(r'^(-)?[0-9]*.[0-9]+$', value) or re.match(r'^(-)?[0-9]*(\\.)?[0-9]+e(-|\\+)[0-9]+$', value):\n value = float(value)\n\n elif re.match(r'^\\[.*]$', value) or re.match(r'^\\{.*}$', value):\n value = json.loads(value)\n\n elif value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n\n elif value.lower() == 'none':\n value = None\n\n except:\n logging.warning(f'Could not automatically parse argument \"{key}.\" Its type will remain string.')\n\n kwargs_dict[key] = value\n\n return kwargs_dict",
"def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result",
"def fromstring(self, description):\n self.header = {}\n # Split string either on commas or whitespace, for good measure\n param_vals = [p.strip() for p in description.split(',')] \\\n if ',' in description else description.split()\n params = [p for p in self]\n min_len = min(len(params), len(param_vals))\n for param, param_val in zip(params[:min_len], param_vals[:min_len]):\n param.value_str = param_val\n for param in params[min_len:]:\n param.value = param.default_value",
"def parsekv(inputString):\n mDict = dict()\n parts = inputString.split('&')\n for item in parts:\n if (item.count('=') != 1):\n raise ValueError(\"Need a singular = sign in str. %s\" % (item, ))\n key, value = item.split('=')\n # If we can convert the string value to an int, great, otherwise\n # leave it as a string.\n try:\n mDict[key] = int(value)\n except ValueError:\n mDict[key] = value\n return mDict",
"def parseConfig(self, filename):\n parameters = {}\n try:\n f = open(filename)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n print('cannot open', filename)\n raise\n else:\n for line in f:\n # Remove text after comment character.\n if self.comment_char in line:\n line, comment = line.split(self.comment_char,\n 1) # Split on comment character, keep only the text before the character\n\n # Find lines with parameters (param=something)\n if self.param_char in line:\n parameter, value = line.split(self.param_char, 1) # Split on parameter character\n parameter = parameter.strip() # Strip spaces\n value = value.strip()\n parameters[parameter] = value # Store parameters in a dictionary\n\n f.close()\n\n return parameters",
"def update_params(argv: list, prm: dict):\n\n\tfor a in argv[1:]:\n\t\ttoks = a.split('=',1)\n\t\tif len(toks)<2: continue\n\t\tk,v = toks[:2]\n\t\tif k not in prm: continue\n\t\tprm[k] = v",
"def quote_all(parameters: Dict[str, Any]) -> Dict[str, Any]:\n return {key: quote_plus(value) if isinstance(value, str) else value for key, value in parameters.items()}"
] | [
"0.65870714",
"0.65518653",
"0.65437955",
"0.65169543",
"0.63385206",
"0.6322526",
"0.63106567",
"0.6298732",
"0.6283422",
"0.6264256",
"0.62140757",
"0.6202048",
"0.6163683",
"0.60954064",
"0.6093808",
"0.6043404",
"0.60339767",
"0.6015657",
"0.5971249",
"0.5954985",
"0.5940959",
"0.59400064",
"0.59167373",
"0.58862746",
"0.5881694",
"0.58210146",
"0.5772958",
"0.575196",
"0.57452476",
"0.5730277"
] | 0.74429375 | 0 |