query stringlengths 9-9.05k | document stringlengths 10-222k | metadata dict | negatives listlengths 30-30 | negative_scores listlengths 30-30 | document_score stringlengths 4-10 | document_rank stringclasses 2 values |
---|---|---|---|---|---|---|
get a stub that sends requests to a given peer | def get_stub(self, peer: PeerID) -> AuthRPCWrapper:
stub = super().get_stub(self.p2p, peer)
return AuthRPCWrapper(stub, AuthRole.CLIENT, self.authorizer, service_public_key=None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def server_prober(stub, request):\n try:\n status = stub.ProbeStatus(request)\n except Exception as e:\n new_stub = shift_server()\n return new_stub\n else:\n return stub",
"def shift_server():\n global ALIVE\n if ALIVE == 0:\n ALIVE = 1\n elif ALIVE == 1:\n ALIVE = 0\n channel = grpc.insecure_channel(PORTS[ALIVE])\n stub = dist_bank_pb2_grpc.DistBankStub(channel)\n return stub",
"def fake_twisted_request(*args, **kwargs):\n kwargs.setdefault(\n 'Request', lambda channel: Request(channel=channel, queued=False))\n request = fake_nevow_request(*args, **kwargs)\n request.finish = lambda: next(request.finish.counter)\n request.finish.counter = itertools.count()\n return request",
"def get_hanlder(self, req, server_addr, peer, retries, timeout):\n raise NotImplemented()",
"def create_downloader_rpc_stub(address=None):\n return xmlrpc.client.ServerProxy(address or config.ARIA2_RPC_ADDRESS,\n allow_none=True)",
"def test_peers_get(self):\n pass",
"def start(self):\n if self._real_send:\n raise RuntimeError('Mocker has already been started')\n\n self._real_send = requests.Session.send\n\n def _fake_get_adapter(session, url):\n return self._adapter\n\n def _fake_send(session, request, **kwargs):\n real_get_adapter = requests.Session.get_adapter\n requests.Session.get_adapter = _fake_get_adapter\n\n try:\n return self._real_send(session, request, **kwargs)\n except exceptions.NoMockAddress:\n if not self._real_http:\n raise\n finally:\n requests.Session.get_adapter = real_get_adapter\n\n return self._real_send(session, request, **kwargs)\n\n requests.Session.send = _fake_send",
"def get_socket(self, version, src_addr=None):\n socket = ProxyProtocolSocket(version, src_addr=src_addr)\n socket.sendall = Mock(name='mock-sendall')\n socket.send = Mock(name='mock-send')\n socket.getpeername = Mock(name='mock-getpeername')\n socket.getsockname = Mock(name='mock-getsockname')\n return socket",
"def rpc_request(method, params, url=LOCAL):\n client = HTTPClient(url)\n return client.request(method, params)",
"def _create_stub(target, port):\n channel = gnmi_pb2_grpc.grpc.insecure_channel(target + ':' + port)\n return gnmi_pb2_grpc.gNMIStub(channel)",
"def request(method, host='127.0.0.1', port='44443'):\n\n try:\n json_query = json.dumps(REQ[method], indent=4)\n print \"============ Request ==============\"\n print json_query\n s = create_socket(host, int(port))\n s.sendall(json_query + \"\\r\\n\\r\\n\")\n except KeyError as e:\n print \"method: <%s> does not exist.\" % method\n sys.exit(-1)\n except Exception as e:\n raise\n\n json_response = \"\"\n while True:\n msg = s.recv(1024)\n json_response += msg\n if json_response[-4:] == \"\\r\\n\\r\\n\":\n break\n\n print \"============= Reply ===============\"\n print json_response\n s.close()",
"def test_peers_post(self):\n pass",
"def _send_request(self):\n url = self.config['url']\n agent = Agent(reactor)\n response = (yield agent.request(\n 'GET',\n url.encode(\"ASCII\"),\n ))\n\n d = defer.Deferred()\n response.deliverBody(ReceiveBody(d))\n defer.returnValue((yield d))",
"def create_lower_stub(self):\n if self.lower_layer_addr:\n channel = grpc.insecure_channel(self.lower_layer_addr)\n self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print(\"no lower layer has been specified\")",
"def requestsmock():\n with requests_mock.mock() as m:\n yield m",
"def test_portforward(self):\n realServerFactory = protocol.ServerFactory()\n realServerFactory.protocol = lambda: self.serverProtocol\n realServerPort = reactor.listenTCP(0, realServerFactory, interface=\"127.0.0.1\")\n self.openPorts.append(realServerPort)\n self.proxyServerFactory = TestableProxyFactory(\n \"127.0.0.1\", realServerPort.getHost().port\n )\n proxyServerPort = reactor.listenTCP(\n 0, self.proxyServerFactory, interface=\"127.0.0.1\"\n )\n self.openPorts.append(proxyServerPort)\n\n nBytes = 1000\n received = []\n d = defer.Deferred()\n\n def testDataReceived(data):\n received.extend(iterbytes(data))\n if len(received) >= nBytes:\n self.assertEqual(b\"\".join(received), b\"x\" * nBytes)\n d.callback(None)\n\n self.clientProtocol.dataReceived = testDataReceived\n\n def testConnectionMade():\n self.clientProtocol.transport.write(b\"x\" * nBytes)\n\n self.clientProtocol.connectionMade = testConnectionMade\n\n clientFactory = protocol.ClientFactory()\n clientFactory.protocol = lambda: self.clientProtocol\n\n reactor.connectTCP(\"127.0.0.1\", proxyServerPort.getHost().port, clientFactory)\n\n return d",
"def patch():\n\n config(\"127.0.0.1\", 9050)\n\n socket.socket = socks.socksocket\n socket.create_connection = create_connection",
"def test_service_call(self, create_connection):\n create_connection.return_value = Mock()\n create_connection.return_value.recv = Mock(return_value=msgpack.packb(None))\n\n adress = ('127.0.0.1', 20001)\n method_name = 'foo'\n method_params = [12]\n\n expected_data = service_call.encode_call(method_name, method_params)\n\n service_call.call(adress, method_name, method_params)\n\n create_connection.assert_any_call(adress)\n create_connection.return_value.sendall.assert_any_call(expected_data)",
"def blockchain_requests(self, api_port, endpoint):\n SERVER_IP = '129.108.7.2'\n url = \"http://\" + SERVER_IP + \":\" + str(api_port) + endpoint\n #print(\"URL requesting: \" + url)\n r = requests.get(url)\n return r.json()",
"def test_mock_stream_unary_call(import_path):\n from .pb import test_pb2\n from .pb import test_pb2_grpc\n\n class TestServicer(test_pb2_grpc.GRPCTestServicer):\n\n _server_id = random.randint(2, 2 ** 10)\n\n def ClientStreamingMethod(self, request_stresam, context):\n client_data = []\n\n for request in request_stresam:\n client_data.append(request.request_data)\n\n response = test_pb2.Response(\n server_id=self._server_id, response_data=\"\".join(client_data)\n )\n return response\n\n with ServiceMock(\n \"localhost:50051\", TestServicer, assert_all_method_fired=False\n ):\n request_data = []\n\n def request_stream():\n for i in range(10):\n data = random.choice(string.ascii_letters)\n request_data.append(data)\n\n request = test_pb2.Request(client_id=1, request_data=data,)\n yield request\n\n with grpc.insecure_channel(\"localhost:50051\") as channel:\n stub = test_pb2_grpc.GRPCTestStub(channel)\n response = stub.ClientStreamingMethod(request_stream())\n\n assert response.server_id == TestServicer._server_id\n assert request_data != []\n assert response.response_data == \"\".join(request_data)",
"def test_service_call_answer(self, create_connection):\n create_connection.return_value = Mock()\n adress = ('127.0.0.1', 20001)\n return_data = msgpack.packb([1, 'bar'])\n\n create_connection.return_value.recv = Mock(return_value=return_data)\n\n result = service_call.call(adress, 'foo')\n self.assertEqual(result, [1, 'bar'])",
"def test_request(comms):\n kernel_comm, frontend_comm = comms\n\n def handler(a, b):\n return a + b\n\n kernel_comm.register_call_handler('test_request', handler)\n\n res = frontend_comm.remote_call(blocking=True).test_request('a', b='b')\n\n assert res == 'ab'",
"def _send_jsonrpc_request(parity_url, request, getter):\n request_string = json.dumps(request)\n responses = requests.post(\n parity_url,\n data=request_string,\n headers={\"content-type\": \"application/json\"}\n ).json()\n full_response = []\n assert type(responses) == list\n for response in responses:\n try:\n full_response += getter(response)\n except Exception as e:\n print(\"Exception while processing response:\")\n print(e)\n return full_response",
"def makeRequest(method,\r\n dsthost,\r\n dstport,\r\n srchost,\r\n srcport,\r\n toaddr=None,\r\n fromaddr=None,\r\n maxforwards=70,\r\n extension=\"jack\",\r\n contact=None,\r\n callid=None,\r\n cseqnum=1,\r\n localtag=None,\r\n contenttype=None,\r\n content='',\r\n accept='application/sdp',\r\n useragent='BROKEN-SYSTEMS',\r\n auth=None,\r\n ):\r\n superheaders = dict()\r\n headers = dict()\r\n finalheaders = dict()\r\n uri = 'sip:%s@%s' %(extension,dsthost)\r\n superheaders['Via'] = 'SIP/2.0/UDP %s:%s;branch=z9hG4bK-%s;rport' %(srchost,srcport,random.getrandbits(32))\r\n if toaddr is None:\r\n toaddr = '\"%s\"<sip:%s@%s>'%(extension,extension,dsthost[0])\r\n headers['To'] = toaddr\r\n if fromaddr is None:\r\n fromaddr = '\"%s\"<sip:%s@%s>'%(extension,extension,dsthost[0])\r\n headers['From'] = fromaddr\r\n if localtag is None:\r\n localtag = random.getrandbits(80)\r\n headers['From'] += ';tag=%s' %(localtag)\r\n headers['User-Agent'] = useragent\r\n if callid is None:\r\n callid = '%s' %(random.getrandbits(32))\r\n headers['Call-ID'] = callid\r\n if contact is None:\r\n contact = '\"%s\" <sip:%s:1.1.1.1>' %(extension,extension) # XXX points to nowhere !!\r\n headers['Contact'] = contact\r\n headers['CSeq'] = '%s %s' %(cseqnum,method)\r\n headers['Max-Forwards'] = maxforwards\r\n headers['Accept'] = accept\r\n finalheaders['Content-Length'] = len(content)\r\n if contenttype is None and finalheaders['Content-Length'] > 0:\r\n contentype = 'application/sdp'\r\n if contenttype is not None:\r\n finalheaders['Content-Type'] = contenttype\r\n if auth is not None:\r\n if auth['algorithm'] == 'MD5':\r\n response = getMD5ChallengeResponse(auth['username'],\r\n auth['realm'],\r\n auth['password'],\r\n method,\r\n uri,\r\n auth['nonce'])\r\n else:\r\n raise TypeError, \"only supports 'MD5' digest algorithm\"\r\n if auth['proxy']:\r\n finalheaders['Proxy-Authorization'] = \\\r\n 'Digest username=\"%s\",realm=\"%s\",nonce=\"%s\",uri=\"%s\",response=\"%s\",algorithm=MD5' % (auth['username'],\r\n auth['realm'],\r\n auth['nonce'],\r\n uri,\r\n response)\r\n else:\r\n finalheaders['Authorization'] = \\\r\n 'Digest username=\"%s\",realm=\"%s\",nonce=\"%s\",uri=\"%s\",response=\"%s\",algorithm=MD5' % (auth['username'],\r\n auth['realm'],\r\n auth['nonce'],\r\n uri,\r\n response)\r\n\r\n reqpkt = '%s %s SIP/2.0\\r\\n' %(method,uri)\r\n for header in superheaders.iteritems():\r\n reqpkt += '%s: %s\\r\\n' %header\r\n for header in headers.iteritems():\r\n reqpkt += '%s: %s\\r\\n' %header\r\n for header in finalheaders.iteritems():\r\n reqpkt += '%s: %s\\r\\n' %header\r\n reqpkt += '\\r\\n'\r\n reqpkt += content\r\n return reqpkt",
"def create_upper_stub(self):\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print(\"no upper layer has been specified\")",
"def test_init_connection_reset_counter(self, mock_send_request):\n self.client.initialize()\n resp = '{\"id\": %d, \"result\": 123, \"error\": null, \"callback\": null}'\n mock_send_request.side_effect = (resp % (i,) for i in range(10))\n\n for _ in range(0, 10):\n self.client.some_rpc()\n\n self.assertEqual(next(self.client._counter), 10)\n self.client._make_connection()\n self.assertEqual(next(self.client._counter), 0)",
"def DoStubHttp(status, mime, resp_body):\n def Request(unused_self, unused_url, method, body, headers):\n _ = method, body, headers # unused kwargs\n response = httplib2.Response({\n 'status': status,\n 'content-type': mime,\n })\n return response, resp_body\n return mock.patch('httplib2.Http.request', new=Request)",
"def stubbed_sender() -> Account:\n return Account(bytes([8] * PublicKey.LENGTH))",
"def gotProtocol(self,p): \n p.send_hello()",
"def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)"
]
| [
"0.58286816",
"0.56329334",
"0.55941427",
"0.55800456",
"0.5554966",
"0.5400264",
"0.53861904",
"0.5328715",
"0.53022385",
"0.5250319",
"0.523606",
"0.5232564",
"0.52295834",
"0.5191481",
"0.5185965",
"0.5153067",
"0.5133774",
"0.513366",
"0.5125457",
"0.51184845",
"0.51178515",
"0.51073396",
"0.5075375",
"0.506743",
"0.50613326",
"0.50413543",
"0.5020262",
"0.50137395",
"0.50118786",
"0.49683282"
]
| 0.5873532 | 0 |
Sends message to slack | def send_to_slack(message):
if debug:
print(message)
return True
else:
slack_data = {'text': message, 'channel': slack_channel, "username": slack_username,
"icon_emoji": slack_icon_emoji}
response = requests.post(
slack_webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'},
proxies=proxyDict
)
if response.status_code != 200:
raise ValueError(
'Request to slack returned an error %s, the response is:\n%s'
% (response.status_code, response.text)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_slack(self, message):\n self.slack_client.api_call('chat.postMessage', channel=self.slack_channel, text=message, username=self.username, icon_emoji=self.slack_icon_emoji)\n print(\"Slack Notification sent\")",
"def send_message_to_slack(text):\n\n try:\n post = {\n \"text\": \":fire: :sad_parrot: *SSL Certificate BACKUP SCRIPT Status for HTTPD Proxy:* :sad_parrot: :fire:\",\n \"attachments\": [\n {\n \"text\": \"{0}\".format(text),\n \"color\": \"#B22222\",\n \"attachment_type\": \"default\",\n \"fields\": [\n {\n \"title\": \"Priority\",\n \"value\": \"High\",\n \"short\": \"false\"\n }\n ],\n \"footer\": \"AWS HTTPD\",\n \"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\"\n }\n ]\n }\n\n ssm_param_name = 'slack_notification_webhook'\n ssm = boto3.client('ssm', config=CONFIG, region_name='eu-west-2')\n try:\n response = ssm.get_parameter(\n Name=ssm_param_name, WithDecryption=True)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ParameterNotFound':\n LOGGER.info(\n 'Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n else:\n logging.error(\n \"Unexpected error when attempting to get Slack webhook URL: %s\", e)\n return\n if 'Value' in response['Parameter']:\n url = response['Parameter']['Value']\n\n json_data = json.dumps(post)\n req = urllib.request.Request(\n url,\n data=json_data.encode('ascii'),\n headers={'Content-Type': 'application/json'})\n LOGGER.info('Sending notification to Slack')\n response = urllib.request.urlopen(req)\n\n else:\n LOGGER.info(\n 'Value for Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n\n except Exception as err:\n logging.error(\n 'The following error has occurred on line: %s',\n sys.exc_info()[2].tb_lineno)\n logging.error(str(err))",
"def send_message(channel, message):\n slack_client = get_client()\n slack_client.chat_postMessage(channel=channel, text=message, as_user=True)",
"def slack(message):\n slack_hook = 'https://hooks.slack.com/services/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'\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n requests.post(slack_hook, json.dumps({'text': message}), headers=headers)",
"def slackMessage(sMessage):\n sChannel = '#' + getConfig('slack', 'channel')\n print(\"Posting slack message to %s: %s\" % (sChannel, sMessage))\n requests.post(getConfig('slack', 'url'), data=json.dumps({'text': sMessage,\n 'channel': sChannel,\n 'user': getConfig('slack', 'user'),\n 'icon_emoji': getConfig('slack', 'emoji')}))",
"def send_message_slack(message, color):\n icon_emoji = \":loudspeaker:\"\n title = f\"Connectivity Results for Node - {node_name} in {cluster_name}\"\n slack_data = {\n \"username\": str.upper(dc_number) + \" \" + \"Connectivity Notification From Node Doctor\",\n \"icon_emoji\": icon_emoji,\n \"channel\": channel,\n \"attachments\": [\n {\n \"color\": color,\n \"title\": title,\n \"text\": message\n }\n ]\n }\n print(json.dumps(slack_data))\n byte_length = str(sys.getsizeof(slack_data))\n headers = {'Content-Type': \"application/json\", 'Content-Length': byte_length}\n try:\n response = requests.post(url, data=json.dumps(slack_data), headers=headers)\n if response.status_code != 200:\n raise Exception(response.status_code, response.text)\n sys.exit(0)\n except ConnectionError as e:\n print(\"Exception when posting message to slack: %s\\n\" % e)\n sys.exit(0)",
"def send_slack_notification(url: str, title: str, message: str):\n\n content = {\n \"text\": f\"{title}\",\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"{message}\",\n },\n }\n ],\n }\n\n response = requests.post(url, json=content)\n\n # Raise exception if response is not 200\n response.raise_for_status()",
"def slack_post(title=\"Test\", message=\"Hello world!\", color=\"#999999\"):\n attach = dict(fallback=message, title=title, text=message, color=color)\n r = client.chat_postMessage(\n channel=CHANNEL, attachments=[attach], username=f\"{HOSTNAME} DBA alert\"\n )\n return r",
"def post_to_channel(self, text):\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=self.config.SLACK_CHANNEL,\n text=text,\n username='pybot',\n icon_emoji=':robot_face:'\n )",
"def notify_via_slack(webhook_url, msg):\n slack_data = {\"text\": msg}\n post(webhook_url, json=slack_data)",
"def _report_message_to_slack(self, message):\n self.log.debug(\"Forwarding message to slack\")\n url = self.cfg['pmon']['slack.hook']\n payload = json.dumps(self._make_slack_payload(message))\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Content-Encoding': 'utf8',\n 'Content-Length': str(len(payload))}\n try:\n rsp = requests.post(url, data=payload, headers=headers)\n if rsp.status_code != requests.codes.ok:\n self.log.warn(\"problem sending to slack: {0}\".format(rsp.status_code))\n except Exception as x:\n self.log.error(str(x))",
"def log_to_slack(self, message):\n log.info(f'MC: {message}')\n if not hasattr(self, 'slack_webhook'):\n log.info(f'MC: Not logging to slack because slack_webhook is not '\n f'defined.')\n return\n try:\n payload = {\"text\": message}\n if hasattr(self, 'slack_channel'):\n payload[\"channel\"] = self.slack_channel\n res = requests.post(self.slack_webhook, json=payload)\n res_text = res.text\n except Exception as e:\n res_text = repr(e)\n if res_text != 'ok':\n log.warning(f'MC: Error while logging to slack: {res_text}')",
"def send_slack(msg=None, attachments=None):\n if SLACK_TOKEN is not None:\n for channel in SLACK_CHANNELS:\n message = {\n 'username': 'Consent Updater',\n 'icon_emoji': ':file_folder:',\n 'channel': channel\n }\n if msg:\n message['text'] = msg\n if attachments:\n message['attachments'] = attachments\n\n resp = requests.post('https://slack.com/api/chat.postMessage',\n headers={\n 'Authorization': 'Bearer '+SLACK_TOKEN},\n json=message)",
"def notify_slack_message(token, channel, message):\n client = WebClient(token=token)\n try:\n response = client.chat_postMessage(\n channel=channel,\n text=message)\n except SlackApiError as e:\n print(f\"Got an error: {e.response['error']}\")",
"def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def send_user_message(self, channel_id, message):\n self.slack_client.api_call('chat.postMessage', as_user='true', channel=channel_id, text=message)",
"def send(self):\n payload = self.format_payload()\n\n # Makes sure that the required fields are provided before\n # sending the payload.\n if not self.webhook_url:\n print ('Error: Webhook URL is required.')\n\n elif not payload:\n print ('Error: Message payload cannot be empty.')\n\n else:\n try:\n request = requests.post(self.webhook_url,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n\n request.raise_for_status()\n\n except requests.exceptions.RequestException as error:\n print('Error: %s' % error)",
"def send_msg(workspace_message):\n\n workspace = Workspace.query.filter_by(id=workspace_message.workspace_id).first()\n message = Message.query.filter_by(id=workspace_message.message_id).first()\n\n if workspace and message:\n client = WebClient(token=workspace.token)\n\n # Send message to a channel\n if message.channel:\n try:\n response = client.chat_postMessage(channel='#'+message.channel, text=message.message,\n as_user=True)\n print(f\"Message send with response: {response}\")\n\n except SlackApiError as e:\n # You will get a SlackApiError if \"ok\" is False\n assert e.response[\"ok\"] is False\n assert e.response[\"error\"] # str like 'invalid_auth', 'channel_not_found'\n print(f\"Got an error: {e.response['error']}\")\n\n #send DM\n else:\n\n # user_info = client.users_lookupByEmail(email=\"[email protected]\") #todo: delete\n try:\n response = client.chat_postMessage(channel=message.direct_user_id, text=message.message,\n as_user=True)\n\n print(f\"Message send with response: {response}\")\n\n except SlackApiError as e:\n # You will get a SlackApiError if \"ok\" is False\n assert e.response[\"ok\"] is False\n assert e.response[\"error\"] # str like 'invalid_auth', 'channel_not_found'\n print(f\"Got an error: {e.response['error']}\")",
"def send(self, post, hook):\r\n\r\n # Put together the POST payload, and save emojis by encode using utf-8\r\n body = post.format_slack()\r\n\r\n # Shoot the message to slack using the hook we setup at <workspace>.slack.com\r\n r = requests.post(hook, headers={'Content-Type': 'application/json'}, data=body.encode('utf-8'))\r\n\r\n write_to_file(post)",
"def test_slackP_send(get_slackpost, capsys):\n s = get_slackpost\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def send_message(userid):\n\tsc.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=userid,\n\t\ttext=\"Hey there, just wanted to remind you to join <#CQCKS8UN6|secret-snowflake-fa19> by Wednesday night, if you want to participate in Secret Santa this year. It will be lots of fun!\",\n\t\tusername=\"Reminder\",\n\t\ticon_emoji=\":santa:\"\n\t)",
"def send(slack_client,\n channel: str,\n text: str = '',\n thread_ts: str = None,\n **kwargs\n ):\n\n if text:\n text_block = {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": text,\n }\n }\n blocks = kwargs.get('blocks')\n if blocks and isinstance(blocks, list):\n blocks.insert(0, text_block)\n else:\n blocks = [text_block]\n\n kwargs['blocks'] = blocks\n print(blocks)\n\n return slack_client.api_call(\"chat.postMessage\",\n channel=channel,\n thread_ts=thread_ts,\n as_user=True,\n **kwargs)",
"def send_slack_message(hook, summary, cmd, label=None):\n py_version = '%d.%d' % (sys.version_info.major, sys.version_info.minor)\n label_sec = \" of _%s_\" % label if label else ''\n message = \"Nosetests%s failed in *Python %s* when running:\\n`%s`\"\\\n % (label_sec, py_version, cmd)\n data_json = {'text': message,\n 'attachments': [{'color': 'danger',\n 'text': '```%s```' % summary,\n 'mrkdwn_in': ['text', 'pretext']}]}\n resp = requests.post(hook, data=json.dumps(data_json),\n headers={'Content-type': 'application/json'})\n if resp.status_code is not 200:\n print(\"Message failed to send.\")\n print(resp.reason)\n return",
"def post_slack_msg(credentials,\n text,\n username,\n channel):\n # Read credentials\n config = configparser.ConfigParser()\n config.read(credentials)\n\n sc = SlackClient(config.get('SLACK', 'token'))\n sc.api_call(\n 'chat.postMessage',\n as_user=True,\n channel=channel,\n username=username,\n text=text\n )",
"def alert(self, message):\n try:\n self.send_message(message)\n except Exception as err:\n logger.exception(\n f\"Slack notification to {self.username} failed with {err.__repr__()}\"\n )",
"def send_message(self, msg) -> object:\n payload = {'content': str(msg)}\n try:\n return requests.post(url = self.__webhooks, data = payload)\n except exceptions.ConnectionError as cer:\n print(cer)\n exit(1)",
"def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending notification in Slack\")",
"def __notify_slack(self):\n\t\ttry:\n\t\t\tprint(\"[+] Sending Slack notifications...\")\n\t\t\tslack_http_headers = {\n\t\t\t\t'User-Agent': 'GitHubScrap',\n\t\t\t\t'Content-type': 'application/json',\n\t\t\t}\n\t\t\tslack_http_data = {}\n\t\t\tfor ix in range(0,len(self.final_results[\"results\"]),SLACK_CHUNK_SIZE):\n\t\t\t\tdata_to_send = \"\"\n\t\t\t\tchunk_results = self.final_results[\"results\"][ix:ix+SLACK_CHUNK_SIZE]\n\t\t\t\tfor url in chunk_results:\n\t\t\t\t\tdata_to_send += \"{} ({})\\n\".format(url[\"query\"], url[\"link\"])\n\n\t\t\t\tslack_http_data.update({\n\t\t\t\t\t'text': data_to_send,\n\t\t\t\t})\n\t\t\t\trequests.post(\n\t\t\t\t\tself.slack_webhook,\n\t\t\t\t\theaders = slack_http_headers,\n\t\t\t\t\tdata = json.dumps(slack_http_data),\n\t\t\t\t)\n\t\t\t\tsleep(SLACK_HTTP_DELAY)\n\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Slack notifications could not be sent', exception)",
"def __send_msg_by_webex__(self, text):\n\n print(\"NotifyManager __send_msg_by_webex__ enters\")\n\n # http header\n http_header = {\"Content-type\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.__webex_token__}\n\n # http body\n body = {\"roomId\": self.__webex_space__,\n \"text\": text}\n\n try:\n response = requests.post(\n url=self.__webex_url__,\n json=body,\n headers=http_header)\n print(response.status_code)\n except Exception as e:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"Error! Exception is found to post message over webex teams\")\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(e)\n return\n\n if response.status_code == 200:\n pass\n elif response.status_code == 401:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"Webex Team Token Expired\")\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n else:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"Fail to send message=%s\" % text)\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(response.status_code)\n print(response.text)\n\n return",
"def flask_slack_test():\n _log('@channel: slack is working?')\n return 'slack test'"
]
| [
"0.84101546",
"0.7684406",
"0.7598026",
"0.759701",
"0.7475869",
"0.7443716",
"0.73746884",
"0.73610026",
"0.7242635",
"0.72030175",
"0.71689487",
"0.7116941",
"0.70332277",
"0.70283216",
"0.69573796",
"0.6946794",
"0.69313985",
"0.6919115",
"0.68725723",
"0.68689644",
"0.6864406",
"0.6815691",
"0.6793692",
"0.6786455",
"0.6773396",
"0.6761524",
"0.6757202",
"0.6734745",
"0.6726237",
"0.6715896"
]
| 0.7816742 | 1 |
Test that the number of reported tune results is correct | def testNumIters(self):
ray_params = RayParams(cpus_per_actor=1, num_actors=2)
analysis = tune.run(
self.train_func(ray_params),
config=self.params,
resources_per_trial=ray_params.get_tune_resources(),
num_samples=2)
self.assertSequenceEqual(
list(analysis.results_df["training_iteration"]),
list(analysis.results_df["config.num_boost_round"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_trials(self):",
"def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)",
"def test_per_dqn(self):\n model = PERDQNLightning(self.hparams)\n result = self.trainer.fit(model)\n\n self.assertEqual(result, 1)",
"def test_num_evals(self):\n\t\tdetails = self.watcher.describe()\t\t\n\t\tself.assertTrue((details.M * details.rf == details.num_evals).all())",
"def test_num_evals(self):\n\t\tdetails = self.watcher.describe()\t\t\n\t\tself.assertTrue((details.M * details.rf == details.num_evals).all())",
"def test_make_results_verbose1(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\ttest.do_run()\n\t\ttest.make_results_verbose()\n\t\tobj_ut = test.results_verbose\n\t\tself.assertEqual(obj_ut, [['100', 'not good', 2, -1, 0],\n\t\t\t['100', 'not very good', 4, -1, 0]])",
"def testResults(self):\n problem = problems.simple()\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options={\n \"layers\": (),\n \"initializer\": \"zeros\"\n }))\n minimize_ops = optimizer.meta_minimize(problem, 5)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n cost, final_x = train(sess, minimize_ops, 1, 2)\n\n # Torch results\n torch_cost = 0.7325327\n torch_final_x = 0.8559\n\n self.assertAlmostEqual(cost, torch_cost, places=4)\n self.assertAlmostEqual(final_x[0], torch_final_x, places=4)",
"def test_svm_vs_vm_count():\n assert templates.svms() >= templates.vm_count()",
"def test_svm_vs_vm_count():\n assert environments.svms() >= environments.vm_count()",
"def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))",
"def test_k_again(self):\n\n loc_, sloc_, test_loc, test_sloc = self.k__.get_totals()\n self.assertEqual(loc_, 0)\n self.assertEqual(sloc_, 0)\n self.assertEqual(test_loc, 0)\n self.assertEqual(test_sloc, 0)\n\n self.k__.add_counts('py', 7, 3)\n self.check_counts('py', 7, 3, 0, 0)\n self.k__.add_counts('py', 5, 2)\n self.check_counts('py', 12, 5, 0, 0)\n\n self.k__.add_test_counts('py', 8, 0)\n self.check_counts('py', 12, 5, 8, 0)\n self.k__.add_test_counts('py', 9, 0)\n self.check_counts('py', 12, 5, 17, 0)\n\n # no test source lines, so no test percentage\n expected = \"py:%d/%d\" % (12 + 17, 5)\n # DEBUG\n # print(\"Expected: %s\" % expected)\n # END\n self.assertEqual(self.k__.pretty_counts('py'), expected)",
"def testKarma(self):\n\n\t\t\t\tspinner.Synonym.objects.add('directory', 'catalog', 10, True)\n\t\t\t\tspinner.Synonym.objects.add('list', 'directory', 20, True)\n\t\t\t\tspinner.Synonym.objects.add('directory', 'guide', 10, True)\n\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(['directory'])[0]\n\t\t\t\t\n\t\t\t\tfor word in synonyms:\n\t\t\t\t\t\tif word.total_karma < 10:\n\t\t\t\t\t\t\t\tassert False, 'Karma was not recorded correctly'",
"def test_get_results_verbose(self):\n\t\tpass",
"def test_figure4(self):\n\n topics = get_topics('msmarco-passage-dev-subset')\n qrels = get_qrels('msmarco-passage-dev-subset')\n\n self.assertEqual(len(topics), 6980)\n self.assertEqual(len(qrels), 6980)\n\n # Compute the average length of queries:\n avg_qlen = sum([len(topics[t]['title'].split()) for t in topics])/len(topics)\n\n # Compute the average number of relevance judgments per query:\n avg_qrels = sum([len(qrels[t]) for t in topics])/len(topics)\n\n self.assertAlmostEqual(avg_qlen, 5.925, delta=0.001)\n self.assertAlmostEqual(avg_qrels, 1.065, delta=0.001)",
"def test_generate_nb_testing(self):\n pass",
"def test_ngram():\n # Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n print(vocabsize)\n ### END YOUR CODE",
"def test_get_virtual_machine_count_metrics1(self):\n pass",
"def test_get_measure_parameters(self):\n pass",
"def test_frequency(self):\n self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)",
"def test_frequency(self):\n self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)",
"def getTestResults():",
"def test_best_val(self, te_acc):\n self.test_val = te_acc",
"def get_learn_after_each_trial(self):\r\n return 0",
"def test_prediction(self):\n learner, held_out_student_idx = self.set_up_learner(True, False, 1e9)\n\n # store all node's data and reference IDs for CPDs and param_node dicts\n orig_datas = {key: node.data for key, node in learner.nodes.iteritems()}\n orig_cpd_ids = {key: id(node.cpd) for key, node in learner.nodes.iteritems()}\n orig_param_node_ids = {key: id(node.param_nodes)\n for key, node in learner.nodes.iteritems()}\n orig_fields = {}\n for field in ('callback', 'max_iterations', 'log_posterior', 'iter'):\n orig_fields[field] = getattr(learner, field)\n\n prob_correct = get_online_rps(learner, held_out_student_idx, max_iterations=1000)\n\n # get the test node with all the appended test responses\n test_correct = learner.nodes[TEST_RESPONSES_KEY].data\n\n valid_idx = np.isfinite(prob_correct)\n num_nan_rp = len(prob_correct) - np.sum(valid_idx)\n # check that number of NaN RPs equals total number of students\n self.assertEqual(num_nan_rp, len(np.unique(held_out_student_idx)))\n online_pc = Metrics.online_perc_correct(test_correct, held_out_student_idx)\n np.testing.assert_array_almost_equal(prob_correct[valid_idx], online_pc[valid_idx],\n decimal=6)\n\n # test that the original quantities are not modified\n for key in orig_datas:\n self.assertTrue(learner.nodes[key].data is orig_datas[key])\n np.testing.assert_array_equal(learner.nodes[key].data, orig_datas[key])\n self.assertTrue(id(learner.nodes[key].cpd) == orig_cpd_ids[key])\n self.assertTrue(id(learner.nodes[key].param_nodes) == orig_param_node_ids[key])\n for field, value in orig_fields.iteritems():\n self.assertTrue(getattr(learner, field) is value)\n\n # test that running online prediction again yields the same result; this time modify learner\n prob_correct_mod = get_online_rps(learner, held_out_student_idx, max_iterations=1000,\n copy_learner=False)\n np.testing.assert_equal(prob_correct_mod, prob_correct)\n # original responses should not have been modified, but thetas should have been\n for key in orig_datas:\n if key == THETAS_KEY:\n self.assertFalse(learner.nodes[key].data is orig_datas[key])\n else:\n self.assertTrue(learner.nodes[key].data is orig_datas[key])\n self.assertTrue(id(learner.nodes[key].cpd) == orig_cpd_ids[key])\n self.assertTrue(id(learner.nodes[key].param_nodes) == orig_param_node_ids[key])",
"def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)",
"def test_calculate_all_metrics_precision():\n pass",
"def test_update():\n learner = optlearner.VolatilityLearner()\n\n for reward in [0, 1]:\n slow_pIk = slow_update(learner, reward)\n learner._update(reward)\n yield npt.assert_array_equal, slow_pIk, learner.pIk\n learner.reset()",
"def test_get_virtual_machine_count_metrics(self):\n pass",
"def test_song_counts(self):\n self.assertEqual(self.show.total_song_count, 19)\n self.assertEqual(self.show.set1_song_count, 9)\n self.assertEqual(self.show.set2_song_count, 8)\n self.assertEqual(self.show.set3_song_count, 0)\n self.assertEqual(self.show.encore_song_count, 1)\n self.assertEqual(self.show.encore2_song_count, 1)",
"def number_results(self):\n pass"
]
| [
"0.69545174",
"0.6756261",
"0.66614467",
"0.6616982",
"0.6616982",
"0.63123876",
"0.63104653",
"0.6298489",
"0.6272745",
"0.62067825",
"0.6199662",
"0.61982805",
"0.61766785",
"0.617246",
"0.6156057",
"0.6126327",
"0.61195016",
"0.61114615",
"0.6091615",
"0.6091615",
"0.607921",
"0.60748523",
"0.60549843",
"0.60491025",
"0.6030695",
"0.60253453",
"0.60216856",
"0.60159963",
"0.60120463",
"0.60056174"
]
| 0.68699944 | 1 |
Test if error is thrown when using Tune with elastic training. | def testElasticFails(self):
ray_params = RayParams(
cpus_per_actor=1, num_actors=1, elastic_training=True)
with self.assertRaises(TuneError):
tune.run(
self.train_func(ray_params),
config=self.params,
resources_per_trial=ray_params.get_tune_resources(),
num_samples=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testTrainingStopElastic(self):\n # The `train()` function raises a RuntimeError\n ft_manager = FaultToleranceManager.remote()\n\n ft_manager.schedule_kill.remote(rank=0, boost_round=3)\n ft_manager.schedule_kill.remote(rank=1, boost_round=6)\n ft_manager.delay_return.remote(\n rank=0, start_boost_round=4, end_boost_round=5)\n\n delay_callback = DelayedLoadingCallback(\n ft_manager, reload_data=True, sleep_time=0.1)\n die_callback = DieCallback(ft_manager, training_delay=0.25)\n\n with self.assertRaises(RuntimeError):\n train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[die_callback],\n num_boost_round=20,\n ray_params=RayParams(\n elastic_training=True,\n max_failed_actors=1,\n max_actor_restarts=1,\n num_actors=2,\n distributed_callbacks=[delay_callback]))",
"def test_verbose_non_bool_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(verbose=1)",
"def testTrainingContinuationElasticFailed(self):\n\n additional_results = {}\n keep_actors = {}\n\n def keep(actors, *args, **kwargs):\n keep_actors[\"actors\"] = actors.copy()\n return DEFAULT\n\n with patch(\"xgboost_ray.main._shutdown\") as mocked:\n mocked.side_effect = keep\n bst = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[_fail_callback(self.die_lock_file)],\n num_boost_round=20,\n ray_params=RayParams(\n max_actor_restarts=1,\n num_actors=2,\n elastic_training=True,\n max_failed_actors=1),\n additional_results=additional_results)\n\n self.assertEqual(20, get_num_trees(bst))\n\n x_mat = xgb.DMatrix(self.x)\n pred_y = bst.predict(x_mat)\n self.assertSequenceEqual(list(self.y), list(pred_y))\n print(f\"Got correct predictions: {pred_y}\")\n\n actors = keep_actors[\"actors\"]\n # End with two working actors since only the training failed\n self.assertTrue(actors[0])\n self.assertTrue(actors[1])\n\n # Two workers finished, so n=32\n self.assertEqual(additional_results[\"total_n\"], 32)",
"def test_failed_elastic():\n test_file = os.path.join(DATA_DIR, 'failed_elastic.out')\n\n with pytest.raises(CRYSTOUT_Error) as ex:\n CRYSTOUT(test_file)\n assert 'Inadequate elastic calculation' in ex.msg",
"def test_notrunerror(self, MetricClass):\n m = MetricClass()\n with pytest.raises(NotRunError):\n RandomTrader(seed=42).evaluate(m)",
"def test_http_error(self):\n self.assertEqual(-1, self.__uft.failed_tests('raise'))\n self.assertEqual(-1, self.__uft.passed_tests('raise'))\n self.assertEqual(-1, self.__uft.skipped_tests('raise'))",
"def check_elasticity(mt, threshold = 0.1):\r\n\t\r\n if mt['elasticity'] == None:\r\n return False\r\n elif mt['elasticity']['elastic_tensor'][0][0] < threshold: # This is a temporary implement.\r\n return False\r\n else:\r\n return True",
"def test_training(self):\n\t\tpass",
"def test(self):\n print(\"Calculating final training error...\")\n (err_mean, err_h), kld = self.compute_reconst_kld_errors(self.train_data_loader)\n self.training_logger.log_error(\"Train reconstruction error\", err_mean, err_h)\n if kld is not None:\n kld_mean, kld_h = kld\n self.training_logger.log_error(\"Train KL divergence\", kld_mean, kld_h)\n\n print(\"Calculating final test error...\")\n (err_mean, err_h), kld = self.compute_reconst_kld_errors(self.test_data_loader)\n self.training_logger.log_error(\"Test reconstruction error\", err_mean, err_h)\n if kld is not None:\n kld_mean, kld_h = kld\n self.training_logger.log_error(\"Test KL divergence\", kld_mean, kld_h)",
"def test_qa_train_effectiveness():\n # use a non-fine-tuned model so we DEFINITELY get an improvement\n happy = HappyQuestionAnswering(\"BERT\", \"bert-base-uncased\")\n args = QATrainArgs(num_train_epochs=3)\n before_loss = happy.eval(\"../data/qa/train-eval.csv\").loss\n happy.train(\"../data/qa/train-eval.csv\", args=args)\n after_loss = happy.eval(\"../data/qa/train-eval.csv\").loss\n\n assert after_loss < before_loss",
"def test_no_errors(self):\n raised = False\n try:\n _ = RandomForest(n_estimators=1, max_depth=1, criterion=\"entropy\")\n except Exception as error:\n print(error)\n raised = True\n self.assertFalse(raised)",
"def validate(self, epn, num_samples_to_test = 1000):\n batch_size = epn.batch_size\n dataloader = torch.utils.data.DataLoader(dataset = self, batch_size = batch_size, shuffle=True)\n num_samples_evaluated = 0\n num_correct = 0\n for batch_idx, (x_data, y_target) in enumerate(dataloader):\n epn.randomize_initial_state(batch_size = batch_size)\n epn.set_x_state(x_data)\n s = epn.evolve_to_equilbrium(y_target = None, beta = 0)\n compared = s[:,epn.iy].argmax(dim = 1) == y_target[:].argmax(dim = 1)\n num_samples_evaluated += batch_size\n num_correct += torch.sum(compared)\n if num_samples_evaluated > num_samples_to_test:\n break\n error = (1-num_correct.item()/num_samples_evaluated)\n return error",
"def _check_ts_test(self):\n try:\n assert self.model_fit is not None\n except AssertionError:\n self._uvts_cls_logger.exception(\"Model has to be fitted first! Please call ts_fit(...)\")\n\n try:\n assert self._test_dt is not None\n except(KeyError, AssertionError):\n self._uvts_cls_logger.exception(\"Nothing to test. \"\n \"Call ts_forecast() or specify amount of test data \"\n \"when initializing the object.\")\n return -1\n else:\n # self._mode = 'test'\n return 0",
"def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)",
"def testTrainingStop(self):\n # The `train()` function raises a RuntimeError\n with self.assertRaises(RuntimeError):\n train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[_kill_callback(self.die_lock_file)],\n num_boost_round=20,\n ray_params=RayParams(max_actor_restarts=0, num_actors=2))",
"def test_training():\n assert init_engine('train', [\"config=first_run_test/default.yaml\"]).run() is None",
"def check(self) -> None:\n # validate training config\n super().check()",
"def __validate__(self):\n if self.train:\n assert self.random is not None",
"def _raise_index_error(is_gt, tracker, seq):\n if is_gt:\n err = 'Cannot load gt data from sequence %s, because there are not enough ' \\\n 'columns in the data.' % seq\n raise TrackEvalException(err)\n else:\n err = 'Cannot load tracker data from tracker %s, sequence %s, because there are not enough ' \\\n 'columns in the data.' % (tracker, seq)\n raise TrackEvalException(err)",
"def error(self, x, t):\n predict = self.model.predict(x)\n if t == predict:\n return 0\n else:\n return 1",
"def test_fails(self):\n raise FoolishError(\"I am a broken test\")",
"def test_episodenotfound(self):\n self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30])",
"def test_error_on_dataloader_passed_to_fit(tmpdir):\n\n # only train passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n auto_scale_batch_size='power',\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True))\n\n with pytest.raises(MisconfigurationException):\n trainer.tune(model, **fit_options)",
"def test_train_bad():\n\tdf = pd.read_csv(\"test/sample_features.csv\")\n\tdf['test'] = 'test'\n\n\ty_train = df['price']\n\tX_train = df.loc[:, df.columns != 'price']\n\n\tparams = {'n_estimators': 5, 'random_state': 2}\n\n\twith pytest.raises(ValueError) as e:\n\t\trf_test = train(X_train, y_train, params)\n\n\tassert str(e.value) == 'Wrong data type of inputs for training a random forest regression model!'",
"def check(self) -> None:\n # validate pruning config\n super().check()\n\n assert self.config[\"TRAIN_CONFIG\"][\"MODEL_NAME\"] in {\n \"densenet\",\n \"quant_densenet\",\n \"simplenet\",\n \"quant_simplenet\",\n }, f\"{self.config['TRAIN_CONFIG']['MODEL_NAME']} is not supported\"",
"def had_error(self):\n return self.data.exception_type == TestOutcome.ERROR",
"def test_kraus_error(self):\n A0 = [[1, 0], [0, np.sqrt(1 - 0.3)]]\n A1 = [[0, 0], [0, np.sqrt(0.3)]]\n targets = [A0, A1]\n error = kraus_error(targets)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1)\n kraus = circ[0]\n self.assertEqual(kraus['name'], 'kraus')\n self.assertEqual(kraus['qubits'], [0])\n for op in kraus['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus QuantumError\")",
"def test_invalid_input_checkpoint(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input `checkpoint` '\n 'is invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n -2, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf, 0j, 1j, '',\n b'', (), [], {}, set(), object(), lambda x: x, type, None,\n NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n lmp.util.load_tokenizer_by_config(\n checkpoint=invalid_input,\n config=self.config\n )\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`checkpoint` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`checkpoint` must be bigger than or equal to `-1`.',\n msg=msg2\n )",
"def test_xfailed_not_mentioned_exception():\n assert False",
"def train_digits(self):\n try:\n # TODO: Make decision taking validation into account validation\n metrics_result = self.model.train()\n logging.info(\"model performance is {}\".format(metrics_result))\n return metrics_result is not None\n # TODO: Apply specific exceptions and log,\n except:\n logging.error(\"Prediction Error:\", sys.exc_info()[0])\n raise ValueError()"
]
| [
"0.6430487",
"0.58070004",
"0.5790569",
"0.57876545",
"0.57636374",
"0.57558906",
"0.5712776",
"0.5573787",
"0.5553651",
"0.5549851",
"0.5505793",
"0.5493457",
"0.5484655",
"0.5477935",
"0.547709",
"0.5445432",
"0.54301304",
"0.5421374",
"0.54188627",
"0.5409754",
"0.5395708",
"0.5391431",
"0.539122",
"0.533566",
"0.53325033",
"0.53287333",
"0.53211826",
"0.5313464",
"0.53133035",
"0.5302065"
]
| 0.76031387 | 0 |
If save_path is a file, use this path. Otherwise, join save_path and filename. If file does not end with suffix or best_suffix, then append suffix. If best==True, then append best_suffix. | def ckpt_path(save_path, filename=None, best=False):
if os.path.isfile(save_path):
return save_path
if not filename:
raise Exception("If save_path is not a file, must provide a filename.")
suffix = '.ckpt'
best_suffix = '.ckpt'
save_path = os.path.join(save_path, filename)
save_path = os.path.expanduser(save_path)
if not save_path.endswith(suffix) and not save_path.endswith(best_suffix):
save_path = save_path + suffix
if best:
save_path = save_path + '_best'
return save_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_save_path(self, save_path, save_name):\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name+'.txt'\n path = os.path.join(save_path, save_name)\n return path",
"def save_best(self, sess, score):\n if score > self.best_score:\n self.best_score = score\n path_prefix = self.saver.save(sess, self.config.save_path,\n self.global_step)\n self.best_model_path = path_prefix\n return path_prefix\n return \"Skip saving\"",
"def savePathJoin(self, path):\n return os.path.join(self.user[\"Save\"], path)",
"def get_save_name(suffix, load_save_folder, save_file_name):\n _dialog = gtk.FileChooserDialog(\"Save...\", None,\n gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_SAVE, gtk.RESPONSE_OK))\n _dialog.set_default_response(gtk.RESPONSE_OK)\n if save_file_name is not None:\n _dialog.set_current_name(save_file_name + suffix)\n return do_dialog(_dialog, suffix, load_save_folder)",
"def default_save(self,suffix=EMPTYCHAR,extra=EMPTYCHAR):\r\n\r\n pass",
"def append_to_filename(filepath: str, name_suffix: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n name = filepath_name_only(filepath)\n return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))",
"def _getSavePath(self):\n path, filename = os.path.split(self.path)\n savePath = os.path.join(path, os.path.splitext(filename)[0])\n return savePath",
"def get_save_path(op: Save, calculation: \"FileCalculation\") -> str:\n from PartSegCore.analysis.save_functions import save_dict\n\n extension = save_dict[op.algorithm].get_default_extension()\n rel_path = os.path.relpath(calculation.file_path, calculation.base_prefix)\n rel_path = os.path.splitext(rel_path)[0]\n if op.directory:\n file_name = os.path.basename(rel_path)\n base_rel_path = os.path.dirname(rel_path)\n return os.path.join(calculation.result_prefix, base_rel_path, op.directory, file_name + op.suffix + extension)\n return os.path.join(calculation.result_prefix, rel_path + op.suffix + extension)",
"def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):\n filename = os.path.join(self.experiment_dir, filename)\n torch.save(state, filename)\n if is_best:\n filename_best = os.path.join(self.experiment_dir,'best.pth.tar')\n torch.save(state,filename_best)\n best_pred = state['best_pred']\n with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:\n f.write(str(best_pred))\n if not os.path.exists(os.path.join(self.directory,'best_pred.txt')):\n with open(os.path.join(self.directory,'best_pred.txt'),'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))\n else:\n with open(os.path.join(self.directory,'best_pred.txt'),'r') as f:\n max_iou = float(f.readline())\n if best_pred > max_iou:\n with open(os.path.join(self.directory,'best_pred.txt'),'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))",
"def get_best_model_file_save_path(self):\n \n if self.best_model_file_saved_at_least_once:\n \n return self.absolute_model_file_save_path\n \n # create the base name path if not exists\n \n absolute_dirname = os.path.dirname(self.absolute_model_file_save_path)\n\n if not os.path.exists(absolute_dirname):\n\n os.makedirs(absolute_dirname)\n \n # update the model with respective path\n \n self.sql_model_instance.model_path = self.relative_model_file_save_path\n \n self.db.session.add(self.sql_model_instance)\n self.db.session.commit()\n \n # change the variable state to True\n \n self.best_model_file_saved_at_least_once = True\n \n return self.absolute_model_file_save_path",
"def force_suffix(fname, suffix):\r\n head, tail = pp.split(fname)\r\n if len(tail) == 0:\r\n return head\r\n if suffix[0] == \".\":\r\n suffix = suffix[1:]\r\n fpart, fext = pp.splitext(tail)\r\n newp = pp.join(head, fpart + \".\" + suffix)\r\n return pp.normpath(newp)",
"def get_save_name():\n if ARGV.get(FILE_OPT):\n return ARGV.get(FILE_OPT)\n return FILE_DEFAULT",
"def get_file_full_path(self):\n return self.save_dir + os.sep + self.save_file_without_ext + self.save_file_ext",
"def saving(file_path, save=True, file_type='png'):\n\n index_saving = 1\n while os.path.exists(file_path + \"_{}.{}\".format(index_saving, file_type)):\n index_saving += 1\n\n if save == True:\n plt.savefig(file_path + \"_{}.{}\".format(index_saving, file_type))\n else:\n return file_path + \"_{}.{}\".format(index_saving, file_type)",
"def save_last():\n if g.last_opened:\n open_save_view(\"save\", g.last_opened)\n\n else:\n saveas = \"\"\n\n # save using artist name in postion 1\n if not g.model.is_empty:\n saveas = g.model.songs[0].title[:18].strip()\n saveas = re.sub(r\"[^-\\w]\", \"-\", saveas, re.UNICODE)\n\n # loop to find next available name\n post = 0\n\n while g.userpl.get(saveas):\n post += 1\n saveas = g.model.songs[0].title[:18].strip() + \"-\" + str(post)\n\n open_save_view(\"save\", saveas)",
"def save_to(self, save_path=\"./\", run_flag='', save_method=\"pickle\"):\n # TODO: Finish the save_method parameters\n time_stamp = self.time_stamp\n time_stamp = self.time_stamp + \"_\" + run_flag\n save_path = os.path.join(save_path, time_stamp)\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if self.feature_importance_pool:\n file_path = os.path.join(save_path, \"feature_importances.pkl\")\n save_file(file_path, self.feature_importance_pool)\n\n if self.feature_importance_hist:\n file_path = os.path.join(save_path, \"feature_importances_hist.png\")\n save_file(file_path, self.feature_importance_hist[0])\n\n if self.area_under_curve_pool:\n file_path = os.path.join(save_path, \"auc_fpr_tpr.pkl\")\n save_file(file_path, self.area_under_curve_pool)\n\n if self.receiver_operating_characteristic_curve:\n file_path = os.path.join(save_path, \"roc_curve.png\")\n save_file(file_path, self.receiver_operating_characteristic_curve[0])\n\n if self.training_report_pool:\n file_path = os.path.join(save_path, \"training_report.pkl\")\n save_file(file_path, self.training_report_pool)\n\n if self.learning_line:\n file_path = os.path.join(save_path, \"learning_curve.png\")\n save_file(file_path, self.learning_line[0])\n\n file_path = os.path.join(save_path, time_stamp + \"_object.pkl\")\n with open(file_path, 'wb') as opfh:\n pickle.dump(self, opfh)",
"def _save_model(self, metric=None, suffix=\"\", do_symlink=False):\n # Construct file name\n fname = self.exp_id\n if metric:\n print(\"Saving best model based on {}\".format(metric.name), flush=True)\n fname += \"-val{:03d}.best.{}_{:.3f}\".format(\n self.vctr, metric.name, metric.score\n )\n if suffix:\n fname += \"-{}\".format(suffix)\n fname = os.path.join(self.save_path, (fname + \".ckpt\"))\n\n # Save the file\n model_dict = {\n \"model\": self.model.state_dict(),\n \"history\": self.state_dict(),\n }\n\n # Add optimizer states\n if self.optimizer is not None:\n model_dict[\"optimizer\"] = self.optimizer.state_dict()\n\n # Add opti scheduler\n if self.opti_scheduler is not None:\n model_dict[\"optimizer_scheduler\"] = self.opti_scheduler.state_dict()\n\n torch.save(model_dict, fname)\n\n # Also create a symbolic link to the above checkpoint for the metric\n if metric and do_symlink:\n symlink = \"{}.best.{}.ckpt\".format(self.exp_id, metric.name)\n symlink = Path(self.save_path) / Path(symlink)\n if symlink.exists():\n old_ckpt = symlink.resolve()\n symlink.unlink()\n old_ckpt.unlink()\n if symlink.is_symlink():\n symlink.unlink()\n symlink.symlink_to(fname)\n\n return fname",
"def add_filename_suffix(filepath, suffix):\r\n root, extension = splitext(basename(filepath))\r\n return root + suffix + extension",
"def filename_path_join(path, filename):\n\n # Raise an error if filename is None\n if filename is None:\n raise ValueError(\"Cannot create a path to a filename set as None\")\n\n # Return an absolute path unchanged\n elif os.path.isabs(filename):\n return filename\n\n # If path is set, join filename to it and return that\n elif path is not None:\n return InferelatorDataLoader.make_path_safe(os.path.join(path, filename))\n\n # If path is not set, convert the filename to absolute and return it\n else:\n return InferelatorDataLoader.make_path_safe(filename)",
"def get_savename(outdir, plot_name, extension):\n save_name = '.'.join([plot_name, extension])\n save_name = '/'.join([outdir, save_name])\n return save_name",
"def get_suffix_ml_model():\n suffix = ''\n \n # consider if the model uses tail or not\n if gml.USE_TAIL: \n suffix += '_use_tail'\n else: \n suffix += '_no_tail'\n\n # consider the way of picking target variable for the model\n if gml.WAY_MODEL_TGT == 'absolute':\n suffix += '_absolute'\n elif gml.WAY_MODEL_TGT == 'relative':\n suffix += '_relative'\n else: \n exit('error on the function that gets suffix')\n\n return suffix",
"def save_weights(self, save_dir: Path, is_best: bool = False) -> None:\n if is_best:\n save_path = save_dir.expanduser() / self.BEST_SAVE_NAME\n else:\n save_path = save_dir.expanduser() / self.SAVE_NAME\n torch.save(self.model.state_dict(), save_path)",
"def save_best_params(output_dir, best_params, gene, model_options, predictor='classify'):\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n output_file = construct_filename(output_dir,\n 'params',\n '.pkl',\n gene,\n training_data,\n model_options.model,\n predictor,\n s=model_options.seed)\n\n with open(output_file, 'wb') as f:\n pkl.dump(best_params, f)",
"def save_file(file_name, suffix, content):\n full_path = os.path.abspath(file_name)\n filename, file_extension = os.path.splitext(full_path)\n save_path = '_'.join([filename, suffix]) + file_extension\n with open(save_path, 'w') as f:\n f.write(content)\n return save_path",
"def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")",
"def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n # if self.suffix_time != \"\":\n # index = self.baseFilename.find(\".\" + self.suffix_time)\n # if index == -1:\n # index = self.baseFilename.rfind(\".\")\n # self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self._get_format_filename()\n\n self.mode = 'a'\n if not self.delay:\n self.stream = self._open()",
"def safety_save(path, limit=100):\n k = 1\n while os.path.isfile(path) and (k < limit):\n fname, fext = os.path.splitext(path)\n if fname.find('(') + 1:\n path = fname[0:fname.find('(') + 1] + str(k) + ')' + fext\n else:\n path = fname + '(' + str(k) + ')' + fext\n k += 1\n return path",
"def save_model(self, is_best, state, epoch):\n path = os.path.join(self.logpath_models, 'model-%d.pth.tar' % epoch)\n torch.save(state, path)\n if is_best:\n shutil.copyfile(path, path + 'model_best.pth.tar')",
"def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n if self.suffix_time != \"\":\n index = self.baseFilename.find(\".\" + self.suffix_time)\n if index == -1:\n index = self.baseFilename.rfind(\".\")\n self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self.baseFilename + \".\" + self.suffix_time\n self.mode = 'a'\n\n # create soft links\n index = self.baseFilename.rfind(\".\")\n os.unlink(self.baseFilename[:index])\n os.symlink(self.baseFilename, self.baseFilename[:index])\n\n if not self.delay:\n self.stream = self._open()",
"def add_suffix_to_filename(filename, suffix):\n name, ext = os.path.splitext(filename)\n return ''.join([name, suffix, ext])"
]
| [
"0.63478696",
"0.6004457",
"0.5805976",
"0.56676304",
"0.56426597",
"0.559139",
"0.55741334",
"0.5572959",
"0.5547498",
"0.55432194",
"0.55162865",
"0.54719764",
"0.5463675",
"0.5340421",
"0.53071415",
"0.527985",
"0.5276037",
"0.5257298",
"0.521184",
"0.51786846",
"0.5165969",
"0.51582515",
"0.51245177",
"0.51245",
"0.5108931",
"0.510483",
"0.50843835",
"0.5064668",
"0.5056341",
"0.5050865"
]
| 0.6397553 | 0 |
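The candidate snippets in the row closed above all build an output path by splicing a suffix, timestamp, or run flag between a filename's stem and its extension before saving. A minimal sketch of that shared pattern (the helper name and example paths are illustrative, not taken from any one snippet above):

```python
import os

def with_suffix(filepath: str, suffix: str) -> str:
    """Insert a suffix between a file's stem and its extension."""
    root, ext = os.path.splitext(filepath)      # "out/model.ckpt" -> ("out/model", ".ckpt")
    return f"{root}{suffix}{ext}"

# Usage: tag a checkpoint with a run flag before writing it.
print(with_suffix("out/model.ckpt", "_best"))   # out/model_best.ckpt
```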
[indique si la fleur est en fruit] | def est_fruit(self):
if self.age > 20 and self.age <31 and (self.fecondee==True):
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def one_v_one(description):\n return \"forebrain\" in description",
"def is_berry(fruit):\n if fruit == 'strawberry' or fruit == 'cherry' or fruit == 'blackberry':\n is_berry = True\n else:\n is_berry = False\n return is_berry",
"def checkFood(self, food):\n pass",
"def fruit_nb(x):\r\n return len([y for y in metamer(x) if Feature(y, 'fruit')])",
"def violent_sexual_apply(x): \n v_lst = ['Assault w/ Intent to Rape',\n 'Assault to Ravish', # make sure this is spelled correctly \n 'Rape']\n for elem in x:\n if elem in v_lst:\n return 1\n return 0",
"def inscricao(self):\n\n return True",
"def wife(backpack):\n print(\"\\nYour wife says: \")\n if \"corn\" in backpack:\n if backpack['corn'][0] < 20:\n print(\"-You need to gather 20 corn cob so get back to work! \")\n enter()\n else:\n print(\"-Ahh you are a bastard but I know your dream...\\nNow go to city and buy your ticket my love :* \")\n enter()\n return True # because of this we can change lvl\n if \"corn\" not in backpack:\n print(\"-Where have u been u f...... drunkard, \\nget back to work and collect 20 corn cobs! \")\n enter()",
"def fruit_ram(x):\r\n return sum([fruit_nb(y) for y in Sons(x)])",
"def burglary(x): \n for elem in x:\n if elem == 'Burglary':\n return 1\n return 0",
"def is_berry(fruit_name):\n\n\tberries = [\"strawberry\", \"cherry\", \"blackberry\"]\n\n\tif fruit_name in berries:\n\t\treturn True\n\telse:\n\t\treturn False",
"def singPlur(repFausses):\r\n if repFausses <= 1:\r\n return \"faute\"\r\n else:\r\n return \"fautes\"",
"def addfruit(self, pos, val, dead_snake=-1):\n if all(not s.onSnake(pos) for a, s in self.snakes.items() if a != dead_snake) \\\n and not pos in list(self.fruits.keys()):\n self.fruits[pos] = val\n return True\n return False",
"def limpiar_type(palabra):\n if palabra == 'Boat' or palabra == 'Boatomg':\n palabra = 'Boating'\n return palabra\n \n return palabra",
"def findFood(self,name):\n\t\tname = name.lower()\n\t\treturn dictfood.has_key(name)",
"def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"",
"def especies_tiburon(palabra):\n \n palabra = str(palabra)\n palabra = palabra.strip()\n respuesta = palabra.lower()\n especies_dict = {'white' :'White Shark', 'hammerhead': 'Hammerhead Shark', 'tiger' : 'Tiger Shark', 'nurse' : 'Grey Nurse Shark', 'invalid' : 'Uknowm', 'copper' : 'Cooper Shark', 'cooper' : 'Cooper Shark', 'lemon' : 'Lemon Shark', 'sand' : 'Sand Shark', 'bull' : 'Bull Shark', 'zambezi' : 'Zambezi Shark', 'tawney' : 'Tawney Shark', 'blue' : 'Blue Pointer', 'bronze' : 'Bronze Whaler Shark', 'mako' : 'Mako Shark', 'wobbegong' : 'Wobbegong Shark', 'blacktip' : 'Blatip Shark', 'spinner' : 'Spinner Shark', 'brown' : 'Brown Shark', 'basking' : 'Basking Shark', 'goblin' : 'Goblin Shark'}\n\n for key, value in especies_dict.items():\n if key in respuesta:\n respuesta = value\n return respuesta\n \n return ' Regular Shark'",
"def favorite_animal(users_animal):\n return f'Wow, {users_animal} is my favorite animal, too!'",
"def echo(self, foetus):\n Allele_semblable = 0\n for Allele in range(3):\n if self.allele[Allele] in foetus.allele and self.allele[Allele] != 0.0:\n Allele_semblable = Allele\n if Allele_semblable == 0:\n Allele_Echo = self.allele[Allele_semblable + 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3\n elif Allele_semblable == 1:\n Allele_Echo = self.allele[Allele_semblable - 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3",
"def test_Fuselage_full():\n fus = Fuselage()\n assert('OML' in fus)",
"def uncategorized_apply(x): \n uncategorized_lst = ['Rescue of Prisoner',\n 'Conspiracy',\n 'Felony']\n for elem in x:\n if elem in uncategorized_lst:\n return 1\n return 0",
"def _(animal):\n print(\"Searching the garden's animals\")\n return animal in _animals",
"def possessif(nom):\n\n CA = nom[1]\n\n\n rand = randint(0,5)\n\n if CA == \"-1\" or CA == \"-3\" or CA == \"-5\" or CA == \"-7\" or CA == \"-8\" or CA == \"-4\" or Premiere_lettre_voyelle(nom[0]):\n if rand == 0:\n return \"mon \"\n elif rand == 1:\n return \"ton \"\n elif rand == 2:\n return \"son \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n \n elif (CA == \"-2\" or CA == \"-6\" or CA == \"-9\"):\n if rand == 0:\n return \"ma \"\n elif rand == 1:\n return \"ta \"\n elif rand == 2:\n return \"sa \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n else:\n return False",
"def affiche_favoris():\r\n # Liste des favoris utilisés pour la fonction \"select_favorite\"\r\n favorite_dict = {}\r\n # pour les produits dans Count\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"SELECT F1.name as Product, F2.name as Substitute \\\r\n FROM Backup \\\r\n INNER JOIN Food F1 ON Backup.produit_id = F1.id \r\n INNER JOIN Food F2 ON Backup.substitut_id = F2.id\"\"\")\r\n favorite = cursor.fetchall()\r\n index = 1\r\n for i in favorite:\r\n favorite_tuple = (i[0], i[1])\r\n print(\"\\n {}. {}, Peut être remplacé par {}.\".format(index, \\\r\n favorite_tuple[0], favorite_tuple[1]))\r\n favorite_dict[index] = favorite_tuple\r\n index += 1\r\n\r\n if not favorite_dict:\r\n print (\"La liste des favoris est vide.\")\r\n else:\r\n print('Choisissez un chiffre pour plus de détail.')\r\n select_favorite(favorite_dict)",
"def telofase(self):\n\n\n var = self.varianza()\n if var >= 6.5:\n self.guardar_imagen('telofase')\n return True\n return False",
"def on_fruit(self):\r\n if self.grid_pos in self.app.fruit:\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0):\r\n return True\r\n # in the x-direction \r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1):\r\n return True\r\n # in the y-direction\r\n\r\n return False",
"def fruit_ms(x):\r\n return sum([Feature(y, 'fruit') for y in metamer(x) if Feature(y, 'fruit')])",
"def fechou(self):\n return self.tot_rodada == self.rodada",
"def mot_possible(mot:str,lettres:str)->bool:\r\n retour = False\r\n L=[]\r\n for i in lettres:\r\n if presente(i,mot)!=-1 :\r\n L.append(i)\r\n if len(L)>=len(mot):\r\n retour = True\r\n\r\n return(retour)",
"def appears(self):",
"def has_siete_de_velo(self):\n for card in self._cards[\"oro\"]:\n if card.value == 7:\n return True\n\n return False"
]
| [
"0.6146996",
"0.58440363",
"0.5632827",
"0.5613998",
"0.5475524",
"0.54726166",
"0.5427652",
"0.5422366",
"0.5414871",
"0.5388045",
"0.5361428",
"0.5353682",
"0.5340952",
"0.53375876",
"0.5328255",
"0.5300246",
"0.5295049",
"0.52606523",
"0.5251567",
"0.5237527",
"0.5227937",
"0.52147126",
"0.5180616",
"0.5141335",
"0.5126326",
"0.51237696",
"0.51224154",
"0.5118411",
"0.510982",
"0.509604"
]
| 0.6689607 | 0 |
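For readers who do not read French: the row above pairs the query "[indique si la fleur est en fruit]" ("indicates whether the flower is in fruit") with a method that returns True only while the flower's age lies strictly between 20 and 31 and it has been fertilised (`fecondee`). A small English sketch of the same check, with assumed attribute names mirroring the snippet:

```python
class Flower:
    def __init__(self, age: int, fertilised: bool):
        self.age = age
        self.fertilised = fertilised

    def is_in_fruit(self) -> bool:
        # Fruit stage: fertilised and age in the open interval (20, 31).
        return self.fertilised and 20 < self.age < 31

print(Flower(25, True).is_in_fruit())    # True
print(Flower(35, True).is_in_fruit())    # False: past the fruiting window
```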
Tests whether ``MessageApplication.__new__`` works as intended. | def test__MessageApplication__new__0():
message_application = MessageApplication()
_assert_fields_set(message_application) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__MessageApplication__new__1():\n cover = Icon(ICON_TYPE_STATIC, 12)\n icon = Icon(ICON_TYPE_STATIC, 23)\n description = 'Afraid'\n name = 'Chata'\n \n message_application = MessageApplication(\n cover = cover,\n description = description,\n icon = icon,\n name = name,\n )\n \n _assert_fields_set(message_application)\n \n vampytest.assert_eq(message_application.cover, cover)\n vampytest.assert_eq(message_application.description, description)\n vampytest.assert_eq(message_application.icon, icon)\n vampytest.assert_eq(message_application.name, name)",
"def test__MessageApplication__create_empty():\n message_application_id = 202304140000\n \n message_application = MessageApplication._create_empty(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test__MessageApplication__precreate__1():\n message_application_id = 202304140002\n \n cover = Icon(ICON_TYPE_STATIC, 12)\n icon = Icon(ICON_TYPE_STATIC, 23)\n description = 'Afraid'\n name = 'Chata'\n \n message_application = MessageApplication.precreate(\n message_application_id,\n cover = cover,\n description = description,\n icon = icon,\n name = name,\n )\n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)\n \n vampytest.assert_eq(message_application.cover, cover)\n vampytest.assert_eq(message_application.description, description)\n vampytest.assert_eq(message_application.icon, icon)\n vampytest.assert_eq(message_application.name, name)",
"def test_11_a_create_application_errors(self, mock):\r\n with self.flask_app.app_context():\r\n self.register()\r\n # Required fields checks\r\n # Issue the error for the app.name\r\n res = self.new_application(name=\"\")\r\n err_msg = \"An application must have a name\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.short_name\r\n res = self.new_application(short_name=\"\")\r\n err_msg = \"An application must have a short_name\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.description\r\n res = self.new_application(long_description=\"\")\r\n err_msg = \"An application must have a description\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.short_name\r\n res = self.new_application(short_name='$#/|')\r\n err_msg = \"An application must have a short_name without |/$# chars\"\r\n assert '$#&\\/| and space symbols are forbidden' in res.data, err_msg\r\n\r\n # Now Unique checks\r\n self.new_application()\r\n res = self.new_application()\r\n err_msg = \"There should be a Unique field\"\r\n assert \"Name is already taken\" in res.data, err_msg\r\n assert \"Short Name is already taken\" in res.data, err_msg",
"def test_11_create_application(self, mock):\r\n # Create an app as an anonymous user\r\n with self.flask_app.app_context():\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.new_application()\r\n assert self.html_title(\"Sign in\") in res.data, res.data\r\n assert \"Please sign in to access this page.\" in res.data, res.data\r\n\r\n # Sign in and create an application\r\n res = self.register()\r\n\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Create an Application\") in res.data, res\r\n assert \"Create the application\" in res.data, res\r\n\r\n res = self.new_application(long_description='My Description')\r\n assert \"<strong>Sample App</strong>: Update the application\" in res.data\r\n assert \"Application created!\" in res.data, res\r\n\r\n app = db.session.query(App).first()\r\n assert app.name == 'Sample App', 'Different names %s' % app.name\r\n assert app.short_name == 'sampleapp', \\\r\n 'Different names %s' % app.short_name\r\n\r\n assert app.long_description == 'My Description', \\\r\n \"Long desc should be the same: %s\" % app.long_description",
"def test__MessageApplication__precreate__0():\n message_application_id = 202304140001\n \n message_application = MessageApplication.precreate(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing",
"def test_application_is_not_subclassed():\n\n with pytest.raises(ApplicationIsNotSubclassedError):\n app = Application()",
"def test_application_instance_already_set():\n\n with pytest.raises(ApplicationInstanceAlreadySetError):\n app = ApplicationMock()",
"def create_app(self):\n raise NotImplementedError",
"def test_application_is_singleton():\n\n app = PyrinUnitTestApplication()\n assert app == application_services.get_current_app()",
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def testInstance(self):\n self.assertTrue(isinstance(self, AppswellUnitTest))",
"def test_app_is_created(app):\n assert app.name == \"myapp.app\"",
"def __new__(cls, debug=False):\n if not App.instance:\n App.instance = App.__OnlyApp(debug)\n return App.instance",
"def initialize(self, application):",
"def test_bob_new(self):\n messages = list(self.bob_inbox.new)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_app_exists(self):\n self.assertFalse(current_app is None)",
"def get_application_instance(self, app_name):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def test_alice_new(self):\n messages = list(self.alice_inbox.new)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def application():\n yield create_test_application()",
"def test_app_construction(s):\n empty_app = s['empty-app']\n simple_app = s['simple-app']\n derived_app = s['derived-app']\n\n assert hasattr(empty_app, \"definitions\")\n assert empty_app.definitions == {}\n assert 'appDef' in simple_app.definitions\n assert 'appDef' in derived_app.definitions\n assert 'derivedDef' in derived_app.definitions\n assert 'derivedDef' not in simple_app.definitions",
"def _create_app_instance(script_info):\n return create_app()",
"def create():\n\n return App()",
"def app():\n if __QAPPLICATION_SINGLETON is None:\n raise cuegui.Exception.ApplicationNotRunningException()\n return __QAPPLICATION_SINGLETON",
"def create_application(self, application_name): \n self.__obj = GenericModel({ x:None for x in self.swagger_map.values()}, self.swagger_types, self.swagger_map)\n self.application_name = application_name",
"def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)",
"def test_01_create(self, namespace=None, name=None, contact_user=None):\n self.direct_login_user_1()\n\n data = {}\n if namespace:\n data['namespace'] = namespace\n\n if name:\n data['name'] = name\n\n if contact_user:\n data['contact_user'] = contact_user\n\n new_application = self.create_application(**data)\n\n self._objects_to_delete.append(new_application)\n self.my_context_dict[\"new_application\"] = new_application\n\n app_found = False\n for app in self.api_handler.User.me().applications():\n if app.id == new_application.id:\n app_found = True\n break\n self.assertTrue(app_found)\n\n # merchants\n new_application.merchants()\n\n return new_application",
"def test_get_message_new(self):\n message = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n thread = Thread.public.by_user(user=self.user)[0]\n messages = thread.messages_for_user(self.user)\n self.assertEqual(messages[0], message)\n self.assertFalse(messages[0].read)",
"def test_new(self):\n self.assertEqual(len(p.new('foo')), 168)"
]
| [
"0.7948441",
"0.6673767",
"0.6402658",
"0.62554973",
"0.6174546",
"0.6069933",
"0.58999586",
"0.587314",
"0.58070815",
"0.57901084",
"0.5787674",
"0.56917095",
"0.56328523",
"0.5568717",
"0.55345166",
"0.55326825",
"0.54664826",
"0.54464966",
"0.5405169",
"0.54016685",
"0.5397699",
"0.5389949",
"0.5353584",
"0.534713",
"0.5346892",
"0.53468066",
"0.5345931",
"0.53401697",
"0.5331706",
"0.52962714"
]
| 0.7738738 | 1 |
Tests whether ``MessageApplication.__new__`` works as intended. | def test__MessageApplication__new__1():
cover = Icon(ICON_TYPE_STATIC, 12)
icon = Icon(ICON_TYPE_STATIC, 23)
description = 'Afraid'
name = 'Chata'
message_application = MessageApplication(
cover = cover,
description = description,
icon = icon,
name = name,
)
_assert_fields_set(message_application)
vampytest.assert_eq(message_application.cover, cover)
vampytest.assert_eq(message_application.description, description)
vampytest.assert_eq(message_application.icon, icon)
vampytest.assert_eq(message_application.name, name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__MessageApplication__new__0():\n message_application = MessageApplication()\n _assert_fields_set(message_application)",
"def test__MessageApplication__create_empty():\n message_application_id = 202304140000\n \n message_application = MessageApplication._create_empty(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test__MessageApplication__precreate__1():\n message_application_id = 202304140002\n \n cover = Icon(ICON_TYPE_STATIC, 12)\n icon = Icon(ICON_TYPE_STATIC, 23)\n description = 'Afraid'\n name = 'Chata'\n \n message_application = MessageApplication.precreate(\n message_application_id,\n cover = cover,\n description = description,\n icon = icon,\n name = name,\n )\n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)\n \n vampytest.assert_eq(message_application.cover, cover)\n vampytest.assert_eq(message_application.description, description)\n vampytest.assert_eq(message_application.icon, icon)\n vampytest.assert_eq(message_application.name, name)",
"def test_11_a_create_application_errors(self, mock):\r\n with self.flask_app.app_context():\r\n self.register()\r\n # Required fields checks\r\n # Issue the error for the app.name\r\n res = self.new_application(name=\"\")\r\n err_msg = \"An application must have a name\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.short_name\r\n res = self.new_application(short_name=\"\")\r\n err_msg = \"An application must have a short_name\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.description\r\n res = self.new_application(long_description=\"\")\r\n err_msg = \"An application must have a description\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.short_name\r\n res = self.new_application(short_name='$#/|')\r\n err_msg = \"An application must have a short_name without |/$# chars\"\r\n assert '$#&\\/| and space symbols are forbidden' in res.data, err_msg\r\n\r\n # Now Unique checks\r\n self.new_application()\r\n res = self.new_application()\r\n err_msg = \"There should be a Unique field\"\r\n assert \"Name is already taken\" in res.data, err_msg\r\n assert \"Short Name is already taken\" in res.data, err_msg",
"def test_11_create_application(self, mock):\r\n # Create an app as an anonymous user\r\n with self.flask_app.app_context():\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.new_application()\r\n assert self.html_title(\"Sign in\") in res.data, res.data\r\n assert \"Please sign in to access this page.\" in res.data, res.data\r\n\r\n # Sign in and create an application\r\n res = self.register()\r\n\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Create an Application\") in res.data, res\r\n assert \"Create the application\" in res.data, res\r\n\r\n res = self.new_application(long_description='My Description')\r\n assert \"<strong>Sample App</strong>: Update the application\" in res.data\r\n assert \"Application created!\" in res.data, res\r\n\r\n app = db.session.query(App).first()\r\n assert app.name == 'Sample App', 'Different names %s' % app.name\r\n assert app.short_name == 'sampleapp', \\\r\n 'Different names %s' % app.short_name\r\n\r\n assert app.long_description == 'My Description', \\\r\n \"Long desc should be the same: %s\" % app.long_description",
"def test__MessageApplication__precreate__0():\n message_application_id = 202304140001\n \n message_application = MessageApplication.precreate(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing",
"def test_application_is_not_subclassed():\n\n with pytest.raises(ApplicationIsNotSubclassedError):\n app = Application()",
"def test_application_instance_already_set():\n\n with pytest.raises(ApplicationInstanceAlreadySetError):\n app = ApplicationMock()",
"def create_app(self):\n raise NotImplementedError",
"def test_application_is_singleton():\n\n app = PyrinUnitTestApplication()\n assert app == application_services.get_current_app()",
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def testInstance(self):\n self.assertTrue(isinstance(self, AppswellUnitTest))",
"def test_app_is_created(app):\n assert app.name == \"myapp.app\"",
"def __new__(cls, debug=False):\n if not App.instance:\n App.instance = App.__OnlyApp(debug)\n return App.instance",
"def initialize(self, application):",
"def test_bob_new(self):\n messages = list(self.bob_inbox.new)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_app_exists(self):\n self.assertFalse(current_app is None)",
"def get_application_instance(self, app_name):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def test_alice_new(self):\n messages = list(self.alice_inbox.new)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def application():\n yield create_test_application()",
"def test_app_construction(s):\n empty_app = s['empty-app']\n simple_app = s['simple-app']\n derived_app = s['derived-app']\n\n assert hasattr(empty_app, \"definitions\")\n assert empty_app.definitions == {}\n assert 'appDef' in simple_app.definitions\n assert 'appDef' in derived_app.definitions\n assert 'derivedDef' in derived_app.definitions\n assert 'derivedDef' not in simple_app.definitions",
"def _create_app_instance(script_info):\n return create_app()",
"def create():\n\n return App()",
"def app():\n if __QAPPLICATION_SINGLETON is None:\n raise cuegui.Exception.ApplicationNotRunningException()\n return __QAPPLICATION_SINGLETON",
"def create_application(self, application_name): \n self.__obj = GenericModel({ x:None for x in self.swagger_map.values()}, self.swagger_types, self.swagger_map)\n self.application_name = application_name",
"def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)",
"def test_01_create(self, namespace=None, name=None, contact_user=None):\n self.direct_login_user_1()\n\n data = {}\n if namespace:\n data['namespace'] = namespace\n\n if name:\n data['name'] = name\n\n if contact_user:\n data['contact_user'] = contact_user\n\n new_application = self.create_application(**data)\n\n self._objects_to_delete.append(new_application)\n self.my_context_dict[\"new_application\"] = new_application\n\n app_found = False\n for app in self.api_handler.User.me().applications():\n if app.id == new_application.id:\n app_found = True\n break\n self.assertTrue(app_found)\n\n # merchants\n new_application.merchants()\n\n return new_application",
"def test_get_message_new(self):\n message = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n thread = Thread.public.by_user(user=self.user)[0]\n messages = thread.messages_for_user(self.user)\n self.assertEqual(messages[0], message)\n self.assertFalse(messages[0].read)",
"def test_new(self):\n self.assertEqual(len(p.new('foo')), 168)"
]
| [
"0.7738738",
"0.6673767",
"0.6402658",
"0.62554973",
"0.6174546",
"0.6069933",
"0.58999586",
"0.587314",
"0.58070815",
"0.57901084",
"0.5787674",
"0.56917095",
"0.56328523",
"0.5568717",
"0.55345166",
"0.55326825",
"0.54664826",
"0.54464966",
"0.5405169",
"0.54016685",
"0.5397699",
"0.5389949",
"0.5353584",
"0.534713",
"0.5346892",
"0.53468066",
"0.5345931",
"0.53401697",
"0.5331706",
"0.52962714"
]
| 0.7948441 | 0 |
Tests whether ``MessageApplication._create_empty`` works as intended. | def test__MessageApplication__create_empty():
message_application_id = 202304140000
message_application = MessageApplication._create_empty(message_application_id)
_assert_fields_set(message_application)
vampytest.assert_eq(message_application.id, message_application_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__MessageApplication__new__0():\n message_application = MessageApplication()\n _assert_fields_set(message_application)",
"def test__MessageApplication__new__1():\n cover = Icon(ICON_TYPE_STATIC, 12)\n icon = Icon(ICON_TYPE_STATIC, 23)\n description = 'Afraid'\n name = 'Chata'\n \n message_application = MessageApplication(\n cover = cover,\n description = description,\n icon = icon,\n name = name,\n )\n \n _assert_fields_set(message_application)\n \n vampytest.assert_eq(message_application.cover, cover)\n vampytest.assert_eq(message_application.description, description)\n vampytest.assert_eq(message_application.icon, icon)\n vampytest.assert_eq(message_application.name, name)",
"def test_initial_length_equals_zero(self):\r\n msg_list = messages.MessageList()\r\n self.assertEqual(msg_list.length(), 0)",
"def test__MessageApplication__precreate__1():\n message_application_id = 202304140002\n \n cover = Icon(ICON_TYPE_STATIC, 12)\n icon = Icon(ICON_TYPE_STATIC, 23)\n description = 'Afraid'\n name = 'Chata'\n \n message_application = MessageApplication.precreate(\n message_application_id,\n cover = cover,\n description = description,\n icon = icon,\n name = name,\n )\n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)\n \n vampytest.assert_eq(message_application.cover, cover)\n vampytest.assert_eq(message_application.description, description)\n vampytest.assert_eq(message_application.icon, icon)\n vampytest.assert_eq(message_application.name, name)",
"def is_empty(self):\n if len(self.messages) < 1:\n return True\n else:\n return False",
"def test__MessageApplication__precreate__0():\n message_application_id = 202304140001\n \n message_application = MessageApplication.precreate(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test_empty_messages(self):\n self.failureResultOf(self.producer.send_messages(\"topic\"), ValueError)\n self.failureResultOf(self.producer.send_messages(\"topic\", msgs=[]), ValueError)",
"def test_empty(self):\n pass",
"def test_new_queue_is_empty(self):\n queue = Queue_()\n self.assertTrue(queue.empty())\n self.assertEqual(queue.size(), 0)",
"def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing",
"def test_11_a_create_application_errors(self, mock):\r\n with self.flask_app.app_context():\r\n self.register()\r\n # Required fields checks\r\n # Issue the error for the app.name\r\n res = self.new_application(name=\"\")\r\n err_msg = \"An application must have a name\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.short_name\r\n res = self.new_application(short_name=\"\")\r\n err_msg = \"An application must have a short_name\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.description\r\n res = self.new_application(long_description=\"\")\r\n err_msg = \"An application must have a description\"\r\n assert \"This field is required\" in res.data, err_msg\r\n\r\n # Issue the error for the app.short_name\r\n res = self.new_application(short_name='$#/|')\r\n err_msg = \"An application must have a short_name without |/$# chars\"\r\n assert '$#&\\/| and space symbols are forbidden' in res.data, err_msg\r\n\r\n # Now Unique checks\r\n self.new_application()\r\n res = self.new_application()\r\n err_msg = \"There should be a Unique field\"\r\n assert \"Name is already taken\" in res.data, err_msg\r\n assert \"Short Name is already taken\" in res.data, err_msg",
"def test_empty_phonebook_is_consistent(self):\n self.assertTrue(self.phonebook.is_consistent())",
"def test_post_empty_data(self):\n response = self.app.post('/_ah/push-handlers/receive_message')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.body, \"No request body received\")\n self.assertRaises(ValueError)",
"def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )",
"def test_conversation_with_zero_messages(self):\n response = self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_b.id,\n \"text\": \"test message\"\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Message.objects.count(), 1)\n self.assertEqual(Message.objects.get().text, 'test message')",
"def test_app_exists(self):\n self.assertFalse(current_app is None)",
"def test_empty_message(config, valid_connection):\n sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)\n with pytest.raises(ValueError):\n sms.send(YESSS_TO, \"\")\n with pytest.raises(sms.EmptyMessageError):\n sms.send(YESSS_TO, \"\")",
"def test__synchronize_empty(self):\n # Access to a protected member _synchronize of a client class\n # pylint: disable=W0212\n existing_apps = ['proid.app#0', 'proid.app#1', 'proid.app#2']\n glob.glob.return_value = (app for app in existing_apps)\n\n zkclient = kazoo.client.KazooClient()\n self.evmgr._synchronize(zkclient, [])\n\n os.unlink.assert_has_calls(\n [\n mock.call(os.path.join(self.cache, app))\n for app in existing_apps\n ],\n any_order=True\n )\n self.assertFalse(treadmill.eventmgr.EventMgr._cache.called)",
"def is_empty(self):\n raise NotImplimentedError",
"def i_am_empty():\n pass",
"def empty(cls) -> EnvelopeStructure:\n return _EmptyEnvelopeStructure()",
"def is_empty(self):\n return False",
"def is_empty(self):\n return False",
"def is_empty(self):\n return False",
"def is_empty(self):\n return False",
"def is_empty(self):\n return False",
"def is_empty(self):\n return False",
"def test_create_empty(self):\n z2_symmetries = Z2Symmetries(\n symmetries=[],\n sq_paulis=[],\n sq_list=[],\n )\n self.assertTrue(z2_symmetries.is_empty())",
"def test_none_message(self):\n d = self.producer.send_messages(\"topic\", key=b\"key\", msgs=[None])\n d.addErrback(lambda f: None) # Handle the cancellation failure from producer.stop().\n\n self.assertNoResult(d)",
"def test_11_create_application(self, mock):\r\n # Create an app as an anonymous user\r\n with self.flask_app.app_context():\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.new_application()\r\n assert self.html_title(\"Sign in\") in res.data, res.data\r\n assert \"Please sign in to access this page.\" in res.data, res.data\r\n\r\n # Sign in and create an application\r\n res = self.register()\r\n\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Create an Application\") in res.data, res\r\n assert \"Create the application\" in res.data, res\r\n\r\n res = self.new_application(long_description='My Description')\r\n assert \"<strong>Sample App</strong>: Update the application\" in res.data\r\n assert \"Application created!\" in res.data, res\r\n\r\n app = db.session.query(App).first()\r\n assert app.name == 'Sample App', 'Different names %s' % app.name\r\n assert app.short_name == 'sampleapp', \\\r\n 'Different names %s' % app.short_name\r\n\r\n assert app.long_description == 'My Description', \\\r\n \"Long desc should be the same: %s\" % app.long_description"
]
| [
"0.68074656",
"0.6550681",
"0.64747626",
"0.64632654",
"0.64058954",
"0.6405656",
"0.6332513",
"0.6200997",
"0.61500394",
"0.6069676",
"0.60523784",
"0.6006606",
"0.59653187",
"0.59143025",
"0.58991927",
"0.58978826",
"0.58634174",
"0.585233",
"0.5838295",
"0.58012694",
"0.5790304",
"0.575653",
"0.575653",
"0.575653",
"0.575653",
"0.575653",
"0.575653",
"0.57450163",
"0.5703623",
"0.57032937"
]
| 0.84340566 | 0 |
Tests whether ``MessageApplication.precreate`` works as intended. | def test__MessageApplication__precreate__0():
message_application_id = 202304140001
message_application = MessageApplication.precreate(message_application_id)
_assert_fields_set(message_application)
vampytest.assert_eq(message_application.id, message_application_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__MessageApplication__precreate__1():\n message_application_id = 202304140002\n \n cover = Icon(ICON_TYPE_STATIC, 12)\n icon = Icon(ICON_TYPE_STATIC, 23)\n description = 'Afraid'\n name = 'Chata'\n \n message_application = MessageApplication.precreate(\n message_application_id,\n cover = cover,\n description = description,\n icon = icon,\n name = name,\n )\n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)\n \n vampytest.assert_eq(message_application.cover, cover)\n vampytest.assert_eq(message_application.description, description)\n vampytest.assert_eq(message_application.icon, icon)\n vampytest.assert_eq(message_application.name, name)",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\r\n pass",
"def PreCreate(self, pre):\r\n pass",
"def PreCreate(self, pre):\r\n \r\n pass",
"def test__MessageApplication__create_empty():\n message_application_id = 202304140000\n \n message_application = MessageApplication._create_empty(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing",
"def run_precondition(self) -> bool:\n return self._does_apply",
"def procPreRun(proc):\n\tif 'b' in proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying process begins')\n\t\tEMAIL.send('proc', proc, 'begin')",
"def is_smart_guard_notify_before_creation(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsSmartGuardNotifyBeforeCreation', self.handle))",
"def test__MessageApplication__new__0():\n message_application = MessageApplication()\n _assert_fields_set(message_application)",
"def _should_initialize_check_run(self, payload):\n action = payload.get('action')\n return action in self.initialize_actions or self.initialize_actions is None",
"def test_stage_pre_boot(self, mock_stage_pre_boot):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_pre_boot.called)",
"def PreExecute(self):\n return True",
"def pre_service_appliance_create(self, resource_dict):\n pass",
"def test_create_app():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.create_application_state(\"test_app\", ray.ObjectRef.nil())\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING",
"def _create(self, parsed_args):\n if self.create:\n try:\n resp = self.tapis_client.apps.add(body=self.document)\n self.messages.append(\n ('create', 'Created Tapis app {} revision {}'.format(\n resp.get('id'), resp.get('revision'))))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('create', exc))\n return False\n else:\n raise\n\n return True",
"def has_before(self):\n\n try:\n return self._validate_before()\n except TypeError:\n return False",
"def pre_service_instance_create(self, resource_dict):\n pass",
"def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True",
"def _pre(self):\n if self.has_state_info(\"pre\"):\n self.get_state_info(\"pre\")",
"def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()",
"def pre_service_appliance_set_create(self, resource_dict):\n pass",
"def can_create_application(self):\n permission = 'nominations.add_application'\n user = self.request.user\n local_group = find_local_group_by_user(user)\n if local_group is not None:\n can_create = user.localgroupprofile.has_permission_for_local_group(\n local_group,\n permission\n )\n else:\n can_create = False\n\n return can_create",
"def pre_project_create(self, resource_dict):\n pass",
"def premain(self):\r\n return self._premain"
]
| [
"0.818382",
"0.6619707",
"0.6619707",
"0.6619707",
"0.6619707",
"0.6619707",
"0.64790577",
"0.64790577",
"0.6401003",
"0.5679449",
"0.5576985",
"0.55662435",
"0.5539417",
"0.5460761",
"0.5426637",
"0.5404863",
"0.54007214",
"0.5384768",
"0.5382076",
"0.5349702",
"0.53280884",
"0.52804035",
"0.5260405",
"0.5247629",
"0.524322",
"0.5217668",
"0.52116376",
"0.51991457",
"0.5191636",
"0.5188311"
]
| 0.82373095 | 0 |
Tests whether ``MessageApplication.precreate`` works as intended. | def test__MessageApplication__precreate__1():
message_application_id = 202304140002
cover = Icon(ICON_TYPE_STATIC, 12)
icon = Icon(ICON_TYPE_STATIC, 23)
description = 'Afraid'
name = 'Chata'
message_application = MessageApplication.precreate(
message_application_id,
cover = cover,
description = description,
icon = icon,
name = name,
)
_assert_fields_set(message_application)
vampytest.assert_eq(message_application.id, message_application_id)
vampytest.assert_eq(message_application.cover, cover)
vampytest.assert_eq(message_application.description, description)
vampytest.assert_eq(message_application.icon, icon)
vampytest.assert_eq(message_application.name, name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__MessageApplication__precreate__0():\n message_application_id = 202304140001\n \n message_application = MessageApplication.precreate(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\n pass",
"def PreCreate(self, pre):\r\n pass",
"def PreCreate(self, pre):\r\n pass",
"def PreCreate(self, pre):\r\n \r\n pass",
"def test__MessageApplication__create_empty():\n message_application_id = 202304140000\n \n message_application = MessageApplication._create_empty(message_application_id)\n \n _assert_fields_set(message_application)\n vampytest.assert_eq(message_application.id, message_application_id)",
"def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing",
"def run_precondition(self) -> bool:\n return self._does_apply",
"def procPreRun(proc):\n\tif 'b' in proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying process begins')\n\t\tEMAIL.send('proc', proc, 'begin')",
"def is_smart_guard_notify_before_creation(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsSmartGuardNotifyBeforeCreation', self.handle))",
"def test__MessageApplication__new__0():\n message_application = MessageApplication()\n _assert_fields_set(message_application)",
"def _should_initialize_check_run(self, payload):\n action = payload.get('action')\n return action in self.initialize_actions or self.initialize_actions is None",
"def test_stage_pre_boot(self, mock_stage_pre_boot):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_pre_boot.called)",
"def PreExecute(self):\n return True",
"def pre_service_appliance_create(self, resource_dict):\n pass",
"def test_create_app():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.create_application_state(\"test_app\", ray.ObjectRef.nil())\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING",
"def _create(self, parsed_args):\n if self.create:\n try:\n resp = self.tapis_client.apps.add(body=self.document)\n self.messages.append(\n ('create', 'Created Tapis app {} revision {}'.format(\n resp.get('id'), resp.get('revision'))))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('create', exc))\n return False\n else:\n raise\n\n return True",
"def has_before(self):\n\n try:\n return self._validate_before()\n except TypeError:\n return False",
"def pre_service_instance_create(self, resource_dict):\n pass",
"def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True",
"def _pre(self):\n if self.has_state_info(\"pre\"):\n self.get_state_info(\"pre\")",
"def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()",
"def pre_service_appliance_set_create(self, resource_dict):\n pass",
"def can_create_application(self):\n permission = 'nominations.add_application'\n user = self.request.user\n local_group = find_local_group_by_user(user)\n if local_group is not None:\n can_create = user.localgroupprofile.has_permission_for_local_group(\n local_group,\n permission\n )\n else:\n can_create = False\n\n return can_create",
"def pre_project_create(self, resource_dict):\n pass",
"def premain(self):\r\n return self._premain"
]
| [
"0.82373095",
"0.6619707",
"0.6619707",
"0.6619707",
"0.6619707",
"0.6619707",
"0.64790577",
"0.64790577",
"0.6401003",
"0.5679449",
"0.5576985",
"0.55662435",
"0.5539417",
"0.5460761",
"0.5426637",
"0.5404863",
"0.54007214",
"0.5384768",
"0.5382076",
"0.5349702",
"0.53280884",
"0.52804035",
"0.5260405",
"0.5247629",
"0.524322",
"0.5217668",
"0.52116376",
"0.51991457",
"0.5191636",
"0.5188311"
]
| 0.818382 | 1 |
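The five MessageApplication rows above all exercise one construction pattern: a keyword-only constructor, a `precreate(id, **fields)` classmethod that additionally fixes the snowflake id, and a `_create_empty(id)` fallback that leaves every field at its default. A hedged sketch of an entity class built that way (field names and the `_assert_fields_set` helper are assumptions modelled on the snippets, not the tested library's actual implementation):

```python
class Entity:
    __slots__ = ("id", "name", "description")

    def __new__(cls, *, name="", description=""):
        self = object.__new__(cls)
        self.id = 0                      # unknown until precreated / received
        self.name = name
        self.description = description
        return self

    @classmethod
    def _create_empty(cls, entity_id):
        # Only the identifier is known; every other field keeps its default.
        self = cls()
        self.id = entity_id
        return self

    @classmethod
    def precreate(cls, entity_id, **fields):
        # Same as the constructor, but the identifier is supplied up front.
        self = cls(**fields)
        self.id = entity_id
        return self

def _assert_fields_set(entity):
    # Mirrors the tests above: every declared field exists after construction.
    for attr in Entity.__slots__:
        assert hasattr(entity, attr)

_assert_fields_set(Entity.precreate(202304140002, name="Chata"))
```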
Performs dynamic decoding with `decoders`. Calls prepare() once and step() repeatedly on `Decoder` object. | def dynamic_ensemble_decode(
decoders,
encoder_outputs,
bridges,
target_modalities,
helper,
parallel_iterations=32,
swap_memory=False):
var_scope = tf.get_variable_scope()
# Properly cache variable values inside the while_loop
if var_scope.caching_device is None:
var_scope.set_caching_device(lambda op: op.device)
def _create_ta(d):
return tf.TensorArray(
dtype=d, clear_after_read=False,
size=0, dynamic_size=True)
decoder_output_removers = nest.map_structure(lambda dec: DecoderOutputRemover(
dec.mode, dec.output_dtype._fields, dec.output_ignore_fields), decoders)
# initialize first inputs (start of sentence) with shape [_batch*_beam,]
initial_finished, initial_input_symbols = helper.init_symbols()
initial_time = tf.constant(0, dtype=tf.int32)
initial_input_symbols_embed = nest.map_structure(
lambda modality: _embed_words(modality, initial_input_symbols, initial_time),
target_modalities)
inputs_preprocessing_fns = []
inputs_postprocessing_fns = []
initial_inputs = []
initial_decoder_states = []
decoding_params = []
for dec, enc_out, bri, inp in zip(decoders, encoder_outputs, bridges, initial_input_symbols_embed):
with tf.variable_scope(dec.name):
inputs_preprocessing_fn, inputs_postprocessing_fn = dec.inputs_prepost_processing_fn()
inputs = inputs_postprocessing_fn(None, inp)
dec_states, dec_params = dec.prepare(enc_out, bri, helper) # prepare decoder
dec_states = stack_beam_size(dec_states, helper.beam_size)
dec_params = stack_beam_size(dec_params, helper.beam_size)
# add to list
inputs_preprocessing_fns.append(inputs_preprocessing_fn)
inputs_postprocessing_fns.append(inputs_postprocessing_fn)
initial_inputs.append(inputs)
initial_decoder_states.append(dec_states)
decoding_params.append(dec_params)
initial_outputs_tas = nest.map_structure(
lambda dec_out_rem, dec: nest.map_structure(
_create_ta, dec_out_rem.apply(dec.output_dtype)),
decoder_output_removers, decoders)
def body_infer(time, inputs, decoder_states, outputs_tas, finished,
log_probs, lengths, infer_status_ta):
"""Internal while_loop body.
Args:
time: Scalar int32 Tensor.
inputs: A list of inputs Tensors.
decoder_states: A list of decoder states.
outputs_tas: A list of TensorArrays.
finished: A bool tensor (keeping track of what's finished).
log_probs: The log probability Tensor.
lengths: The decoding length Tensor.
infer_status_ta: structure of TensorArray.
Returns:
`(time + 1, next_inputs, next_decoder_states, next_outputs_tas,
next_finished, next_log_probs, next_lengths, next_infer_status_ta)`.
"""
# step decoder
outputs = []
cur_inputs = []
next_decoder_states = []
for dec, inp, pre_fn, stat, dec_params in \
zip(decoders, inputs, inputs_preprocessing_fns, decoder_states, decoding_params):
with tf.variable_scope(dec.name):
inp = pre_fn(time, inp)
out, next_stat = dec.step(inp, stat, dec_params)
cur_inputs.append(inp)
outputs.append(out)
next_decoder_states.append(next_stat)
next_outputs_tas = []
for out_ta, out, rem in zip(outputs_tas, outputs, decoder_output_removers):
ta = nest.map_structure(lambda ta, out: ta.write(time, out),
out_ta, rem.apply(out))
next_outputs_tas.append(ta)
logits = []
for dec, modality, out in zip(decoders, target_modalities, outputs):
logits.append(_compute_logits(dec, modality, out))
# sample next symbols
sample_ids, beam_ids, next_log_probs, next_lengths \
= helper.sample_symbols(logits, log_probs, finished, lengths, time=time)
gathered_states = []
for next_stat in next_decoder_states:
gathered_states.append(gather_states(next_stat, beam_ids))
cur_inputs = nest.map_structure(lambda inp: gather_states(inp, beam_ids),
cur_inputs)
infer_status = BeamSearchStateSpec(
log_probs=next_log_probs,
predicted_ids=sample_ids,
beam_ids=beam_ids,
lengths=next_lengths)
infer_status_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
infer_status_ta, infer_status)
next_finished, next_input_symbols = helper.next_symbols(time=time, sample_ids=sample_ids)
next_inputs_embed = nest.map_structure(lambda modality: _embed_words(modality, next_input_symbols, time + 1),
target_modalities)
next_finished = tf.logical_or(next_finished, finished)
next_inputs = []
for dec, cur_inp, next_inp, post_fn in zip(decoders, cur_inputs, next_inputs_embed, inputs_postprocessing_fns):
with tf.variable_scope(dec.name):
next_inputs.append(post_fn(cur_inp, next_inp))
return time + 1, next_inputs, gathered_states, next_outputs_tas, \
next_finished, next_log_probs, next_lengths, infer_status_ta
initial_log_probs = tf.zeros_like(initial_input_symbols, dtype=tf.float32)
initial_lengths = tf.zeros_like(initial_input_symbols, dtype=tf.int32)
initial_infer_status_ta = nest.map_structure(_create_ta, BeamSearchStateSpec.dtypes())
loop_vars = [initial_time, initial_inputs, initial_decoder_states,
initial_outputs_tas, initial_finished,
# infer vars
initial_log_probs, initial_lengths, initial_infer_status_ta]
res = tf.while_loop(
lambda *args: tf.logical_not(tf.reduce_all(args[4])),
body_infer,
loop_vars=loop_vars,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_infer_status = nest.map_structure(lambda ta: ta.stack(), res[-1])
return final_infer_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_decode(self, encoded_data):\n \n config.COD_PROMPT = config.DEC_PROMPT\n print config.DEC_PROMPT + \" decoding...\"\n \n # while there is another decoder, run each item through the next decoder\n data = encoded_data\n success = False\n for decoder in self.decoder_list:\n current_decoder = decoder()\n success, data = self.recursive_decoder(current_decoder.decode, data)\n if not success:\n break\n print config.DEC_PROMPT + \"%s decoded to '%s'\" % ( current_decoder.name(),data)\n return success, data",
"def decoder(self, embedded_inputs, decoder_input0,\n decoder_hidden0, encoder_outputs):\n pass",
"def build_decoder(opt, embeddings):\n return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings)",
"def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)",
"def _construct_encoders_decoders(self):\n self.enc_inp = {}\n self.dec_out = {}\n if self.encode_hints:\n self.enc_hint = {}\n if self.decode_diffs:\n self.node_dec_diff = hk.Linear(1)\n self.edge_dec_diff = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n self.graph_dec_diff = (hk.Linear(1), hk.Linear(1))\n if self.decode_hints:\n self.dec_hint = {}\n\n for name in self.spec:\n stage, loc, t = self.spec[name]\n if stage == _Stage.INPUT:\n self.enc_inp[name] = [hk.Linear(self.hidden_dim)]\n if loc == _Location.EDGE and t == _Type.POINTER:\n # Edge pointers need two-way encoders\n self.enc_inp[name].append(hk.Linear(self.hidden_dim))\n\n elif stage == _Stage.OUTPUT:\n if loc == _Location.NODE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1),)\n elif t == _Type.CATEGORICAL:\n self.dec_out[name] = (hk.Linear(self.nb_dims[name]),)\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.EDGE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_out[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims),\n hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.GRAPH:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_out[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n else:\n raise ValueError('Incorrect location')\n\n elif stage == _Stage.HINT:\n if self.encode_hints:\n self.enc_hint[name] = [hk.Linear(self.hidden_dim)]\n if loc == _Location.EDGE and t == _Type.POINTER:\n # Edge pointers need two-way encoders\n self.enc_hint[name].append(hk.Linear(self.hidden_dim))\n\n if self.decode_hints:\n if loc == _Location.NODE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1),)\n elif t == _Type.CATEGORICAL:\n self.dec_hint[name] = (hk.Linear(self.nb_dims[name]),)\n elif t == _Type.POINTER:\n self.dec_hint[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.EDGE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_hint[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims),\n hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_hint[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.GRAPH:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_hint[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_hint[name] = 
(hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n else:\n raise ValueError('Incorrect location')",
"def _decoder(self, inputs, z_dimension, mcd):\n \n latent_inputs = Input(shape=(z_dimension,), name=\"z_sampling\")\n x = latent_inputs\n x = Dense(\n self.hidden_size // 4,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 3,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 2,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n outputs = Dense(\n self.n_dims,\n activation=self.output_activation,\n kernel_initializer=self.weight_init,\n )(x)\n \n self.decoder = Model(latent_inputs, outputs, name=\"decoder\")\n \n outputs = self.decoder(self.encoder(inputs)[0])\n \n return self.decoder, outputs",
"def build_decoder(opt, embeddings):\n dec_type = \"ifrnn\" if opt.decoder_type == \"rnn\" and opt.input_feed \\\n else opt.decoder_type\n return str2dec[dec_type].from_opt(opt, embeddings)",
"def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 64*64*3, bias=False), nn.ReLU(),\n View((-1, 3, 64, 64)),\n )",
"def _build_decoder(self, hparams, inputs, initial_state, is_training):\n ## Decoder.\n with tf.variable_scope(\"trajectory_decoder\"):\n if hparams.decoder_type == \"fc\":\n regression = self._build_fc_decoder(hparams, inputs, is_training)\n final_states = None\n \n elif hparams.decoder_type == \"rnn\":\n list_dummy_input = []\n with tf.name_scope(\"dummy_input\"):\n for gpu_idx in range(self.num_gpu):\n with tf.device(tf.DeviceSpec(device_type=\"GPU\", device_index=gpu_idx)), tf.name_scope(\"tower_{:d}\".format(gpu_idx)):\n list_dummy_input.append(tf.zeros(tf.stack([self.target_length, self.batch_size[gpu_idx], 1])))\n \n with tf.variable_scope(\"rnn\"):\n if hparams.encoder_type == \"cnn\":\n with tf.variable_scope(\"rnn_initial_state\"):\n initial_state = self._make_initial_states(hparams, inputs)\n\n net, final_states = self._build_rnn_decoder(hparams, list_dummy_input, initial_state, is_training)\n\n with tf.name_scope(\"time_batch_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n \n with tf.variable_scope(\"projection\"):\n regression = self._build_output_projection(hparams, net, is_training)\n\n else:\n raise ValueError(\"Unknown decoder type {:s}.\".format(hparams.decoder_type))\n\n return regression, final_states",
"def recursive_decoder(self, decoder, encoded_data, full_body = False):\n decoded_data = []\n \n success = True\n \n try:\n \n #If string or full_body flag is set, apply the codec to the entire body of data\n if isinstance(encoded_data,basestring) or full_body:\n decoded_data.append(decoder(encoded_data))\n\n # if its a dictionary, apply codec to the key and also the value. If the value is a container, call recursive decoder.\n elif type(encoded_data) is dict:\n \n decoded_portion = {}\n for encoded_key, encoded_value in encoded_data.items():\n decoded_key = decoder(encoded_key)\n \n if type(encoded_value) is list or type(encoded_value) is dict:\n success, data = self.recursive_decoder(decoder, encoded_value)\n decoded_value = data\n else:\n decoded_value = decoder(encoded_value)\n \n decoded_portion[decoded_key] = decoded_value\n\n decoded_data.append(decoded_portion)\n # If the contents is a list or tuple, recursively decode each element by sending itself to the function\n elif type(encoded_data) is list or type(encoded_data) is tuple:\n\n for encoded_portion in encoded_data:\n \n success, data = self.recursive_decoder(decoder, encoded_portion)\n if success:\n decoded_data.append(data)\n else:\n return (False, None)\n \n else:\n print config.COD_PROMPT + 'Data was not formatted as dict, list/tuple, string!'\n raise\n \n ## NOTE: If nested multiple commands breaks, this is likely the culprit\n if len(decoded_data) == 1:\n decoded_data = decoded_data[0]\n \n except Exception, e:\n print config.COD_PROMPT + \" Issue in codec while trying to code %s\" % (encoded_data)\n return (False, None)\n \n return success, decoded_data",
"def decoder(self, z):\n data = {}\n for key, decoder in self.decoders_func.items():\n data[key] = decoder(z)\n return data",
"def _DecodeStep():\n _, decode_dict = self._model.ConstructDecodeGraph()\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return [self._OutfeedEnqueue(decode_dict)]",
"def call(self,\n inputs,\n cache=None,\n decode_loop_step=None,\n padded_decode=False):\n attention_bias = inputs[\"attention_bias\"]\n target_ids = inputs[\"target_ids\"]\n all_encoder_outputs = inputs[\"all_encoder_outputs\"]\n self_attention_bias = inputs[\"self_attention_bias\"]\n if not isinstance(all_encoder_outputs, list):\n all_encoder_outputs = [all_encoder_outputs]\n\n target_embeds = self.embedding_lookup(target_ids)\n if decode_loop_step is None:\n target_embeds = self.embedding_postprocessor(target_embeds)\n else:\n target_embeds = self._decoding_step_time_signal(target_embeds,\n decode_loop_step)\n decoder_inputs = dict(\n decoder_inputs=target_embeds,\n encoder_outputs=all_encoder_outputs,\n self_attention_mask=self_attention_bias,\n attention_mask=attention_bias)\n if self.multi_channel_cross_attention:\n decoder_inputs[\"doc_attention_probs\"] = inputs[\"doc_attention_probs\"]\n decode_outputs, cache = self.decoder(\n decoder_inputs, cache, decode_loop_step if padded_decode else None)\n return decode_outputs",
"def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.SELU(),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.SELU(),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.SELU(),\n nn.ConvTranspose2d(32, 3, 4, 2, 1), # B, nc, 64, 64\n nn.ReLU()\n )",
"def _FinalizeDecode(self,\n dataset_name,\n dec_metrics,\n start_time,\n global_step,\n buffered_decode_out,\n futures=None):\n if futures:\n # Wait for all async postprocessing jobs to finish.\n for future in futures:\n future.get()\n elapsed_secs = time.time() - start_time\n # TODO(xingwu): simplify summaries format.\n summaries = {k: v.Summary(k) for k, v in dec_metrics.items()}\n for k, v in dec_metrics.items():\n if k.startswith('num_samples_in_batch'):\n cumulative_key = 'cumulative_num_examples' + k.removeprefix(\n 'num_samples_in_batch')\n summaries[cumulative_key] = tf.Summary(value=[\n tf.Summary.Value(tag=cumulative_key, simple_value=v.total_value)\n ])\n example_rate = v.total_value / elapsed_secs\n speed_key = 'examples/sec' + k.removeprefix('num_samples_in_batch')\n summaries[speed_key] = tf.Summary(\n value=[tf.Summary.Value(tag=speed_key, simple_value=example_rate)])\n\n self._WriteSummaries(\n os.path.basename(self._program_dir), dataset_name, global_step,\n summaries)\n decode_out_path = os.path.join(self._program_dir,\n 'decoder_out_%09d' % global_step)\n decode_finalize_args = base_model.DecodeFinalizeArgs(\n decode_out_path=decode_out_path, decode_out=buffered_decode_out)\n self._task.DecodeFinalize(decode_finalize_args)\n\n # Result is not returned as a signal for \"done\", unlike for training.\n self._ReportVizierMetrics(global_step, dec_metrics)\n self._dataset_summaries[dataset_name] = summaries\n return dataset_name, summaries",
"def decoder_setup_1():\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 16, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n {'in': 16, 'out': 8, 'kernel': (1, 3, 3), 'stride': 1},\n {'in': 8, 'out': 1, 'kernel': (1, 1, 1), 'stride': 1}\n ],\n )\n return decoder",
"def _build_decoder(self, encoder_outputs, encoder_state, hparams):\n\t\ttgt_sos_id = tf.cast(tf.constant(hparams.sos_id), tf.int32)\n\t\ttgt_eos_id = tf.cast(tf.constant(hparams.eos_id), tf.int32)\n\n\t\tmaximum_iterations = self._get_infer_maximum_iterations(hparams)\n\n\t\t# Decoder\n\t\twith tf.variable_scope('decoder') as decoder_scope:\n\t\t\tcell, decoder_initial_state = self._build_decoder_cell(hparams, encoder_state)\n\t\t\t\n\t\t\tlogits = tf.no_op()\n\t\t\tdecoder_outputs = None\n\n\t\t\t# Train or Eval\n\t\t\tif self.mode != 'infer':\n\t\t\t\tdecoder_emb_input = tf.nn.embedding_lookup(self.embedding_decoder, self.decoder_input_data)\n\n\t\t\t\t# helper\n\t\t\t\thelper = tf.contrib.seq2seq.TrainingHelper(\n\t\t\t\t\tdecoder_emb_input, self.seq_length_decoder_input_data)\n\t\t\t\t\n\t\t\t\t# decoder\n\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\tcell,\n\t\t\t\t\thelper,\n\t\t\t\t\tdecoder_initial_state)\n\t\t\t\t\n\t\t\t\t# dynamic decoding\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\t\n\t\t\t\tsample_id = outputs.sample_id\n\t\t\t\tlogits = self.output_layer(outputs.rnn_output)\n\t\t\telse:\n\t\t\t\tinfer_mode = hparams.infer_mode\n\t\t\t\tstart_tokens = tf.fill([self.batch_size], tgt_sos_id)\n\t\t\t\tend_token = tgt_eos_id\n\t\t\t\t_info(' decoder by infer_mode={} beam_width={}'.format(infer_mode, hparams.beam_width))\n\n\t\t\t\tif infer_mode == 'greedy':\n\t\t\t\t\thelper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n\t\t\t\t\t\tself.embedding_decoder, start_tokens, end_token)\n\t\t\t\telif infer_mode == 'beam_search':\n\t\t\t\t\tbeam_width = hparams.beam_width\n\t\t\t\t\tlength_penalty_weight = hparams.length_penalty_weight\n\t\t\t\t\tcoverage_penalty_weight = hparams.coverage_penalty_weight\n\n\t\t\t\t\t# beam search do not require helper\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n\t\t\t\t\t\tcell=cell,\n\t\t\t\t\t\tembedding=self.embedding_decoder,\n\t\t\t\t\t\tstart_tokens=start_tokens,\n\t\t\t\t\t\tend_token=end_token,\n\t\t\t\t\t\tinitial_state=decoder_initial_state,\n\t\t\t\t\t\tbeam_width=beam_width,\n\t\t\t\t\t\toutput_layer=self.output_layer,\n\t\t\t\t\t\tlength_penalty_weight=length_penalty_weight,\n\t\t\t\t\t\tcoverage_penalty_weight=coverage_penalty_weight)\n\t\t\t\telse:\n\t\t\t\t\t_error('Unknown infer_mode {}'.format(infer_mode))\n\t\t\t\t\traise ValueError\n\t\t\t\t\n\t\t\t\tif infer_mode != 'beam_search':\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\t\tcell,\n\t\t\t\t\t\thelper,\n\t\t\t\t\t\tdecoder_initial_state,\n\t\t\t\t\t\toutput_layer=self.output_layer)\t\t# apply to the RNN output prior to storing the result or sampling\n\t\t\t\t\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tmaximum_iterations=maximum_iterations,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\n\t\t\t\tif infer_mode == 'beam_search':\n\t\t\t\t\tsample_id = outputs.predicted_ids\n\t\t\t\telse:\n\t\t\t\t\tlogits = outputs.rnn_output\n\t\t\t\t\tsample_id = outputs.sample_id\n\n\t\treturn logits, sample_id, final_context_state",
"def _DecodeStep(self,\n sess,\n step,\n dec_metrics,\n global_step,\n buffered_decode_out,\n postprocess_futures,\n dataset_name,\n threadpool=None):\n tf.logging.info(f'Decoding step {step}')\n fetch_start = time.time()\n if py_utils.IsEagerMode():\n async_executor = executor.new_executor(enable_async=True)\n with context.executor_scope(async_executor):\n cpu_pt = self.infeed_fn()\n\n if isinstance(self.tpu_outs, dict):\n tpu_out = self.tpu_outs[dataset_name]\n else:\n tpu_out = self.tpu_outs\n decode_out_dict = _FetchDecodeOut(tpu_out, sess)\n if py_utils.IsEagerMode():\n # Ensure that the infeed ops are finished\n # This is necessary to ensure that any state in the infeed ops is\n # synchronized before the next device loop. Otherwise we might see that\n # a device loop still using the same data batches in the last device loop.\n async_executor.wait()\n decode_out_dict = _UpdateCpuPassThroughData(decode_out_dict, cpu_pt)\n\n tf.logging.info(f'Finished TPU decoding on step {step}')\n dec_metrics['decode_secs'].Update(time.time() - fetch_start)\n if self.params.postprocess_all_at_once:\n # Accumulate decode_out_dicts and skip postprocess until the end.\n self._decode_out_dict_lst.append(decode_out_dict)\n else:\n self._RunPostProcess(threadpool, step, decode_out_dict, dec_metrics,\n global_step, buffered_decode_out,\n postprocess_futures)",
"def decoder_setup_2():\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': False},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': False}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n\n ],\n )\n return decoder",
"def decoder_model(self, input_shape, layers, filters, latent_dims):\n\n flat_dims = self.encoder.get_layer('Flatten_dims').output_shape\n pool_dims = self.encoder.get_layer('Flatten_dims').input_shape\n\n latent_inputs = Input(shape=(latent_dims,), name='z_sampling')\n latent_to_reshape = Dense(flat_dims[-1], activation='relu')(latent_inputs)\n reshape_to_up = Reshape(pool_dims[1:])(latent_to_reshape)\n \n l = [reshape_to_up]\n\n for i in range(0,layers):\n l.append(UpSampling2D(size=(2,2), data_format='channels_last',\n name='Upsample_'+str(i))(l[i*2]))\n l.append(Conv2D(filters[-i-1], (3,3), padding='same',\n data_format='channels_last', name='DeConv_'+str(i),\n activation='relu')(l[i*2+1]))\n\n l.append(Conv2D(1, (3,3), padding='same',\n data_format='channels_last', name='decoder_output',\n activation='sigmoid')(l[-1]))\n\n decoder = Model(latent_inputs, l[-1], name='decoder')\n\n decoder.summary()\n\n return decoder",
"def get_decoders(status=None, path=None, file=None, name=None, parents=False, offset=0, limit=common.database_limit, sort=None, search=None):\n status = Decoder.__check_status(status)\n all_decoders = []\n\n for decoder_file in Decoder.get_decoders_files(status=status, limit=None)['items']:\n if glob(decoder_file['path']):\n all_decoders.extend(Decoder.__load_decoders_from_file(decoder_file['file'], decoder_file['path'], decoder_file['status']))\n\n if not all_decoders:\n if (glob(common.default_decoder_xml)):\n all_decoders.extend(Decoder.__load_decoders_from_file(\"decoder.xml\", common.default_decoder_xml, Decoder.S_ENABLED))\n if (glob(common.custom_decoder_xml)):\n all_decoders.extend(Decoder.__load_decoders_from_file(\"local_decoder.xml\", common.custom_decoder_xml, Decoder.S_ENABLED))\n\n decoders = list(all_decoders)\n for d in all_decoders:\n if path and path != d.path:\n decoders.remove(d)\n continue\n if file and file != d.file:\n decoders.remove(d)\n continue\n if name and name != d.name:\n decoders.remove(d)\n continue\n if parents and 'parent' in d.details:\n decoders.remove(d)\n continue\n\n if search:\n decoders = search_array(decoders, search['value'], search['negation'])\n\n if sort:\n decoders = sort_array(decoders, sort['fields'], sort['order'], Decoder.SORT_FIELDS)\n else:\n decoders = sort_array(decoders, ['file', 'position'], 'asc')\n\n return {'items': cut_array(decoders, offset, limit), 'totalItems': len(decoders)}",
"def _decode(self, input_dict):\n encoder_outputs = input_dict['encoder_output']['outputs']\n enc_src_lengths = input_dict['encoder_output']['src_length']\n if self._mode == 'train':\n spec = (\n input_dict['target_tensors'][0]\n if 'target_tensors' in input_dict\n else None\n )\n spec_length = (\n input_dict['target_tensors'][1]\n if 'target_tensors' in input_dict\n else None\n )\n\n _batch_size = tf.shape(encoder_outputs)[0]\n\n training = self._mode == 'train'\n regularizer = self.params.get('regularizer', None)\n\n if self.params.get('enable_postnet', True):\n if 'postnet_conv_layers' not in self.params:\n raise ValueError(\n 'postnet_conv_layers must be passed from config file if postnet is'\n 'enabled'\n )\n\n num_audio_features = self._n_feats\n\n output_projection_layer = tf.layers.Dense(\n name='output_proj', units=num_audio_features, use_bias=True\n )\n stop_token_projection_layer = tf.layers.Dense(\n name='stop_token_proj', units=1, use_bias=True\n )\n\n prenet = None\n if self.params.get('enable_prenet', True):\n prenet = Prenet(\n self.params.get('prenet_units', 256),\n self.params.get('prenet_layers', 2),\n self.params.get('prenet_dropout', 0.5),\n self.params.get('prenet_enable_dropout', True),\n self.params.get('prenet_activation', tf.nn.relu),\n self.params['dtype'],\n )\n\n cell_params = {}\n cell_params['num_units'] = self.params['decoder_cell_units']\n decoder_cells = [\n single_cell(\n cell_class=self.params['decoder_cell_type'],\n cell_params=cell_params,\n zoneout_prob=self.params.get('zoneout_prob', 0.0),\n dp_output_keep_prob=1.0\n - self.params.get('dropout_prob', 0.1),\n training=training,\n )\n for _ in range(self.params['decoder_layers'])\n ]\n\n if self.params['attention_type'] is not None:\n attention_mechanism = self._build_attention(\n encoder_outputs,\n enc_src_lengths,\n self.params.get('attention_bias', False),\n )\n\n attention_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n attentive_cell = AttentionWrapper(\n cell=attention_cell,\n attention_mechanism=attention_mechanism,\n alignment_history=True,\n output_attention='both',\n )\n\n decoder_cell = attentive_cell\n\n if self.params['attention_type'] is None:\n decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n if self._mode == 'train':\n train_and_not_sampling = True\n helper = TacotronTrainingHelper(\n inputs=spec,\n sequence_length=spec_length,\n prenet=None,\n model_dtype=self.params['dtype'],\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n elif self._mode == 'eval' or self._mode == 'infer':\n train_and_not_sampling = False\n inputs = tf.zeros(\n (_batch_size, 1, num_audio_features),\n dtype=self.params['dtype'],\n )\n helper = TacotronHelper(\n inputs=inputs,\n prenet=None,\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n else:\n raise ValueError('Unknown mode for decoder: {}'.format(self._mode))\n decoder = TacotronDecoder(\n decoder_cell=decoder_cell,\n helper=helper,\n initial_decoder_state=decoder_cell.zero_state(\n _batch_size, self.params['dtype']\n ),\n attention_type=self.params['attention_type'],\n spec_layer=output_projection_layer,\n stop_token_layer=stop_token_projection_layer,\n prenet=prenet,\n dtype=self.params['dtype'],\n train=train_and_not_sampling,\n )\n\n if self._mode == 'train':\n maximum_iterations = tf.reduce_max(spec_length)\n else:\n maximum_iterations = tf.reduce_max(enc_src_lengths) * 10\n\n outputs, final_state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n # outputs, 
final_state, sequence_lengths, final_inputs = dynamic_decode(\n decoder=decoder,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n swap_memory=self.params.get('use_swap_memory', False),\n output_time_major=self.params.get('time_major', False),\n parallel_iterations=self.params.get('parallel_iterations', 32),\n )\n\n decoder_output = outputs.rnn_output\n stop_token_logits = outputs.stop_token_output\n\n with tf.variable_scope('decoder'):\n # If we are in train and doing sampling, we need to do the projections\n if train_and_not_sampling:\n decoder_spec_output = output_projection_layer(decoder_output)\n stop_token_logits = stop_token_projection_layer(\n decoder_spec_output\n )\n decoder_output = decoder_spec_output\n\n ## Add the post net ##\n if self.params.get('enable_postnet', True):\n dropout_keep_prob = self.params.get(\n 'postnet_keep_dropout_prob', 0.5\n )\n\n top_layer = decoder_output\n for i, conv_params in enumerate(self.params['postnet_conv_layers']):\n ch_out = conv_params['num_channels']\n kernel_size = conv_params['kernel_size'] # [time, freq]\n strides = conv_params['stride']\n padding = conv_params['padding']\n activation_fn = conv_params['activation_fn']\n\n if ch_out == -1:\n ch_out = self._n_feats\n\n top_layer = conv_bn_actv(\n layer_type='conv1d',\n name='conv{}'.format(i + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=activation_fn,\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=self.params.get(\n 'postnet_data_format', 'channels_last'\n ),\n bn_momentum=self.params.get('postnet_bn_momentum', 0.1),\n bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),\n )\n top_layer = tf.layers.dropout(\n top_layer,\n rate=1.0 - dropout_keep_prob,\n training=training,\n )\n\n else:\n top_layer = tf.zeros(\n [\n _batch_size,\n maximum_iterations,\n outputs.rnn_output.get_shape()[-1],\n ],\n dtype=self.params['dtype'],\n )\n\n if regularizer and training:\n vars_to_regularize = []\n vars_to_regularize += attentive_cell.trainable_variables\n vars_to_regularize += (\n attention_mechanism.memory_layer.trainable_variables\n )\n vars_to_regularize += output_projection_layer.trainable_variables\n vars_to_regularize += (\n stop_token_projection_layer.trainable_variables\n )\n\n for weights in vars_to_regularize:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == tf.float16:\n tf.add_to_collection(\n 'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )\n\n if self.params.get('enable_prenet', True):\n prenet.add_regularization(regularizer)\n\n if self.params['attention_type'] is not None:\n alignments = tf.transpose(\n final_state.alignment_history.stack(), [1, 2, 0]\n )\n else:\n alignments = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n spectrogram_prediction = decoder_output + top_layer\n\n mag_spec_prediction = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n stop_token_prediction = tf.sigmoid(stop_token_logits)\n outputs = [\n decoder_output,\n spectrogram_prediction,\n alignments,\n stop_token_prediction,\n sequence_lengths,\n mag_spec_prediction,\n ]\n\n return {'outputs': outputs, 'stop_token_prediction': stop_token_logits}",
"def _DecodeFn():\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()",
"def _define_decoder(self):\n raise NotImplementedError",
"def decode(self, probs, sizes=None):\n raise NotImplementedError",
"def forward(self, inputs_encoder, inputs_decoder):\n states_encoder = self.encoder(inputs_encoder)\n outputs_decoder, states_decoder = self.decoder(inputs_decoder, states_encoder)\n return outputs_decoder, states_decoder",
"def decoder_setup_5():\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n\n ],\n )\n return decoder",
"def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\n with tf.variable_scope(\"decoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\n\n return fw_states, bw_states",
"def DecodeFunc(self):\n\n def _DecodeStep():\n \"\"\"Decode call to be compiled for TPU.\"\"\"\n _, decode_dict = self._model.ConstructDecodeGraph()\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return [self._OutfeedEnqueue(decode_dict)]\n\n @tpu_function.on_device_training_loop\n def DecodeLoopFn():\n return tpu_training_loop.repeat(\n self._steps_per_loop, _DecodeStep, inputs=[])\n\n self._compile_op, self.decode_loop = tpu.split_compile_and_shard(\n DecodeLoopFn,\n num_shards=self.data_parallelism,\n device_assignment=py_utils.GetTpuDeviceAssignment())\n\n # Pack the list of outfeed ops with structure in decode_nm.\n decode_tensors = self.decode_nm.Pack(self._OutfeedDequeue(self.decode_nm))\n cpu_pt = self._task.input.DequeueCpuPassthrough()\n return decode_tensors, cpu_pt",
"def create_decoder():\n # Create decoder instance and add predictors\n \n try:\n decoder = decoding.DECODER_REGISTRY[args.decoder](args)\n except Exception as e:\n logging.fatal(\"An %s has occurred while initializing the decoder: %s\"\n \" Stack trace: %s\" % (sys.exc_info()[0],\n e,\n traceback.format_exc()))\n sys.exit(\"Could not initialize decoder.\")\n\n add_predictor(decoder)\n return decoder"
]
| [
"0.6254693",
"0.6162041",
"0.61465585",
"0.58814126",
"0.58171636",
"0.5766235",
"0.5721679",
"0.57103765",
"0.56933624",
"0.56929505",
"0.5663599",
"0.5662473",
"0.5655391",
"0.56234926",
"0.5605497",
"0.55574185",
"0.55421275",
"0.5532227",
"0.5525167",
"0.5495278",
"0.54796517",
"0.5477568",
"0.5476332",
"0.54585546",
"0.5444621",
"0.5441082",
"0.54362434",
"0.5420883",
"0.5420635",
"0.5404299"
]
| 0.646081 | 0 |
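Each negatives list above is followed by a parallel list of float scores, one per snippet and in the same order, plus the positive document's own score and rank. A minimal sketch of reading such a pair back in Python is shown below; `negatives` and `scores` are placeholder variables standing in for one record's two lists, not fields defined elsewhere in this file.

```python
# Minimal sketch: re-rank one record's negative snippets by their scores.
# `negatives` and `scores` are illustrative stand-ins for the parallel lists above.
negatives = ["def _decode_infer(...): ...", "def build_decoder(...): ...", "def decode(...): ..."]
scores = [0.6254693, 0.5881413, 0.5476332]

ranked = sorted(zip(scores, negatives), key=lambda pair: pair[0], reverse=True)
for rank, (score, snippet) in enumerate(ranked, start=1):
    print(f"{rank:2d}  {score:.4f}  {snippet[:40]}")
```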
Creates ensemble weights from `weight_scheme`. Currently, only weight_scheme="average" is available. | def get_ensemble_weights(self, num_models):
if self._weight_scheme == "average":
return [1.0 / float(num_models)] * int(num_models)
# TODO can also directly process weights, like "0.1,0.1"
raise NotImplementedError("This weight scheme is not implemented: {}."
.format(self._weight_scheme)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_weights_(self):\n raise NotImplementedError",
"def my_assign_weights(context, data):\n pass",
"def _initialize_weights(self):\n pass",
"def init_sg_weights(self):\n n = self.weights_shape[0] # size of current layer\n # pylint: disable=no-member\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n # pylint: enable=no-member\n self.sg_weights = [A, B, C]",
"def standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None and sample_weight_mode != 'samplewise':\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError(\n 'Found a sample_weight array with shape {}. In order to '\n 'use timestep-wise sample weights, you should specify '\n 'sample_weight_mode=\"temporal\" in compile(); founssd \"{}\" '\n 'instead. If you just mean to use sample-wise weights, '\n 'make sure your sample_weight array is 1D.'.format(\n sample_weight.shape, sample_weight_mode))\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tensor_util.is_tf_type(sample_weight) and\n y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + ' for an input with shape ' +\n str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n\n if tensor_util.is_tf_type(y):\n # Few classes are expected, so densifying is reasonable.\n keys = np.array(sorted(class_weight.keys()))\n values = np.array([class_weight[i] for i in keys])\n weight_vector = np.zeros(np.max(keys) + 1)\n weight_vector[:] = np.nan\n weight_vector[keys] = values\n\n y_classes = smart_cond.smart_cond(\n len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1,\n lambda: backend.argmax(y, axis=1),\n lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n class_sample_weight = array_ops.gather(weight_vector, y_classes)\n gen_array_ops.check_numerics(\n class_sample_weight,\n 'Invalid classes or class weights detected. 
NaN values indicate that '\n 'an appropriate class weight could not be determined.')\n class_sample_weight = math_ops.cast(class_sample_weight, backend.floatx())\n if sample_weight is not None:\n sample_weight = math_ops.cast(\n tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight),\n backend.floatx(),\n )\n else:\n y_classes = y\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError(\n '`class_weight` must contain all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.' % (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None",
"def init_weights(model):\n ...",
"def standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None):\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None:\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) +\n '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) +\n '. '\n 'In order to use timestep-wise sample weights, '\n 'you should specify '\n 'sample_weight_mode=\"temporal\" '\n 'in compile(). If you just mean to use '\n 'sample-wise weights, make sure your '\n 'sample_weight array is 1D.')\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' + str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tensor_util.is_tensor(sample_weight)\n and y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) +\n ' for an input with shape ' + str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for ' '3+ dimensional targets.')\n\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = K.argmax(y, axis=1)\n # y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n else:\n y_classes = y\n\n # class_sample_weight = np.asarray(\n # [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n keys = list(map(lambda x: tf.cast(x, tf.int32), class_weight.keys()))\n values = list(map(lambda x: tf.cast(x, tf.int32), class_weight.values()))\n key_value = tf.contrib.lookup.KeyValueTensorInitializer(keys, values)\n class_weight_table = tf.contrib.lookup.HashTable(key_value, -1)\n class_sample_weight = class_weight_table.lookup(tf.cast(y_classes, tf.int32))\n class_weight_table.init.run(session=K.get_session())\n\n # print(K.get_session().run(class_sample_weight))\n # class_sample_weight = np.asarray(\n # [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n # if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n # existing_classes = set(y_classes)\n # existing_class_weight = set(class_weight.keys())\n # raise ValueError('`class_weight` must contain all classes in the data.'\n # ' The classes %s exist in the data but not in '\n # '`class_weight`.' 
% (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None",
"def _create_weights(self):\n gate_size = self._hidden_size * self._num_gates\n # Compute the shape of weight and bias.\n matrix_shapes, bias_shapes = [], []\n for layer in range(self._num_layers):\n for direction in range(self._num_directions):\n layer_input_size = self._input_size if layer == 0 \\\n else self._hidden_size * self._num_directions\n w_ih_shape = [gate_size, layer_input_size]\n w_hh_shape = [gate_size, self._hidden_size]\n b_ih_shape, b_hh_shape = [gate_size], [gate_size]\n matrix_shapes.extend([w_ih_shape, w_hh_shape])\n bias_shapes.extend([b_ih_shape, b_hh_shape])\n # Create single float32 weights.\n weights_count = 0\n self._weights_shapes = matrix_shapes + bias_shapes\n for shape in self._weights_shapes:\n weights_count += math_util.prod(shape)\n self._weights = Tensor([weights_count])\n self._weights.requires_grad = True",
"def get_weights(self):",
"def init_weights(w_shape, layer_index, weight_initializer):\n\n return tf.Variable(weight_initializer(w_shape), name=\"weight{}\".format(layer_index))",
"def weight(self, weight_scheme, weight_name='weight', unique_key='identity',\n subset=None, report=True, path_report=None, inplace=True, verbose=True):\n if subset:\n if isinstance(subset, str):\n if self.is_filter(subset):\n subset = {subset: 0}\n else:\n raise ValueError('{} is not a valid filter_var'.format(subset))\n ds = self.filter('subset', subset, False)\n meta, data = ds.split()\n else:\n meta, data = self.split()\n engine = qp.WeightEngine(data, meta=meta)\n engine.add_scheme(weight_scheme, key=unique_key, verbose=verbose)\n engine.run()\n\n org_wname = weight_name\n if report:\n print(engine.get_report())\n print()\n if path_report:\n df = engine.get_report()\n full_file_path = '{} ({}).xlsx'.format(path_report, weight_name)\n df.to_excel(full_file_path)\n print('Weight report saved to:\\n{}'.format(full_file_path))\n s_name = weight_scheme.name\n s_w_name = 'weights_{}'.format(s_name)\n if inplace:\n weight_description = '{} weights'.format(s_name)\n data_wgt = engine.dataframe(s_name)[[unique_key, s_w_name]]\n data_wgt.rename(columns={s_w_name: org_wname}, inplace=True)\n if org_wname not in self._meta['columns']:\n self.add_meta(org_wname, 'float', weight_description)\n self.update(data_wgt, on=unique_key)\n else:\n wdf = engine.dataframe(weight_scheme.name)\n return wdf.rename(columns={s_w_name: org_wname})",
"def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member",
"def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)",
"def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)",
"def get_weights(model):\n args = get_args()\n if args.weight is not None:\n model.load_weights(args.weight)",
"def getWeights(self, format='list'):\n timer = None\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer = Timer()\n timer.start_timing()\n synapse_list = self._get_synaptic_data()\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer.take_sample()\n \n if format == 'list':\n weights = list()\n for row in synapse_list.get_rows():\n weights.extend(row.weights)\n return weights \n elif format == 'array':\n weights = numpy.zeros((self.projection_edge.prevertex.atoms, \n self.projection_edge.postvertex.atoms))\n rows = synapse_list.get_rows()\n for pre_atom, row in enumerate(rows):\n for post_atom, weight in zip(row.target_indices, row.weights):\n weights[pre_atom][post_atom] = weight\n return weights",
"def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)",
"def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]",
"def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)",
"def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()",
"def copy_cluster_weights(shape, weight_file, method=\"bilinear\"):\n\n # gets the temporary folder path\n temp_path = get_temp_folder()\n short_name = get_prefix_less_name(shape)\n\n for node in weight_file:\n if not weight_file[node]:\n continue\n cmds.deformerWeights(weight_file[node], im=True, shape=short_name,\n deformer=node, path=temp_path, method=method,\n vertexConnections=True)",
"def _scale_weights(self, max_weight):\n scale_factor = np.divide(1, max_weight)\n for exp in self.experts:\n exp.weight = exp.weight * scale_factor",
"def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)",
"def weight_setup(self, weighting):\n if weighting == \"overlap\":\n self.weights = overlap_generator(overlap, self.graph)\n elif weighting == \"unit\":\n self.weights = overlap_generator(unit, self.graph)\n elif weighting == \"min_norm\":\n self.weights = overlap_generator(min_norm, self.graph)\n else:\n self.weights = overlap_generator(normalized_overlap, self.graph)",
"def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)",
"def init_weights_dict(layer_shape, max_order, std_scale=0.4, ring_count=None):\n\n if isinstance(max_order, int):\n rotation_orders = range(-max_order, max_order+1)\n else:\n diff_order = max_order[1]-max_order[0]\n rotation_orders = range(-diff_order, diff_order+1)\n weights_dict = {}\n for order in rotation_orders:\n if ring_count is None:\n ring_count = np.maximum(layer_shape[0]/2, 2)\n sh = [ring_count,] + list(layer_shape[2:])\n weights_dict[order] = Conv2d.get_weights(sh, std_scale=std_scale)\n return weights_dict",
"def weights(self, algo):\n weights = np.zeros(\n (self.n_pop, self.states.size(), algo.n_turbines), dtype=FC.DTYPE\n )\n weights[:] = self.states.weights(algo)[None, :, :] / self.n_pop\n return weights.reshape(self.size(), algo.n_turbines)",
"def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)",
"def _create_weight_shaped_variables(self, nodes_per_layer, mean=None, stddev=None, name_prefix=\"Weights\", trainable=True):\n weights = []\n\n for layer_idx in range(1, len(nodes_per_layer)):\n num_in = nodes_per_layer[layer_idx-1]\n num_out = nodes_per_layer[layer_idx]\n shape = [num_in, num_out]\n\n if stddev:\n initial = tf.truncated_normal(shape=shape, stddev=stddev, mean=mean if mean else 0.0)\n else:\n initial = tf.constant(0.0, shape=shape)\n\n W = tf.Variable(\n initial,\n name=name_prefix + str(layer_idx),\n trainable=trainable\n )\n weights.append(W)\n\n return weights",
"def generate_weights(sizes):\n weights = {}\n weights[\"w\"] = []\n weights[\"b\"] = []\n for i in range(len(sizes)-2):\n weights[\"w\"].append(np.random.randn(sizes[i], sizes[i+1]))\n weights[\"b\"].append(np.random.randn(sizes[i+1]))\n weights[\"w_final\"] = np.random.randn(sizes[-2], sizes[-1])/np.sqrt(sizes[-1])\n weights[\"b_final\"] = np.random.randn(sizes[-1])\n return weights"
]
| [
"0.6262821",
"0.61145437",
"0.60712385",
"0.6055485",
"0.6023289",
"0.6004731",
"0.5968039",
"0.5957508",
"0.5849291",
"0.58011365",
"0.5791296",
"0.57805634",
"0.5744831",
"0.57313424",
"0.5698215",
"0.5691682",
"0.5655356",
"0.5649265",
"0.5646294",
"0.5644736",
"0.5632182",
"0.56153256",
"0.56073874",
"0.56062883",
"0.5579245",
"0.557812",
"0.55702746",
"0.5556776",
"0.5554944",
"0.5548186"
]
| 0.7043752 | 0 |
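The record above returns uniform weights of 1/num_models under the "average" scheme. A short, self-contained sketch of that scheme, plus one assumed way the weights might be consumed (a weighted average of per-model predictions, which the record itself does not show), is:

```python
import numpy as np

# "average" scheme from the record above: every model gets weight 1 / num_models.
num_models = 4
weights = [1.0 / float(num_models)] * int(num_models)   # [0.25, 0.25, 0.25, 0.25]

# Assumed downstream use (illustration only): average fake per-model predictions.
predictions = np.random.rand(num_models, 3)
ensemble = np.average(predictions, axis=0, weights=weights)
print(weights, ensemble.shape)   # -> [0.25, 0.25, 0.25, 0.25] (3,)
```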
Display the matrix using matplotlib; the title and the colormap of the plot are deduced from the 'kind' parameter. | def show_matrix(matrix,kind="temperature"):
if kind=="temperature":
cmap = "bwr"
plt.title("Temperature")
elif kind=="habitat":
cmap = "Greens"
plt.title("Habitat")
else:
cmap = "Blues"
plt.imshow(matrix,
interpolation='None',
cmap=cmap,
vmin=0,
vmax=1,
aspect="equal",)
plt.xlabel("x")
plt.ylabel("y")
plt.xticks([])
plt.yticks([])
plt.colorbar(orientation="horizontal", fraction=0.045) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plotmatrix(figurename,matrix,format='png',title=None,**kwargs):\n \n clip_min = kwargs.pop('clip_min', -np.inf)\n clip_max = kwargs.pop('clip_max', np.inf)\n cmap = kwargs.pop('cmap',cm.Reds)\n fig = plt.figure()\n if 'ticklabels1' in kwargs:\n plt.yticks(range(matrix.shape[0]))\n plt.gca().set_yticklabels(kwargs.pop('ticklabels1'))\n \n if 'ticklabels2' in kwargs:\n plt.xticks(range(matrix.shape[1]))\n plt.gca().set_xticklabels(kwargs.pop('ticklabels2'))\n \n cax = plt.imshow(np.clip(matrix, a_min=clip_min, a_max=clip_max),\n interpolation='nearest',\n cmap=cmap,\n **kwargs)\n if title != None:\n plt.title(title)\n \n if 'label' not in kwargs:\n plt.colorbar()\n else:\n plt.colorbar().set_label(kwargs['label'])\n \n plt.show()\n\n if format == 'png':\n fig.savefig(figurename,dpi=600)\n elif format == 'pdf':\n from matplotlib.backends.backend_pdf import PdfPages\n pp = PdfPages(figurename)\n pp.savefig(fig,dpi=600)\n pp.close()\n \n plt.close(fig)",
"def mshow(matrix, cmap=None, norm=None, aspect=None, interpolation=None,\n alpha=1.0, vmin=None, vmax=None, origin=None, extent=None):\n\tplt.imshow(matrix,cmap=cmap, norm=norm, aspect=aspect, interpolation=interpolation,\n alpha=1.0, vmin=vmin, vmax=vmax, origin=origin, extent=extent)\n\tplt.colorbar()\n\tplt.show()",
"def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,\n cmap='RdBu_r', show=True, colorbar=True,\n xlabel=True, ylabel=True):\n return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,\n tlim=tlim, ax=ax, cmap=cmap, show=show,\n colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)",
"def heat_matrix (m, caption, ticks_labels_x, ticks_labels_y, colormap):\n\n plt.matshow (m, fignum = 0, aspect = 'auto', cmap = colormap[0], norm = colormap[1])\n plt.colorbar ()\n\n plt.xticks (ticks_labels_x[0], ticks_labels_x[1], rotation='vertical')\n plt.yticks (ticks_labels_y[0], ticks_labels_y[1])\n axes = plt.gca ()\n axes.tick_params (direction = 'out', pad = 5)\n\n plt.title (caption, y = 20.0)",
"def plot_matrix(self, matrix: np.ndarray):\n sns.heatmap(matrix, annot=True)\n plt.show()",
"def plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None):\n if climv is None:\n climv = np.max(np.abs(M.ravel()))\n climvs = (-climv, climv)\n else:\n climvs = climv\n\n if fig == 'auto':\n fig = plt.gcf()\n plt.clf()\n\n a = fig.add_subplot(1, 1, 1)\n img = a.imshow(M, cmap=\"coolwarm\", interpolation='none')\n a.set_title(name, fontsize=fontsize)\n cbar = plt.colorbar(img, orientation='horizontal')\n cbar.set_clim(climvs)\n\n if outpath is not None and outpath != 'none':\n print('outputting matrix image to ', outpath)\n plt.savefig(outpath + '.png')\n if show:\n plt.show()\n if close:\n plt.clf()",
"def plot_mat(self, parameter='s', fig=None, ylim=1.1, label=None):\n if parameter not in ['s', 'y']:\n raise Exception('Invalid parameter.')\n matrix = getattr(self, parameter)\n if fig is None:\n fig = plt.figure(figsize=(15.0, 10.0))\n for i in range(2):\n for j in range(2):\n subplotnum = 2*i+j+1 # add_subplot needs the +1 as indexing starts with 1\n ax = fig.add_subplot(2,2,subplotnum)\n ax.plot(self.f/1e9, matrix[:,i,j].real, label=('Re '+label if label else None))\n ax.plot(self.f/1e9, matrix[:,i,j].imag, label=('Im '+label if label else None))\n ax.set_xlabel('f [GHz]')\n ax.set_ylabel(parameter.upper()+r'$_{%d%d}$'%(i+1,j+1))\n ax.set_ylim([-ylim,ylim]) \n ax.set_xlim([min(self.f/1e9), max(self.f/1e9)])\n fig.tight_layout() \n\n return fig",
"def matplotlibDisplay(img, title=\"Image\", colorFlag = 'gray'):\n plt.imshow(img, colorFlag)\n plt.title(title)\n plt.xticks([])\n plt.yticks([])\n plt.show()",
"def plotMatrix(matrix:np.ndarray,if_log=False,title=\"Matrix\",plotType=\"static\",range_color=None,\n axis2genome=False,genome_coord1=None,genome_coord2=None,resolution = None):\n import plotly.express as px \n from IPython.display import Image\n\n if(if_log == True): \n matrix = np.log10(matrix)\n\n fig = px.imshow(matrix,color_continuous_scale=px.colors.sequential.Viridis,range_color=range_color)\n fig = fig.update_layout(template='simple_white').update_layout(width=650,height=600)\n\n if (axis2genome):\n import re\n #manually change axis\n posx = re.split(\"[:-]\",genome_coord2)\n xvals = np.percentile([np.round(i) for i in range(0,matrix.shape[1])],(0,25,50,75,100),interpolation='midpoint')\n xtexts = xvals*resolution + int(posx[1].replace(\",\",\"\")) + resolution/2\n xtexts = [genomecoord2human(i) for i in xtexts]\n\n posy = re.split(\"[:-]\",genome_coord1)\n yvals = np.percentile([np.round(i) for i in range(0,matrix.shape[0])],(0,25,50,75,100),interpolation='midpoint')\n ytexts = yvals*resolution + int(posy[1].replace(\",\",\"\")) + resolution/2\n ytexts = [genomecoord2human(i) for i in ytexts]\n\n fig = fig.update_xaxes(ticktext = xtexts,tickvals = xvals).update_yaxes(ticktext = ytexts,tickvals = yvals)\n\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))",
"def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()",
"def show_with_matplotlib(img, title):\n\n # Convert BGR image to RGB:\n img_RGB = img[:, :, ::-1]\n\n # Show the image using matplotlib:\n plt.imshow(img_RGB)\n plt.title(title)\n plt.show()",
"def heat_plot(matrix, filename, xTicks, yTicks, xLabel='X', yLabel='Y'):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tcax = ax.matshow(matrix, vmin=0, vmax=1)\n\tfig.colorbar(cax)\n\tticks = np.arange(0, matrix.shape[0], 1)\n\tax.set_xticks(ticks)\n\tax.set_yticks(ticks)\n\tax.set_xticklabels(xTicks)\n\tax.set_yticklabels(yTicks)\n\tax.set_xlabel(xLabel)\n\tax.set_ylabel(yLabel)\n\tplt.savefig(filename)\n\tplt.close()",
"def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()",
"def show(self, **kwargs):\n n = self.guess.shape[0]\n cmap_name = 'tab20' if n > 10 else 'tab10'\n clrs = ((1., 1., 1.),) + get_cmap(cmap_name).colors\n cmap = ListedColormap(np.delete(clrs, slice(n+1, None), axis=0))\n self.image = self.__make_image()\n plt.imshow(self.image, cmap = cmap, origin = 'lower', interpolation = None,\n resample=False, vmin=-1.5, vmax=n-0.5, **kwargs)\n plt.colorbar(label='index', ticks=list(range(-1, n)))\n\n # The __test attribute is used not to block test\n block = not hasattr(self, f'_{self.__class__.__name__}__test')\n plt.show(block=block)",
"def plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None):\n import lepm.plotting.plotting as leplt\n return leplt.plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None)",
"def show():\n\tplt.show()",
"def plot(self, *args, ax=None,\n cmap_name='seismic', if_show=False, **kwargs):\n import matplotlib.pylab as plt\n\n if ax is None:\n ax = plt.gca()\n plt.sca(ax)\n\n plt.imshow(\n self, *args, cmap=plt.get_cmap(cmap_name),\n extent=self.extent, **kwargs\n )\n\n if if_show:\n plt.colorbar(orientation='horizontal')\n plt.show()",
"def plot_matrix(matrix, yaxis=None, xaxis=None, **kwargs):\n\n # Make new matplotlib figure.\n fig = pyplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n fig.subplots_adjust(top=0.85)\n cax = ax.matshow(matrix, interpolation=kwargs.get('interpolation', 'bilinear'))\n cb = fig.colorbar(cax)\n cb.set_label(kwargs.get('cblabel', ''))\n\n # Set figure and axis titles\n fig.suptitle(kwargs.get('title', ''))\n ax.set_title(kwargs.get('subtitle', ''), fontsize=8)\n ax.set_ylabel(kwargs.get('ylabel', ''), fontsize=10)\n ax.set_xlabel(kwargs.get('xlabel', ''), fontsize=10)\n\n # Set the ticks and tick labels. Reverse y axis to align x/y origin\n yaxis_locs = range(0, len(yaxis), int(len(yaxis) / 10))\n ax.yaxis.set_ticks_position('left')\n ax.yaxis.set_major_locator(mticker.FixedLocator(yaxis_locs))\n ax.yaxis.set_major_formatter(mticker.FixedFormatter(['%1.2f' % yaxis[x] for x in yaxis_locs]))\n ax.invert_yaxis()\n\n xaxis_locs = range(0, len(xaxis), int(len(xaxis) / 10))\n ax.xaxis.set_ticks_position('bottom')\n ax.xaxis.set_major_locator(mticker.FixedLocator(xaxis_locs))\n ax.xaxis.set_major_formatter(mticker.FixedFormatter(['%1.2f' % xaxis[x] for x in xaxis_locs]))\n ax.grid(None)\n\n return fig",
"def _show(self, a):\n fig = plt.figure()\n fig.set_size_inches((2, 2))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n plt.set_cmap('hot')\n ax.imshow(a, aspect='equal')\n plt.show()",
"def showImage( iImage, iTitle='', iTranspose=False, iCmap=cm.Greys_r ):\n # preslikaj koordinate barvne slike \n if len(iImage.shape)==3 and iTranspose:\n iImage = np.transpose( iImage, [1,2,0])\n plt.figure()\n if iImage.dtype.kind in ('u','i'):\n vmin_ui = np.iinfo(iImage.dtype).min\n vmax_ui = np.iinfo(iImage.dtype).max\n plt.imshow(iImage, cmap = iCmap, vmin=vmin_ui, vmax=vmax_ui)\n else:\n plt.imshow(iImage, cmap = iCmap)\n plt.axes().set_aspect('equal', 'datalim')\n plt.suptitle( iTitle )\n plt.xlabel('Koordinata x')\n plt.ylabel('Koordinata y')\n # podaj koordinate in indeks slike\n def format_coord(x, y):\n x = int(x + 0.5)\n y = int(y + 0.5)\n try:\n return \"%s @ [%4i, %4i]\" % (iImage[y, x], x, y)\n except IndexError:\n return \"IndexError\" \n plt.gca().format_coord = format_coord\n #plt.axes('equal') # should, but doesnt work\n plt.show()",
"def cov_plot(self, matrix, station=\"\", hour = \"\", date=\"\" , averaged = \"\" ):\n var = self.var_dics[self.var]['name'] \n fig,ax = plt.subplots()\n date = self.date_prettyfier(date)\n hour = str(hour).replace('0','00:00').replace('1','12:00')\n if not averaged:\n title = \"Stat: \" + station + ', H: ' + hour + ', Date: ' + date + ', ' + var\n filename = 'Cov_' + station + '_hour_' + hour.replace(':','') + '_date_' + str(date).replace('/','') + '_' +var\n \n elif averaged :\n title = var.replace('temp','Temp.') + \" , Stat: \" + station + ', H: ' + str(hour) + ', Date: ' + str(date)\n filename ='Cov_' + station + '_hour_' + str(hour).replace(':','') + '_averaged_' + str(date).replace('/','') + '_' + var \n\n plt.title(title.replace('_', ' ' ), y=1.03, fontsize = self.font-2)\n\n num = len(matrix[0,:])\n Num = range(num)\n\n vmin, vmax = -3, 3\n if self.var == 'direction': \n vmin, vmax = -10, 10\n color_map= plt.imshow(matrix, interpolation= 'nearest', cmap = 'RdYlBu', vmin = vmin, vmax = vmax ) # nearest serves for discreete grid # cmaps blue, seismic \n plt.ylim(-0.5, 15.5)\n plt.xlim(-0.5, 15.5)\n plt.xticks(Num, Num)\n plt.xlabel('Pressure level an_dep [hPa]', fontsize = self.font-2)\n plt.yticks(Num, Num)\n plt.ylabel('Pressure level fg_dep [hPa]', fontsize = self.font-2)\n ax.set_xticklabels(labels = self.pretty_pressure, fontsize = self.font-4, rotation=45)\n ax.set_yticklabels(labels = self.pretty_pressure, fontsize = self.font-4)\n\n bar = plt.colorbar()\n bar.ax.set_ylabel(\"Covariance\", fontsize = self.font)\n \n for i in Num: # creating text labels\n for j in Num:\n value = '{0:.2f}'.format(matrix[i,j])\n text = ax.text( j,i, value , ha = 'center' , va = 'center', color = 'black', fontsize = 5)\n\n if not os.path.isdir('plots/covariances/'+station): os.mkdir('plots/covariances/'+station)\n plt.savefig('plots/covariances/' + station + '/' + filename + '.png', bbox_inches='tight', dpi = 200)\n plt.close()",
"def visualize_AQ(self):\n M = np.matrix(self.data[0])\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_aspect('equal')\n plt.imshow(M, interpolation='nearest', cmap=plt.cm.YlOrRd)\n plt.colorbar()\n plt.show()",
"def visualize(self, name):\n size = self.experiments[name]['size']\n matrix = [[self.experiments[name]['hi-c'][0][i+size*j] \\\n for i in xrange(size)] \\\n for j in xrange(size)]\n plt.imshow(log2(matrix), origin='lower')\n plt.show()",
"def show_figure(self):\n pylab.show()",
"def plot_matrix(A, O, word_dict, normalize=True):\n Osize = O.shape\n Onew = O\n\n if normalize:\n Onew = np.zeros(Osize)\n Anew = np.zeros(A.shape)\n for row in range(Osize[0]):\n Onew[row, :] = O[row, :]/max(O[row, :])\n Anew[row, :] = A[row, :]/max(A[row, :])\n\n plt.imshow(Onew, aspect='auto', cmap='magma', interpolation='nearest')\n plt.colorbar(orientation='horizontal', aspect=100)\n plt.clim(vmin=0, vmax=1)\n plt.tight_layout()\n plt.show()\n\n fig, ax1 = plt.subplots(1, 1)\n ax1.imshow(Onew[:, :100], aspect='auto', cmap='magma', interpolation='nearest', vmin=0.0, vmax=1.0)\n ax1.set_xticks(range(100))\n ax1.set_xticklabels(word_dict[:100], rotation=90)\n plt.show() # display\n\n plt.matshow(A, aspect='auto', cmap='magma')\n plt.colorbar()\n plt.show()",
"def show():\n plt.show()",
"def show():\n plt.show()",
"def show():\n plt.show()",
"def draw_matrix(mat, th1=None, th2=None, clim=None, cmap=None):\r\n if th1 is not None:\r\n m2 = tsu.thresholded_arr(mat, th1, th2)\r\n else:\r\n m2 = mat\r\n ax = plt.matshow(m2, cmap=cmap)\r\n if clim is not None:\r\n ax.set_clim(*clim)\r\n plt.colorbar()\r\n return ax",
"def heatmap(mat, x_label=None, y_label=None, axes=None,\n title=None, save=False):\n sns.heatmap(mat)\n plt.show()"
]
| [
"0.73393327",
"0.6824164",
"0.6754047",
"0.6652263",
"0.66464734",
"0.66156816",
"0.66119856",
"0.6557846",
"0.6461226",
"0.6460314",
"0.64144105",
"0.64109826",
"0.64026165",
"0.6360712",
"0.6353278",
"0.6324502",
"0.6316336",
"0.6307886",
"0.63031375",
"0.6224941",
"0.61822534",
"0.6168588",
"0.6158209",
"0.6137407",
"0.6125949",
"0.6103519",
"0.6103519",
"0.6103519",
"0.6087636",
"0.6084285"
]
| 0.81292045 | 0 |
Wrap BaseForest._fit. This is a temporary measure prior to the BaseRegressor refactor. | def _fit(self, X, y):
return BaseTimeSeriesForest._fit(self, X, y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit(self):\n raise NotImplementedError",
"def fit(self):\n raise NotImplementedError # pragma: no cover",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def fit(self):\n raise NotImplementedError('')",
"def fit():\n pass",
"def fit(self, X):\n raise NotImplementedError('Abstract method \"fit\" must be '\n 'specialised!')",
"def inner_fit(self):\n pass",
"def inner_fit(self):\n pass",
"def fit(self, X, y, dview = None):\n\t\t#Get classes\n\t\tclasses, y[:] = numpy.unique(y[:], return_inverse=True)\n\t\tself.classes_ = classes\n\t\tself.n_classes_ = classes.shape[0]\n\t\tforests = []\n\n\t\tfeatureFunction = self.featureFunction\n\t\tfor i in range(self.n_forests):\n\t\t\tprint(\"forest : \",i+1,\" / \",self.n_forests)\n\t\t\tif (i != 0):\n\t\t\t\tif(self.specialisation == 'global'):\n\t\t\t\t\tacc = forest.getFeatureImportance()\n\t\t\t\t\tfeatureFunction.random_weight = acc\n\t\t\t\telif(self.specialisation =='per_class'):\n\t\t\t\t\tacc_per_class = forest.getFeatureImportanceByClass()\n\t\t\t\t\tfeatureFunction.random_weight_per_class = acc_per_class\n\n\t\t\tforest = deepcopy(self.forest)\n\t\t\tforest.featureFunction = featureFunction\n\t\t\tforest.fit(X, y, dview)\n\t\t\tforests.append(forest)\n\n\t\t# Collect newly grown Forests\n\t\tself.forests_.extend(forests)",
"def partial_fit(self, X):\n return super().partial_fit(X)",
"def fit(self, x):\n raise NotImplementedError()",
"def _fit_base_estimator(self, X, y):\n if not isinstance(X,pd.DataFrame) and self._feature_columns is not None and self._label_binarier is False:\n\n X=pd.DataFrame(X,index=None,columns=self._feature_columns,dtype=np.float)\n\n #X=pd.DataFrame(X,columns=self._feature_columns,dtype=np.float)\n X['model']=X['model'].astype(np.int)#.astype('category')\n #print('transpose')\n if isinstance(self.base_estimator_,GBDTLRClassifier):\n return sklearn.base.clone(self.base_estimator_).fit(X, y, gbdt__categorical_feature=[65])\n else:\n return sklearn.base.clone(self.base_estimator_).fit(X, y,categorical_feature=[65])\n\n if self._label_binarier is True:\n return sklearn.base.clone(self.base_estimator_).fit(X,y)",
"def fit(self, *_):\n return self",
"def fit(self, *_):\n return self",
"def fit(self, *_):\n return self",
"def fit(self, x):\n pass",
"def reset_fit(self):\n raise NotImplementedError()",
"def partial_fit(self, X, y=...):\n ...",
"def partial_fit(self, X, y=...):\n ...",
"def _pre_fit(self):\n pass",
"def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):",
"def fit(self, X):\n raise NotImplementedError",
"def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")",
"def test_fit_returns_self(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n x_fitted = x.fit(df)\n\n assert x_fitted is x, \"Returned value from BaseTransformer.fit not as expected.\"",
"def _fit(self, df):\n return df",
"def fit(self, *args, **kwargs):\n return self",
"def fit(self, X):\n self.n_classes_ = 2\n self.n_features_ = X.shape[1]\n\n X, y = self._sample_synthetic(X)\n return _forest.BaseForest.fit(self, X, y)",
"def fit(self, x: np.array, t: np.array, y: np.array) -> None:\n\n self.forest.fit(x, y)",
"def _fit(self, y, X, fh):\n names, forecasters = self._check_forecasters()\n self._fit_forecasters(forecasters, y, X, fh)\n return self",
"def fit(self, X):\n self._fit_X = X"
]
| [
"0.69541925",
"0.68776846",
"0.6877322",
"0.6838088",
"0.6818408",
"0.6792884",
"0.67886466",
"0.67886466",
"0.67186964",
"0.66682506",
"0.6631208",
"0.6604409",
"0.65886986",
"0.65886986",
"0.65886986",
"0.65496397",
"0.647449",
"0.646983",
"0.646983",
"0.64621794",
"0.6417843",
"0.6413735",
"0.63925076",
"0.637496",
"0.63437295",
"0.6318488",
"0.63040435",
"0.6290042",
"0.624796",
"0.62175804"
]
| 0.72887385 | 0 |
Override sklearn forest predict with BaseRegressor predict. | def predict(self, X):
return BaseRegressor.predict(self, X) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rf_predict(\n X_train: Array,\n y_train: Array,\n X_test: Array,\n y_test: Array = None,\n uncertainty: str = \"full\",\n **kwargs,\n) -> tuple[Array, Array, RandomForestRegressor]:\n forest = RandomForestRegressor(random_state=0, **kwargs)\n forest.fit(X_train, y_train)\n\n y_pred, y_var = forest.predict(X_test, uncertainty=uncertainty)\n return y_pred, y_var**0.5, forest",
"def predict_feature(feature_to_predict: str, data: pd.core.frame.DataFrame):\n X = data.copy()\n y = X[feature_to_predict]\n X.drop([feature_to_predict], axis=1, inplace=True)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=0)\n regressor = RandomForestRegressor(random_state=42)\n regressor.fit(X_train, y_train)\n importances = regressor.feature_importances_\n prediction_score = regressor.score(X_test, y_test)\n cols = X.columns.to_list()\n dat = {x[0]: x[1] for x in zip(cols, importances)}\n dat[feature_to_predict] = 0\n return prediction_score, dat",
"def randomforest_predict(self, x) -> np.array:\r\n if self.rfModel is None:\r\n print(\"random forest not trained, please run randomforest_fit first!\")\r\n return None\r\n else:\r\n return self.rfModel.predict(x)",
"def fit_predict(self, x_data, y_data, custom_kfold=None, regressor=False):\n if regressor:\n self.r2_scores, self.mse_scores = self.predict_and_cv_score_regression(x_data, y_data, custom_kfold)\n else:\n self.f1_scores, self.recall_scores, self.precision_scores, self.accuracy_scores = self.predict_and_cv_score(x_data, y_data, custom_kfold)",
"def predict_random_forest(X_test, model):",
"def predict(self, X):\n raise NotImplementedError('Abstract method \"predict\" must be '\n 'specialised!')",
"def predict(self):\n return _RateElasticNetRegressor.predict(self)",
"def predict_trend(self, features, data_tier):\n prediction = self.clf_trend[data_tier].predict(features)\n return prediction[0]",
"def predict(self, X, forest_size=None):\n forest_predictions = self._base_estimator_predictions(X)\n\n if self._models_parameters.normalize_D:\n forest_predictions /= self._forest_norms\n\n return self._omp.predict(forest_predictions, forest_size)",
"def spark_RandomForestRegressor(*args, **kwargs):\n return RandomForestRegressor(*args, **kwargs)",
"def predict(self):\n raise NotImplementedError",
"def _predict(self, X):\n raise NotImplementedError",
"def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()",
"def Model_Train(datasample):\r\n datasample=df_to_array(datasample)\r\n train_features=datasample[:,:-1]\r\n train_labels=datasample[:,-1]\r\n rf = RandomForestRegressor(n_estimators= n_trees)\r\n rf.fit(train_features,train_labels)\r\n return rf",
"def predict(self, samples): \n return self.random_forest.predict(samples)",
"def predict(self, X):\n raise NotImplementedError",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, **kwargs):\n raise NotImplementedError",
"def predict(model, dataframe, features, target_name='score', inplace=True):\n if inplace is False:\n dataframe = dataframe.copy()\n dataframe[target_name] = model.predict(dataframe[features])\n return dataframe",
"def _predict(self, testX):\n pass",
"def predict(self, data):\n\t\traise NotImplementedError",
"def test_fit_predict() -> None:\n mapie = MapieRegressor()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)",
"def model(df,x,y):\n clf = RandomForestClassifier()\n clf.fit(x, y)\n \n prediction = int(clf.predict(df))\n #prediction_proba = clf.predict_proba(df)\n columns = [\"Iris setosa\", \"Iris versicolor\", \"Iris virginica\"]\n prediction_proba = pd.DataFrame(clf.predict_proba(df), columns=columns)\n \n\n dict_pred = {0 : \"Iris setosa\", \n 1 : \"Iris versicolor\", \n 2 : \"Iris virginica\"}\n\n pred = dict_pred[prediction]\n\n pred_proba = prediction_proba\n return pred, pred_proba",
"def predict(self, X: np.ndarray) -> np.ndarray:\n return self._rf.predict(X)",
"def fit_predict(self, X, y=None):\n return super().fit_predict(X, y)",
"def predict(self, X):\n check_is_fitted(self, ['estimators_', 'final_estimator_'])\n return self.final_estimator_.predict(self.transform(X))",
"def predict(self, X):",
"def predict(self, X):"
]
| [
"0.6617279",
"0.65540373",
"0.65315056",
"0.65308774",
"0.645649",
"0.6376762",
"0.63436574",
"0.6324994",
"0.63162875",
"0.6204367",
"0.620004",
"0.6184136",
"0.6168078",
"0.6139887",
"0.6136203",
"0.6109132",
"0.61044425",
"0.61044425",
"0.61044425",
"0.61018896",
"0.6093291",
"0.6039084",
"0.6020641",
"0.6006822",
"0.59910685",
"0.5984637",
"0.59826326",
"0.5980861",
"0.5979173",
"0.5979173"
]
| 0.6686548 | 0 |
adjust the price (target) by 5% in new columns adj_price and adj_price_sqrm | def add_average_discount_to_target(data):
data['adj_price'] = data.price.map(lambda x: (x - x * 0.05))
data['adj_price_sqrm'] = data.price_sqrm.map(lambda x: (x - x * 0.05))
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)",
"def adjust_price(self, price):\n precision = self._price_limits[3] or 8\n tick_size = self._price_limits[2] or 0.00000001\n\n # adjusted price at precision and by step of pip meaning\n return truncate(round(price / tick_size) * tick_size, precision)",
"def _compute_vwap(df):\n q = df['foreignNotional']\n p = df['price']\n vwap = np.sum(p * q) / np.sum(q)\n df['vwap'] = vwap\n return df",
"def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4",
"def _correct_back_adjusted_prices(self, price_df):\n final_adj_close = price_df.iloc[-1]['Adj Close']\n if final_adj_close > 0.0:\n final_close = price_df.iloc[-1]['Close']\n if not np.allclose(final_close, final_adj_close):\n adj_factor = final_close / final_adj_close\n price_df['Adj Close'] *= adj_factor",
"def trend_price_up(self):\n raise NotImplementedError()",
"def desired_price(self, new_desired_price):\n self._desired_price = new_desired_price",
"def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)",
"def adjust_weights(weights, target, learn_rate):\r\n\r\n for w in range(0, len(target)):\r\n weights[w] += learn_rate * (target[w] - weights[w])",
"def add_weights(df):\n df['weight'] = 0\n denom = np.sum(df.loc[df.pc_GDP_growth > 0].pc_GDP_growth)\n df.loc[df.pc_GDP_growth > 0, 'weight'] = df.pc_GDP_growth / denom \n return df",
"def _adjust_fdr(df: pd.DataFrame) -> pd.DataFrame:\r\n\r\n df = df.sort_values(by='p').reset_index(drop=True)\r\n df['q'] = df.p * len(df.index) / (df.index + 1)\r\n df['q'] = df.q.mask(df.q > 1.0, 1.0)\r\n\r\n return df",
"def price_SL_TP_rel(price, volume, relative_stop_loss, relative_take_profit):\n price_stop_loss = rel_value(price, -sign(volume)*relative_stop_loss)\n price_take_profit = rel_value(price, sign(volume)*relative_take_profit)\n return(price_stop_loss, price_take_profit)",
"def support(stock):\n output= stock_min(stock)+(stock_min(stock)*.05)\n return output",
"def volatility_targeting(self, returns, target_vol=0.01):\n weight = target_vol / (returns.rolling(252).std() * np.sqrt(252)).fillna(0)\n weight.replace([np.inf, -np.inf], 0, inplace=True)\n weight = weight.shift(1).fillna(0)\n return weight",
"def mid_market_price(orders: pandas.DataFrame):\n return numpy.mean((best_bid_price(orders), best_ask_price(orders)))",
"def conversion_rate(self, price):\n\n price = ( price - 20 ) / 2\n\n a = self.a_conversion_rate\n b = self.b_conversion_rate\n c = self.c_conversion_rate\n d = self.d_conversion_rate\n e = self.e_conversion_rate\n # price_min = self.price_min\n # Probabilities of conversion given a price\n return c * np.exp ( a * ( price - e) ** (1/ (2 * b) ) ) * (d - 2*price) ** (3/2)",
"def _dynamic_price(self):\n adjust = PriceAdjustmentCalc(self)\n signals.satchmo_price_query.send(self, adjustment=adjust,\n slug=self.product.slug, discountable=self.product.is_discountable)\n return adjust.final_price()",
"def add_profit(df_gimmes, bet_size):\n df_gimmes['Bet_on_A'] = bet_size * \\\n (df_gimmes['best_ML_B']/100 + 1) / \\\n (df_gimmes['best_ML_A']/100.0 +\n df_gimmes['best_ML_B']/100.0 + 2)\n\n df_gimmes['Bet_on_B'] = bet_size - df_gimmes['Bet_on_A']\n\n df_gimmes['Profit_A'] = df_gimmes['Bet_on_A'] * \\\n df_gimmes['best_ML_A'] / 100.0 + \\\n df_gimmes['Bet_on_A'] - bet_size\n\n df_gimmes['Profit_B'] = df_gimmes['Bet_on_B'] * \\\n df_gimmes['best_ML_B'] / 100.0 + \\\n df_gimmes['Bet_on_B'] - bet_size\n\n return df_gimmes",
"def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )",
"def implied_discount_factor(p1: Instrument, c1: Instrument, p2: Instrument, c2: Instrument) -> float:\n return (c1.price - p1.price - c2.price + p2.price)/ (c2.strike - c1.strike)",
"def price_to_seven_year_earnings_ratio_less_than_25(self):\n\n note = ''\n # check if 'EPS' exists\n if 'EPS' not in self.stock.main_df.columns:\n note = note + 'Could not find EPS on MacroTrends. '\n\n # check if Current price is not 0\n if self.stock.stats_dict['Current Price'] == 0:\n note = note + 'Could not find current price on MacroTrends. '\n\n if note != '':\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A', note)\n return\n\n curr_price = self.stock.stats_dict['Current Price']\n df = self.stock.main_df\n\n average = 0\n # i want to use previous year if current year is empty\n if not np.isnan(df.iloc[0]['EPS']):\n # present year is there\n past_7_years_df = df.iloc[0: 7]['EPS']\n average = past_7_years_df.mean()\n elif np.isnan(df.iloc[0]['EPS']):\n # present year is not there\n past_7_years_df = df.iloc[1: 8]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[1]['EPS']):\n # past year is not there either\n past_7_years_df = df.iloc[2: 9]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[2]['EPS']):\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'Must not have filed their annual report for {}'.format(\n self.current_year - 2))\n return\n\n if average == 0:\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'No average found')\n return\n elif (curr_price / average) <= 25:\n criteria_passed = 'Yes'\n else:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', round((curr_price / average), 2),\n criteria_passed, '7 Year Average EPS = {}'.format(round(average, 2)))",
"def PP_EMP_AVG_PLA_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','PP_EMP_AVG_PLA']]\n Feature_DF.loc[:,'PP_EMP_AVG_PLA_TRS'] = Feature_DF.loc[:,'PP_EMP_AVG_PLA'].pow(1/2)\n Feature_DF = Feature_DF.loc[:,['HNAME','PP_EMP_AVG_PLA_TRS']]\n\n return Feature_DF",
"def approximate(spot_price, strike_price, time_2_maturity, r, q, target, step_sigma, start_sigma, option='call'):\n # determine direction first\n fair_price = bs_option(spot_price, strike_price, time_2_maturity, r, q, start_sigma, option)\n print('initial fair price = ' + str(fair_price))\n sigma_to_test = start_sigma\n if fair_price > target:\n # step backward\n while fair_price > target:\n sigma_to_test -= (step_sigma / 10000)\n fair_price = bs_option(spot_price, strike_price, time_2_maturity, r, q, sigma_to_test, option)\n return sigma_to_test\n elif fair_price < target:\n # step forward\n while fair_price < target:\n sigma_to_test += (step_sigma / 10000)\n fair_price = bs_option(spot_price, strike_price, time_2_maturity, r, q, sigma_to_test, option)\n return sigma_to_test\n else:\n return start_sigma",
"def stock_price_summary(price_changes):\n\n gains = 0.0\n losses = 0.0\n\n for change in price_changes:\n if change > 0:\n gains += change\n elif change < 0:\n losses += change\n\n return (math.floor(gains*100)/100, math.ceil(losses*100)/100)",
"def add_eqns(df):\n\n def lett(col): return alpha[list(df.columns).index(col)]\n for i in df.index:\n row = str(i + 3)\n if df.loc[i, 'Deleted'] != 'Total':\n df.loc[i, 'M/M_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + lett('# Molds') + row + '*' + lett('Price/Mold') + row + '+' + lett('Model Price') + row + ')'\n df.loc[i, 'Unit_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + lett('# Units') + row + '*' + lett('Price/Unit') + row + ')'\n df.loc[i, 'Line_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + 'SUM(' + lett('M/M_Total') + row + ',' + lett('Unit_Total') + row + '))'\n return df",
"def RC_SUR_AVG_SUR_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RC_SUR_AVG_SUR']]\n Feature_DF.loc[:,'RC_SUR_AVG_SUR_TRS'] = Feature_DF.loc[:,'RC_SUR_AVG_SUR'].pow(4)\n Feature_DF = Feature_DF.loc[:,['HNAME','RC_SUR_AVG_SUR_TRS']]\n\n return Feature_DF",
"def get_commission(self, price):\n return 2.0 + price * 0.00008",
"def _weighted_mean_absolute_percentage_error_update(preds: Tensor, target: Tensor) ->Tuple[Tensor, int]:\n _check_same_shape(preds, target)\n sum_abs_error = (preds - target).abs().sum()\n sum_scale = target.abs().sum()\n return sum_abs_error, sum_scale",
"def RS_ELO_HP_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','RS_ELO_HP']]\n Feature_DF.loc[:,'RS_ELO_HP_TRS'] = Feature_DF.loc[:,'RS_ELO_HP'].pow(4/5)\n Feature_DF = Feature_DF.loc[:,['HNAME','RS_ELO_HP_TRS']]\n\n return Feature_DF",
"def adjust_cost(self) -> None:\n\n n_iterations = self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )"
]
| [
"0.563933",
"0.5638393",
"0.56193507",
"0.5573973",
"0.5563542",
"0.5502309",
"0.5340116",
"0.5308329",
"0.530606",
"0.52370214",
"0.5208151",
"0.51937336",
"0.5174869",
"0.51740295",
"0.5157858",
"0.51511836",
"0.5143063",
"0.5140745",
"0.51331997",
"0.5132746",
"0.5126579",
"0.5123096",
"0.51049936",
"0.51042396",
"0.50984275",
"0.5096294",
"0.5094549",
"0.5091185",
"0.508972",
"0.5088396"
]
| 0.6864149 | 0 |
Additional column for the region needed for mapping the population, green areas etc. | def add_region_feature(data):
data.loc[:, 'region'] = data.loc[:, 'district'].apply(
lambda x: mapping.SOFIA_NEIGHBOURHOOD_TO_REGION_MAPPING[x]
)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_population_feature(data):\n\n data.loc[:, 'population'] = data.loc[:, 'region'].apply(\n lambda x: mapping.SOFIA_REGIONS_POPULATION_MAPPING[x]\n )\n data['adj_population'] = data.population.map(lambda x: (x + x * 0.1))\n\n return data",
"def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df",
"def which_region(self, g):\n raise NotImplementedError",
"def which_region(self, g):\n return NotImplementedError",
"def region_gids(self, region, region_col='state'):\n gids = self.meta\n gids = gids[gids[region_col] == region].index.values\n\n return gids",
"def region(self):\n # type: () -> string_types\n return self._region",
"def col(self):\n return self.address.col",
"def add_region_of_interest(self, event: str):\n\n mesh = self.comm.lasif.find_event_mesh(event)\n m = UnstructuredMesh.from_h5(mesh)\n mesh_layers = np.sort(np.unique(m.elemental_fields[\"layer\"]))[::-1].astype(int)\n layers = m.elemental_fields[\"layer\"]\n o_core_idx = layers[np.where(m.elemental_fields[\"fluid\"] == 1)[0][0]]\n o_core_idx = np.where(mesh_layers == o_core_idx)[0][0]\n correct_layers = mesh_layers[o_core_idx:]\n roi = np.zeros_like(layers)\n for layer in correct_layers:\n roi = np.logical_or(roi, layers == layer)\n\n m.attach_field(\"ROI\", roi)\n m.write_h5(mesh)",
"def geocode(df, col):\r\n pass",
"def add_Longhurst_Province_raster_to_array(ds):\n import geopandas\n from rasterio import features\n from affine import Affine\n # Get the shape files\n provinces = geopandas.read_file('/work/home/ts551/data/longhurst_v4_2010')\n shapes = [(shape, n) for n, shape in enumerate(provinces.geometry)]\n # Now add the existing array\n ds_tmp = ds[list(ds.data_vars)[0]].copy().mean(dim='time')\n # Add raster the provinces onto this\n ds_tmp['LonghurstProvince'] = rasterize(shapes, ds_tmp.coords)\n # Then update the variable\n ds['LonghurstProvince'] = ds_tmp['LonghurstProvince']\n # Add Some attributes\n attrs = {\n 'Long name': 'Longhurst Provinces',\n 'data downloaded from': 'http://www.marineregions.org/downloads.php#longhurst',\n 'version': 'Version 4 - March 2010',\n 'Citations': \"Longhurst, A.R et al. (1995). An estimate of global primary production in the ocean from satellite radiometer data. J. Plankton Res. 17, 1245-1271 ; Longhurst, A.R. (1995). Seasonal cycles of pelagic production and consumption. Prog. Oceanogr. 36, 77-167 ; Longhurst, A.R. (1998). Ecological Geography of the Sea. Academic Press, San Diego. 397p. (IMIS) ; Longhurst, A.R. (2006). Ecological Geography of the Sea. 2nd Edition. Academic Press, San Diego, 560p.\",\n }\n ds['LonghurstProvince'].attrs = attrs\n return ds",
"def select_regions(data, region_col, regions, combine_subregions=True):",
"def conclusion_summary_map(self):\n pass",
"def region(self):\n return self._region",
"def region(self):\n return self._region",
"def color_column(self):\n return 8",
"def region(cls):\n return cls.REGION",
"def geo_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"geo_region\")",
"def geo_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"geo_region\")",
"def region(self):\n return self._get(\"region\")",
"def region(self) -> str:\n return self.__region",
"def to_region(self):\n\n coords = self.convert_coords()\n log.debug(coords)\n viz_keywords = ['color', 'dash', 'dashlist', 'width', 'font', 'symsize',\n 'symbol', 'symsize', 'fontsize', 'fontstyle', 'usetex',\n 'labelpos', 'labeloff', 'linewidth', 'linestyle',\n 'point', 'textangle', 'fontweight']\n\n if isinstance(coords[0], SkyCoord):\n reg = self.shape_to_sky_region[self.region_type](*coords)\n elif isinstance(coords[0], PixCoord):\n reg = self.shape_to_pixel_region[self.region_type](*coords)\n else:\n self._raise_error(\"No central coordinate\")\n\n reg.visual = RegionVisual()\n reg.meta = RegionMeta()\n\n # both 'text' and 'label' should be set to the same value, where we\n # default to the 'text' value since that is the one used by ds9 regions\n label = self.meta.get('text',\n self.meta.get('label', \"\"))\n if label != '':\n reg.meta['label'] = label\n for key in self.meta:\n if key in viz_keywords:\n reg.visual[key] = self.meta[key]\n else:\n reg.meta[key] = self.meta[key]\n reg.meta['include'] = self.include\n return reg",
"def generateBounds(regionFilename, latitudeRange, longitudeRange): \n rastData = Dataset(regionFilename)\n\n #setting up values for raster data\n latsRast = np.array(rastData[\"lat\"][:])\n lonsRast = np.array(rastData[\"lon\"][:])\n regionOfInterest = np.array(rastData[\"Band1\"][:][:])\n\n\n regionArray = np.zeros((len(longitudeRange),len(latitudeRange)))\n\n\n for lat in latitudeRange:\n closestLatIndex = np.where( np.abs(latsRast-lat) == np.abs(latsRast-lat).min())[0][0]\n for lon in longitudeRange:\n closestLonIndex = np.where( np.abs(lonsRast-lon) == np.abs(lonsRast-lon).min())[0][0]\n\n #If lat long of MERRA data box is offshore or in region (values 1 in raster) set them equal to 1 for master Array, else they are left as zeros\n if (regionOfInterest[closestLatIndex][closestLonIndex] == 1):\n latIndex = np.where(latitudeRange == lat)[0][0]\n lonIndex = np.where(longitudeRange == lon)[0][0]\n regionArray[lonIndex][latIndex] = 1\n\n\n #for debugging\n ''' \n ax = sns.heatmap(regionArray)\n plt.show()\n '''\n return regionArray",
"def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]",
"def getRegion(self):\n # if self.orientation[0] == 'h':\n # r = (self.bounds.top(), self.bounds.bottom())\n # else:\n # r = (self.bounds.left(), self.bounds.right())\n r = [(self.lines[0].value()), (self.lines[1].value())]\n return (min(r), max(r))",
"def description(self):\n return \"create a <b>region</b> with edge at cursor\"",
"def add_missing_ROI_cols(self, shape_df: pd.DataFrame) -> pd.DataFrame:\n\n if not \"Color BGR\" in shape_df.columns:\n shape_df[\"Color BGR\"] = [(255, 255, 255)] * len(shape_df)\n if not \"Thickness\" in shape_df.columns:\n shape_df[\"Thickness\"] = [5] * len(shape_df)\n if not \"Color name\" in shape_df.columns:\n shape_df[\"Color name\"] = \"White\"\n\n return shape_df",
"def region(self):\n return self.random_element(self._regions)",
"def add_climatology_cols(df):\n return df",
"def test_choropleth_pass():\n m = view(world, column=\"pop_est\")",
"def region(self):\n return regions.lookup(self.state)"
]
| [
"0.5962177",
"0.5869616",
"0.56979346",
"0.56446475",
"0.5435717",
"0.5430769",
"0.5374926",
"0.5374112",
"0.5373336",
"0.5350224",
"0.532746",
"0.5324471",
"0.5274696",
"0.5274696",
"0.5257323",
"0.5238317",
"0.5227888",
"0.5227888",
"0.5214392",
"0.5172477",
"0.5164993",
"0.5162404",
"0.5155328",
"0.51532626",
"0.5117126",
"0.51167536",
"0.5108652",
"0.5103722",
"0.50983447",
"0.5081634"
]
| 0.656565 | 0 |
The population is divided into regions in Sofia. The last available data is from 2011 and shows that between 2001 and 2011 the population growth averaged a little more than 10% across all the districts. This means that we will adjust the number by +10% before the next population count in 2021. | def add_population_feature(data):
data.loc[:, 'population'] = data.loc[:, 'region'].apply(
lambda x: mapping.SOFIA_REGIONS_POPULATION_MAPPING[x]
)
data['adj_population'] = data.population.map(lambda x: (x + x * 0.1))
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def at_birth(df,variable,npoint):\n return df.groupby('cell')[['{}'.format('{}'.format(variable)),'pred_growth_rate']].apply(lambda x: x.head(npoint).mean()).rename(columns={'pred_length_box_um':'{}_at_birth'.format(variable)})",
"def average_population_grade(population):\r\n total = 0\r\n for individual in population :\r\n total += get_individual_fitness(individual)\r\n return total/POPULATION_COUNT",
"def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n if i == 0:\n for draw in list(range(0, 1000)):\n current[f'draw_{draw}'] = 1\n else:\n prior = (data.loc[data.year == years[i - 1]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n current = 1 - ((current - prior) * 0.75 / current)\n current['year'] = years[i]\n final = pd.concat([final, current])\n final = final.reset_index().set_index([c for c in data.columns if 'draw' not in c]).sort_index()\n return final",
"def relative_population(data, population, ageclass):\n total_pop = data[population].sum(axis=0)\n data['rel_pop'] = (data[population]/total_pop)*100000\n relative_pop = data.pivot(columns=ageclass, values='rel_pop').sum(axis=0)\n return relative_pop",
"def occupation_distribution(data):",
"def average_city(g):\n average = 0\n ctr = 0\n \n for key in g.city_dict:\n average = average + g.city_dict[key].get_population()\n ctr = ctr + 1\n \n \n return (average / ctr)",
"def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga",
"def runRandomEntryStrat(self):\n start, end = self.randomDays()\n \n gain = (self.df.adj_close[end] - getInfl(self.df.adj_close[start], start.year, end.year)) / \\\n getInfl(self.df.adj_close[start], start.year, end.year)\n #if gain > 6:\n # print \"Windfall: \", start, end, gain\n return gain",
"def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? \n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()",
"def east_asia_pacific_countries():\r\n east_asia_pacific_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in east_asia_pacific:\r\n east_asia_pacific_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in east_asia_pacific_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians",
"def high_income_countries():\r\n high_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in high_countries:\r\n high_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in high_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians",
"def inflation_ratio(old_year,new_year):\n # todo memoization\n amount = 1\n url_address = \"https://data.bls.gov/cgi-bin/cpicalc.pl?cost1={}&year1={}&year2={}\".format(int(amount),int(old_year),int(new_year))\n print(url_address)\n with urllib.request.urlopen(url_address) as url:\n\n html_doc = url.read()\n\n soup = BeautifulSoup(html_doc,\"lxml\")\n\n\n return float(soup.find(\"span\", {\"id\": \"answer\"}).contents[0][1:])",
"def topOccupations(years):\n\toccs = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name= \"Report1_Data\", usecols =[\"BGTOCC\",\"Job Postings\"])\n\t\t# occs.append(DB.head(5))\n\t\toccs.append(DB)\n\t\tTopOccs =pd.concat(occs, keys = list(years), names =['year'])\n\t\n\t# Which occupations appear throughout the years, which are new and which no longer appear?\n\t\n\t\n\tv = TopOccs.BGTOCC.value_counts().sort_index()\t\n\t\n\t# create list of occupations that are posted each year\n\trecurringOccs = v[v==8].index.tolist()\n\t\n\treturn TopOccs\n\t# emergingOccs = \n\t# print(TopOccs.loc[2012,:])",
"def lower_middle_income_countries():\r\n lower_middle_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in lower_middle_countries:\r\n lower_middle_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in lower_middle_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians",
"def average_city_size(self):\r\n average = 0\r\n total = 0\r\n for code, node in self.vertices.items():\r\n average += node.population\r\n total += 1\r\n return average // total",
"def get_country_counts_growths(df, country, field):\n\n # Filter our DataFrame so it only reads data from the country we are interested in.\n filtered_df = df[df[\"country\"] == country].copy()\n\n # We add 2 new columns to know the daily totals and their percent change.\n filtered_df[\"difference\"] = filtered_df[field].diff()\n filtered_df[\"change\"] = filtered_df[\"difference\"].pct_change()\n\n # We drop all NaN values.\n filtered_df.dropna(inplace=True)\n\n # We format the previous 2 columns so they can be easier to read.\n filtered_df[\"difference\"] = filtered_df[\"difference\"].apply(int)\n\n filtered_df[\"change\"] = filtered_df[\"change\"].apply(\n lambda x: str(np.round(x * 100, 2)) + \"%\")\n\n print(filtered_df[[field, \"difference\", \"change\"]][-10:])",
"def eady_growth_rate(data):\n N2 = ixr.brunt_vaisala(data)\n f = 2.0*omega*xruf.sin(xruf.deg2rad(data.lat))\n\n dz = ixr.domain.calculate_dz(data)\n du = ixr.domain.diff_pfull(data.ucomp, data)\n\n N = xruf.sqrt(N2.where(N2 > 0))\n\n egr = 0.31*du/dz*f/N\n return np.abs(egr)",
"def low_income_countries():\r\n low_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in low_countries:\r\n low_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in low_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians",
"def yearlyDepreciation():\n return .10",
"def upper_middle_income_countries():\r\n upper_middle_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in upper_middle_countries:\r\n upper_middle_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in upper_middle_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians",
"def returns_over_max_drawdown(tot_returns_dict, year, lifetime_maximum_drawdown):\n\n return round(tot_returns_dict[year] / abs(lifetime_maximum_drawdown), 2)",
"def calculate_world_daywise(countries_daywise_df):",
"def five_years_avg_dividend(self) -> float:\n return self._five_years_avg_dividend",
"def test_census_county_population():\n dataframe = get_county_population_dataframe()\n boulder_county_row = dataframe.loc[dataframe['county_fips'] == 8013]\n boulder_county_population = boulder_county_row.get('county_population')\n assert float(boulder_county_population) == 326196",
"def dateByWorldPopulationRank(sex, region, dob, rank):\n # check that all arguments have the right type (even though it's not very pythonic)\n if not isinstance(sex, basestring) or not isinstance(region, basestring) or not isinstance(dob, date):\n raise TypeError('One or more arguments did not match the expected parameter type')\n\n # confirm that sex and region contain valid values\n if sex not in SEXES:\n raise InvalidSexError(sex)\n if region not in dataStore.countries:\n raise InvalidCountryError(region)\n\n # check the various date requirements\n if dob < date(1920, 1, 1) or dob > date(2079, 12, 31): # the end date has been chosen arbitrarily and is probably wrong\n raise BirthdateOutOfRangeError(dob, 'between 1920-01-01 and 2079-12-31')\n\n # internally, the algorithm works with k-ranks\n rank = rank / 1000.0\n\n # prefetch the extrapolation table\n table = dataStore[sex, region]\n\n # The number of years from input birth to '2100/01/01'\n length_time = relativedelta(date(2100, 1, 1), dob).years\n\n # Make sure that difference between DOB and final Date < 100\n l_max = min(int(np.floor(length_time/10)*10), 100)\n\n xx = []\n for jj in range(1, (len(range(10, l_max+10, 10))+1)):\n try:\n xx.append(_calculateRankByDate(table, dob, dob + relativedelta(days = jj*3650)))\n except Exception:\n # Breaks the function if either the birthdate is too late for some rank or the rank is too high for some birthdate\n raise DataOutOfRangeError(detail='The input data is out of range: the birthdate is too late for the rank or the rank is too high for the birthdate')\n\n # check the array for NaN?\n xx = np.array(xx) # convert xx from list to array\n #nanIndex = np.where(np.isnan(xx)) # return array of index positions for NANs\n\n ''' NEED TO BREAK THE FUNCTION IF CC IS TRUE - NOT YET IMPLEMENTED '''\n # check to see if all of the Ranks are less than the wRank\n if np.all(xx < rank):\n raise DataOutOfRangeError(detail='The input data is out of range: the person is too young')\n\n #print xx\n # now find the interval containing wRank\n #print rank\n #print np.amin(np.where((xx < rank) == False))\n Upper_bound = (np.amin(np.where((xx < rank) == False))+1)*10 # +1 because of zero index\n Lower_bound = Upper_bound-10\n\n if xx[1] > rank:\n Lower_bound = 2\n\n if Lower_bound < 2:\n # I don't know what this error means, but if Lower_bound is < 2, then range_2 will start with a value < 0\n # which means _calculateRankByDate() will be called with a negative age, and that will fail\n raise DataOutOfRangeError()\n\n # Define new range\n range_2 = np.arange(Lower_bound-2, Upper_bound+1) # +1 due to zero index\n\n # locate the interval\n xx_ = np.zeros((len(range_2),2))\n\n # given that interval, do a yearly interpolation\n #print range_2\n for kk in range_2:\n #print kk\n xx_[(kk - np.amin(range_2)),0] = _calculateRankByDate(table, dob, dob + relativedelta(years=kk))\n xx_[(kk - np.amin(range_2)),1] = kk*365\n\n # Search again for the yearly interval containing wRank\n if xx_[1,0] > rank:\n Lower_bound = 0\n Upper_bound = xx_[-1,1]\n else:\n Upper_bound = xx_[np.amin(np.where((xx_[:,0] < rank) == False)),1]\n Lower_bound = xx_[np.amax(np.where((xx_[:,0] < rank) == True)),1]\n\n range_3 = np.arange(Lower_bound, Upper_bound+1)\n #print (range_3)\n\n #xx_ = np.zeros((len(range_3),2))\n\n # From this point on, this stuff is within a year (daily), due to the fact that the evolution of the rank is linear\n # we do linear interpolation to get the exact day faster\n end_point = range_3[len(range_3)-1]\n first_point = 
range_3[0]\n # print end_point, first_point\n\n # Get the rank for the first and last days in range_3\n rank_end = _calculateRankByDate(table, dob, dob + relativedelta(days=end_point))\n rank_first = _calculateRankByDate(table, dob, dob + relativedelta(days=first_point))\n\n # This gives us the age when we reach wRank and the exact date\n final_age = np.interp(rank, [rank_first, rank_end], [Lower_bound, Upper_bound])\n final_date = dob + relativedelta(days=final_age)\n #print final_age, final_date\n\n ''' CHECK THESE INTERPOLATION VALUES '''\n #now we also want to plot our life-path, so we do spline interpolation for the stuff we calculated in the first step\n # (i.e. the ranks over decades) and interpolate using bSplines.\n #xx_interp = InterpolatedUnivariateSpline((np.arange(10, l_max+1, 10)*365),xx)\n # print xx_interp\n #x_interp = xx_interp((np.arange(1,36501,365)))\n # print x_interp\n\n # find the rank nearest to wRank\n #find_r = np.amin(np.where(abs(x_interp - rank)))\n # print find_r\n\n # The value this function returns\n #exactAge = round(final_age/365, 1)\n #age = math.floor(final_age/365)\n #DATE = final_date\n\n #pd.DataFrame({'exactAge': pd.Series([exactAge], index = ['1']), 'age': pd.Series([age],index = ['1']), 'DATE': pd.Series([DATE], index = ['1'])})\n return final_date",
"def calc_base_year_data(base_year_vehicles_df):\n pass",
"def portfolio_growth_risk(avg_annual_returns, max_daily_rolling_drawdown):\n\n return round(avg_annual_returns / abs(max_daily_rolling_drawdown.mean()*100), 2)",
"def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0",
"def smoothen(region, y_data='counts', interval=3, add_column=True):\n intensity = region.get_data(column=y_data)\n odd = int(interval / 2) * 2 + 1\n even = int(interval / 2) * 2\n cumsum = np.cumsum(np.insert(intensity, 0, 0))\n avged = (cumsum[odd:] - cumsum[:-odd]) / odd\n for _ in range(int(even / 2)):\n avged = np.insert(avged, 0, avged[0])\n avged = np.insert(avged, -1, avged[-1])\n\n if add_column:\n region.add_column(\"averaged\", avged, overwrite=True)\n\n return avged",
"def europe_central_asia_countries():\r\n europe_central_asia_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in europe_central_asia:\r\n europe_central_asia_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in europe_central_asia_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians"
]
| [
"0.5535828",
"0.5507328",
"0.54799646",
"0.5471713",
"0.5456382",
"0.54437935",
"0.54347974",
"0.5403029",
"0.5403002",
"0.5346823",
"0.52930796",
"0.52916986",
"0.52910703",
"0.52808404",
"0.52757365",
"0.52700174",
"0.52281874",
"0.52259725",
"0.5220774",
"0.51936775",
"0.51876706",
"0.5180072",
"0.51743287",
"0.51670814",
"0.5152967",
"0.5142478",
"0.5140703",
"0.51365346",
"0.5132791",
"0.5130811"
]
| 0.5599366 | 0 |
Additional column for counting the big supermarkets per district. | def add_supermarkets_feature(data):
data.loc[:, 'nr_supermarkets'] = data.loc[:, 'district'].apply(
lambda x: mapping.SOFIA_NEIGHBOURHOOD_SUPERMARKET_MAPPING[x])
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def per_cell_animal_count(self):\n print self.island.individuals()",
"def print_number_of_entities(self, entity_col):\n for df in self:\n print(\"# of entities: \", len(df[entity_col].unique()))",
"def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)",
"def __len__(self):\n return self.data.index.get_level_values(0).to_series().nunique()",
"def nb_leafy_rameau(x):\r\n return sum([nb_leafy_rameau_cat(x, cat) for cat in ['small', 'medium', 'large']])",
"def codonfreqs_kmerdf(kmertable): \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)",
"def columnCount(self, parent): # pylint: disable=unused-argument\n return 5",
"def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)",
"def higher_taxonomy(self):\n return self.metadata.groupby(['Higher Taxonomy']\n ).size().reset_index().rename(columns={0: 'Organisms'})",
"def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))",
"def sum_by_university(self,df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also\n df_univ = df.groupby(['Coll/Univ']).sum()\n \n df_univ = df_univ.sort_values('PB')\n df_top_univ = df_univ[-30:]\n \n #Visual bargraph for top 30 Colleges and number of pro-bowl appearances they produce\n df_univ_PB = df_top_univ['PB']\n univ_plot = df_univ_PB.plot(kind=\"barh\", fontsize=4)\n univ_plot.set_xlabel(\"Pro bowl appearances\")\n univ_plot.set_title(\"PRO BOWL APPEARANCES, BY COLLEGE/UNIVERSITY, 2010-2020\")\n plt.show()\n \n return",
"def compute_number_of_associated_companies(row):\n derived_series = pd.read_json(json.dumps(row['company_derived']), typ='series')\n derived_series = pd.Series(derived_series)\n derived_string = derived_series.to_string()\n if derived_string.count('|') > 0:\n row[\"multiple_companies_derived_count\"] = derived_string.count('|') + 1\n elif \"none\" in derived_string:\n row[\"multiple_companies_derived_count\"] = 0\n else:\n row[\"multiple_companies_derived_count\"] = 1\n return row[\"multiple_companies_derived_count\"]",
"def getColumnCount(self) -> int:\n ...",
"def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)",
"def columnCount(self, parent):\n return 1",
"def _choose_clusters_num(database_type: str, synthetic_data_dim: int) -> int:\n data_dim: int = 1\n if database_type == DatabaseType.Synthetic:\n data_dim = synthetic_data_dim\n elif database_type in [DatabaseType.ThreeDRoadNetwork, DatabaseType.IndividualHouseholdElectricPowerConsumption]:\n data_dim = 2\n elif database_type == DatabaseType.HouseSalesInKingCounty:\n data_dim = 8\n return 2 * (data_dim + 1) ** 2 + 2",
"def n_levels(self):\n return self.primary_header['Number of levels']",
"def number_of_herbivores_island(self):\n return np.sum(self.herbivores_on_island)",
"def lower_taxonomy(self):\n return self.metadata.groupby(['Higher Taxonomy', 'Lower Taxonomy']\n ).size().reset_index().rename(columns={0: 'Organisms'})",
"def class_size(self):\n\t\tif self.subject.count()==0:\n\t\t\treturn student.objects.all().filter(reg=self).count()\n\t\telse:\n\t\t\treturn self.grade_set.all().distinct().count()",
"def add_country_counts(labels_df):\n counts = labels_df.groupby([\"country\"]).size().reset_index(name=\"num_country_labels\")\n labels_df = pd.merge(labels_df, counts, on='country')\n counts = labels_df.groupby([\"country\"])['article_id'].nunique().reset_index(name=\"num_country_articles\")\n return pd.merge(labels_df, counts, on='country')",
"def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()",
"def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")",
"def size(self, subset=None):\n return self[subset].index.get_level_values(0).to_series().nunique()",
"def no_of_colgs_in_all_loc():\n #Method 1\n\n\n #Method2\n c = College.objects.all()\n c = c.values('location').annotate(cn = Count('location'))\n for i in c:\n print(i['location'],i['cn'])",
"def columnCount(self, parent_midx):\n return self._cols_nb",
"def columnCount(self, parent_midx):\n return self._cols_nb",
"def get_num_pos_neg_kmers(st: Subtype, df: DataFrame) -> Tuple[int, int]:\n dfst = df[(df['subtype'] == str(st.subtype))]\n return dfst[dfst['is_pos_kmer']].shape[0], dfst[~dfst['is_pos_kmer']].shape[0]",
"def test_group_small_cols(self):\n taxa = DataTableFactory(PACKET_DIR).taxonomy()\n taxa = group_small_cols(taxa, top=2)\n self.assertEqual(taxa.shape[1], 3)",
"def NoOfBIERSubDomains(self):\r\n\t\treturn self._get_attribute('noOfBIERSubDomains')"
]
| [
"0.5330675",
"0.5282973",
"0.5255991",
"0.5128409",
"0.51240784",
"0.5026904",
"0.502193",
"0.49998674",
"0.49924496",
"0.49710825",
"0.49595797",
"0.49419037",
"0.49380037",
"0.48994735",
"0.4898089",
"0.489673",
"0.48857796",
"0.48539412",
"0.4830413",
"0.48281297",
"0.4813685",
"0.48109588",
"0.4805701",
"0.48012844",
"0.47951132",
"0.4788645",
"0.4788645",
"0.47817937",
"0.4776413",
"0.47685814"
]
| 0.68234015 | 0 |
Additional bool column for metro station in district. | def add_metro_feature(data):
data.loc[:, 'has_metro'] = data.loc[:, 'district'].apply(
lambda x: mapping.SOFIA_METRO_TRUE_FALSE_MAPPING[x])
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def becomes_single_column(self):\n return self.becomes_column() and not self.is_gps()",
"def get_boolean(self, df):\n if not self.value:\n return False\n elif not self.par:\n return ()\n if self.variable_type == 'interval':\n return self._interval_boolean(df)\n elif self.variable_type == 'list':\n return self._list_boolean(df)",
"def add_hospital_feature(data):\n data.loc[:, 'has_hospital'] = data.loc[:, 'district'].apply(\n lambda x: mapping.SOFIA_HOSPITALS_TRUE_FALSE_MAPPING[x])\n\n return data",
"def exercise1():\n print(exercise1.__doc__)\n area = data['Area square miles'] = pd.Series([46.87, 176.53, 97.92])\n data['Boolean'] = city_names.apply(lambda name: \"San\" in name) & area.apply(lambda val: val > 50)\n print(data)",
"def is_stationary(self):\n\n # in datafold there is no handling of this attribute, if required this has to\n # be implemented\n raise NotImplementedError(\"base class\")",
"def discoverable(self):\n return sa.Column(sa.Boolean(), default=False)",
"def isStation(self) -> bool:\n return self.station",
"def bool(self) -> bool:\n if isinstance(self, ps.DataFrame):\n df = self\n elif isinstance(self, ps.Series):\n df = self.to_dataframe()\n return df.head(2)._to_internal_pandas().bool()",
"def get_bool2(self):\n pass",
"def _convert_bool(self) -> pd.Series:\n\n if self.requires_nan:\n dtype = \"float\"\n else:\n dtype = \"bool\"\n\n return self._convert(dtype=dtype)",
"def get_boolean_fields():\n exclude = ['is_montessori', 'is_special_ed']\n\n fields = []\n for field in Location._meta.fields:\n if field.get_internal_type() == 'NullBooleanField' and \\\n not field.get_attname() in exclude:\n fields.append((field.get_attname(),field.verbose_name,))\n\n return fields",
"def has_intermediate_table(self):\r\n if (self.intermediate_spherical_right is not None or self.intermediate_cylinder_right is not None\r\n or self.intermediate_axis_right is not None or self.intermediate_av_right is not None or\r\n self.intermediate_dnp_right is not None\r\n or self.intermediate_spherical_left is not None or self.intermediate_cylinder_left is not None\r\n or self.intermediate_axis_left is not None or self.intermediate_av_left is not None or\r\n self.intermediate_dnp_left is not None):\r\n return True\r\n return False",
"def is_gps(self):\n row_type = self.get_type()\n is_gps = row_type in ('hidden geopoint', 'geopoint')\n return is_gps",
"def setStation(self, isStation: bool) -> None:",
"def add_met_real_forecast_station_col_to_rlkpis(self):\r\n rl_kpi_siteId_df = pd.DataFrame(self.rlkpi.imputed_rl_kpis_df.site_id.unique(), columns=['site_id'])\r\n\r\n # Assign the nearest station from met-real to the site Id\r\n rl_kpi_siteId_df['met-real-station_no'] = \\\r\n rl_kpi_siteId_df['site_id'].apply(lambda x: self.dt.find_nearest_station_no(x,self.dtrldf))\r\n\r\n #rl_kpi_met_data = pd.merge(self.rl_kpis_df, rl_kpi_siteId_df, on='site_id', how='inner')\r\n met_forecast_dist_df = self.dtrldf[self.dtrldf['Unnamed: 0'].isin(self.metfcast.list_stations)]\r\n met_forecast_dist_df.reset_index(drop=True, inplace=True)\r\n\r\n #Assign the nearest station from met-forecast to the site Id\r\n rl_kpi_siteId_df['met-forecast-station_no'] = \\\r\n rl_kpi_siteId_df['site_id'].apply(lambda x: self.dt.find_nearest_station_no(x, met_forecast_dist_df))\r\n # Add met-real-station_no & met-forecast-station_no in rl_kpis_df\r\n rl_kpi_met_data = pd.merge(self.rlkpi.imputed_rl_kpis_df, rl_kpi_siteId_df, on='site_id', how='inner')\r\n self.train_data = rl_kpi_met_data",
"def setTrue(self):\n self.cond = CT.TRUE\n self.left = self.right = None\n self.z3 = BoolSort().cast(True)\n self.cleaned = self.Z3Simplified = self.customSimplified = self.checked = True\n self.customSimplifiedValue = CE.TRUE",
"def schema():\n return vol.Schema({\"venus\": cv.boolean, \"mars\": cv.boolean, \"jupiter\": cv.boolean})",
"def _handle_boolean(\n *, artifacts: types.ColumnArtifacts # pylint: disable=unused-argument\n) -> Boolean:\n return Boolean",
"def get_bool(self, x, y, name):\r\n\r\n value = self.get_tile(x, y).get(name)\r\n return value in (True, 1, 'true', 'yes', 'True', 'Yes', '1', 'on', 'On')",
"def is_mineral_field(self) -> bool:\n return self.type_data.has_minerals",
"def is_on(self):\n return bool(self.arest.data.get('state'))",
"def CONST_BOOL(self, t):\n t.value = False if t.value == '#false' else True\n return t",
"def add_ground_truth_state_to_info(self):\n self._is_ground_truth_state_exposed = True\n return",
"def negate_on_dbsnp_if_pathogenic(row):\n is_on_dbsnp = row[\"is_on_dbsnp\"]\n\n if \"Pathogenic\" in row[\"clin_info\"] or \"Likely_pathogenic\" in row[\"clin_info\"]:\n is_on_dbsnp = 0\n\n return is_on_dbsnp",
"def boolean(self, column, nullable=False):\n self._last_column = self.table.add_column(column, \"boolean\", nullable=nullable)\n return self",
"def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Boolean()",
"def is_dualtor(tbinfo):\n return \"dualtor\" in tbinfo[\"topo\"][\"name\"]",
"def hide_satellite_managed():\n try:\n return strtobool(connexion.request.headers.get('Hide-Satellite-Managed', 'false'))\n except ValueError:\n return False",
"def is_on(self):\n pass",
"def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val"
]
| [
"0.5492909",
"0.54807097",
"0.5474053",
"0.54708606",
"0.541572",
"0.5336358",
"0.53200907",
"0.5193005",
"0.50475645",
"0.50054103",
"0.49636742",
"0.49614355",
"0.4942129",
"0.49269125",
"0.49171716",
"0.48763788",
"0.48655918",
"0.48403075",
"0.4835898",
"0.4823209",
"0.48213354",
"0.4818367",
"0.4812201",
"0.48120174",
"0.47959718",
"0.47923815",
"0.4790572",
"0.47899637",
"0.47888687",
"0.47881794"
]
| 0.60921067 | 0 |
Additional bool column for hospitals in each district. | def add_hospital_feature(data):
data.loc[:, 'has_hospital'] = data.loc[:, 'district'].apply(
lambda x: mapping.SOFIA_HOSPITALS_TRUE_FALSE_MAPPING[x])
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testHealthAssessHosp(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"hosp\")\n\n self.util.boolPropertyTest(self, attr, \"hosp\")",
"def exercise1():\n print(exercise1.__doc__)\n area = data['Area square miles'] = pd.Series([46.87, 176.53, 97.92])\n data['Boolean'] = city_names.apply(lambda name: \"San\" in name) & area.apply(lambda val: val > 50)\n print(data)",
"def update(self):\n self.haveDistrict = len(self.districts()) > 0",
"def attended_college(df):\n\n # Checking to see if the posting requires a college degree\n if df.degree == 'NONE' or df.degree == 'HIGH_SCHOOL':\n return False\n else:\n return True",
"def is_holiday(df):\n\n # make list of public holidays\n\n holidays = [\n '1/1',\n '2/1',\n '3/1',\n '4/1',\n '5/1',\n '6/1',\n '7/1',\n '15/2',\n '16/2',\n '17/3',\n '23/3',\n '24/3',\n '25/3',\n '26/3',\n '27/3',\n '28/3',\n '29/3',\n '30/3',\n '31/3',\n '1/4',\n '2/4',\n '3/4',\n '4/4',\n '5/4',\n '6/4',\n '7/4',\n '8/4',\n '1/5',\n '7/5',\n '4/6',\n '6/8',\n '29/10',\n '30/10',\n '31/10',\n '1/11',\n '2/11',\n '21/12',\n '22/12',\n '23/12',\n '24/12',\n '25/12',\n '26/12',\n '27/12',\n '28/12',\n '29/12',\n '30/12',\n '31/12'\n ]\n\n # make date from day and month features \n\n date = str(df.iloc[0].day) + '/' + str(df.iloc[0].month)\n\n # check if the date is in the list\n # 1 if yes, 0 if no\n\n if date in holidays:\n df['SCHOOL_OFF'] = 1\n else:\n df['SCHOOL_OFF'] = 0\n \n # set dtype\n\n df.SCHOOL_OFF = df.SCHOOL_OFF.astype('category')\n\n return df",
"def add_metro_feature(data):\n data.loc[:, 'has_metro'] = data.loc[:, 'district'].apply(\n lambda x: mapping.SOFIA_METRO_TRUE_FALSE_MAPPING[x])\n\n return data",
"def create_wanderer_column(gender_column,age_column):\n return np.logical_and([gender_column==1], [age_column>=10]).transpose()",
"def create_wanderer_column(gender, age):\n return np.logical_and([gender == 1], [age >= 10]).transpose()",
"def is_a_holiday(df):\n\n try:\n\n # school, public, bank holidays\n\n holiday_dates = [\n '1/1/18', '2/1/18', '3/1/18', '4/1/18', '5/1/18', '6/1/18',\n '7/1/18', '15/2/18', '16/2/18', '17/3/18', '23/3/18',\n '24/3/18', '25/3/18', '26/3/18', '27/3/18', '28/3/18',\n '29/3/18', '30/3/18', '31/3/18', '1/4/18', '2/4/18', '3/4/18',\n '4/4/18', '5/4/18', '6/4/18', '7/4/18', '8/4/18', '1/5/18',\n '7/5/18', '4/6/18', '6/8/18', '29/10/18', '30/10/18',\n '31/10/18', '1/11/18', '2/11/18', '21/12/18', '22/12/18',\n '23/12/18', '24/12/18', '25/12/18', '26/12/18', '27/12/18',\n '28/12/18', '29/12/18', '30/12/18', '31/12/18']\n\n # make list of holidays into datetime series\n\n holiday_dates = pd.to_datetime(holiday_dates)\n\n # define and create a range of datetimes\n # covering the summer break\n\n start = pd.to_datetime('28/6/2018')\n end = pd.to_datetime('1/Sep/2018')\n summer_break = pd.DatetimeIndex(start=start, end=end, freq='d')\n\n # join the holiday date sets\n\n holiday = summer_break.append(holiday_dates)\n\n # initialise a new feature: 0, not a holiday\n\n df['holiday'] = '0'\n\n # find all row numbers corresponding to a holiday\n\n holiday_rows = []\n\n for item in holiday:\n temp_list = df.loc[df.DAYOFSERVICE == item].index.to_list()\n for i in temp_list:\n holiday_rows.append(i)\n \n # change the value of rows with holiday date \n \n df.loc[holiday_rows, \"holiday\"] = '1'\n\n print('— holiday marker added')\n \n return df\n\n except:\n\n print(\"Problem with add_holiday function\")",
"def discoverable(self):\n return sa.Column(sa.Boolean(), default=False)",
"def is_country_column_present_in_vendor_profile_dialed_digits_page(self):\n return self.is_specific_column_present(self.dialed_digits_grid_div_id, self.column_name_country)",
"def is_country_column_present_in_view_price_list_details_dialed_digits_page(self):\n return self.is_specific_column_present(self.vendor_price_list_detail_dial_digits_grid_div_id, self.column_name_country)",
"def holiday(self):\n \n holidays = [] # array of holidays for the organization. \n if self.time_stamp in holidays:\n return True\n return False",
"def becomes_single_column(self):\n return self.becomes_column() and not self.is_gps()",
"def college_selectivity():",
"def negate_on_dbsnp_if_pathogenic(row):\n is_on_dbsnp = row[\"is_on_dbsnp\"]\n\n if \"Pathogenic\" in row[\"clin_info\"] or \"Likely_pathogenic\" in row[\"clin_info\"]:\n is_on_dbsnp = 0\n\n return is_on_dbsnp",
"def set_display_columns(self, set_true=[], set_false=[]):\n for i in range(len(self.fields)):\n if self.fields[i].name in set_true:\n self.fields[i].display = True\n elif self.fields[i].name in set_false:\n self.fields[i].display = False",
"def __contains(self, other):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"contains\",\n operand1=self,\n operand2=other\n )",
"def compute_store_state_holiday(df):\n return df[\"store\"].astype(str) + \"_\" + df[\"state_holiday\"].ne(\"0\").astype(str)",
"def hospital_viewer():\r\n name = request.args[\"address\"]\r\n hospitals = get_zipcode_hospitals(name)\r\n hospitals['coordinate'] = 'end_point='+hospitals['name'].astype(str)+'&'+'end_lng=' + hospitals['lon'].astype(str)+'&'+'end_lat='+hospitals['lat'].astype(str)\r\n\r\n\r\n if len(hospitals) > 0:\r\n\r\n #genetrate folium map\r\n hospitals_coordinates = hospitals[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(hospitals_coordinates)\r\n\r\n return render_template(\r\n \"page3_2h.html\",\r\n num_hospitals=get_num_hospitals(name),\r\n address=name,\r\n hospitals=hospitals[[\"name\", \"address\", \"contact\", \"coordinate\"]].values,\r\n map=map._repr_html_()\r\n )\r\n else:\r\n\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_hospital = find_5near_hospitals(lng, lat)\r\n near_hospital['coordinate'] = 'end_point='+near_hospital['name'].astype(str)+'&'+'end_lng=' + near_hospital['lon'].astype(str)+'&'+'end_lat='+near_hospital['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_2h_nohospital.html\",\r\n address=name,\r\n near_hospital_table=near_hospital[[\"name\", \"address\", \"contact\", \"coordinate\", \"distance\"]].values,\r\n )",
"def include_hxl_row(dv_columns, hxl_columns):\n return bool(set(hxl_columns).intersection(set(dv_columns)))",
"def is_country_column_present_in_re_analysis_page(self):\n return self.is_specific_column_present(self.re_analysis_grid_div_id, self.column_name_country)",
"def is_hometown(town):\n if town == 'orlando':\n is_hometown = True\n else:\n is_hometown = False\n return is_hometown",
"def add_ground_truth_state_to_info(self):\n self._is_ground_truth_state_exposed = True\n return",
"def testClinicalPatientHBI(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"hbi\")\n\n self.util.boolPropertyTest(self, attr, \"hbi\")",
"def addBooleanColumnFromCriteria(inputDataToAssess,assessItems,newColumnName):\n \n import pandas as pd\n import re\n \n inputDataToAssess[newColumnName]=False\n \n #necessary, due to escape nonsense\n inputDataToAssess=inputDataToAssess.replace(regex=True, to_replace='\\\\\\\\',value='/')\n \n for index, row in assessItems.iterrows():\n \n curReplaceVal=row[0]\n currentRegexExpression=re.compile(curReplaceVal)\n CurrentBoolVec=inputDataToAssess[inputDataToAssess.columns[0]].str.contains(currentRegexExpression,na=False)\n inputDataToAssess[newColumnName].loc[CurrentBoolVec]=True\n\n return inputDataToAssess;",
"def _handle_boolean(\n *, artifacts: types.ColumnArtifacts # pylint: disable=unused-argument\n) -> Boolean:\n return Boolean",
"def get_boolean(self, df):\n if not self.value:\n return False\n elif not self.par:\n return ()\n if self.variable_type == 'interval':\n return self._interval_boolean(df)\n elif self.variable_type == 'list':\n return self._list_boolean(df)",
"def _bool_encode(self, d):\n for k, v in d.items():\n if isinstance(v, bool):\n d[k] = str(v).lower()\n \n return d",
"def is_country_column_present_in_vendor_profile_destinations_page(self):\n return self.is_specific_column_present(self.destinations_grid_div_id, self.column_name_country)"
]
| [
"0.5407677",
"0.53736824",
"0.53419423",
"0.5297726",
"0.4970331",
"0.48377848",
"0.48371673",
"0.47672835",
"0.47128576",
"0.4695646",
"0.46453887",
"0.46452537",
"0.46426442",
"0.45490533",
"0.4541565",
"0.45203283",
"0.44817874",
"0.4474086",
"0.44707337",
"0.4461174",
"0.44574606",
"0.44521362",
"0.44351342",
"0.4423985",
"0.44214147",
"0.44118902",
"0.4405757",
"0.43893626",
"0.4363458",
"0.43563157"
]
| 0.7443698 | 0 |
Based on mk_level2_fsf.py by Russell Poldrack (Simplified BSD license). | def mk_level2_fsf(
of,
model,
subj,
task=None,
runs=None,
fsf_fname=_opj('%(modeldir)s', 'task%(task)03d_2ndlvl.fsf'),
feat_inputdir=_opj('%(modeldir)s', 'task%(task).3i_run%(run).3i.feat'),
fsfstub_fname=None,
result_dir=None,
overwrite_results=True,
):
# try to determine the task automatically from the model
task = set([c['task'] for c in of.get_model_conditions(model)])
if not len(task) == 1:
raise ValueError("ambiguous task list for model %.3i, "
"use `task` argument to disambiguate" % (model,))
task = list(task)[0]
if runs is None:
# process all runs in the absence of a selection
runs = of.get_task_bold_run_ids(task)[subj]
# few convenience shortcuts
subdir = _opj(of.basedir, 'sub%03d' % subj)
modelbasedir = _opj(subdir, 'model')
modeldir= _opj(modelbasedir, 'model%03d' % model)
# read the conditions_key file, throw away any condition that is not relevant for
# the current task
conditions = [c for c in of.get_model_conditions(model) if c['task'] == task]
expandvars = {
'sub': subj,
'subdir': subdir,
'modeldir': modeldir,
'task': task,
}
# load custom contrasts
contrasts = of.get_model_contrasts(model,).get(task, {})
# write to a file or into a string
if fsf_fname is None:
from cStringIO import StringIO
outfile = StringIO()
else:
outfilename = fsf_fname % expandvars
outfile = open(outfilename,'w')
outfile.write('# Automatically generated\n')
# first get common lines from stub file
if not fsfstub_fname is None:
stubfile=open(fsfstub_fname,'r')
for l in stubfile:
outfile.write(l)
stubfile.close()
# now add custom lines
# TODO
# first check for empty EV file
#empty_evs=[]
#for r in range(len(runs)):
# if os.path.exists("%s/%s/sub%03d/model/model%03d/onsets/task%03d_run%03d/empty_evs.txt"%(basedir,taskid,subnum, modelnum,tasknum,runs[r])):
# evfile=open("%s/%s/sub%03d/model/model%03d/onsets/task%03d_run%03d/empty_evs.txt"%(basedir,taskid,subnum,modelnum,tasknum,runs[r]),'r')
# empty_evs=[int(x.strip()) for x in evfile.readlines()]
# evfile.close()
outfile.write('\n\n### AUTOMATICALLY GENERATED PART###\n\n')
if result_dir is None:
result_dir = _opj('%(modeldir)s', 'task%(task)03d_2ndlvl.gfeat')
result_dir = result_dir % expandvars
outfile.write('set fmri(outputdir) "%s"\n' % (result_dir,))
outfile.write('set fmri(npts) %d\n' % len(runs)) # number of runs
outfile.write('set fmri(multiple) %d\n' % len(runs)) # number of runs
outfile.write('set fmri(ncopeinputs) %d\n'
% int(len(conditions) + 1 + len(contrasts))) # nmbr of copes
for r in range(len(runs)):
rexpandvars = expandvars.copy()
rexpandvars['run'] = runs[r]
outfile.write('set feat_files(%d) "%s"\n'
% (int(r+1), feat_inputdir % rexpandvars))
outfile.write('set fmri(evg%d.1) 1\n' % int(r + 1))
outfile.write('set fmri(groupmem.%d) 1\n' % int(r + 1))
# TODO: remove here when TODO below is dealt with
for c in range(len(conditions) + 1 + len(contrasts)): # nmbr of copes
outfile.write('set fmri(copeinput.%d) 1\n' % int(c + 1))
# TODO
# need to figure out if any runs have empty EVs and leave them out
# if not c+1 in empty_evs:
# outfile.write('set fmri(copeinput.%d) 1\n'%int(c+1))
# else:
# outfile.write('set fmri(copeinput.%d) 0\n'%int(c+1))
if fsf_fname is None:
# return the FSF file content as a string
outfile.seek(0)
fsf = outfile.read()
outfile.close()
return fsf
else:
# return the filename
outfile.close()
return outfilename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mk_level1_fsf(\n of,\n model,\n subj,\n task,\n run,\n bold_fname=_opj('%(subdir)s', 'BOLD', 'task%(task)03d_run%(run)03d',\n 'bold.nii.gz'),\n # TODO more flexible output location\n fsf_fname=_opj('%(modeldir)s', 'task%(task).3i_run%(run).3i_1stlvl.fsf'),\n # TODO not per-subject images by force\n brain_img_fname=_opj('%(subdir)s', 'anatomy', 'highres001_brain.nii.gz'),\n brain_mask_fname=None,\n example_func_fname=None,\n # TODO if relative, relative to BOLD\n confound_fname=_opj('%(subdir)s', 'BOLD', 'task%(task)03d_run%(run)03d',\n 'qa', 'confound.txt'),\n fsfstub_fname=None,\n result_dir=None,\n smoothing_kernelsize=0,\n use_inplane=False,\n nonlin_reg=False,\n add_temporal_deriv=True,\n skipvols=0,\n tr=None,\n overwrite_results=True,\n initxfm2std_fname=None,\n ):\n\n import nibabel as nb\n\n # few convenience shortcuts\n subdir = _opj(of.basedir, 'sub%03d' % subj)\n modelbasedir = _opj(subdir, 'model')\n modeldir= _opj(modelbasedir, 'model%03d' % model)\n\n # read the conditions_key file, throw away any condition that is not relevant for\n # the current task\n conditions = [c for c in of.get_model_conditions(model) if c['task'] == task]\n\n expandvars = {\n 'sub': subj,\n 'subdir': subdir,\n 'modeldir': modeldir,\n 'task': task,\n 'run': run,\n }\n\n # check for orthogonalization file\n orth={}\n ##orthfile = _opj(of.basedir, 'models', 'model%03d' % model, 'orthogonalize.txt')\n ##if os.path.exists(orthfile):\n ## f=open(orthfile)\n ## for l in f.readlines():\n ## orth_tasknum=int(l.split()[0].replace('task',''))\n ## if orth_tasknum==task:\n ## orth[int(l.split()[1])]=int(l.split()[2])\n ## f.close()\n\n # check for QA dir\n #qadir='%s/BOLD/task%03d_run%03d/QA'%(subdir,task,run)\n\n contrasts_all = of.get_model_contrasts(model,)\n contrasts=[]\n if contrasts_all.has_key(task):\n contrasts=contrasts_all[task]\n\n scan_key = of.get_scan_properties()\n\n # write to a file or into a string\n if fsf_fname is None:\n from cStringIO import StringIO\n outfile = StringIO()\n else:\n outfilename = fsf_fname % expandvars\n outfile = open(outfilename,'w')\n\n outfile.write('# Automatically generated by mk_fsf.py\\n')\n\n # first get common lines from stub file\n if not fsfstub_fname is None:\n stubfile=open(fsfstub_fname,'r')\n for l in stubfile:\n outfile.write(l)\n stubfile.close()\n\n # figure out how many timepoints there are\n bold_img_path = bold_fname % expandvars\n bold_img = nb.load(bold_img_path)\n # should be 4D\n ntp = bold_img.shape[3]\n\n if tr is None:\n # try our luck with the image header\n hdr = bold_img.get_header()\n tinc = hdr.get_zooms()[3]\n unit = hdr.get_xyzt_units()[1]\n if unit == 'sec':\n tr = tinc\n elif unit == 'msec':\n tr = tinc / 1000.\n else:\n raise ValueError(\"unkown time unit, cannot determine TR\")\n\n outfile.write('\\n\\n### AUTOMATICALLY GENERATED PART###\\n\\n')\n # now add custom lines\n outfile.write( 'set fmri(regstandard_nonlinear_yn) %d\\n' % int(nonlin_reg))\n # Delete volumes\n outfile.write('set fmri(ndelete) %d\\n' % skipvols)\n\n if result_dir is None:\n result_dir = _opj('%(modeldir)s', 'task%(task)03d_run%(run)03d.feat')\n result_dir = result_dir % expandvars\n\n outfile.write('set fmri(outputdir) \"%s\"\\n' % (result_dir,))\n outfile.write('set feat_files(1) \"%s\"\\n' % (_stripext(bold_img_path),))\n if use_inplane is True:\n # XXX THIS IS TODO\n outfile.write('set fmri(reginitial_highres_yn) 1\\n')\n outfile.write('set initial_highres_files(1) \"%s/anatomy/inplane001_brain.nii.gz\"\\n'\n % subdir)\n else:\n outfile.write('set 
fmri(reginitial_highres_yn) 0\\n')\n\n outfile.write('set highres_files(1) \"%s\"\\n'\n % (_stripext(brain_img_fname % expandvars),))\n outfile.write('set fmri(npts) %d\\n' % ntp)\n outfile.write('set fmri(tr) %0.2f\\n' % tr)\n nevs=len(conditions)\n outfile.write('set fmri(evs_orig) %d\\n' % nevs)\n # TODO support other convolution schemes\n outfile.write('set fmri(evs_real) %d\\n' % (2 * nevs))\n outfile.write('set fmri(smooth) %d\\n' % smoothing_kernelsize)\n outfile.write('set fmri(ncon_orig) %d\\n'\n % (len(conditions) + 1 + len(contrasts)))\n outfile.write('set fmri(ncon_real) %d\\n'\n % (len(conditions) + 1 + len(contrasts)))\n\n # loop through EVs\n convals_real = np.zeros(nevs * 2)\n convals_orig = np.zeros(nevs)\n # XXX what is this?\n empty_evs=[]\n\n for ev, cond in enumerate(conditions):\n outfile.write('\\n\\nset fmri(evtitle%d) \"%s\"\\n'\n % (ev + 1, cond['name']))\n condfile = _opj('%(modeldir)s', 'onsets', 'task%(task)03d_run%(run)03d',\n 'cond%03d.txt' % (ev + 1)) % expandvars\n if os.path.exists(condfile):\n outfile.write('set fmri(shape%d) 3\\n'%(ev + 1))\n outfile.write('set fmri(custom%d) \"%s\"\\n'%(ev + 1, condfile))\n else:\n # shape 10 is \"empty (all zeros)\"\n outfile.write('set fmri(shape%d) 10\\n' % (ev + 1))\n print '%s is missing, using empty EV' % condfile\n empty_evs.append(ev + 1)\n\n outfile.write('set fmri(convolve%d) 3\\n' % (ev + 1))\n outfile.write('set fmri(convolve_phase%d) 0\\n' % (ev + 1))\n outfile.write('set fmri(tempfilt_yn%d) 1\\n' % (ev + 1))\n outfile.write('set fmri(deriv_yn%d) %i\\n'\n % (ev + 1, add_temporal_deriv))\n\n # first write the orth flag for zero, which seems to be turned on whenever\n # anything is orthogonalized\n if orth.has_key(ev + 1):\n outfile.write('set fmri(ortho%d.0) 1\\n' % int(ev + 1))\n else:\n outfile.write('set fmri(ortho%d.0) 0\\n' % int(ev + 1))\n for evn in range(1, nevs + 1):\n if orth.has_key(ev + 1):\n if orth[ev + 1] == evn:\n outfile.write('set fmri(ortho%d.%d) 1\\n' % (ev + 1, evn))\n else:\n outfile.write('set fmri(ortho%d.%d) 0\\n' % (ev + 1, evn))\n else:\n outfile.write('set fmri(ortho%d.%d) 0\\n' % (ev + 1, evn))\n\n # default contrast setup\n # make a T contrast for each EV\n outfile.write('set fmri(conpic_real.%d) 1\\n' % (ev + 1))\n outfile.write('set fmri(conpic_orig.%d) 1\\n' % (ev + 1))\n outfile.write('set fmri(conname_real.%d) \"%s\"\\n'\n % (ev + 1, cond['name']))\n outfile.write('set fmri(conname_orig.%d) \"%s\"\\n'\n % (ev + 1, cond['name']))\n for evt in range(nevs * 2):\n outfile.write('set fmri(con_real%d.%d) %d\\n'\n %(ev + 1, evt + 1, int(evt == (ev * 2))))\n if (evt == (ev * 2)):\n convals_real[evt] = 1\n for evt in range(nevs):\n outfile.write('set fmri(con_orig%d.%d) %d\\n'\n % (ev + 1, evt + 1, int(evt == ev)))\n if (evt == ev):\n convals_orig[evt] = 1\n\n if len(empty_evs) > 0:\n empty_ev_file = open(\n _opj('%(modeldir)s', 'onsets', 'task%(task)03d_run%(run)03d',\n 'empty_evs.txt') % expandvars, 'w')\n for eev in empty_evs:\n empty_ev_file.write('%d\\n' % eev)\n empty_ev_file.close()\n\n # make one additional contrast across all conditions\n outfile.write('set fmri(conpic_real.%d) 1\\n' % (ev + 2))\n outfile.write('set fmri(conpic_orig.%d) 1\\n' % (ev + 2))\n outfile.write('set fmri(conname_real.%d) \"all\"\\n' % (ev + 2))\n outfile.write('set fmri(conname_orig.%d) \"all\"\\n' % (ev + 2))\n\n for evt in range(nevs * 2):\n outfile.write('set fmri(con_real%d.%d) %d\\n'\n %(ev + 2, evt + 1, convals_real[evt]))\n for evt in range(nevs):\n outfile.write('set fmri(con_orig%d.%d) 
%d\\n'\n % (ev + 2, evt + 1, convals_orig[evt]))\n\n # add custom contrasts\n if len(contrasts) > 0:\n contrastctr = ev + 3\n for c in contrasts.iterkeys():\n outfile.write('set fmri(conpic_orig.%d) 1\\n' % contrastctr)\n outfile.write('set fmri(conpic_real.%d) 1\\n' % contrastctr)\n outfile.write('set fmri(conname_real.%d) \"%s\"\\n' % (contrastctr, c))\n outfile.write('set fmri(conname_orig.%d) \"%s\"\\n' % (contrastctr, c))\n cveclen = len(contrasts[c])\n con_real_ctr = 1\n for evt in range(nevs):\n outfile.write('set fmri(con_real%d.%d) %s\\n'\n % (contrastctr,\n con_real_ctr,\n contrasts[c][evt]))\n outfile.write('set fmri(con_real%d.%d) 0\\n'\n % (contrastctr, con_real_ctr + 1))\n con_real_ctr += 2\n for evt in range(nevs):\n if evt < cveclen:\n outfile.write('set fmri(con_orig%d.%d) %s\\n'\n % (contrastctr, evt + 1, contrasts[c][evt]))\n else:\n outfile.write('set fmri(con_orig%d.%d) 0\\n'\n % (contrastctr, evt + 1))\n contrastctr += 1\n\n # Add confound EVs text file\n if not confound_fname is None:\n confoundfile = confound_fname % expandvars\n if os.path.exists(confoundfile):\n outfile.write('set fmri(confoundevs) 1\\n')\n outfile.write('set confoundev_files(1) \"%s\"\\n' % confoundfile)\n else:\n outfile.write('set fmri(confoundevs) 0\\n')\n else:\n outfile.write('set fmri(confoundevs) 0\\n')\n\n if overwrite_results:\n outfile.write('set fmri(overwrite_yn) 1\\n')\n\n if not brain_mask_fname is None:\n # don't rely on BET but use a specified mask\n outfile.write('set fmri(alternative_mask) \"%s\"\\n'\n % (brain_mask_fname % expandvars,))\n\n if not example_func_fname is None:\n # don't rely on extracting a example image from the BOLD input,\n # but use a specified image\n outfile.write('set fmri(alternative_example_func) \"%s\"\\n'\n % (example_func_fname % expandvars,))\n\n # Standard space registration initialisation transform\n if not initxfm2std_fname is None:\n outfile.write('set fmri(init_standard) \"%s\"\\n'\n % (initxfm2std_fname % expandvars,))\n\n if fsf_fname is None:\n # return the FSF file content as a string\n outfile.seek(0)\n fsf = outfile.read()\n outfile.close()\n return fsf\n else:\n # return the filename\n outfile.close()\n return outfilename",
"def f2d(forfile, opath):\n return f2suff(forfile, opath, 'd')",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]",
"def CanonicalFromDMAP2 (D0, D1):\n\n if butools.checkInput:\n if D0.shape[0]!=2:\n raise Exception(\"CanonicalFromDMAP2: size is not 2!\")\n if not CheckDMAPRepresentation(D0, D1):\n\t raise Exception(\"CanonicalFromDMAP2: Input is not a valid DMAP representation!\")\n\n ev = la.eigvals(D0)\n ix = np.argsort(-np.abs(np.real(ev)))\n ev = ev[ix]\n\n s1=ev[0]\n s2=ev[1]\n\n if s2>=0:\n G0, G1 = CanonicalFromMAP2 (D0-ml.eye(2),D1)\n G0 = G0 + ml.eye(2)\n return (G0, G1)\n\n #s2 is negative\n av = DRPSolve (la.inv(ml.eye(2)-D0)*D1)\n\n gamma = la.eigvals(la.inv(ml.eye(2)-D0)*D1)\n ix = np.argsort(-np.abs(np.real(gamma)))\n gamma = gamma[ix]\n gamma = gamma[1]\n\n w1=1.0/(s1-s2)*(np.sum(D0,1)-s2*ml.ones((2,1)))\n w2=ml.ones((2,1))-w1\n\n W=np.hstack((w1, w2))\n A=(1.0-s1)*(av*W)\n a1=A[0,0]\n\n if gamma>=0:\n a=-(1/(2*(-1+s1)*(-1+s1+s2)**2))*(1-4*s1+a1*s1+5*s1**2-a1*s1**2-2*s1**3-2*s2-a1*s2+5*s1*s2-3*s1**2*s2+s2**2+a1*s2**2-s1*s2**2-gamma+3*s1*gamma-a1*s1*gamma-3*s1**2*gamma+a1*s1**2*gamma+s1**3*gamma+s2*gamma+a1*s2*gamma-2*s1*s2*gamma+s1**2*s2*gamma-a1*s2**2*gamma+math.sqrt((-1+s1+s2)**2*((-1+s1**2*(-2+gamma)+gamma+s2*(1+a1-a1*gamma)+s1*(3-a1-s2-2*gamma+a1*gamma))**2-4*(-1+s1)*(-s1**3*(-1+gamma)+a1*(-1+s2)*s2*(-1+gamma)+s1**2*(-2+a1+s2+2*gamma-a1*gamma)+s1*(1-a1-s2-gamma+a1*gamma)))))\n b=1+(a*(-1+s1+s2-s1*s2)*gamma)/((a-1)*(-s1*s2+a*(-1+s1+s2)))\n\n G0=ml.matrix([[s1+s2, a*(1-s1-s2)], [s1*s2/(a*(s1+s2-1)), 0]])\n G1=ml.matrix([[(1-a)*(1-s1-s2), 0], [b*(1+s1*s2/(a*(1-s1-s2))), (1-b)*(1+s1*s2/(a*(1-s1-s2)))]])\n else:\n #gamma<0\n a=(a1*s1-a1*s1**2+s2-a1*s2-3*s1*s2+2*s1**2*s2-s2**2+a1*s2**2+s1*s2**2+s1*gamma-a1*s1*gamma-2*s1**2*gamma+a1*s1**2*gamma+s1**3*gamma+a1*s2*gamma-a1*s2**2*gamma+math.sqrt(-4*(-1+s1)*s1*s2*(-1+s1+s2)*(a1*(s1-s2)*(-1+gamma)+(-1+s1)*(s2+(-1+s1)*gamma))+(a1*(-s1+s1**2+s2-s2**2)*(-1+gamma)+(-1+s1)*((-1+2*s1)*s2+s2**2+(-1+s1)*s1*gamma))**2))/(2*(-1+s1+s2)*(a1*(s1-s2)*(-1+gamma)+(-1+s1)*(s2+(-1+s1)*gamma)))\n b=-((a*(1-s1)*(1-s2)*gamma)/((a-1)*(-a+a*s1+a*s2-s1*s2)))\n\n G0=ml.matrix([[s1+s2, a*(1-s1-s2)],[s1*s2/(a*(s1+s2-1)), 0]])\n G1=ml.matrix([[0, (1-a)*(1-s1-s2)],[b*(1-s1*s2/(a*(s1+s2-1))), (1-b)*(1-s1*s2/(a*(s1+s2-1)))]])\n return (G0, G1)",
"def build_messy_lookup_lad(source,dest):\n la = QuickGrid().open(source)\n\n lookup = QuickGrid()\n lookup.header = [\"gss-code\",\"local-authority-code\"]\n\n possible = [\"gss-code\",\"archaic-gss-code\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n values = r[p].split(\",\")\n for v in values:\n lookup.add([v,r[\"local-authority-code\"]])\n \n lookup.save(dest,force_unicode=True)",
"def __make_tree(self, wd, root=\"d1\", create=True):\n d1 = \"%s/%s\" % (wd, root)\n t1 = FSTree(d1)\n d2 = \"%s/d2\" % d1\n t2 = t1.add(d2)\n if create:\n hdfs.mkdir(d2)\n for t, d, bn in ((t1, d1, \"f1\"), (t2, d2, \"f2\")):\n f = \"%s/%s\" % (d, bn)\n if create:\n hdfs.dump(self.data, f, mode=\"wb\")\n t.add(f, 0)\n return t1",
"def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage",
"def map2mw_F(d,k1,entry):\n if k1 in map2mw_special_F:\n return map2mw_special_F[k1]\n regexes = [\n u'<ab>f2.</ab> de {%(.*?)%}',\n u'<ab>f2.</ab> {%(.*?)%}',\n #u'<ab>f2.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'",
"def d2transf_df2(self,f):\r\n raise NotImplementedError",
"def tree_case_2(self, prune: int=1):\n\n types_h = self.Node('types.h')\n malloc_h = self.Node('malloc.h')\n stdlib_h = self.Node('stdlib.h', [types_h, malloc_h])\n bar_h = self.Node('bar.h', [stdlib_h])\n blat_h = self.Node('blat.h', [stdlib_h])\n blat_c = self.Node('blat.c', [blat_h, bar_h])\n blat_o = self.Node('blat.o', [blat_c])\n\n expect = \"\"\"\\\n+-blat.o\n +-blat.c\n +-blat.h\n | +-stdlib.h\n | +-types.h\n | +-malloc.h\n +-bar.h\n\"\"\"\n if prune:\n expect += \"\"\" +-[stdlib.h]\n\"\"\"\n else:\n expect += \"\"\" +-stdlib.h\n +-types.h\n +-malloc.h\n\"\"\"\n\n lines = expect.split('\\n')[:-1]\n lines = ['[E BSPACN ]' + l for l in lines]\n withtags = '\\n'.join(lines) + '\\n'\n\n return blat_o, expect, withtags",
"def convert_fus(ast):\n\n parent_fn_name = ast.name_short\n prefix_list = {\"p\": \"p.\", \"r\": \"r.\", \"g\": \"c.\"}\n prefix = prefix_list[parent_fn_name]\n\n fus1_ns = ast.args[0].namespace\n fus1_val = ast.args[0].value\n\n arg_fus = ast.args[1]\n fus_args = [None, \"?\", \"?\"]\n for idx, arg in enumerate(arg_fus.args):\n fus_args[idx] = arg\n\n fus2_ns = fus_args[0].namespace\n fus2_val = fus_args[0].value\n\n if fus_args[1] == \"?\":\n fus1_range = fus_args[1]\n else:\n fus1_range = f'\"{prefix}1_{fus_args[1].value}\"'\n\n if fus_args[2] == \"?\":\n fus2_range = fus_args[2]\n else:\n fus2_range = f'\"{prefix}{fus_args[2].value}_?\"'\n\n fus = Function(\"fus\", version=version, parent=ast)\n fus.args = [\n NSArg(fus1_ns, fus1_val, fus),\n StrArg(fus1_range, fus),\n NSArg(fus2_ns, fus2_val, fus),\n StrArg(fus2_range, fus),\n ]\n\n # Remove BEL\n ast_args = ast.args\n ast_args.pop(0)\n ast_args.pop(0)\n\n if ast_args == [None]:\n ast_args = []\n\n ast.args = []\n ast.add_argument(fus)\n\n if len(ast_args) > 0:\n ast.args.extend(ast_args)\n\n return ast",
"def __getforminfo(lnode, rnode):\n if (lnode.prop=='Nucleus') and (rnode.prop=='Satellite'):\n nucspan = lnode.eduspan\n form = 'NS'\n elif (lnode.prop=='Satellite') and (rnode.prop=='Nucleus'):\n nucspan = rnode.eduspan\n form = 'SN'\n elif (lnode.prop=='Nucleus') and (rnode.prop=='Nucleus'):\n nucspan = (lnode.eduspan[0], rnode.eduspan[1])\n form = 'NN'\n else:\n raise ValueError(\"\")\n return (form, nucspan)",
"def w2f(sents,i,j,filename,freq):\n w = sents[i][j][0] #current word\n pos = sents[i][j][1] #POS of current word\n f = [ \n 'bias', #non-contextual feature \n 'w=' + w, #current word \n 'w.istitle=%s' % w.istitle(), #first letter - capitalized\n 'pos=' + pos, # POS tag\n 'w.intitle=%s' % contained_in_title(w, filename), # w matches title\n 'w.lowtitle=%s' % lower_in_title(w, filename), # w lower matches title\n 'w.freq=%s' % frequency(w, freq), # freq of w \n 'w.stopword=%s' % stop_word(w), # # stop word\n ]\n \n # previous word features\n if j>0:\n pw = sents[i][j-1][0] #previous word\n ppos = sents[i][j-1][1] #POS of previous word\n f.extend([ \n 'pw=' + pw, # previous word \n 'pw.istitle=%s' % pw.istitle(), #first letter - capitalized\n 'ppos=' + ppos, # POS tag\n 'pw.intitle=%s' % contained_in_title(pw, filename), # w matches title\n 'pw.lowtitle=%s' % lower_in_title(pw,filename), # w lower matches title\n 'pw.freq=%s' % frequency(pw, freq), # freq of w\n 'pw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('BOS') #first word of a sentence\n\n # next word features\n if j<len(sents[i])-1:\n nw = sents[i][j+1][0] #next word\n npos = sents[i][j+1][1] #POS of next word\n f.extend([ \n 'nw=' + nw, # previous word\n 'nw.istitle=%s' % nw.istitle(), #first letter - capitalized\n 'npos=' + npos, #POS tag\n 'nw.intitle=%s' % contained_in_title(nw, filename), # w matches title\n 'nw.lowtitle=%s' % lower_in_title(nw,filename), # w lower matches title\n 'nw.freq=%s' % frequency(nw, freq), # freq of w\n 'nw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('EOS') # last word of a sentence\n\n #if j>1: ...\n #if j<len(sents[i])-2: ...\n #if j>0 and j<len(sents[i])-1: ...\n return f",
"def d2l(sents,f,freq):\n return [s2l(sents,i,f,freq) for i in range(len(sents))]",
"def tree2lff(args):\n lff.tree2lff()",
"def d2f(sents,f,freq):\n return [s2f(sents,i,f,freq) for i in range(len(sents))]",
"def project_L2(kv, f):\n from .assemble import bsp_mass_1d\n lv = load_vector(kv, f)\n M = bsp_mass_1d(kv)\n return scipy.sparse.linalg.spsolve(M, lv)",
"def f2(\n self, sample_sets, indexes=None, windows=None, mode=\"site\", span_normalise=True\n ):\n return self.__k_way_sample_set_stat(\n self._ll_tree_sequence.f2,\n 2,\n sample_sets,\n indexes=indexes,\n windows=windows,\n mode=mode,\n span_normalise=span_normalise,\n )",
"def standarization_oftags(a2_data, tag0='UO2 Dm=0.72 Tf=600'):\n if 'A2' in a2_data:\n tag = a2_data['A2'].keys()\n if len(tag) != 1:\n tag = tag0\n print 'warning, tags=', tag\n else:\n tag = tag[0]\n return a2_data['A2'][tag]['data']\n if 'data' in a2_data:\n return a2_data['data']\n raise RuntimeError('cant read a2 data')",
"def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):\r\n return d2f_dg2*(dg_dx**2) + df_dg*d2g_dx2",
"def Build_FT(filename):\n\ttree = ET.parse(filename)\n\troot = tree.getroot()\n\tglobal dict\n\tglobal leaves\n\tglobal nonleaves\n\tfor n in root:\n\t\tname=n.attrib['id']\n\t\t#nonleaves.append(name)\n\t\t\n\t\tif name not in dict:\n\t\t\tdict[name]=[]\n\t\tfor i in range(len(n)):\n\t\t\tif i==0:\n\t\t\t\tif n[i].text.upper()==\"AND\":\n\t\t\t\t\tdict[name].append(1)\n\t\t\t\telse:\n\t\t\t\t\tdict[name].append(0)\n\t\t\telse:\n\t\t\t\tdict[name].append(n[i].text)\t\t\t\t\n\t\t\t\tif n[i].text not in dict:\t# Also create entry for kid nodes\t\t\t\t\t\t\n\t\t\t\t\tdict[n[i].text]=[]\n\t\t\t\t\t\n\t\"\"\"Test whether a node is leaf node, if yes add it to list leaves\"\"\"\n\tfor i in dict:\n\t\tdict_all[i]=-1\n\t\tif not dict[i]:\n\t\t\tleaves.append(i)",
"def discVar2FeatureOld( var, varname, lims = [1,5], collapse = [False, False], ctxt = 'contains'):\n nums = ['zero','one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']\n \n vals = range(lims[0], lims[1]+1) \n \n #init fdict\n fdict = dict() \n for k, val in enumerate(vals):\n fdict[(ctxt + ' %s ' + varname) % (nums[val])] = False\n \n if collapse[0] == True: var = max([var, lims[0]])\n if collapse[1] == True: var = min([var, lims[1]])\n \n if var >= lims[0] and var <= lims[1]: #if collapse = False, ignore vals outside lims\n fdict[(ctxt + ' %s ' + varname) % (nums[var])] = True \n \n return fdict",
"def d2logpdf_df2(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n d2logpdf_dlink2 = self.d2logpdf_dlink2(link_f, y, extra_data=extra_data)\r\n dlink_df = self.gp_link.dtransf_df(f)\r\n dlogpdf_dlink = self.dlogpdf_dlink(link_f, y, extra_data=extra_data)\r\n d2link_df2 = self.gp_link.d2transf_df2(f)\r\n return chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)",
"def create_level_maps(max_depth, level_tags, summary_tags):\n\n level_maps = []\n for i in range(0, max_depth - 1):\n level_map = dict()\n level = max_depth - i\n for summary in summary_tags[i]:\n all_sub_tags = [att for att in level_tags[i] if summary[:-4] == \".\".join(att.split('.')[:-1])]\n matching_tag_above = [att for att in level_tags[i + 1] if summary[:-4] == att]\n level_map[summary] = all_sub_tags + matching_tag_above\n\n level_maps.append(level_map)\n\n return level_maps",
"def ft_invert(log2, chf, frz_generator, params, loc=0, scale=1, xmax=0, xshift=0,\n suptitle='', wraps=None, disc_calc='density'):\n\n # number of buckets\n n = 1 << log2\n if xmax is None:\n xmax = n\n\n # make frozen object\n if callable(frz_generator):\n if scale is None:\n # freq dists (Poisson) do not allow scaling\n frz = frz_generator(*params, loc=loc)\n frz.pdf = frz.pmf\n # for subsequent use\n scale = 1\n else:\n frz = frz_generator(*params, loc=loc, scale=scale)\n\n # spatial upto xmax; used to create exact using the scipy stats object\n # sampling interval (wavelength) = xmax / n\n if xmax == 0:\n xmax = frz.isf(1e-17)\n # sampling domain, for exact and to \"label\" the Fourier Transform output\n # xs = np.arange(n) * xmax / n\n bs = xmax / n\n xs = np.arange(n) * bs + xshift\n if callable(frz_generator):\n if disc_calc == 'density':\n exact = frz.pdf(xs)\n exact = exact / exact.sum() * (frz.cdf(xs[-1]) - frz.cdf(xs[0]))\n else:\n xs1 = np.hstack((xs - bs / 2, xs[-1] + bs / 2))\n exact = -np.diff(frz.sf(xs1))\n else:\n # pass in values\n exact = frz_generator\n\n # convert chf to ft including scale and loc effects\n def loc_ft(t):\n nonlocal params, loc, scale\n # ft(t)= int f(x)exp(-2πi t x)dx\n # chf(t) = int f(x)exp(i t x)dx\n t1 = -t * 2 * np.pi\n ans = chf(*params, t1 * scale)\n if loc != 0:\n # for some reason ans *= np.exp(-t1 * loc) does not work\n ans = ans * np.exp(t1 * loc * 1j)\n return ans\n\n # sampling interval = bs = xmax / n [small bs, high sampling rate]\n # sampling freq is 1 / bs = n / xmax, the highest sampling freq for inverting the FT\n # note xmax = n * bs, so n / xmax = 1 / bs.\n # f(x) = int_R fhat(t) exp(2πi tx)dt ≈ int_-f_max_f^max_f ...\n f_max = n / xmax\n # sample the FT; using real fft, only need half the range\n ts = np.arange(n // 2 + 1) * f_max / n # ts = np.arange(n // 2 + 1) / xmax\n fx = loc_ft(ts)\n # for debugging\n ft_invert.fx = fx\n ft_invert.ts = ts\n x = irfft(fx)\n if xshift != 0:\n x = np.roll(x, -int(xshift / bs))\n\n # plotting\n fig, axs = plt.subplots(2, 2, figsize=(2 * 3.5, 2 * 2.45), constrained_layout=True)\n ax0, ax1, ax2, ax3 = axs.flat\n\n for ax in axs[0].flat:\n ax.plot(xs, exact, label='exact', lw=1.5)\n ax.plot(xs, x, label='xs irfft', ls=':', lw=1.5)\n ax.legend(fontsize='x-small')\n ax0.set(title='Density', xlabel='Outcome, x')\n # mn = min(np.log10(exact).min(), np.log10(x).min())\n mn0 = np.log10(x).min() * 1.25\n mn = 10 ** np.floor(mn0)\n mx = max(np.log10(exact).max(), np.log10(x).max())\n mx = 10 ** np.ceil(mx)\n if np.isnan(mn):\n mn = 1e-17\n if np.isnan(mx):\n mx = 1\n ax1.set(yscale='log', ylim=[mn, mx], title='Log density', xlabel='Outcome, x')\n\n # amplitude and phase\n ax2.plot(ts, np.abs(fx), '-', lw=1.5, c='C3')\n ax2.set(title='Amplitude',\n ylabel='|ft|', yscale='log', xlabel='frequency')\n if log2 <= 8:\n ax3.plot(ts, np.cumsum(np.angle(fx)) / (2 * np.pi), '-', marker='.', ms=3, c='C2')\n else:\n ax3.plot(ts, np.cumsum(np.angle(fx)) / (2 * np.pi), c='C2')\n ax3.set(title='Cumulative phase',\n ylabel='cumsum(arg(ft)) / $2\\pi$', xlabel='frequency')\n\n if suptitle != '':\n fig.suptitle(suptitle)\n\n if wraps is not None:\n fig2, ax = plt.subplots(1, 1, figsize=(3.5 * 2, 2.45 * 2), constrained_layout=True)\n rt = exact.copy()\n ax.plot(xs, exact, label='exact', lw=1.5)\n ax.plot(xs, x, label='xs irfft', ls=':', lw=1.5)\n for b in wraps:\n xs2 = b * n / f_max + xs\n adj = frz.pdf(xs2)\n adj = adj / np.sum(adj) * (frz.cdf((b + 1) * n / f_max) - frz.cdf(b * n / f_max))\n rt += adj\n ax.plot(xs, rt, 
label=f'wrap {b}', lw=.5)\n ax.set(yscale='log', ylim=[mn, mx], title='Aliasing analysis',\n xlabel='Outcome, x', ylabel='Log density')\n ax.legend(fontsize='x-small', ncol=2, loc='upper right')\n\n return pd.DataFrame({'x': xs, 'p': x, 'p_exact': exact}).set_index('x')",
"def ft2d(im, shift_after=True, shift_before=True, ret='complex', s=None, norm=\"ortho\"):\n if np.ndim(im) < 2:\n print('Too few dimensions')\n raise ValueError('Too few dimensions for ft2d')\n # return im\n else:\n return ft(im, shift_after=shift_after, shift_before=shift_before, ret=ret, axes=(-2, -1), s=s, norm=norm)",
"def build_messy_lookup(source,dest,ref_col):\n la = QuickGrid().open(source)\n od = QuickGrid().open(join(\"source_files\",\"local_authority_data_names.csv\"))\n\n lookup = QuickGrid()\n lookup.header = [\"la name\",ref_col]\n\n possible = [\"official-name\",\"alt-name-1\",\"alt-name-2\",\"alt-name-3\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n lookup.add([r[p],r[ref_col]])\n \n current_names = [x[0] for x in lookup]\n\n for r in od:\n if r[\"name\"] not in current_names:\n code = r[\"local-authority\"].split(\":\")[1]\n lookup.add([r[\"name\"],code])\n \n lookup.save(dest,force_unicode=True)",
"def map_addr_tree_2(s, d, tors1, tors2):\n s_d = crc8(0, s, 0x31)%2\n if s_d == 1:\n tors1, tors2 = tors2, tors1\n n1 = len(tors1)\n n2 = len(tors2)\n #s_out, d_out = crc8(0, s, 0x31)%n1 + 1, crc8(0, d, 0x1d)%n2 + 1\n s_out, d_out = random.randint(0, n1-1) + 1, random.randint(0, n2-1) + 1\n s_out, d_out = tors1[s_out-1], tors2[d_out-1]\n return s_out, d_out",
"def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs",
"def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):\n if np.all(dg_dx==1.) and np.all(d2g_dx2 == 0):\n return d2f_dg2\n dg_dx_2 = np.clip(dg_dx, -np.inf, _lim_val_square)**2\n #dg_dx_2 = dg_dx**2\n return d2f_dg2*(dg_dx_2) + df_dg*d2g_dx2"
]
| [
"0.531481",
"0.5053373",
"0.48686805",
"0.47372434",
"0.4731332",
"0.47068286",
"0.46767756",
"0.46725553",
"0.46366653",
"0.4628272",
"0.45832887",
"0.45784652",
"0.4534064",
"0.45229214",
"0.44999015",
"0.44965637",
"0.4456667",
"0.44559452",
"0.4419189",
"0.44119292",
"0.44020185",
"0.43925464",
"0.43865138",
"0.43655828",
"0.43649277",
"0.4355611",
"0.4345913",
"0.43420824",
"0.43410504",
"0.4340547"
]
| 0.53041863 | 1 |
Run through the logic of the unquote function. | def test_unquote(self):
self.assertEqual(unquote('foo%23bar'), 'foo#bar')
self.assertEqual(unquote('foo%23bar', ['#']), 'foo%23bar')
with self.assertRaises(TypeError):
unquote(None)
self.assertEqual(unquote(""), "")
self.assertEqual(unquote("abc123"), "abc123") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unquote(s, *a, **kw):\n return quote(s, *a, **kw)",
"def unquote(value, *args, **kwargs):\n return decode(parse.unquote(value), *args, **kwargs)",
"def unquote():\n def _unquote(quoted):\n return quoted.subexpression\n yield (\"(λ &[any] . any)\", _unquote)",
"def test_unquote_correctness(\n unquoter_key: str,\n quoted: str,\n unquoted: str,\n) -> None:\n unquoter = unquoters[unquoter_key]\n assert unquoted == unquoter(quoted)",
"def _unquote(src, encoding=\"utf-8\"):\n return urllib.unquote(src).decode(encoding)",
"def unquoter(datum):\n if isinstance(datum, Pair):\n return evaluate(snek_to_py(datum), env)\n elif isinstance(datum, str):\n val = evaluate(datum, env)\n return val\n elif isinstance(datum, list):\n val = evaluate(datum, env)\n return unquoter(val)\n else:\n return datum",
"def unquote(cls, value):\n if six.PY2:\n return unquote(value).decode(\"utf8\")\n else:\n return unquote(value.decode(\"ascii\"))",
"def test_unescape__double_quote(self) -> None:\n escaped: str = \""\"\n unescaped: str = '\"'\n\n self.assertEqual(first=unescape(value=escaped), second=unescaped)",
"def test_unescape__single_quote(self) -> None:\n escaped: str = \"'\"\n unescaped: str = \"'\"\n\n self.assertEqual(first=unescape(value=escaped), second=unescaped)",
"def ntriples_unquote(input: str) -> str:\n old_validate = ntriples.validate\n try:\n ntriples.validate = False\n return ntriples.unquote(input)\n finally:\n ntriples.validate = old_validate",
"def un_quote(param):\n return param.replace(\"\\'\", \"\").replace(\"\\\"\", \"\").replace(\"\\\\\", \"\")",
"def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"\\\"'\")",
"def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"'\").strip('\"').strip(\"'\")",
"def unpolish(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def _unquote(s, encoding='utf-8'):\n return urllib.unquote(s).decode(encoding)",
"def test_unquote(self):\n fwa = FakeWikiArchivo('abcd <a href=\"/wiki/f%C3%B3u\">FooBar</a> dcba')\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [(u'fóu', SCORE_PEISHRANC)])",
"def make_unquote_correctness_pairs() -> List[Tuple[str, str]]:\n result = []\n\n def add_pair(escape: str, unescaped: str) -> None:\n result.append((f\"\\\\{escape}\", unescaped))\n result.append((f\"\\\\\\\\{escape}\", f\"\\\\{escape}\"))\n result.append((f\"\\\\\\\\\\\\{escape}\", f\"\\\\{unescaped}\"))\n\n chars = \"A1a\\\\\\nøæå\"\n for char in chars:\n code_point = ord(char)\n add_pair(f\"u{code_point:04x}\", char)\n add_pair(f\"u{code_point:04X}\", char)\n add_pair(f\"U{code_point:08x}\", char)\n add_pair(f\"U{code_point:08X}\", char)\n\n string_escapes = \"tbnrf'\"\n for char in string_escapes:\n add_pair(f\"{char}\", string_escape_map[char])\n\n # special handling because «\"» should not appear in string, and add_pair\n # will add it.\n result.append(('\\\\\"', '\"'))\n result.append(('\\\\\\\\\\\\\"', '\\\\\"'))\n\n # special handling because «\\» should not appear in string, and add_pair\n # will add it.\n result.append((\"\\\\\\\\\", \"\\\\\"))\n result.append((\"\\\\\\\\\\\\\\\\\", \"\\\\\\\\\"))\n\n return result",
"def unicode_unquote(value):\n return unquote(value).decode('utf-8')",
"def ntriples_unquote_validate(input: str) -> str:\n old_validate = ntriples.validate\n try:\n ntriples.validate = True\n return ntriples.unquote(input)\n finally:\n ntriples.validate = old_validate",
"def dequote(self, in_str):\n in_str = in_str.replace(\"'\", \"\")\n in_str = in_str.replace('\"', \"\")\n return in_str",
"def fully_unquote(s):\n new_s = urllib.parse.unquote(s)\n c = 0\n while new_s != s:\n s = new_s\n new_s = urllib.parse.unquote(s)\n c += 1\n assert c <= 10\n return new_s",
"def unquote(uri):\r\n uri = uri.encode('ascii')\r\n unquoted = urllib_unquote(uri)\r\n return unquoted.decode('utf-8')",
"def quote_unescape(value, lf='&mjf-lf;', quot='&mjf-quot;'):\n return value.replace(lf, '\\n').replace(quot, '\"')",
"def _unescape_identifier(self, value):\n\n return value.replace('\"\"', '\"')",
"def unquote_safe(s, unsafe_list):\n # note: this build utf8 raw strings ,then does a .decode('utf8') at the end.\n # as a result it's doing .encode('utf8') on each block of the string as it's processed.\n res = _utf8(s).split('%')\n for i in xrange(1, len(res)):\n item = res[i]\n try:\n raw_chr = _hextochr[item[:2]]\n if raw_chr in unsafe_list or ord(raw_chr) < 20:\n # leave it unescaped (but uppercase the percent escape)\n res[i] = '%' + item[:2].upper() + item[2:]\n else:\n res[i] = raw_chr + item[2:]\n except KeyError:\n res[i] = '%' + item\n except UnicodeDecodeError:\n # note: i'm not sure what this does\n res[i] = unichr(int(item[:2], 16)) + item[2:]\n o = \"\".join(res)\n return _unicode(o)",
"def unquote(s):\n res = s.split('%')\n # fastpath\n if len(res) == 1:\n return s\n s = res[0]\n for item in res[1:]:\n try:\n s += _hextochr[item[:2]] + item[2:]\n except KeyError:\n s += '%' + item\n except UnicodeDecodeError:\n s += unichr(int(item[:2], 16)) + item[2:]\n return s",
"def unquote(string, encoding='utf-8', errors='replace'):\n if isinstance(string, bytes):\n return unquote_to_bytes(string).decode(encoding, errors)\n if '%' not in string:\n string.split\n return string\n if encoding is None:\n encoding = 'utf-8'\n if errors is None:\n errors = 'replace'\n bits = _asciire.split(string)\n res = [bits[0]]\n append = res.append\n for i in range(1, len(bits), 2):\n append(unquote_to_bytes(bits[i]).decode(encoding, errors))\n append(bits[i + 1])\n return ''.join(res)",
"def unlex(tokens):",
"def unquote(inline, fullquote = 1, retain = 0, **keywargs):\n if keywargs.has_key('escapechar'):\n escapechar = keywargs['escapechar']\n else:\n escapechar = True\n outline = ''\n quotes = [\"'\",'\"']\n escape = 0\n index = 0\n quotechar = None\n inline = inline.strip()\n while index < len(inline):\n thischar = inline[index]\n index += 1\n if not quotechar and thischar not in quotes:\n return -1\n elif not quotechar:\n quotechar = thischar\n if retain:\n outline += thischar\n continue\n if escape:\n outline += thischar\n escape = 0\n continue\n if thischar in quotes:\n if thischar == quotechar:\n if retain:\n outline += thischar\n if not fullquote:\n return outline.replace('&mjf-quot;','\\\"').replace('&mjf-lf;','\\n'), inline[index:]\n elif index == len(inline):\n return outline.replace('&mjf-quot;','\\\"').replace('&mjf-lf;','\\n')\n else:\n return None\n else:\n outline += thischar\n continue\n if thischar == '\\\\' and escapechar: # a continue here to *not* retain the escape character \n escape = 1\n continue\n outline += thischar\n return None",
"def unquote(inline, fullquote=True, retain=False):\n mat = inquotes.match(inline)\n if mat is None:\n if inline.strip()[0] not in '\\'\\\"': # not quoted\n return inline\n else:\n # badly quoted\n raise UnQuoteError('Value is badly quoted: \"%s\"' % inline) \n quoted, rest = mat.groups()\n if fullquote and rest.strip():\n # badly quoted\n raise UnQuoteError('Value is badly quoted: \"%s\"' % inline)\n if not retain:\n quoted = quoted[1:-1]\n if not fullquote:\n return quoted, rest\n else:\n return quoted"
]
| [
"0.6642955",
"0.66328686",
"0.6418451",
"0.61958",
"0.61934257",
"0.6182384",
"0.61153316",
"0.60987306",
"0.606723",
"0.60570073",
"0.5972834",
"0.59154767",
"0.5902977",
"0.5894338",
"0.58805674",
"0.58460903",
"0.57806385",
"0.57626176",
"0.5584996",
"0.5537941",
"0.55198497",
"0.54296756",
"0.5424568",
"0.5406001",
"0.5400892",
"0.5355763",
"0.5319182",
"0.5293402",
"0.52681285",
"0.526105"
]
| 0.6665324 | 0 |
Run through the logic of the normalize_path function. | def test_normalize_path(self):
self.assertEqual(normalize_path("//////"), "/")
self.assertEqual(normalize_path("//"), "/")
self.assertEqual(normalize_path("//foo/bar//baz"), "/foo/bar/baz")
self.assertEqual(normalize_path("//foo/bar//baz/"), "/foo/bar/baz/")
self.assertEqual(normalize_path("//f%20oo/bar"), "/f oo/bar") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _normalize_path(path):\n if path is None:\n directory = BASE_PATH\n path = ''\n else:\n path = op.normpath(path)\n directory = op.normpath(op.join(BASE_PATH, path))\n\n if not is_in_folder(BASE_PATH, directory):\n abort(404)\n\n if not op.exists(directory):\n abort(404)\n\n return BASE_PATH, directory, path",
"def normalizePath(path):\n\tfrom os.path import normpath, sep\n\tresult = normpath(path)\n\tresult = result.replace(\"/\",sep)\n\tresult = result.replace(\"\\\\\",sep)\n\treturn adaptPath(result)",
"def _normalize_path(path):\n\n i = 0\n for c in path:\n if c != \"/\":\n break\n i = i + 1\n\n if i:\n return path[(i - 1) :]\n\n return path",
"def normalizePath(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n buff = '/' + path if path[0] != '/' else path\n return buff.replace('//', '/')",
"def standardize_path(path):\n path.rstrip('/')\n if not path.startswith('.*'):\n path = '/' + path\n path = re.compile('/+').sub('/', path)\n return path",
"def pathnormalize(p):\n return os.path.normcase(os.path.normpath(p))",
"def normalize(path):\n return os.path.normcase(os.path.realpath(path))",
"def canonicalPath(path_or_object):",
"def sanatize_path(self, path):\n # Remove extra whitespace\n path = path.strip()\n\n # Remove slash from end of path\n path = path.rstrip(os.sep)\n\n return path",
"def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path",
"def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))",
"def _NormalizePath(path: str) -> str:\n components = path.split(\"/\")\n normalized_components = [\n _NormalizePathComponent(component) for component in components\n ]\n if (normalized_components[1] == \"api\" and\n (len(normalized_components) == 2 or normalized_components[2] != \"v2\")):\n # We describe the v2 API in the OpenAPI description.\n normalized_components.insert(2, \"v2\")\n\n normalized_path = \"/\".join(normalized_components)\n if not normalized_path.startswith(\"/\"):\n normalized_path = \"/\" + normalized_path\n\n return normalized_path",
"def normalize_path(path, config):\n\n for alias, link in config[\"Aliases\"].items():\n if path.startswith(alias):\n return path.replace(alias, link, 1)\n\n return path",
"def _normpath(self, path):\n return os.path.normpath(os.path.normcase(path))",
"def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n # Don't forget explicit trailing slash when normalizing. Issue17324\n trailing_slash = path.rstrip().endswith('/')\n try:\n path = urllib.parse.unquote(path, errors='surrogatepass')\n except UnicodeDecodeError:\n path = urllib.parse.unquote(path)\n path = normpath(path)\n words = path.split('/')\n words = filter(None, words)\n path = self.serve_path # use own path here (no cwd)\n for word in words:\n if os.path.dirname(word) or word in (os.curdir, os.pardir):\n # Ignore components that are not a simple file/directory name\n continue\n path = os.path.join(path, word)\n if trailing_slash:\n path += '/'\n return path",
"def osnorm(self):\n import os\n if os.sep=='/' and \"\\\\\" in str(self):\n return Path(os.path.normpath(str(self).replace('\\\\','/' )))\n elif os.sep=='\\\\' and \"/\" in str(self):\n return Path(os.path.normpath(str(self).replace('/','\\\\' )))\n else:\n return self.norm()",
"def find_and_normalize(value):\n if isinstance(value, dict):\n # if value is a dictionary, try to normalize the values\n return {k: normalize_dict_value(v) for k, v in value.items()}\n return normalize_path(value)",
"def sanitize_paths(self):\n\n for wadfile in self.files:\n if wadfile.path:\n path, filename = os.path.split(wadfile.path)\n if len(filename) < 255:\n continue\n\n basename, ext = os.path.splitext(filename)\n wadfile.path = os.path.join(path, f\"{basename[:255-17-len(ext)]}.{wadfile.path_hash:016x}{ext}\")",
"def normalizeNativePath(path: unicode) -> unicode:\n ...",
"def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n # Don't forget explicit trailing slash when normalizing. Issue17324\n trailing_slash = path.rstrip().endswith('/')\n try:\n path = urllib.parse.unquote(path, errors='surrogatepass')\n except UnicodeDecodeError:\n path = urllib.parse.unquote(path)\n path = posixpath.normpath(path)\n words = path.split('/')\n words = filter(None, words)\n path = os.getcwd()\n for word in words:\n if os.path.dirname(word) or word in (os.curdir, os.pardir):\n # Ignore components that are not a simple file/directory name\n continue\n path = os.path.join(path, word)\n if trailing_slash:\n path += '/'\n return path",
"def normdirpath(path):\n if not path.endswith('/') and path != '':\n path += '/'\n return path",
"def _cleanpath(self, path):\n \n slashes = self.remotepathsep*2\n while slashes in path:\n path = path.replace(slashes,self.remotepathsep)\n \n if path.endswith(self.remotepathsep):\n path = path[:-1]\n \n return path",
"def _cleanpath(self, path):\n \n slashes = self.remotepathsep*2\n while slashes in path:\n path = path.replace(slashes,self.remotepathsep)\n \n if path.endswith(self.remotepathsep):\n path = path[:-1]\n \n return path",
"def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url",
"def normalize_path(img_file):\n\n img_file = img_file.split('/')[-1]\n img_file = 'data/IMG/'+img_file.split('\\\\')[-1]\n return img_file",
"def normalized_path(pathstring: str) -> str:\n pathstring = os.path.abspath(pathstring)\n return pathstring.replace('\\\\', '/').rstrip()",
"def clean_path(path):\n return resolved_path(path)",
"def normalizePath(p: str, *pathParts: List[str]) -> str:\n p1 = os.path.abspath(os.path.expanduser(p))\n if len(pathParts)>0:\n allPathParts = [ p1 ]\n allPathParts.extend(pathParts)\n p1 = os.path.join(*allPathParts)\n p2 = os.path.abspath(p1)\n return p2",
"def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n # Don't forget explicit trailing slash when normalizing. Issue17324\n trailing_slash = path.rstrip().endswith('/')\n path = posixpath.normpath(urllib.unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = self.working_dir\n for word in words:\n _drive, word = os.path.splitdrive(word)\n _head, word = os.path.split(word)\n if word in (os.curdir, os.pardir):\n continue\n path = os.path.join(path, word)\n if trailing_slash:\n path += '/'\n return path",
"def test_fix_path(self):\n\n expected = \"hello\" + PyFunceble.directory_separator + \"world\" + PyFunceble.directory_separator # pylint: disable=line-too-long\n actual = Directory(\"/hello/world\").fix_path()\n\n self.assertEqual(expected, actual)\n\n actual = Directory(\"\\\\hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(\"hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello/world/\").fix_path()\n self.assertEqual(expected, actual)"
]
| [
"0.7186743",
"0.69471556",
"0.6927698",
"0.6767035",
"0.6454062",
"0.6428511",
"0.6416044",
"0.6339321",
"0.63254476",
"0.63088715",
"0.62542826",
"0.61721075",
"0.61079854",
"0.60850936",
"0.6003305",
"0.6003291",
"0.5992366",
"0.59839135",
"0.5982311",
"0.5890047",
"0.5879354",
"0.5876299",
"0.5876299",
"0.5859488",
"0.5831104",
"0.58247846",
"0.5807733",
"0.58020604",
"0.5792901",
"0.57788473"
]
| 0.7038862 | 1 |
Lists all values for a key. | def list_values(key):
    return meta.list_values(key=key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getall(self, key):\n return self.values.get(key, [])",
"def get_for_key(self, key) -> list:\n return [res[key] for res in self.list]",
"def getlist(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n return []",
"def get_list(key):\n ret = hookenv.action_get(key)\n return ret.split() if ret else []",
"def GET(self, key):\n header('Content-Type', 'application/json')\n return dumps(list_values(key=key))",
"def hvals(self, key):\n return self._command(b'HVALS', key)",
"def getlist(self, key):\n try:\n vals = _dict_getitem(self, key.lower())\n except KeyError:\n return []\n else:\n if isinstance(vals, tuple):\n return [vals[1]]\n else:\n return vals[1:]",
"def hgetall(self, key):\n return self._command(b'HGETALL', key, handler=list_to_dict)",
"def getAllValues(self, keyName):\n self._db._c.execute(\"SELECT \" + keyName + \" FROM \" + self.tableName)\n\n return [ col[0] for col in self._db._c.fetchall() ]",
"async def get_all(self, key: datastore.Key) -> RV:\n\t\treturn await (await self.get(key)).collect() # type: ignore[return-value]",
"def topkList(self, key):\n \n return self.execute_command(self.TOPK_LIST, key)",
"def get_key_values(self):\n return self.key_values",
"def itervalues(self, key=None):\n if key != None:\n vals = self.get(key)\n if vals != None:\n for val in vals:\n yield val\n else:\n for key in self.iterkeys():\n vals = self.get(key)\n for val in vals:\n yield val",
"def values(self):\n return [self[key] for key in self.keys()]",
"def values(self):\n return [self[key] for key in self.keys()]",
"def values(self):\n return [self[key] for key in self.keys()]",
"def list_ids (self, key):\n\n list_of_key_values = [str(x[key]) for x in self.result]\n\n self.result = list(dict.fromkeys([re.findall(r'\\b\\d+\\b', x)[0] for x in list_of_key_values if len(re.findall(r'\\b\\d+\\b', x)) !=0]))\n\n return self",
"def keyValues(self): # real signature unknown; restored from __doc__\n return []",
"def values(self):\n return [self[name] for name in self.keys()]",
"def values(self):\n return [self[k] for k in self.keys()]",
"def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list",
"def list(self, key):\n\n if \"~\" in key or key == \"title\":\n v = self(key, connector=\"\\n\")\n if v == \"\": return []\n else: return v.split(\"\\n\")\n elif key in self: return self[key].split(\"\\n\")\n else: return []",
"def getlist(self, key):\n \n value = self.get(key, [])\n if value is None or isinstance(value, (list, tuple)):\n return value\n else:\n return [value]",
"def list_key_values_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection for matching incident_id\n return_json = [] # type: ignore\n context = []\n found = False\n cursor = COLLECTION.find({}, {'_id': False})\n if cursor is None:\n # Collection doesn't exist - thus no records\n return_json = None # type: ignore\n else:\n # Iterate, collecting any name/value pairs associated with the incident\n for i in cursor:\n if incident in i:\n found = True\n return_json.append({\n 'Key': i[incident]['key'],\n 'Value': i[incident]['value']\n })\n context.append({\n 'Incident': incident,\n 'Key': i[incident]['key'],\n 'Value': i[incident]['value']\n })\n\n if not found:\n # Means no records were found with that incident_id\n # Discard empty return_json\n return_json = None # type: ignore\n\n human_readable = tableToMarkdown(f'The key/value paires stored in incident {incident}', return_json)\n ec = {'MongoDB.Incident(val.Key === obj.Key)': context}\n # Return a useful status\n return human_readable, ec, {}",
"def values(self):\n x = []\n for k in list(self.keys()):\n x.append(self[k])\n return x",
"def list(self):\n for key, value in self.languages.iteritems():\n print key, value",
"def _valueList(self, key, year, month=None, day=None, hour=None, status='1', metaData=None):\n vals = [self.timestamp]\n if (key is not None):\n vals.append(\"'{}'\".format(key))\n if (year is not None):\n vals.append(str(year))\n if (month is not None):\n vals.append(str(month))\n if (day is not None):\n vals.append(str(day))\n if (hour is not None):\n vals.append(str(hour))\n if (status is not None):\n vals.append(str(status))\n if (metaData is not None):\n vals.append(\"'{}'\".format(meta))\n\n return '(' + ', '.join(vals) + ')'",
"def obtain(self, key):\n if key in self:\n vals = self[key]\n else:\n vals = []\n dict.__setitem__(self, key, vals)\n return vals",
"def values(self):\r\n return [self[k] for k in self]",
"def gets(self, key):\n result = self._get_raw_input()[key]\n if isinstance(result, list):\n return deepcopy(result)\n return [result]"
]
| [
"0.7794685",
"0.7193316",
"0.71820873",
"0.71327794",
"0.6970553",
"0.6915631",
"0.6872249",
"0.67926764",
"0.6746791",
"0.6683304",
"0.66489327",
"0.6641336",
"0.6589233",
"0.6562483",
"0.6562483",
"0.6562483",
"0.6554378",
"0.65395224",
"0.6538315",
"0.6502347",
"0.6489043",
"0.647228",
"0.6465787",
"0.6429486",
"0.6413733",
"0.6399147",
"0.6394544",
"0.63781947",
"0.6371611",
"0.63409764"
]
| 0.8103035 | 0 |
Returns the Collatz sequence beginning with `n`. It is conjectured that Collatz sequences all end with `1`. Calls `collatz_step` at each iteration. | def collatz(n):
    sequence = []
    while n != 1:
        if n > 1:
            sequence = sequence + [n]
            n = collatz_step(n)
        elif n < 1:
            n = collatz_step(n)
            sequence = sequence + [n]
            break
    if n == 1:
        sequence = sequence + [n]
    return sequence | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collatz(n):\n if n==1:\n return [n]\n \n if n>1:\n seq = [n]\n while n>1:\n n = collatz_step(n)\n seq.append(n)\n\n if seq[-1]==1:\n return seq",
"def collatz_step(n):\n if n<1:\n raise ValueError('Input must be greater than 0')\n elif n==1:\n return 1\n elif n%2==0:\n return (n/2)\n elif n%2==1:\n return(3*n+1)",
"def collatz(n, out=None):\n if out is None:\n out = []\n if n in out:\n return out+[n]\n else:\n out.append(n)\n if n%2 == 0:\n return collatz(n//2, out)\n else:\n return collatz(n*3+1, out)",
"def collatz_step(n):\n try:\n while n >= 1:\n if n == 1:\n n = 1\n elif n %2 == 0:\n n = n / 2\n elif n % 2 != 0:\n n = 3 * n + 1\n return n\n except ValueError:\n raise ValueError",
"def collatz(n: int) -> int:\n # Yeild the input so that the sequence starts with the seed\n yield n\n\n # This probably won't loop infinitely but math doesn't really know\n # as long as the current n is not the end of the sequence then\n # return the next item\n while n != 1:\n # If n is even\n if n % 2 == 0:\n # Return n / 2\n n = n / 2\n\n # If n is odd\n else:\n # Return 3 * n + 1. 3 * any number will always be odd\n # so 3n + 1 is always even so we can skip a step by just\n # returning the following number and skipping all odds this\n # way\n n = (3 * n + 1) / 2\n\n # Cast the result to an integer and yield it\n yield int(n)",
"def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1",
"def collatz(start):\n n = start\n\n collatz_sequence = [n]\n\n while global.collatz_sequences.key().contains(n):\n if n % 2 == 0:\n n = n // 2\n else:\n n = 3 * n + 1\n\n collatz_sequence.append(n)\n\n global.collatz_sequences[]\n\n return collatz_sequence",
"def collatz(n):\n if (check_argument(n) == False):\n return\n else:\n conjecture(n)",
"def collatz(n):\n iterCount = 0\n while(n != 1):\n if(n & 1):\n n = 3 * n + 1\n else:\n n //= 2\n iterCount += 1\n return iterCount",
"def collatz_sequence(seed):\n n = seed\n yield n\n while True:\n n = collatz(n)\n yield n\n if n == 1:\n break",
"def collatz(n):\n memo = dict()\n def collatz_recur(m):\n if m == 1:\n return 1\n # check in the memo\n if m in memo:\n return memo[m]\n\n if m % 2: # odd\n val = 3 * m + 1\n memo[m] = 1 + collatz_recur(val)\n return memo[m]\n else:\n val = m / 2\n memo[m] = 1 + collatz_recur(val)\n return memo[m]\n\n for i in xrange(2,n):\n collatz_recur(i)\n\n return memo",
"def collatz(start):\n counter = 1\n n = start\n while n != 2:\n if n % 2 == 0:\n n /= 2\n else:\n n = (n * 3) + 1\n counter += 1\n\n counter += 1\n return counter",
"def collatz_cycle(n):\n cycles = 1\n assert n > 0\n while(n > 1):\n\tif n < cache_size and cycle_table[n] != 0:\n\t cycles = cycles + cycle_table[n] - 1\n\t break\n\tif n % 2 == 0:\n\t n = n / 2\n\t cycles+= 1\n\telse:\n\t n = n + (n >> 1) + 1\n\t cycles+=2\n assert cycles > 0\n return cycles",
"def collatz_sequence_len(n: int) -> int:\n result = 1\n while n != 1:\n if n % 2 == 0:\n n //= 2\n else:\n n = 3 * n + 1\n result += 1\n return result",
"def get_odd_collatz(n):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"12\")\n # END OF SOLUTION",
"def collatz_sequence_term(seed, k):\n if k == 1:\n return seed\n a = seed\n for i in range(k - 1):\n a = collatz(a)\n if a == 1:\n return None if k > i + 2 else a\n return a",
"def collatz(a):\n # Initialze the sequence with the fist value a.\n x_list = [a]\n # Continue computing values in the sequence until we reach 1.\n while x_list[-1] != 1:\n # Check if the last element in the list is even\n if x_list[-1] % 2 == 0:\n # Compute and append the new values\n x_list.append(x_list[-1] // 2)\n else:\n # Compute and append the new values\n x_list.append(3*x_list[-1] + 1)\n return x_list",
"def collatz(value):\r\n assert value >= 1\r\n if value % 2 == 0:\r\n return value/2\r\n else:\r\n return 3 * value + 1",
"def Collatz(seed):\n \n sq = []\n \n while True:\n next_t = next_term(seed)\n \n if next_t == 1:\n sq.append(next_t)\n break\n \n else:\n sq.append(seed)\n seed = next_t\n \n return sq",
"def lucas_iter(n):\n f = []\n for x in range(n + 1):\n if x == 0:\n f.append(2)\n elif x == 1:\n f.append(1)\n else:\n f.append(f[-1] + f[-2])\n return f[-1]",
"def Crn(r, n):\n ret = 1\n if(r>n/2):\n return Crn(n-r, n)\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret/fact(r)",
"def no_math_solution(n: int):\n lookup = {1: 1}\n # Calculate the chain's length of all Collatz sequences started below n\n for i in range(2, n):\n cal_chain_length(i, lookup)\n # Find the longest chain\n longestChain = 1\n for i in range(2, n):\n if (lookup[i] > lookup[longestChain]):\n longestChain = i\n\n return longestChain",
"def zn_star(n):\n return [x for x in range(1, n) if coprime(x, n)]",
"def generate_diagonal_factors(n):\n\tfactors = np.array([1, -1]) # Initialize the diag terms with the diagonal of the Z Pauli matrix\n\tfor _ in range(n - 1): # Iterate n - 1 times\n\t\tfactors = np.hstack([factors, factors * -1]) # Append the same array multiplied by -1\n\treturn factors",
"def crea_cubi(n):\n lst = []\n for i in range(1, n + 1):\n lst = lst + [i**3]\n return lst",
"def _getZc(n):\n # An extra trial is required for low counts, due to the fact\n # that there is higher variance in the calculated deviation.\n extra = 1\n\n vFree = n - 1\n zc = 1.96\n if vFree > 15:\n # Normal distribution, and enough that we do not need to\n # have an extra trial.\n extra = 0\n elif vFree >= 10:\n # Here and below is a t-distribution; note that this comes\n # from the 97.5% column in Table 3 of Driels et al., since\n # those coefficients don't include the tail\n zc = 2.23\n elif vFree >= 5:\n zc = 2.57\n elif vFree >= 4:\n zc = 2.78\n elif vFree >= 3:\n zc = 3.18\n elif vFree >= 2:\n zc = 4.30\n elif vFree >= 1:\n zc = 12.71\n return zc, extra",
"def zernike_num_coeff(n):\n \n\tif not (n>=0):\n\t\tprint('Input parameter must be >= 0')\n\t\traise AssertionError() \n \n\treturn sum(xrange(n+1)) + n+1",
"def cube(n):\n result = [num*num*num for num in range(n)]\n\n return result[1:]",
"def get_min_steps_mem(n):\n\n steps = np.full(n+1, -1)\n return get_min_steps(n, steps)",
"def colony(N: int) -> np.ndarray:\n M = np.zeros((N, N))\n n = (N-1)//2\n M[n, n] = 1 # a bacteria at the center then n reproductions\n return binary_dilation(M, iterations = n).astype(int)"
]
| [
"0.83830476",
"0.7909489",
"0.7577504",
"0.7558608",
"0.7475624",
"0.7360087",
"0.7325965",
"0.7270145",
"0.7249773",
"0.72171444",
"0.71474427",
"0.67161584",
"0.6672091",
"0.6551856",
"0.5956467",
"0.59545493",
"0.58492994",
"0.58025",
"0.5543858",
"0.55369943",
"0.5479883",
"0.5423912",
"0.53941405",
"0.53243005",
"0.52824545",
"0.52694446",
"0.5258716",
"0.5250639",
"0.51665485",
"0.5123041"
]
| 0.80794156 | 1 |
Return a patch to update the port's physical network. | def _get_physnet_patch(self, physnet, port):
    if (not CONF.processing.overwrite_existing
            or port.physical_network == physnet):
        return
    return {'op': 'add', 'path': '/physical_network', 'value': physnet} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_patch_port_group(self):\n pass",
"def get_physnet(self, port, iface_name, introspection_data):",
"def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)",
"def test_networking_project_network_update(self):\n pass",
"def before_update(self, introspection_data, node_info, **kwargs):\n inventory = utils.get_inventory(introspection_data)\n\n ironic_ports = node_info.ports()\n\n for iface in inventory['interfaces']:\n if iface['name'] not in introspection_data['all_interfaces']:\n continue\n\n mac_address = iface['mac_address']\n port = ironic_ports.get(mac_address)\n if not port:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, matching port not found in Ironic.\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n # Determine the physical network for this port.\n # Port not touched in here.\n physnet = self.get_physnet(port, iface['name'], introspection_data)\n if physnet is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no physical network mapping\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n patch = self._get_physnet_patch(physnet, port)\n if patch is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no update required\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n try:\n node_info.patch_port(port, [patch])\n except exceptions.BadRequestException as e:\n LOG.warning(\"Failed to update port %(uuid)s: %(error)s\",\n {'uuid': port.id, 'error': e},\n node_info=node_info)",
"def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id",
"def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)",
"def _update_port_ext(self, original_port, req_port,\n session=None):\n\n commit = self._get_port_attr(req_port, \"commit\")\n trunked = self._get_port_attr(req_port, \"trunked\")\n hardware_id = self._get_port_attr(req_port, \"switch:hardware_id\")\n\n # we cannot allow the trunked flag to change if committed.\n if trunked is not None and (original_port[\"trunked\"] != trunked):\n if original_port[\"commit\"] and (commit is not False):\n msg = \"cannot update trunked flag when commit=true\"\n raise exc.InvalidInput(error_message=msg)\n\n port_ext = db.update_port_ext(\n port_id=original_port[\"id\"],\n trunked=trunked,\n commit=commit,\n hardware_id=hardware_id,\n session=session)\n return port_ext.as_dict()",
"def test_update_network_external_ports(self):\n policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]\n self.ports[2].qos_policy_id = self.qos_policies[0].id\n self.ports[2].update()\n port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',\n profile={}, vif_type='',\n vnic_type=portbindings_api.VNIC_DIRECT).create()\n with mock.patch.object(self.qos_driver._driver._nb_idl,\n 'get_lswitch_port') as mock_lsp:\n mock_lsp.side_effect = [\n mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),\n mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network, reset=True)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(\n mock.ANY, self.ports[0].id, self.ports[0].network_id,\n qos_policy_id, None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()",
"def update(self, oid, name, network_id, fixed_ips, host_id=None, \n profile=None, vnic_type=None, device_owner=None, device_id=None,\n security_groups=None):\n data = {\n \"port\": {\n }\n }\n if network_id is not None:\n data['port']['network_id'] = network_id\n if name is not None:\n data['port']['name'] = name\n if fixed_ips is not None:\n data['port']['fixed_ips'] = fixed_ips\n if host_id is not None:\n data['port']['binding:host_id'] = host_id\n if profile is not None:\n data['port']['binding:profile'] = profile\n if host_id is not None:\n data['port']['binding:vnic_type'] = vnic_type\n if device_owner is not None:\n data['port']['device_owner'] = device_owner\n if device_id is not None:\n data['port']['device_id'] = device_id\n if security_groups is not None:\n data['port']['security_groups'] = security_groups\n \n path = '%s/ports/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack port: %s' % truncate(res))\n return res[0]['port']",
"def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)",
"def patch(self, uuid, patch):\n if self._from_chassis:\n raise exception.OperationNotPermitted\n\n rpc_node = objects.Node.get_by_uuid(pecan.request.context, uuid)\n\n # Check if node is transitioning state\n if rpc_node['target_power_state'] or \\\n rpc_node['target_provision_state']:\n msg = _(\"Node %s can not be updated while a state transition\"\n \"is in progress.\")\n raise wsme.exc.ClientSideError(msg % uuid, status_code=409)\n\n try:\n node = Node(**jsonpatch.apply_patch(rpc_node.as_dict(),\n jsonpatch.JsonPatch(patch)))\n except jsonpatch.JsonPatchException as e:\n LOG.exception(e)\n raise wsme.exc.ClientSideError(_(\"Patching Error: %s\") % e)\n\n # Update only the fields that have changed\n for field in objects.Node.fields:\n if rpc_node[field] != getattr(node, field):\n rpc_node[field] = getattr(node, field)\n\n try:\n new_node = pecan.request.rpcapi.update_node(pecan.request.context,\n rpc_node)\n except Exception as e:\n with excutils.save_and_reraise_exception():\n LOG.exception(e)\n\n return Node.convert_with_links(new_node)",
"def test_patch_cluster_network(self):\n pass",
"def patch_ports(cls, pair):\n if pair[0] in ('remote_port', 'local_port'):\n return pair[0], pair[1] and int(pair[1]) or None\n return pair",
"def test_update_port_group(self):\n pass",
"def update_net(self) -> None:\n self.units.update_net()",
"def fusion_api_patch_fcoe_network(self, body=None, uri=None, api=None, headers=None):\n return self.fcoe_network.patch(body, uri, api, headers)",
"def construct_patched(self, patched_layer):\n activation_layers = self.network.layers\n value_layers = activation_layers.copy()\n value_layers[self.layer_index] = patched_layer\n return MaskingNetwork(activation_layers, value_layers)",
"def _update_external_port(openstack_resource):\n # Get the external port using the resource id provided via port node\n external_port = openstack_resource.get()\n # Check if the current port node has allowed_address_pairs as part of\n # resource_config\n addresses_to_add = openstack_resource.config.get('allowed_address_pairs')\n if addresses_to_add:\n old_addresses = external_port.get('allowed_address_pairs') or []\n\n # Get the old ips from the each pair\n old_ips = \\\n [\n old_address['ip_address']\n for old_address\n in old_addresses if old_address.get('ip_address')\n ]\n # Get the ips need to be added to the external port\n ips_to_add = \\\n [\n address_to_add['ip_address']\n for address_to_add\n in addresses_to_add if address_to_add.get('ip_address')\n ]\n\n # Check if there are a common ips between old ips and the one we\n # should add via node\n common_ips = set(old_ips) & set(ips_to_add)\n if common_ips:\n raise NonRecoverableError(\n 'Ips {0} are already assigned to {1}'\n ''.format(common_ips, external_port.id))\n\n # Update port for allowed paris\n updated_port = openstack_resource.update(\n {'allowed_address_pairs': addresses_to_add})\n # Update runtime properties\n update_runtime_properties(\n {\n 'fixed_ips': updated_port.fixed_ips,\n 'mac_address': updated_port.mac_address,\n 'allowed_address_pairs': updated_port.allowed_address_pairs,\n }\n )\n\n # Get the networks from relationships if they are existed\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n if rel_network_id:\n port = openstack_resource.get()\n if port['network_id'] != rel_network_id:\n raise NonRecoverableError(\n 'Expected external resources port {0} and network {1} '\n 'to be connected'.format(port.id, rel_network_id))",
"def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net",
"def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))",
"def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def port_nic():",
"def mirror(self):\n return RequirePort(self)",
"def patch(self):\n\t\t\n\t\t# Create tunnels\n\t\t(module, self.tunnel_source) = create_tunnel(self.remote_source_info)\n\t\tself.modules += [ module ]\n\t\t(module, self.tunnel_sink) = create_tunnel(self.remote_sink_info)\n\t\tself.modules += [ module ]\n\t\t\n\t\t# Connect them to the local devices\n\t\tself.modules = self.modules + [\n\t\t\tadd_loopback(self.tunnel_source, self.local_sink),\n\t\t\tadd_loopback(self.local_source, self.tunnel_sink)\n\t\t]",
"def update_port(self, context, port_id, port):\n LOG.debug(_(\"QuantumRestProxyV2: update_port() called\"))\n\n # Validate Args\n orig_port = super(QuantumRestProxyV2, self).get_port(context, port_id)\n\n # Update DB\n new_port = super(QuantumRestProxyV2, self).update_port(context,\n port_id, port)\n\n # update on networl ctrl\n try:\n resource = PORTS_PATH % (orig_port[\"tenant_id\"],\n orig_port[\"network_id\"], port_id)\n data = {\"port\": new_port, }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n\n if new_port.get(\"device_id\") != orig_port.get(\"device_id\"):\n if orig_port.get(\"device_id\"):\n self._unplug_interface(context, orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n orig_port[\"id\"])\n if new_port.get(\"device_id\"):\n self._plug_interface(context, new_port[\"tenant_id\"],\n new_port[\"network_id\"],\n new_port[\"id\"], new_port[\"id\"] + \"00\")\n\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to create remote port: \"\n \"%s\"), e.message)\n # reset port to original state\n super(QuantumRestProxyV2, self).update_port(context, port_id,\n orig_port)\n raise\n\n # return new_port\n return new_port",
"def getPeerToPeerNetwork(self):\r\n raise NotImplementedError()",
"def _update_port_config(port_config):\n\n # Update network config for port node\n _update_network_config(port_config)\n\n # Update network fixed ips config\n _update_fixed_ips_config(port_config)\n\n # Update security groups config for port node\n _update_security_groups_config(port_config)",
"def test_patch_port_sub_group(self):\n pass",
"def get_network_config(self, service, update):\n if self.is_vm_image() and not update:\n return None\n cs = self.virtual_environment[self.T_CS]\n nc = self.virtual_environment[self.T_NC]\n network_config = ConfigurationSet()\n network_config.configuration_set_type = nc[self.T_NC_CST]\n input_endpoints = nc[self.T_NC_IE]\n # avoid duplicate endpoint under same cloud service\n assigned_endpoints = service.get_assigned_endpoints(cs[self.T_CS_SN])\n endpoints = map(lambda i: i[self.T_NC_IE_LP], input_endpoints)\n unassigned_endpoints = map(str, find_unassigned_endpoints(endpoints, assigned_endpoints))\n map(lambda (i, u): i.update({self.T_NC_IE_PO: u}), zip(input_endpoints, unassigned_endpoints))\n for input_endpoint in input_endpoints:\n network_config.input_endpoints.input_endpoints.append(\n ConfigurationSetInputEndpoint(\n input_endpoint[self.T_NC_IE_N],\n input_endpoint[self.T_NC_IE_PR],\n input_endpoint[self.T_NC_IE_PO],\n input_endpoint[self.T_NC_IE_LP]\n )\n )\n return network_config"
]
| [
"0.58695257",
"0.5774932",
"0.5773838",
"0.5744404",
"0.57315767",
"0.5719085",
"0.56026155",
"0.5579877",
"0.5514237",
"0.5504025",
"0.54934007",
"0.5472406",
"0.54567254",
"0.539222",
"0.5383555",
"0.53722364",
"0.5357743",
"0.53411263",
"0.531233",
"0.5279417",
"0.5269543",
"0.52397794",
"0.52288187",
"0.52156156",
"0.51914465",
"0.5171283",
"0.5163351",
"0.51610327",
"0.514498",
"0.51411647"
]
| 0.73884064 | 0 |
Process introspection data and patch port physical network. | def before_update(self, introspection_data, node_info, **kwargs):
    inventory = utils.get_inventory(introspection_data)
    ironic_ports = node_info.ports()
    for iface in inventory['interfaces']:
        if iface['name'] not in introspection_data['all_interfaces']:
            continue
        mac_address = iface['mac_address']
        port = ironic_ports.get(mac_address)
        if not port:
            LOG.debug("Skipping physical network processing for interface "
                      "%s, matching port not found in Ironic.",
                      mac_address,
                      node_info=node_info, data=introspection_data)
            continue
        # Determine the physical network for this port.
        # Port not touched in here.
        physnet = self.get_physnet(port, iface['name'], introspection_data)
        if physnet is None:
            LOG.debug("Skipping physical network processing for interface "
                      "%s, no physical network mapping",
                      mac_address,
                      node_info=node_info, data=introspection_data)
            continue
        patch = self._get_physnet_patch(physnet, port)
        if patch is None:
            LOG.debug("Skipping physical network processing for interface "
                      "%s, no update required",
                      mac_address,
                      node_info=node_info, data=introspection_data)
            continue
        try:
            node_info.patch_port(port, [patch])
        except exceptions.BadRequestException as e:
            LOG.warning("Failed to update port %(uuid)s: %(error)s",
                        {'uuid': port.id, 'error': e},
                        node_info=node_info) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_physnet(self, port, iface_name, introspection_data):",
"def treat_devices_added_or_updated(self, details):\n device = details['device']\n LOG.debug(\"Processing port: %s\", device)\n # REVISIT(ivar): this is not a public facing API, we will move to\n # the right method once the redesign is complete.\n port = self.bridge_manager.get_vif_port_by_id(device)\n if port:\n gbp_details = details.get('gbp_details')\n trunk_details = details.get('trunk_details')\n neutron_details = details.get('neutron_details')\n if gbp_details and 'port_id' not in gbp_details:\n # The port is dead\n details.pop('port_id', None)\n if (gbp_details and gbp_details.get('host') and\n gbp_details['host'] != self.host):\n self.port_unbound(device)\n return False\n elif neutron_details and 'port_id' in neutron_details:\n LOG.info(\"Port %(device)s updated. Details: %(details)s\",\n {'device': device, 'details': details})\n # Inject GBP/Trunk details\n port.gbp_details = gbp_details\n port.trunk_details = trunk_details\n self.treat_vif_port(port, neutron_details['port_id'],\n neutron_details['network_id'],\n neutron_details['network_type'],\n neutron_details['physical_network'],\n neutron_details['admin_state_up'],\n neutron_details['fixed_ips'],\n neutron_details['device_owner'],\n neutron_details['segmentation_id'])\n # update plugin about port status\n if neutron_details.get('admin_state_up'):\n LOG.debug(\"Setting status for %s to UP\", device)\n self.plugin_rpc.update_device_up(\n self.context, device, self.agent_id, self.host)\n else:\n LOG.debug(\"Setting status for %s to DOWN\", device)\n self.plugin_rpc.update_device_down(\n self.context, device, self.agent_id, self.host)\n LOG.info(\"Configuration for device %s completed.\",\n device)\n else:\n LOG.warn(\"Device %s not defined on plugin\", device)\n if port and port.ofport != -1:\n self.port_unbound(port.vif_id)\n return False\n else:\n # The port disappeared and cannot be processed\n LOG.info(\"Port %s was not found on the integration bridge \"\n \"and will therefore not be processed\", device)\n self.port_unbound(device)\n return False\n return True",
"def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()",
"def process_update_port(self, context, data, result):\n\n orginal_exten = copy.deepcopy(result)\n # Process extension data\n self._find_port_dict_extensions(\n result, None, session=context.session)\n\n port_ext = self._update_port_ext(\n result, data, session=context.session)\n switchports = self._update_switchports(\n result, data, session=context.session)\n self._find_port_dict_extensions(\n result, None, port_ext=port_ext,\n switchports=switchports, session=context.session)\n\n # We only want to commit on a state change\n if orginal_exten.get(\"commit\") != result[\"commit\"]:\n # If we are transitioning to active, validate\n if not orginal_exten.get(\"commit\") and result[\"commit\"]:\n self._validate_port_can_commit(\n result, None, session=context.session)",
"def _get_port_info(self, context):\n port = {}\n data = dict()\n old_host_name = ''\n\n if context.original is not None:\n old_host_name = context.original.get('binding:host_id', '')\n\n context = context._port\n port_id = str(context.get('id', ''))\n data['device_owner'] = str(context.get('device_owner', ''))\n # don't create port \"network:floating_ip\n if data['device_owner'] == \"network:floatingip\":\n return None\n data['host_name'] = str(context.get('binding:host_id', ''))\n if len(context.get('fixed_ips', [])) > 0:\n data['subnet_id'] = str(context['fixed_ips'][0].get('subnet_id', ''))\n data['ip_address'] = str(context['fixed_ips'][0].get('ip_address', ''))\n data['device_id'] = str(context.get('device_id', ''))\n data['mac'] = str(context.get('mac_address', ''))\n data['network_id'] = str(context.get('network_id', ''))\n data['admin_state_up'] = context.get('admin_state_up', '')\n data['port_id'] = port_id\n data['tenant_id'] = str(context.get('tenant_id', ''))\n\n context_str = json.dumps(data, sort_keys=True)\n data['md5sum'] = hashlib.md5(context_str).hexdigest()\n\n data['field_not_in_md5'] = ['md5sum']\n data['field_not_in_md5'].append('old_host_name')\n data['old_host_name'] = old_host_name\n\n if data['port_id'] == '':\n LOG.error(_('Get creating port information failed'))\n return None\n\n if port_id != '':\n port[port_id] = data\n return port",
"def _send_all_data(self):\n admin_context = qcontext.get_admin_context()\n networks = {}\n ports = {}\n\n all_networks = super(QuantumRestProxyV2,\n self).get_networks(admin_context) or []\n for net in all_networks:\n networks[net.get('id')] = {\n 'id': net.get('id'),\n 'name': net.get('name'),\n 'op-status': net.get('admin_state_up'),\n }\n\n subnets = net.get('subnets', [])\n for subnet_id in subnets:\n subnet = self.get_subnet(admin_context, subnet_id)\n gateway_ip = subnet.get('gateway_ip')\n if gateway_ip:\n # FIX: For backward compatibility with wire protocol\n networks[net.get('id')]['gateway'] = gateway_ip\n\n ports = []\n net_filter = {'network_id': [net.get('id')]}\n net_ports = super(QuantumRestProxyV2,\n self).get_ports(admin_context,\n filters=net_filter) or []\n for port in net_ports:\n port_details = {\n 'id': port.get('id'),\n 'attachment': {\n 'id': port.get('id') + '00',\n 'mac': port.get('mac_address'),\n },\n 'state': port.get('status'),\n 'op-status': port.get('admin_state_up'),\n 'mac': None\n }\n ports.append(port_details)\n networks[net.get('id')]['ports'] = ports\n try:\n resource = '/topology'\n data = {\n 'networks': networks,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n return ret\n except RemoteRestError as e:\n LOG.error(_('QuantumRestProxy: Unable to update remote network: '\n '%s'), e.message)\n raise",
"def _update_port_config(port_config):\n\n # Update network config for port node\n _update_network_config(port_config)\n\n # Update network fixed ips config\n _update_fixed_ips_config(port_config)\n\n # Update security groups config for port node\n _update_security_groups_config(port_config)",
"def process(self, agent):\n self._process_internal_ports()\n self.process_external(agent)\n # Process static routes for router,using command: ip router xxx\n self.routes_updated()\n\n # Update ex_gw_port and enable_snat on the router info cache\n self.ex_gw_port = self.get_ex_gw_port()\n self.snat_ports = self.router.get(\n l3_constants.SNAT_ROUTER_INTF_KEY, [])\n self.enable_snat = self.router.get('enable_snat')",
"def filter_update_port_attributes(cls, port, context):\n cls.add_security_groups(port, context)\n try_del(port, ['network_id', 'id', 'status', 'mac_address',\n 'tenant_id', 'fixed_ips'])",
"def _get_physnet_patch(self, physnet, port):\n if (not CONF.processing.overwrite_existing\n or port.physical_network == physnet):\n return\n return {'op': 'add', 'path': '/physical_network', 'value': physnet}",
"def _plug_interface(self, context, tenant_id, net_id, port_id,\n remote_interface_id):\n LOG.debug(_(\"QuantumRestProxyV2: _plug_interface() called\"))\n\n # update attachment on network controller\n try:\n port = super(QuantumRestProxyV2, self).get_port(context, port_id)\n mac = port[\"mac_address\"]\n\n for ip in port[\"fixed_ips\"]:\n if ip.get(\"subnet_id\") is not None:\n subnet = super(QuantumRestProxyV2, self).get_subnet(\n context, ip[\"subnet_id\"])\n gateway = subnet.get(\"gateway_ip\")\n if gateway is not None:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\"network\":\n {\"id\": net_id,\n \"gateway\": gateway,\n }\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n\n if mac is not None:\n resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)\n data = {\"attachment\":\n {\"id\": remote_interface_id,\n \"mac\": mac,\n }\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to update remote network: \"\n \"%s\"), e.message)\n raise",
"def patch(self):\n\t\t\n\t\t# Create tunnels\n\t\t(module, self.tunnel_source) = create_tunnel(self.remote_source_info)\n\t\tself.modules += [ module ]\n\t\t(module, self.tunnel_sink) = create_tunnel(self.remote_sink_info)\n\t\tself.modules += [ module ]\n\t\t\n\t\t# Connect them to the local devices\n\t\tself.modules = self.modules + [\n\t\t\tadd_loopback(self.tunnel_source, self.local_sink),\n\t\t\tadd_loopback(self.local_source, self.tunnel_sink)\n\t\t]",
"def test_patch_port_group(self):\n pass",
"def _send_all_data(self):\n admin_context = qcontext.get_admin_context()\n networks = []\n routers = []\n\n all_networks = super(NeutronRestProxyV2,\n self).get_networks(admin_context) or []\n for net in all_networks:\n mapped_network = self._get_mapped_network_with_subnets(net)\n net_fl_ips = self._get_network_with_floatingips(mapped_network)\n\n ports = []\n net_filter = {'network_id': [net.get('id')]}\n net_ports = super(NeutronRestProxyV2,\n self).get_ports(admin_context,\n filters=net_filter) or []\n for port in net_ports:\n mapped_port = self._map_state_and_status(port)\n mapped_port['attachment'] = {\n 'id': port.get('device_id'),\n 'mac': port.get('mac_address'),\n }\n ports.append(mapped_port)\n net_fl_ips['ports'] = ports\n\n networks.append(net_fl_ips)\n\n all_routers = super(NeutronRestProxyV2,\n self).get_routers(admin_context) or []\n for router in all_routers:\n interfaces = []\n mapped_router = self._map_state_and_status(router)\n router_filter = {\n 'device_owner': [\"network:router_interface\"],\n 'device_id': [router.get('id')]\n }\n router_ports = super(NeutronRestProxyV2,\n self).get_ports(admin_context,\n filters=router_filter) or []\n for port in router_ports:\n net_id = port.get('network_id')\n subnet_id = port['fixed_ips'][0]['subnet_id']\n intf_details = self._get_router_intf_details(admin_context,\n net_id,\n subnet_id)\n interfaces.append(intf_details)\n mapped_router['interfaces'] = interfaces\n\n routers.append(mapped_router)\n\n resource = '/topology'\n data = {\n 'networks': networks,\n 'routers': routers,\n }\n errstr = _(\"Unable to update remote topology: %s\")\n return self.servers.rest_action('PUT', resource, data, errstr)",
"def setup_logical_port_connectivity(self, context, port_db):\n pass",
"def _process(proc_data: Dict) -> Dict:\n root_int_list = {'pos', 'flags', 'mnt_id', 'ino', 'clockid', 'ticks',\n 'settime flags', 'size', 'count'}\n epoll_int_list = {'tfd', 'pos'}\n inotify_int_list = {'wd'}\n\n for key, val in proc_data.items():\n if key in root_int_list:\n proc_data[key] = int(val)\n\n if 'epoll' in proc_data:\n for key, val in proc_data['epoll'].items():\n if key in epoll_int_list:\n proc_data['epoll'][key] = int(val)\n\n if 'inotify' in proc_data:\n for key, val in proc_data['inotify'].items():\n if key in inotify_int_list:\n proc_data['inotify'][key] = int(val)\n\n return proc_data",
"def port_nic():",
"def _extract_pid_info(self, connect_info: dict) -> None:\n pid = connect_info.pop('pid', None)\n if pid:\n try:\n self.pid = int(pid)\n except ValueError:\n self.log.warning(f\"pid returned from kernel launcher is not an integer: {pid} - ignoring.\")\n pid = None\n pgid = connect_info.pop('pgid', None)\n if pgid:\n try:\n self.pgid = int(pgid)\n except ValueError:\n self.log.warning(f\"pgid returned from kernel launcher is not an integer: {pgid} - ignoring.\")\n pgid = None\n if pid or pgid: # if either process ids were updated, update the ip as well and don't use local_proc\n self.ip = self.assigned_ip\n if not RemoteProvisionerBase.ip_is_local(self.ip): # only unset local_proc if we're remote\n # FIXME - should we wait prior to unset?\n self.local_proc = None",
"def _update_ips(self):\n self.ip_others = []\n ips = self.mesh.ipaddr()\n self.rloc16 = self.mesh.rloc()\n for line in ips:\n if line.startswith('fd'):\n # Mesh-Local unicast IPv6\n try:\n addr = int(line.split(':')[-1], 16)\n except Exception:\n continue\n if addr == self.rloc16:\n # found RLOC\n # RLOC IPv6 has x:x:x:x:0:ff:fe00:RLOC16\n self.rloc = line\n elif ':0:ff:fe00:' not in line:\n # found Mesh-Local EID\n self.ip_eid = line\n elif line.startswith('fe80'):\n # Link-Local\n self.ip_link = line\n else:\n self.ip_others.append(line)",
"def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)",
"def _preprocess_nics(network_details, network_adapters):\n # Initial checks.\n if not network_adapters:\n raise exception.CloudbaseInitException(\n \"no network adapters available\")\n # Sort VM adapters by name (assuming that those\n # from the context are in correct order).\n # Do this for a better matching by order\n # if hardware address is missing.\n network_adapters = sorted(network_adapters, key=lambda arg: arg[0])\n refined_network_details = [] # store here processed interfaces\n # Check and update every NetworkDetails object.\n total = len(network_adapters)\n for nic in network_details:\n if not isinstance(nic, network_model.NetworkDetails):\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails object {!r}\"\n .format(type(nic))\n )\n # Check requirements.\n final_status = True\n for fields, status in NET_REQUIRE.items():\n if not status:\n continue # skip 'not required' entries\n if not isinstance(fields, tuple):\n fields = (fields,)\n final_status = any([getattr(nic, field) for field in fields])\n if not final_status:\n break\n address, netmask = nic.address, nic.netmask\n if final_status:\n # Additional check for info version.\n if not (address and netmask):\n final_status = nic.address6 and nic.netmask6\n if final_status:\n address = address or network.address6_to_4_truncate(\n nic.address6)\n netmask = netmask or network.netmask6_to_4_truncate(\n nic.netmask6)\n if not final_status:\n LOG.error(\"Incomplete NetworkDetails object %s\", nic)\n continue\n # Complete hardware address if missing by selecting\n # the corresponding MAC in terms of naming, then ordering.\n if not nic.mac:\n # By name...\n macs = [adapter[1] for adapter in network_adapters\n if adapter[0] == nic.name]\n mac = macs[0] if macs else None\n # ...or by order.\n idx = _name2idx(nic.name)\n if not mac and idx < total:\n mac = network_adapters[idx][1]\n nic = network_model.NetworkDetails(\n nic.name,\n mac,\n address,\n nic.address6,\n netmask,\n nic.netmask6,\n nic.broadcast,\n nic.gateway,\n nic.gateway6,\n nic.dnsnameservers\n )\n refined_network_details.append(nic)\n return refined_network_details",
"def filter_create_port_attributes(cls, port, context):\n cls.add_security_groups(port, context)\n # TODO(kmestery): Converting to uppercase due to ODL bug\n # https://bugs.opendaylight.org/show_bug.cgi?id=477\n port['mac_address'] = port['mac_address'].upper()\n try_del(port, ['status'])",
"def update(self, oid, name, network_id, fixed_ips, host_id=None, \n profile=None, vnic_type=None, device_owner=None, device_id=None,\n security_groups=None):\n data = {\n \"port\": {\n }\n }\n if network_id is not None:\n data['port']['network_id'] = network_id\n if name is not None:\n data['port']['name'] = name\n if fixed_ips is not None:\n data['port']['fixed_ips'] = fixed_ips\n if host_id is not None:\n data['port']['binding:host_id'] = host_id\n if profile is not None:\n data['port']['binding:profile'] = profile\n if host_id is not None:\n data['port']['binding:vnic_type'] = vnic_type\n if device_owner is not None:\n data['port']['device_owner'] = device_owner\n if device_id is not None:\n data['port']['device_id'] = device_id\n if security_groups is not None:\n data['port']['security_groups'] = security_groups\n \n path = '%s/ports/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack port: %s' % truncate(res))\n return res[0]['port']",
"def rr1_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr') \n assign_ports_n5k34()",
"def _validate_ens_net_portsecurity(self, net_data):\n pass",
"def update_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.update_port(port)\n except:\n pass",
"def start_net(self):\n super(FaucetTopoTestBase, self).start_net()\n # Create a dictionary of host information that might be used in a test later on.\n # This makes it easier to retrieve certain information and consolidates it into one\n # location.\n self.host_information = {}\n for host_id, host_name in self.topo.hosts_by_id.items():\n host_obj = self.net.get(host_name)\n vlan = self.host_vlans[host_id]\n ip_interface = ipaddress.ip_interface(self.host_ip_address(host_id, vlan))\n self.set_host_ip(host_obj, ip_interface)\n self.host_information[host_id] = {\n 'host': host_obj,\n 'ip': ip_interface,\n 'mac': host_obj.MAC(),\n 'vlan': vlan,\n 'bond': None,\n 'ports': {}\n }\n # Add information of hosts chosen dpid, port map values\n # TODO: This redoes logic from get_config()\n for i, dpid in enumerate(self.dpids):\n index = 1\n for host_id, links in self.host_links.items():\n if i in links:\n n_links = links.count(i)\n for _ in range(n_links):\n port = self.port_maps[dpid]['port_%d' % index]\n self.host_information[host_id]['ports'].setdefault(dpid, [])\n self.host_information[host_id]['ports'][dpid].append(port)\n index += 1\n # Store faucet vip interfaces\n self.faucet_vips = {}\n for vlan in range(self.n_vlans):\n self.faucet_vips[vlan] = ipaddress.ip_interface(self.faucet_vip(vlan))\n # Setup the linux bonds for LACP connected hosts\n self.setup_lacp_bonds()\n # Add host routes to hosts for inter vlan routing\n self.setup_intervlan_host_routes()",
"def rr2_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n assign_ports_n5k34()",
"def run(self, api, media_type, data, no_of_ports):\n # router id is retrieved with router get command\n # the first router id available is used for the operation\n router_id = self._get_or_create_midonet_router(media_type['router'])\n post_api = \"routers/\" + router_id + \"/\" + api\n # set header with content-type and authentication token\n header = {\"Content-Type\": media_type['port'], \"X-Auth-Token\": \"%s\"\n % AUTH_TOKEN}\n cidr = data[\"networkAddress\"] + '/' + data[\"networkLength\"]\n ip_list = netaddr.IPNetwork(cidr)\n\n for _ in range(no_of_ports):\n # port address is generated randomly in the cidr\n port_address = str(random.choice(ip_list))\n LOG.debug(\"port_address is: %s\" % port_address)\n data[\"portAddress\"] = port_address\n # create port\n self._create_router_port(\"POST\", post_api, header, data)",
"def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()"
]
| [
"0.6093292",
"0.575335",
"0.56939214",
"0.56011117",
"0.5584167",
"0.54978436",
"0.5490232",
"0.5483728",
"0.5449413",
"0.5399004",
"0.53555924",
"0.5347445",
"0.53461397",
"0.5274959",
"0.527264",
"0.52614754",
"0.52535933",
"0.52219194",
"0.52121663",
"0.5172295",
"0.5153004",
"0.5151919",
"0.51359713",
"0.5135327",
"0.51344734",
"0.51209986",
"0.5101217",
"0.5083095",
"0.5078844",
"0.50630575"
]
| 0.7502641 | 0 |
Predicts if a given text param is a tip or not. Returns a boolean value. | def predict(self, text):
    prediction = self.pipeline.predict([text])
    return bool(prediction[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_tip(text):\n\n amount = 0\n if re.search(r'I sent you a \\$[0-9]*\\.00 tip ♥', text):\n amount = re.match(r'I sent you a \\$([0-9]*)\\.00 tip ♥', text).group(1)\n Settings.maybe_print(\"successfully found tip\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n elif re.search(r\"I\\'ve contributed \\$[0-9]*\\.00 to your Campaign\", text):\n amount = re.match(r'I\\'ve contributed \\$([0-9]*)\\.00 to your Campaign', text).group(1)\n Settings.maybe_print(\"successfully found campaign donation\")\n Settings.dev_print(\"amount: {}\".format(amount))\n return True, int(amount)\n return False, int(amount)",
"def is_syllabus(self, text):\n\n return bool(self.pipeline.predict([text])[0])",
"def get_toxicity(self, text, detoxify: detoxify.detoxify.Detoxify) ->dict:\n if text:\n results = detoxify.predict(text)\n #Rounding the results\n return results",
"def predict():\r\n variance=request.args.get('variance')\r\n skewness=request.args.get('skewness')\r\n curtosis=request.args.get('curtosis')\r\n entropy=request.args.get('entropy')\r\n scaling=scaler.transform([[variance,skewness,curtosis,entropy]])\r\n prediction=classifier.predict(scaling)\r\n if prediction[0] == 0:\r\n return 'It is an Authentic Note'\r\n else:\r\n return \"It is a fake note\"",
"def is_tautology(formula: Formula) -> bool:\r\n # A Formula is said to be a tautology if it gets the value True\r\n # in all models.\r\n # Task 2.5a\r\n all_models_local = all_models(list(formula.variables()))\r\n for bool_val in truth_values(formula, all_models_local):\r\n if not bool_val:\r\n return False\r\n return True",
"def is_positive(text) :\n r = requests.post(\"http://text-processing.com/api/sentiment/\", data={'text': text})\n return r.json()['probability']['pos'] > r.json()['probability']['neg']",
"def Truth(text):\n lowered = text.lower()\n if lowered in frozenset(['y', 'yes', 'true']):\n return True\n elif lowered in frozenset(['n', 'no', 'false']):\n return False\n else:\n raise Error('Invalid truth value: %r' % text)",
"def isTrueConstant(self, sentence):",
"def test_predict():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.predict(testing_features)\n assert False # Should be unreachable\n except ValueError:\n pass",
"def classify(some_string, trained_pos, trained_neg):\n pos_probability = get_probability(trained_pos, some_string)\n neg_probability = get_probability(trained_neg, some_string)\n if (pos_probability >= neg_probability):\n return \"positive\"\n elif pos_probability < neg_probability: \n return \"negative\"",
"def profanityCheck(text):\n return predict_prob([text])[0]",
"def classifier(text):\n return random.choice([True, False])",
"def predict_tweet():\n input_text = request.form['input_text']\n tokenized_tweet = tokenizer(input_text)\n logits = model.predict({k: np.array(tokenized_tweet[k])[None] for k in input_names})[0]\n scores = softmax(logits)\n pred = round(100 * scores.flatten()[1], 2)\n # return render_template('index.html', prediction=pred)\n\n exp = LIME_explainer.explain_instance(input_text, predictor, num_features=len(input_text.split()),\n top_labels=1, num_samples=100).as_html()\n\n if pred >= 50:\n return render_template('index.html', exp=exp, prediction=f'Emergency; confidence ({pred}%)')\n else:\n return render_template('index.html', exp=exp, prediction=f'Non-emergency; confidence ({100 - pred}%)')",
"def predict_tweet(tweet):\n text = TextBlob(tweet)\n \n if text.sentiment.polarity > 0:\n response = 'positive'\n elif text.sentiment.polarity == 0:\n response = 'neutral'\n else:\n response = 'negative'\n return response, text.sentiment.polarity, text.sentiment.subjectivity",
"def is_tautology(formula):\r\n values = list(formula.variables())\r\n if len(values) is 0:\r\n return evaluate(formula, {})\r\n for m in all_models(values):\r\n if not evaluate(formula, m):\r\n return False\r\n return True",
"def is_tiprack(self, labware_id: str) -> bool:\n definition = self.get_definition(labware_id)\n return definition.parameters.isTiprack",
"def ask_yes_no(text):\n if text.strip()[0] == 'n' or text.strip()[0] == 'N':\n return False\n else:\n return True",
"def is_tautology(formula: Formula) -> bool:\n # Task 2.5a\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n for val in truth_values(formula, assignment_dict):\n if not val:\n return False\n return True",
"def is_predict_only(self):\n return self.model.is_predict_only",
"def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None",
"def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))",
"def handle_tip_request(author, body, comment):\n\n recipient = get_tip_recipient(comment)\n amount = parse_tip_amount(body=body, botname=helper.botname)\n\n if recipient is None or amount is None:\n reply = \"Nothing interesting happens.\\n\\n*In case you were trying to tip, I didn't understand you.*\"\n elif Decimal(amount) < Decimal(0.0001):\n reply = helper.get_below_threshold_message()\n else:\n tipper_logger.log(f'{author} is sending {recipient} {amount} XMR.')\n generate_wallet_if_doesnt_exist(recipient.lower())\n\n res = tip(sender=author, recipient=recipient, amount=amount)\n\n reply = f'{res[\"response\"]}'\n tipper_logger.log(\"The response is: \" + reply)\n\n if res[\"message\"] is not None:\n helper.praw.redditor(author).message(subject=\"Your tip\", message=f\"Regarding your tip here: {comment.context}\\n\\n\" + res[\"message\"] + get_signature())\n\n helper.praw.comment(str(comment)).reply(reply + get_signature())",
"def prediction(name=None, message=''):",
"def get_tip_sensor(self):\n response = self.__send_and_receive(protocol.GET_TIP_SENSOR)\n value = self.__gen_response_value(response)\n\n if value:\n if \"\".join(value)[1:] == \"0\":\n return True\n else:\n return False\n else:\n return False",
"def classify(self, testInstance):\n # here, since the preceptron is too simple, it just output yes or no\n # instead of 1 vs. n method\n return self.fire(testInstance) > 0",
"def is_retweet(self,text):\n if text[0] == 'rt':\n return True\n else:\n return False",
"def get_text_prediction():\n json = request.get_json()\n print(json)\n if len(json['text']) == 0:\n return jsonify({'error': 'invalid input'})\n\n return jsonify({'you sent this': json['text']})",
"def nltk_condition(stem):\n return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)",
"def predict(self, title):\n \n return self.knn_model.predict(self.target_lang_topics[title])",
"def truth(text):\n lowered = str(text).lower()\n if lowered in frozenset(['y', 'yes', 'true']):\n return True\n elif lowered in frozenset(['n', 'no', 'false']):\n return False\n else:\n raise Error('Invalid truth value: %r' % text)"
]
| [
"0.6787285",
"0.5593779",
"0.55277246",
"0.5444057",
"0.5432679",
"0.53986967",
"0.53676295",
"0.53673977",
"0.5325176",
"0.53108793",
"0.53067994",
"0.5298198",
"0.52494943",
"0.52251226",
"0.52155834",
"0.5207014",
"0.51662445",
"0.51548153",
"0.51505727",
"0.5148154",
"0.513684",
"0.5132067",
"0.5130869",
"0.51176906",
"0.5083591",
"0.5082452",
"0.5081296",
"0.5074224",
"0.507018",
"0.506487"
]
| 0.64676976 | 1 |
Returns the full path for the dataset file. | def dataset_path(cls):
basepath = os.path.dirname(__file__)
filepath = os.path.abspath(
os.path.join(basepath, "..", "datasets", get_env('DATA_FILENAME')))
return filepath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def path(self):\n return self._data_file",
"def dataset_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.DATA_DIR, dataset)",
"def get_filepath(filename):\n return os.path.join(\"datasets\", filename)",
"def dataset_dir(self):\n return self._dataset_dir",
"def dataPath(self):\n return ''",
"def data_path(self):\n raise NotImplementedError",
"def get_data_path():\n return os.getcwd() + \"/data/\"",
"def get_dataset_path(dataset: str = \"MVTec\") -> str:\n # Initially check if `datasets` directory exists locally and look\n # for the `dataset`. This is useful for local testing.\n path = os.path.join(\"./datasets\", dataset)\n\n # For docker deployment or a CI that runs on server, dataset directory\n # may not necessarily be located in the repo. Therefore, check anomalib\n # dataset path environment variable.\n if not os.path.isdir(path):\n path = os.path.join(os.environ[\"ANOMALIB_DATASET_PATH\"], dataset)\n return path",
"def data_filename(self) -> str: # type: ignore[return-value]\n return os.path.abspath(self.name) # type: ignore",
"def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep",
"def _get_dataset_path(self, field):\n return '{0}{1}||{2}'.format(self.path_in_hdf5, field, self.uuid)",
"def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(__file__), __malstor_data_directory__)\n\n abs_data_path = os.path.abspath(path)\n if not os.path.exists(abs_data_path):\n raise project_path_not_found\n\n return abs_data_path",
"def _getDatasetPath(self):\n return self.__dataset_path",
"def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)",
"def data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n return os.path.join(data_dir, filename)",
"def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]",
"def get_dataset_config_path(dataset_dir: str) -> str:\n return os.path.join(dataset_dir, DATASET_CONFIG_NAME)",
"def path(self):\n return self.file_path()",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'vehicles_dataset_v{}'.format(self._version))",
"def get_data_path(file_name=None):\n if file_name is None:\n file_name = \"\"\n return os.path.join(DATA_DIR, file_name)",
"def path(self) -> str:\n return os.path.join(DIR_CACHE_DATASETS, f\"{self.name}.parquet\")",
"def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path",
"def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)",
"def datapath(cls, *fname):\n return osp.join(cls.datadir, *fname)",
"def get_absolute_path(self):\n if self.datafile and self.datafile.storage.exists(self.datafile.path):\n return self.datafile.path\n else:\n return None",
"def file_path(self):\n return self._obs_file()",
"def datafilepath(*filename):\r\n import os\r\n return makepath(os.path.join(base_dir, *filename))",
"def data_full_filename(filename):\n return os.path.join(os.path.dirname(__file__), 'data', filename)",
"def get_data_file():\n this_directory = os.path.dirname(__file__)\n parent_directory = os.path.dirname(this_directory)\n return os.path.join(parent_directory, '_data/fortunes.txt')",
"def data_directory(self):\n\n return self.get_raw(\"data_directory\")"
]
| [
"0.8058205",
"0.80355954",
"0.7945212",
"0.77371156",
"0.7652561",
"0.7644312",
"0.7565171",
"0.75521266",
"0.7542085",
"0.75204843",
"0.7462107",
"0.7460071",
"0.7443824",
"0.7371767",
"0.7316586",
"0.73021555",
"0.72992235",
"0.7249082",
"0.7211426",
"0.7210987",
"0.7207542",
"0.72053516",
"0.7153473",
"0.7142303",
"0.7135742",
"0.7125167",
"0.70981795",
"0.70960194",
"0.707875",
"0.6983411"
]
| 0.87297165 | 0 |
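The dataset_path row above depends on an unshown get_env helper; the following is a minimal, self-contained sketch of the same path-building idea, reading the file name from os.environ instead (the DATA_FILENAME variable and the fallback name are illustrative assumptions, not taken from the row).

import os

def dataset_path_sketch(filename=None):
    # Resolve ../datasets/<filename> relative to this module and return it as an absolute path.
    basepath = os.path.dirname(os.path.abspath(__file__))
    name = filename or os.environ.get("DATA_FILENAME", "dataset.csv")
    return os.path.abspath(os.path.join(basepath, "..", "datasets", name))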
Populate queue with messages containing S3 keys from 'prefix', grouped by the first occurrence of '/' after 'prefix'. It is required that the prefix ends with '/', which means that all the subdirs will be scanned. | def populate_queue_with_subdirs(bucket: str, prefix: str, queue: str) -> None:
# No reason to run the function without scanning subdirs
assert prefix[-1] == "/"
dirs = get_client("s3").list_objects_v2(
Bucket=bucket, Prefix=prefix, Delimiter="/",
)
# Paging is not supported here
assert not dirs["IsTruncated"]
for dir_key in dirs["CommonPrefixes"]:
LOGGER.info(dir_key["Prefix"])
get_client("sqs").send_message(QueueUrl=queue, MessageBody=dir_key["Prefix"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_s3_files(bucket, prefix):\n \n s3 = boto3.client('s3')\n\n if type(prefix) != list:\n prefix = [prefix]\n \n # Loop over prefixes:\n file_list = []\n for p in prefix:\n \n # Load one prefix:\n response = s3.list_objects_v2(Bucket=bucket, Prefix=p)\n if response['KeyCount'] > 0:\n file_list = file_list + [d['Key'] for d in response['Contents']]\n while response['IsTruncated']:\n response = s3.list_objects_v2(Bucket=bucket, Prefix=p, StartAfter=file_list[-1])\n file_list = file_list + [d['Key'] for d in response['Contents']] \n \n return file_list",
"def load_from_s3(self, bucket, prefix=None):\r\n n = 0\r\n if prefix:\r\n prefix = '%s/' % prefix\r\n else:\r\n prefix = '%s/' % self.id[1:]\r\n rs = bucket.list(prefix=prefix)\r\n for key in rs:\r\n n += 1\r\n m = self.new_message(key.get_contents_as_string())\r\n self.write(m)\r\n return n",
"def get_common_prefixes(bucket, prefix):\n if not prefix.endswith('/'):\n prefix += \"/\"\n client = boto3.client('s3')\n paginator = client.get_paginator('list_objects')\n result = paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=prefix)\n return [common_prefix['Prefix'].split('/')[-2]\n for common_prefix in result.search(\"CommonPrefixes\")\n if common_prefix]",
"def objs_with_prefix(bucket, log_type, query_time):\n prefix = get_prefix(log_type, query_time)\n # S3 guarantees to return objects in ascending key order based on the UTF-8\n # binary representation of the key. Unfortunately the server-side filtering\n # is quite limited; we can't specify the sort order or the sort key.\n objs = list(bucket.objects.filter(Prefix=prefix))\n logging.info('Found %s files with prefix %s',\n 'no' if not objs else len(objs), prefix)\n return objs",
"def get_matching_s3_objects(bucket, prefix=\"\", suffix=\"\"):\n s3session = boto3.Session(profile_name='prod')\n s3 = s3session.client(\"s3\", \"eu-west-2\")\n #s3 = boto3.client(\"s3\")\n paginator = s3.get_paginator(\"list_objects_v2\")\n\n kwargs = {'Bucket': bucket}\n\n # We can pass the prefix directly to the S3 API. If the user has passed\n # a tuple or list of prefixes, we go through them one by one.\n if isinstance(prefix, str):\n prefixes = (prefix, )\n else:\n prefixes = prefix\n\n for key_prefix in prefixes:\n kwargs[\"Prefix\"] = key_prefix\n\n for page in paginator.paginate(**kwargs):\n try:\n contents = page[\"Contents\"]\n except KeyError:\n break\n\n for obj in contents:\n key = obj[\"Key\"]\n if key.endswith(suffix):\n yield obj",
"def items_with_prefix(self, prefix):\n node = self.get_node(self.root, 0, prefix)\n # look at the middle subtree only (since only it has exact matches)\n return self.collect(node.middle, prefix)",
"def generate_items_in_bucket(\n bucket: BucketLocation, prefix: Optional[str] = None\n) -> Generator[ObjectLocation, None, None]:\n s3 = boto3.client(\"s3\")\n\n kwargs = {\"Bucket\": bucket.name}\n\n if prefix is not None:\n kwargs[\"Prefix\"] = prefix\n\n while True:\n response = s3.list_objects(**kwargs)\n\n for obj in response[\"Contents\"]:\n yield ObjectLocation(key=obj[\"Key\"], bucket=bucket)\n\n # The S3 API is paginated, returning up to 1000 keys at a time.\n # Pass the continuation token into the next response, until we\n # reach the final page (when this field is missing).\n try:\n kwargs[\"ContinuationToken\"] = response[\"NextContinuationToken\"]\n except KeyError:\n break",
"def list_keys(self, s3_prefix_path, delimiter='/'):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n result = bucket.meta.client.list_objects(Bucket=bucket_name,\n Prefix=prefix,\n Delimiter=delimiter)\n if result.get('CommonPrefixes') is not None:\n return [o.get('Prefix') for o in result.get('CommonPrefixes')]",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def list(self, prefix=\"\"):\n try:\n list_rep = self.client.listdir(self.bucket + \"/\" + prefix)\n for i in list_rep:\n # Remove preceding bucket name and potential leading slash from returned key value\n i = i.replace(self.bucket, \"\").replace('tar', 'wsp.sz')\n if i[0] == '/': i = i[1:]\n yield i\n except pyhdfs.HdfsFileNotFoundException:\n pass",
"def prefixed(self, prefix):\n try:\n old_prefix = self._prefix\n if old_prefix is None:\n self._prefix = prefix\n else:\n self._prefix = \"{}{}\".format(old_prefix, prefix)\n yield self\n finally:\n # explicitly reset the stored prefix on completion and exceptions\n self._prefix = None\n self._prefix = old_prefix",
"def get_matching_s3_keys(client, bucket, prefix=\"\", suffix=\"\"):\n\n for obj in get_matching_s3_objects(client, bucket, prefix, suffix):\n yield obj[\"Key\"]",
"def _get_s3_policy_prefixes(self, bucket):\n client = bucket.meta.client\n response = client.list_objects(\n Bucket=bucket.name,\n Delimiter='/',\n Prefix='logs/'\n )\n if response['IsTruncated']:\n raise RuntimeError('ERROR: S3 response was truncated!')\n result = []\n for pname in response['CommonPrefixes']:\n result.append(pname['Prefix'].replace('logs/', '').strip('/'))\n return result",
"def get_matching_s3_keys(bucket, prefix='', suffix=''):\n for obj in get_matching_s3_objects(bucket, prefix, suffix):\n yield obj['Key']",
"def list_objects(self, s3_prefix_path):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n return [\"s3://\" + bucket_name + \"/\" + key.key for key in bucket.objects.filter(Prefix=prefix)]",
"def get_matching_s3_objects(client, bucket, prefix=\"\", suffix=\"\"):\n\n kwargs = {\"Bucket\": bucket}\n if isinstance(prefix, str):\n kwargs[\"Prefix\"] = prefix\n # logging.info(\"kwargs: %s\" % kwargs)\n while True:\n resp = client.list_objects_v2(**kwargs)\n try:\n contents = resp[\"Contents\"]\n except KeyError:\n return\n for obj in contents:\n key = obj[\"Key\"]\n if key.startswith(prefix) and key.endswith(suffix):\n yield obj\n try:\n kwargs[\"ContinuationToken\"] = resp[\"NextContinuationToken\"]\n except KeyError:\n break",
"def list_s3(bucket, prefix, ext):\n s3 = boto3.resource('s3')\n s3_bucket = s3.Bucket(bucket)\n\n if ext:\n ext = '.' + ext.lstrip('.')\n else:\n ext = ''\n\n counter = 0\n for item in s3_bucket.objects.filter(Prefix=prefix):\n counter += 1\n if counter % 5000 == 0:\n print(f'Found {counter} items so far', file=sys.stderr)\n\n key = item.key\n if not key.endswith(ext):\n continue\n\n # Write to stdout\n print(key)",
"def get_subfolders(self, prefix):\n result = self.client.list_objects(Bucket=self.bucket, Prefix=prefix, Delimiter='/')\n prefixes = []\n for o in result.get('CommonPrefixes', []):\n prefixes.append(o.get('Prefix'))\n return prefixes",
"def list_s3(bucket, prefix, ext=None):\n s3 = boto3.resource('s3')\n s3_bucket = s3.Bucket(bucket)\n\n if ext:\n ext = '.' + ext.lstrip('.')\n else:\n ext = ''\n\n for item in s3_bucket.objects.filter(Prefix=prefix):\n key = item.key\n if not key.endswith(ext):\n continue\n\n yield key",
"def get_prefix_object(self, bucket, key, file_extension=None):\n\n prefix_objs = self._s3.Bucket(bucket).objects.filter(Prefix=key)\n\n if file_extension:\n obj_list = [x.key for x in prefix_objs if x.key.endswith(file_extension)]\n else:\n obj_list = [x.key for x in prefix_objs]\n\n return obj_list",
"def prefixSearch(self, prefix: str, _prec=\"\"):\n if prefix == \"\":\n # prefix exhasuted, match all\n yield from self.keys(_prec)\n else:\n try:\n # prefix not exhausted, traverse further\n chld = self.children[prefix[0]]\n yield from chld.prefixSearch(prefix[1:], _prec + self.ch)\n except IndexError:\n yield None\n except KeyError:\n yield None",
"def fetch_ckpts_with_prefix(bucket_name, dest_dir, prefix, key_path):\n storage_client = storage.Client.from_service_account_json(key_path)\n bucket = storage_client.get_bucket(bucket_name)\n\n # use list to ensure reusable generator\n blobs = list(bucket.list_blobs(prefix=prefix, delimiter=None))\n\n blob_names = [Path(blob.name).name for blob in blobs]\n assert len(blob_names) == 3, \"expected to find 3 files per checkpoint\"\n for blob, blob_name in zip(blobs, blob_names):\n dest_path = Path(dest_dir) / blob_name\n if not dest_path.exists():\n print(\"downloading {} to {}\".format(blob_name, dest_path))\n blob.download_to_filename(str(dest_path))\n else:\n print(\"found existing {} at {}, skipping\".format(blob_name, dest_path))",
"def list_keys(bucket, path, suffix=None):\n\t# Apparently there is no easy way of doing this except to loop over the result\n\t# chek the parameters delimiter='', marker=''\n\t# then the list returns boto.s3.prefix.Prefix objects on matches\n\tfiles = []\n\tpath = path.strip('/')\n\tfor key in bucket.list(path):\n\t\trelative_path = key.name.replace(path, '').lstrip('/')\n\t\tif not relative_path:\n\t\t\t# Empty\n\t\t\tcontinue\n\t\tif '/' in relative_path.strip('/'):\n\t\t\t# Skip sub-folders\n\t\t\tcontinue\n\n\t\tif not suffix or relative_path.endswith(suffix):\n\t\t\tfiles.append(relative_path)\n\treturn files",
"def get_matching_s3_keys(\n bucket: str,\n prefix: str = \"\",\n suffix: str = \"\",\n session: Optional[boto3.Session] = None,\n) -> Iterator[str]:\n for obj in get_matching_s3_objects(bucket, prefix, suffix, session):\n if \"Key\" in obj:\n yield obj[\"Key\"]",
"def delete_keys_with_prefix(prefix):\n rc = redis.StrictRedis(host=REDIS_SINGLE_HOST, port=REDIS_PORT, db=0)\n keys = rc.keys(\"*\" + prefix + \"*\")\n for key in keys:\n rc.delete(key)",
"def s3_list_files(prefix, as_generator=False):\n return _do_list_files(S3_BUCKET, prefix, as_generator=as_generator)",
"def list_all_objects_s3(bucket, prefix, profile):\n s3 = boto3.Session(profile_name=profile).client('s3')\n keys = []\n continuation_token = \"\"\n\n while True:\n list_kwargs = dict(Bucket=bucket, Prefix=prefix)\n if continuation_token:\n list_kwargs['ContinuationToken'] = continuation_token\n resp = s3.list_objects_v2(**list_kwargs)\n keys += [x['Key'] for x in resp.get('Contents', [])]\n \n if not resp.get('IsTruncated'):\n break\n continuation_token = resp.get('NextContinuationToken')\n \n return keys",
"def get_s3_keys_as_generator(s3_client,bucket, prefix):\n kwargs = {'Bucket': bucket, 'Prefix' : prefix}\n while True:\n resp = s3_client.list_objects_v2(**kwargs)\n for obj in resp['Contents']:\n yield obj\n\n try:\n kwargs['ContinuationToken'] = resp['NextContinuationToken']\n except KeyError:\n break",
"def list_object_paths_in_s3(s3_prefix: Path) -> Generator[Path]:\n\n s3_args, unknown = get_s3_args().parse_known_args()\n s3_client = get_s3_client(s3_args)\n log = get_logger(\"list_object_paths_in_s3\")\n\n resp = s3_client.list_objects_v2(Bucket=s3_args.s3_bucket, Prefix=str(s3_prefix))\n\n if \"Contents\" not in resp:\n raise NoS3DataError(f\"No data at prefix {s3_prefix}\")\n\n while True:\n yield from (Path(obj[\"Key\"]) for obj in resp[\"Contents\"])\n\n if resp[\"IsTruncated\"]:\n continuation_key = resp[\"NextContinuationToken\"]\n resp = s3_client.list_objects_v2(\n Bucket=s3_args.s3_bucket,\n Prefix=str(s3_prefix),\n ContinuationToken=continuation_key,\n )\n else:\n break",
"def get_s3_file_names(s3_prefix_path):\n\n # parse s3 path for bucket name and prefix\n regex = r\"s3://([\\w._-]+)/([\\w./_-]+)\"\n m = re.match(regex, s3_prefix_path)\n s3bucket_name = m.group(1)\n s3prefix = m.group(2)\n\n # Get s3 bucket handle\n s3 = boto3.resource('s3')\n s3bucket = s3.Bucket(s3bucket_name)\n\n # Get all file names in the `s3bucket` with the prefix `s3prefix`\n files = []\n for object in s3bucket.objects.filter(Prefix=s3prefix):\n path_to_file = os.path.join(\"s3://%s\" % s3bucket_name, object.key)\n files.append(path_to_file)\n\n return files"
]
| [
"0.65747863",
"0.65253735",
"0.65112585",
"0.64017195",
"0.62606305",
"0.62546366",
"0.6225744",
"0.62026757",
"0.6166004",
"0.6156403",
"0.6142595",
"0.6133839",
"0.6086885",
"0.6068397",
"0.6056256",
"0.60536903",
"0.6052233",
"0.604813",
"0.603384",
"0.60187894",
"0.59921753",
"0.5966482",
"0.591565",
"0.5873313",
"0.5814619",
"0.5812602",
"0.58098453",
"0.57896215",
"0.5787346",
"0.57861096"
]
| 0.77303046 | 0 |
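A hedged usage sketch for the populate_queue_with_subdirs row above: it shows one plausible way its get_client helper could map onto boto3 and how the function might be invoked; the bucket name, prefix, and SQS queue URL below are made-up placeholders, not values from the row.

import logging
import boto3

LOGGER = logging.getLogger(__name__)

def get_client(service_name):
    # One possible stand-in for the row's get_client helper.
    return boto3.client(service_name)

# Hypothetical call: enqueue one message per first-level "subdirectory" under raw/2024/.
# populate_queue_with_subdirs(
#     bucket="example-bucket",
#     prefix="raw/2024/",
#     queue="https://sqs.us-east-1.amazonaws.com/123456789012/example-queue",
# )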
(str) Encrypts the letters of a phrase by an offset | def encrypt(phrase, offset):
encrypted_phrase = "" #Empty string that will be populated with encrypted characters
for character in phrase:
unicode_value = ord(character) #Gets the unicode value for the character
if unicode_value >= 97 and unicode_value <= 122: #Unicode values for lowercase a - z
unicode_value += offset
if unicode_value > 122: #If it now falls outside the range of unicode values for a - z
                unicode_value -= 26 #loop back to the start of the alphabet
elif unicode_value >= 65 and unicode_value <= 90: #Unicode values for A - Z
unicode_value += offset
if unicode_value > 90:
unicode_value -= 26 #Loops back if unicode_value exceeds that of Z
encrypted_phrase += chr(unicode_value) #Adds encrypted letter to the encrypted phrase
return encrypted_phrase | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encrypt(text, offset):\r\n\r\n return format_text(text, offset)",
"def encrypt(text, offset):\n encrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n encrypted_character = chr(ord(char))\n elif ord(char) < 90:\n encrypted_character = ord(char) + offset\n if encrypted_character > 90:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n else:\n encrypted_character = ord(char) + offset\n if encrypted_character > 122:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n encrypted_text += encrypted_character\n\n return encrypted_text",
"def encrypt(self,string=\"vrorqjdqgwdqnviruwkhilvk\",key=3):\r\n return \"\".join([chr((ord(ch)-key-ord('a'))%(ord('z')-ord('a')+1)+ord('a')) for ch in string])",
"def encryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[0:index] + s[index+1:] +s[index]\n\n print(\"Encrypted Transformed text : \" )\n return transformedChar",
"def weaksauce_encrypt(text, password):\n\n offset = sum([ord(x) for x in password])\n encoded = ''.join(\n chr(min(ord(x) + offset, 2**20))\n for x in text\n )\n return encoded",
"def decrypt(phrase, offset):\n return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters",
"def encipher(self,string): \n string = self.remove_punctuation(string)\n ret = ''\n for (i,c) in enumerate(string):\n if i<len(self.key): offset = self.a2i(self.key[i])\n else: offset = self.a2i(string[i-len(self.key)]) \n ret += self.i2a(self.a2i(c)+offset)\n return ret",
"def crypt(text, passphrase, which):\n text = scrub_string(text)\n passphrase = scrub_string(passphrase)\n letters = (\n shift_string_by_letter(ch, passphrase[i % len(passphrase)], which)\n for i, ch in enumerate(text)\n )\n return \"\".join(letters)",
"def encrypt(self, phrase):\n keyword = input(\"What keyword would you like to use? \")\n plaintext = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n ciphertext = []\n encrypted = []\n for i in keyword.upper():\n if i not in ciphertext:\n ciphertext.append(i)\n for i in plaintext:\n if i not in ciphertext:\n ciphertext.append(i)\n\n key_dict = dict(zip(plaintext, ciphertext))\n\n for i in phrase.upper():\n if i == \" \":\n encrypted.append(\" \")\n else:\n for key, value in key_dict.items():\n if i == key:\n encrypted.append(value)\n\n return \"\".join(encrypted)",
"def cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>)\"\r\n# Fetching the writing in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# The crypting process, replaces letters in intab1 with outtab1\r\n crypted = (a.translate({ord(x): y for (x, y) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the crypted text within textbox\r\n self.textbox.setPlainText(crypted)",
"def encrypt(self, text):\n\n text = text.lower()\n encrypted_word = []\n for letter in text:\n try:\n index = self.alpha.index(letter)\n except ValueError:\n encrypted_word.append(letter)\n else:\n # Uses Affine encryption function to encrypt the word\n new_index = ((self.a*index)+self.b) % self.m\n encrypted_word.append(self.alpha[new_index])\n return \"\".join(encrypted_word)",
"def encrypt(self, text):\n output = []\n text = text.upper()\n\n for char in text:\n try:\n index = self.alpha.index(char)\n except ValueError:\n output.append(char)\n else:\n output.append(self.alpha[(index * 5 + 8) % 26])\n return \"\".join(output)",
"def encrypt(text):\r\n\r\n cipher = fuzz(text)\r\n return hexify(cipher)",
"def encryptionShift(text, index):\n s=text\n transformedChar=\"\"\n transformedChar = ord(s[index]) + 1\n\n if(transformedChar > 90):\n transformedChar=chr(ord(s[index]) + 1 - 26)\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Single Shift Encrypted text: \")\n return s[:index] + transformedChar + s[index+1:]",
"def rotate(text, offset):\n\n # return text.lower().encode('rot13') # works but limited\n result = ''\n\n\n for letter in text:\n letter = ord(letter)\n result += (chr((letter + offset)))\n\n\n return(result)",
"def caesar_encryption(text):\n result = ''\n for char in text:\n if char.isdigit():\n i = (num_key.index(char) - 4) % 10\n result += num_key[i]\n elif not char.isdigit() and char.lower() in alpha_key:\n i = (alpha_key.index(char.lower()) - 4) % 26\n result += alpha_key[i]\n else:\n result += char\n return result",
"def encode(text, password):\r\n\tstep_index = 0\r\n\tencoded_text = ''\r\n\tfor letter in text:\r\n\t\tencoded_text += next_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn encoded_text",
"def encrypt(cipher, word):\n encrypted = \"\"\n for char in word:\n encrypted += cipher[char]\n return encrypted",
"def encrypt_vigenere(plaintext, keyword):\n ciphertext = ''\n new_keyword = ''\n for i in range(len(plaintext)):\n num_word = ord(plaintext[i])\n while len(plaintext) > len(new_keyword):\n new_keyword += keyword\n num_key = ord(new_keyword[i])\n if num_key <= 90:\n num_key -= 65\n elif num_key >= 97:\n num_key -= 97\n if num_word <= 90:\n if (num_key+num_word) > 90:\n num_word -= 26\n else:\n if (num_key+num_word) > 122:\n num_word -= 26\n ciphertext += chr(num_word+num_key)\n return ciphertext",
"def encrypt_vigenere(plaintext, keyword):\n ciphertext = '' # Обнуляем строку\n keyword = length_equal(plaintext, keyword)\n for i in range(len(plaintext)):\n if (ord(plaintext[i].upper()) + ord(keyword[i].upper()) - 65) <= 90:\n ciphertext += chr(ord(plaintext[i].upper()) + (ord(keyword[i].upper()) - 65))\n else:\n ciphertext += chr(ord(keyword[i].upper()) - 91 + ord(plaintext[i].upper()))\n return ciphertext",
"def encrypt(self, text):\n ciphertext = []\n # text = text.upper()\n for char in text:\n try:\n key = (self.a * self.characters.index(char) + self.b) % len(self.characters)\n # If character is not in set for cipher,\n # directly append it without transformation\n except ValueError:\n ciphertext.append(char)\n else:\n ciphertext.append(self.characters[key])\n return ''.join(ciphertext)",
"def encrypt_vigenere(plaintext: str, keyword: str) -> str:\n ciphertext = \"\"\n # PUT YOUR CODE HERE\n\n key_lenght = len(keyword)\n text_lenght = len(plaintext)\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_A = ord('A')\n ord_a = ord('a')\n\n if plaintext.islower():\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_a)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n ciphertext += \" \"\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_a\n ciphertext += chr(value)\n else:\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_A)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n value = ord(\" \")\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_A\n ciphertext += chr(value)\n return ciphertext",
"def encryptionMultipleShift(text, index, power):\n s=text\n transformedChar=\"\"\n\n transformedChar = ord(s[index]) + (power % 26)\n if (transformedChar >= 90):\n transformedChar = chr(64 + (transformedChar - 90))\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Multiple Shift Encrypted text : \" )\n return s[:index] + transformedChar + s[index+1:]",
"def encrypt(self, text):\n text = text.upper()\n output = []\n text_list = list(text)\n for letter in text_list:\n output.append(self.atbash_dict.get(letter, letter))\n return ''.join(output)",
"def encrypt(word):\r\n if len(word) == 1:\r\n if word.islower() and word !='z':#only encode lower case letters\r\n return chr(ord(word) + 1)\r\n elif word.isupper and word != 'z':\r\n return word\r\n elif word == 'z': # special case: z\r\n return chr(ord(word) -25)\r\n else:\r\n myChar = word[0] #first get first chararacter in the word\r\n if myChar.islower() and myChar != 'z':\r\n myChar = chr(ord(word[0])+1)\r\n elif myChar == 'z': # special case: z\r\n myChar = chr(ord(word[0])-25)\r\n elif myChar.isupper:\r\n pass \r\n return myChar + encrypt(word[1:])",
"def encrypt(self, text):\n output = []\n text = text.upper()\n for char in text:\n try:\n text_index = self.alphabet.index(char)\n output.append(self.combined[text_index])\n except ValueError:\n output.append(char)\n\n return ''.join(output)",
"def encrypt_vigenere(plaintext, keyword):\n list = []\n index = 0\n for char in plaintext:\n new_char_val = ord(char) + ord(keyword[index]) - ord('A')\n if new_char_val > ord('Z'):\n new_char_val -= 26\n list.append(chr(new_char_val))\n index += 1\n index %= len(keyword)\n return ''.join(list)",
"def encrypt_vigenere(plaintext, keyword):\n ciphertext = \"\"\n for i in range(len(plaintext)):\n if 64 < ord(plaintext[i]) < 65 + 26:\n shift = (ord(keyword[i % len(keyword)]) - 65)\n if ord(plaintext[i]) > 64 + 26 - shift:\n ciphertext += chr(ord(plaintext[i]) + shift - 26)\n else:\n ciphertext += chr(ord(plaintext[i]) + shift)\n elif 96 < ord(plaintext[i]) < 97 + 26:\n shift = (ord(keyword[i % len(keyword)]) - 97)\n if ord(plaintext[i]) > 96 + 26 - shift:\n ciphertext += chr(ord(plaintext[i]) + shift - 26)\n else:\n ciphertext += chr(ord(plaintext[i]) + shift)\n else:\n ciphertext += i\n return ciphertext",
"def encrypt_vigenere(plaintext: str, keyword: str) -> str:",
"def caesar_cipher_encode(n: int, text: str, p: str) -> str:\n lookup_table = str.maketrans(p, p[n:] + p[:n])\n\n return text.translate(lookup_table)"
]
| [
"0.789051",
"0.7881164",
"0.7432777",
"0.743228",
"0.73752034",
"0.73717153",
"0.7289788",
"0.7195223",
"0.71443444",
"0.7125594",
"0.70946026",
"0.7082661",
"0.6997202",
"0.69794333",
"0.69541246",
"0.6937674",
"0.69305736",
"0.6927407",
"0.6907248",
"0.6897459",
"0.6884718",
"0.687214",
"0.68590623",
"0.68313247",
"0.6820711",
"0.6815376",
"0.6808003",
"0.68042326",
"0.6790465",
"0.6787056"
]
| 0.81793165 | 0 |
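As a quick worked check of the wrap-around arithmetic in the encrypt row above: 'y' has code point 121, so an offset of 3 gives 124, which is past 'z' (122); subtracting 26 lands on 98, i.e. 'b'. A tiny sketch exercising that case (the final comment assumes the row's encrypt definition is in scope):

shifted = ord("y") + 3          # 124, past 'z' (122)
if shifted > 122:
    shifted -= 26               # wrap back into the a-z range
assert chr(shifted) == "b"

# encrypt("xyz", 3) would then return "abc"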
(str) Decrypts the letters of a phrase by an offset | def decrypt(phrase, offset):
return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt(text, offset):\r\n return format_text(text, -offset)",
"def decrypt(text, offset):\n decrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n decrypted_character = chr(ord(char))\n elif ord(char) <= 90:\n decrypted_character = ord(char) - offset\n if decrypted_character < 65:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n else:\n decrypted_character = ord(char) - offset\n if decrypted_character < 97:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n decrypted_text += decrypted_character\n\n return decrypted_text",
"def decrypt(self, text):\n\n output = []\n text = text.upper()\n\n for char in text:\n try:\n index = self.alpha.index(char)\n except ValueError:\n output.append(char)\n else:\n output.append(self.alpha[21 * (index - 8) % 26])\n return \"\".join(output)",
"def decrypt(self, text):\n\n decrypted_word = []\n for letter in text:\n try:\n index = self.alpha.index(letter)\n except ValueError:\n decrypted_word.append(letter)\n else:\n # Uses Affine decryption function to decrypt the word\n new_index = ((21*(index-self.b)) % self.m)\n decrypted_word.append(self.alpha[new_index])\n return \"\".join(decrypted_word)",
"def decrypt(self, sentence, pad):\r\n sentence = list(sentence)\r\n decrypted_sentence = []\r\n x = list(\"abcdefghijklmnopqrstuvwxyz\")\r\n l = len(x)\r\n y = pad \r\n z = list(zip(x, y))\r\n z.append((\" \", \" \"))\r\n for i in sentence:\r\n if i.lower() not in x:\r\n decrypted_sentence.append(i)\r\n continue \r\n for a in z:\r\n if i.lower() == a[1]:\r\n if i.isupper():\r\n decrypted_sentence.append(a[0].upper())\r\n else:\r\n decrypted_sentence.append(a[0])\r\n decrypted_sentence = \"\".join(decrypted_sentence)\r\n return decrypted_sentence",
"def decryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[:index] + s[-1] + s[index:len(s)-1]\n\n print(\"Decrypted Transformed text : \" )\n return transformedChar",
"def weaksauce_decrypt(text, password):\n offset = sum([ord(x) for x in password])\n decoded = ''.join(\n chr(max(ord(x) - offset, 0))\n for x in text\n )\n return decoded",
"def decrypt(text, shift):\n decrypted_text = list(range(len(text)))\n alphabet = string.ascii_lowercase\n first_half = alphabet[:shift]\n second_half = alphabet[shift:]\n shifted_alphabet = second_half + first_half\n\n for i, letter in enumerate(text.lower()):\n\n if letter in alphabet:\n index = shifted_alphabet.index(letter)\n original_letter = alphabet[index]\n decrypted_text[i] = original_letter\n else:\n decrypted_text[i] = letter\n\n return \"\".join(decrypted_text)",
"def decipher(self,string): \n string = self.remove_punctuation(string)\n ret = ''\n for (i,c) in enumerate(string):\n if i<len(self.key): offset = self.a2i(self.key[i])\n else: offset = self.a2i(ret[i-len(self.key)]) \n ret += self.i2a(self.a2i(c)-offset)\n return ret",
"def decrypt(self, phrase):\n keyword = input(\"What keyword is it encrypted with? \")\n plaintext = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n ciphertext = []\n decrypted = []\n for i in keyword.upper():\n if i not in ciphertext:\n ciphertext.append(i)\n for i in plaintext:\n if i not in ciphertext:\n ciphertext.append(i)\n\n key_dict = dict(zip(plaintext, ciphertext))\n\n for i in phrase.upper():\n if i == \" \":\n decrypted.append(\" \")\n else:\n for key, value in key_dict.items():\n if i == value:\n decrypted.append(key)\n\n return \"\".join(decrypted)",
"def decrypt(self, text):\n\n output = []\n text = text.upper()\n for char in text:\n try:\n text_index = self.combined.index(char)\n output.append(self.alphabet[text_index])\n except ValueError:\n output.append(char)\n\n return ''.join(output)",
"def decryptionShift(text, index):\n s = text;\n transformedChar = \"\"\n transformedChar = ord(s[index]) - 1\n\n if (s[index] == 'A'):\n transformedChar = chr(ord(s[index]) - 1 + 26)\n else:\n transformedChar = chr(ord(s[index]) - 1)\n\n print(\"Single Shift Decrypted text: \" )\n return s[:index] + transformedChar + s[index+1:]",
"def decipher(ciphered_text: str, key: int, charset: str = DEFAULT_CHARSET) -> str:\n deciphered_text = _offset_text(ciphered_text, key, False, Ciphers.CAESAR, charset)\n return deciphered_text",
"def __decrypt(string: str) -> str:\n key = 171\n result = \"\"\n i: int\n for i in string:\n a = key ^ i\n key = i\n result += chr(a)\n return result",
"def decrypt_vigenere(ciphertext, keyword):\n list = []\n index = 0\n for char in ciphertext:\n new_char_val = ord(char) - (ord(keyword[index]) - ord('A'))\n if new_char_val < ord('A'):\n new_char_val += 26\n list.append(chr(new_char_val))\n index += 1\n index %= len(keyword)\n return ''.join(list)",
"def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)",
"def decrypt_caesar(ciphertext: str) -> str:\n plaintext = ''\n for ab in ciphertext:\n if ('a' <= ab <= 'z') or ('A' <= ab <= 'Z'):\n ans = ord(ab) - 3\n if (ans < ord('a')) and (ans > ord('Z')) or (ans < ord('A')):\n ans += 26\n plaintext += chr(ans)\n else:\n plaintext += ab\n return plaintext",
"def decrypt_cesar(new_word, number): #3\n decrypted_num = []\n for char in new_word:\n char = ord(char)\n decrypted_num.append(char)\n\n decrypted_word = []\n for num in decrypted_num:\n num = num - number\n decrypted_word.append(chr(num))\n\n return \"\".join(decrypted_word)",
"def decryptionMultipleShift(text, index, power):\n s = text\n transformedChar = \"\"\n\n transformedChar = ord(s[index])\n if (power > 26):\n power = power % 26\n transformedChar = chr((transformedChar - power))\n\n else:\n transformedChar = chr((transformedChar) - power)\n\n print(\"Multiple Shift Decrypted text : \" )\n return s[:index] + transformedChar + s[(index + 1):]",
"def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar",
"def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)",
"def decrypt_letter(letter: str, keystream: int) -> str:\n ord_diff = ord(letter) - ord('A')\n\n new_char_ord = (ord_diff - keystream) % 26\n\n return chr(new_char_ord + ord('A'))",
"def decrypt(n, ciphtext):\r\n res = ''\r\n\r\n for l in ciphtext:\r\n try:\r\n i = (key.index(l) + n) % len(key)\r\n res += key[i]\r\n except ValueError:\r\n res += 1\r\n return res",
"def encrypt(phrase, offset):\n encrypted_phrase = \"\" #Empty string that will be populated with encrypted characters\n for character in phrase:\n unicode_value = ord(character) #Gets the unicode value for the character\n if unicode_value >= 97 and unicode_value <= 122: #Unicode values for lowercase a - z\n unicode_value += offset\n if unicode_value > 122: #If it now falls outside the range of unicode values for a - z\n unicode_value -= 26 #loop back to the start of the alphbet\n elif unicode_value >= 65 and unicode_value <= 90: #Unicode values for A - Z\n unicode_value += offset\n if unicode_value > 90: \n unicode_value -= 26 #Loops back if unicode_value exceeds that of Z \n encrypted_phrase += chr(unicode_value) #Adds encrypted letter to the encrypted phrase\n return encrypted_phrase",
"def encrypt(text, offset):\n encrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n encrypted_character = chr(ord(char))\n elif ord(char) < 90:\n encrypted_character = ord(char) + offset\n if encrypted_character > 90:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n else:\n encrypted_character = ord(char) + offset\n if encrypted_character > 122:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n encrypted_text += encrypted_character\n\n return encrypted_text",
"def decrypt_caesar(ciphertext: str, shift: int = 3) -> str:\n plaintext = \"\"\n low_first = ord(\"a\")\n low_last = ord(\"z\")\n high_first = ord(\"A\")\n high_last = ord(\"Z\")\n eng_alp = 26\n for i in ciphertext:\n if i.isalpha():\n if low_first <= ord(i) <= low_last:\n a = chr((((ord(i) - low_first) - shift) % eng_alp) + low_first)\n plaintext += a\n elif high_first <= ord(i) <= high_last:\n a = chr((((ord(i) - high_first) - shift) % eng_alp) + high_first)\n plaintext += a\n else:\n plaintext += i\n return plaintext",
"def decrypt_vigenere(ciphertext: str, keyword: str) -> str:\n plaintext = \"\"\n # PUT YOUR CODE HERE\n key_lenght = len(keyword)\n text_lenght = len(ciphertext)\n\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_a = ord('a')\n ord_A = ord('A')\n\n if ciphertext.islower():\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_a)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_a\n plaintext += chr(value)\n else:\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_A)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_A\n plaintext += chr(value)\n\n return plaintext",
"def decrypt_vigenere(ciphertext, keyword):\n plaintext = ''\n keyword = length_equal(ciphertext, keyword)\n for i in range(len(ciphertext)):\n if (ord(ciphertext[i].upper()) - (ord(keyword[i].upper()) - 65)) >= 65:\n plaintext += chr(ord(ciphertext[i].upper()) - (ord(keyword[i].upper()) - 65))\n else:\n plaintext += chr(ord(ciphertext[i].upper()) - ord(keyword[i].upper()) + 91)\n return plaintext",
"def decrypt_vigenere(plaintext, keyword):\n ciphertext = \"\"\n for i in range(len(plaintext)):\n if 64 < ord(plaintext[i]) < 65 + 26:\n shift = (ord(keyword[i % len(keyword)]) - 65)\n if ord(plaintext[i]) < 64 + shift:\n ciphertext += chr(ord(plaintext[i]) + 26 - shift)\n else:\n ciphertext += chr(ord(plaintext[i]) - shift)\n elif 96 < ord(plaintext[i]) < 97 + 26:\n shift = (ord(keyword[i % len(keyword)]) - 97)\n if ord(plaintext[i]) < 96 + shift:\n ciphertext += chr(ord(plaintext[i]) + shift - 26)\n else:\n ciphertext += chr(ord(plaintext[i]) + shift)\n else:\n ciphertext += i\n return ciphertext",
"def decrypt_vigenere(cipehrtext: str, keyword: str) -> str:\n plaintext = \"\"\n if len(keyword) < len(cipehrtext):\n for j in range(len(cipehrtext) - len(keyword)):\n keyword += keyword[j]\n for i in range(len(cipehrtext)):\n n = ord(cipehrtext[i])\n m = ord(keyword[i])\n if (n >= ord('A')) and (n <= ord('Z')):\n if n >= m:\n plaintext += chr(n - m + ord('A'))\n else:\n plaintext += chr(ord('Z') + 1 - (m - n))\n else:\n if n >= m:\n plaintext += chr(n - m + ord('a'))\n else:\n plaintext += chr(ord('Z') + 1 - (m - n))\n return plaintext"
]
| [
"0.8053914",
"0.80221343",
"0.7361305",
"0.732292",
"0.72779155",
"0.7213974",
"0.7143914",
"0.7135272",
"0.70978755",
"0.70571774",
"0.699645",
"0.6964193",
"0.69637746",
"0.69365424",
"0.69018847",
"0.6900279",
"0.68594867",
"0.68542224",
"0.6836575",
"0.68228406",
"0.6814882",
"0.68049896",
"0.6801385",
"0.679793",
"0.6792684",
"0.6758114",
"0.67547995",
"0.6749103",
"0.67432857",
"0.6724589"
]
| 0.87086564 | 0 |
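The decrypt row above relies on the fact that shifting by offset and then by 26 - offset is a full 26-letter rotation, so every letter returns to where it started. Assuming the encrypt and decrypt definitions from the two rows above are in scope, a small round-trip sketch:

message = "Attack at dawn!"
for offset in range(1, 26):
    # decrypt(encrypt(m, k), k) == encrypt(encrypt(m, k), 26 - k) == m
    assert decrypt(encrypt(message, offset), offset) == message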
(tuple) Finds the decryption offsets that will result in an English phrase, then prints them | def find_encryption_offsets(phrase):
possible_offsets = {} #Empty dictionary to be populated by valid offsets as the keys, and decrypted phrases as the values
for i in range(1, 26):
        english_phrase = True #Initialised as True; if it remains True after every word is checked, the decryption is a valid English phrase
decrypted_phrase = decrypt(phrase, i)
words = re.split("-|\s", decrypted_phrase) #Splits string by whitespace and hyphens
for word in words:
while len(word) != 0 and word[0].isalpha() == False: #If the first character exists and is nonalphbetical
word = word[1:len(word)] #cuts off punctuation at the start of word for checking with word list
while len(word) != 0 and word[len(word) - 1].isalpha() == False:
word = word[0:len(word) - 1] #cuts off punctuation at the end of the word
contraction = False #Initialised as False
for char in word:
if char == "'":
contraction = True #If there is an apostrophe, ignore the word
break
if is_word_english(word.lower()) == False and contraction == False and word != "": #If the word isn't on the list, isn't a contraction and isn't blank
english_phrase = False
break
if english_phrase:
            possible_offsets[i] = decrypted_phrase #Add valid phrases to the dictionary
if len(possible_offsets) == 0: #If no decryptions were found
print("No valid encryption offset")
else:
if len(possible_offsets) == 1:
print("One encryption offset:")
else:
print("Multiple encryption offsets:")
for key in possible_offsets:
print(str(key) + ": " + possible_offsets[key]) #Print all valid decryptions with their offsets
output_tuple = tuple(possible_offsets.keys()) #Converts keys to tuple for output
return output_tuple | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_encrypt_or_decrypt(option, offset, text):\n if option == \"e\":\n encrypt_or_decrypt = \"encrypted \"\n function = encrypt\n else:\n encrypt_or_decrypt = \"decrypted \"\n function = decrypt\n \n if offset == 0:\n print(\"The \" + encrypt_or_decrypt + \"text is:\")\n for offset in range(1, 26):\n print(\" \" + format(offset, '02') + \": \" + function(text, offset))\n else:\n print(\"The \" + encrypt_or_decrypt + \"text is: \" + function(text, offset))",
"def find_encryption_offsets(encrypted_text):\n encrypted_text = encrypted_text.lower()\n possible_offset = ()\n has_apostrophe = bool\n \n for offset in range(1, 26):\n decrypted_text = decrypt(encrypted_text, offset)\n for char in decrypted_text:\n if ord(char) < 64 and char not in (\"-\", \" \", range(48,58)):\n decrypted_text = decrypted_text.replace(char, \"\")\n if char == \"-\":\n decrypted_text = decrypted_text.replace(char, \" \")\n if char == \"'\":\n has_apostrophe = True\n decrypted_words = decrypted_text.split(\" \")\n for i in range(0, len(decrypted_words)):\n if is_word_english(decrypted_words[i]) == True:\n possible_offset += (offset, )\n\n\n if len(possible_offset) > 0 and len(decrypted_words) > 1:\n actual_offset = max(possible_offset, key = possible_offset.count)\n if has_apostrophe == True:\n possible_offset = (actual_offset, )\n elif len(decrypted_words) > 1:\n if possible_offset.count(actual_offset) == len(decrypted_words):\n possible_offset = (actual_offset, )\n else:\n possible_offset = ()\n\n return possible_offset",
"def decrypt(text, offset):\r\n return format_text(text, -offset)",
"def decrypt(phrase, offset):\n return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters",
"def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)",
"def print_possible_offset(possible_offset):\n offset_string = ''\n\n for i in range(0, len(possible_offset)):\n offset_string += str(possible_offset[i])\n if possible_offset[i] != possible_offset[-1]:\n offset_string += ', '\n \n if len(possible_offset) == 0:\n print(\"No valid encryption offset\")\n elif len(possible_offset) > 1:\n print(\"Multiple encryption offsets:\", offset_string)\n else:\n print(\"Encryption offset:\", offset_string)\n decrypt_message = True\n return decrypt_message",
"def msg(text):\n for line in text.splitlines():\n if JS.alignment == \"left\":\n print(demarkup(line))\n elif JS.alignment == \"center\":\n print(demarkup(line).center(get_terminal_size()[0] - 1))\n else:\n print(demarkup(line).rjust(get_terminal_size()[0] - 1))",
"def print_chars(self):\n for v in voc.split('\\n'):\n pair = v.split(',')\n print(pair[0], pair[1], '\\t', self.epi.xsampa_list(pair[0]))",
"def print_report(self):\n\n if not self._translation:\n print('Failed to translate ciphertext.')\n return\n\n plaintext = self.ciphertext.translate(\n SubSolver._make_trans_from_dict(self._translation))\n print('Ciphertext:')\n print(self.ciphertext, '\\n')\n print('Plaintext:')\n print(plaintext, '\\n')\n\n print('Substitutions:')\n items = [key + ' -> ' + word for key, word\n in self._translation.items()]\n items.sort()\n i = 0\n for item in items:\n print(item + ' ', end='')\n if i % 5 == 4:\n print('')\n i += 1",
"def decryptStory():\n \n r=loadWords()\n\n m1=getStoryString()\n \n p=findBestShift(r, m1)\n \n strans=applyShift(m1,p)\n return strans",
"def __gettextinfo(edudict, eduspan):\n # text = lnode.text + \" \" + rnode.text\n text = []\n for idx in range(eduspan[0], eduspan[1]+1, 1):\n text += edudict[idx]\n # Return: A list of token indices\n return text",
"def brute_force_decrypt(text):\n for n in range(26):\n print(f\"Using a shift value of {n}\")\n print(decrypt(text, n))\n print(\"\\n***\\n\")",
"def decrypt(text, offset):\n decrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n decrypted_character = chr(ord(char))\n elif ord(char) <= 90:\n decrypted_character = ord(char) - offset\n if decrypted_character < 65:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n else:\n decrypted_character = ord(char) - offset\n if decrypted_character < 97:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n decrypted_text += decrypted_character\n\n return decrypted_text",
"def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar",
"def decryptStory():\n wordList = loadWords()\n text= getStoryString()\n \n shift = findBestShift(wordList, text)\n return applyShift(text, shift)",
"def decryptStory():\n wordList = loadWords()\n text = getStoryString() \n k = findBestShift(wordList, text)\n \n return applyShift(text, k)",
"def translate_leet(phrase):",
"def decrypt_vigenere(ciphertext: str, keyword: str) -> str:\n plaintext = \"\"\n # PUT YOUR CODE HERE\n key_lenght = len(keyword)\n text_lenght = len(ciphertext)\n\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_a = ord('a')\n ord_A = ord('A')\n\n if ciphertext.islower():\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_a)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_a\n plaintext += chr(value)\n else:\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_A)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_A\n plaintext += chr(value)\n\n return plaintext",
"def decryptionMultipleShift(text, index, power):\n s = text\n transformedChar = \"\"\n\n transformedChar = ord(s[index])\n if (power > 26):\n power = power % 26\n transformedChar = chr((transformedChar - power))\n\n else:\n transformedChar = chr((transformedChar) - power)\n\n print(\"Multiple Shift Decrypted text : \" )\n return s[:index] + transformedChar + s[(index + 1):]",
"def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)",
"def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)",
"def test_encryption(e, c):\n message = input(\"Enter word to encrypt: \")\n ciphered = ''\n\n for i in range(0, len(message)):\n ciphered = f'{ciphered}{chr(endecrypt(ord(message[i]), e, c))}'\n\n print(ciphered + ' is the ciphered text')\n d = key_cracker(e, c)\n print(\"Plain text is:\")\n for i in range(0, len(ciphered)):\n print(chr(endecrypt(ord(ciphered[i]), d, c)), end='')",
"def decryptMessage():\n exponents = [2, 1, 0]\n encryptedMessage = input(\"Please enter the RSA encrypted message: \\n\")\n messageSplit = encryptedMessage.split(\" \")\n print(\"\")\n for c in messageSplit:\n d = modInverse(PUBLIC_KEY[\"e\"], phi(PUBLIC_KEY[\"n\"]))\n p = (int(c) ** d) % PUBLIC_KEY[\"n\"]\n for e in exponents:\n letter = math.trunc((p/pow(26, e)) % 26)\n print(ALPHABET[letter], end=\"\")\n print(\" \", end=\"\")\n print(\"\")",
"def decryptionSwap(text, index1, index2):\n s = text\n transformedChar = \"\"\n\n swapIndex1 = s[index1]\n swapIndex2 = s[index2]\n\n prevText = s[:index1]\n midText = s[(index1 + 1):index2]\n endText = s[(index2 + 1):]\n\n transformedChar = prevText + swapIndex2 + midText + swapIndex1 + endText\n\n print(\"Swapped Decrypted text : \" )\n return transformedChar",
"def decrypt(word_d):\r\n\r\n translated_word = \"\"\r\n\r\n # For every 2 characters in word_d, translate and add to translated_word\r\n for char_index in range(0, len(word_d), 2):\r\n translated_word += keys[values.index(word_d[char_index] +\r\n word_d[char_index + 1])]\r\n # Returns output\r\n return translated_word",
"def encrypt(text, offset):\r\n\r\n return format_text(text, offset)",
"def decryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[:index] + s[-1] + s[index:len(s)-1]\n\n print(\"Decrypted Transformed text : \" )\n return transformedChar",
"def decrypt(self, phrase):\n keyword = input(\"What keyword is it encrypted with? \")\n plaintext = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n ciphertext = []\n decrypted = []\n for i in keyword.upper():\n if i not in ciphertext:\n ciphertext.append(i)\n for i in plaintext:\n if i not in ciphertext:\n ciphertext.append(i)\n\n key_dict = dict(zip(plaintext, ciphertext))\n\n for i in phrase.upper():\n if i == \" \":\n decrypted.append(\" \")\n else:\n for key, value in key_dict.items():\n if i == value:\n decrypted.append(key)\n\n return \"\".join(decrypted)",
"def decrypt(self, sentence, pad):\r\n sentence = list(sentence)\r\n decrypted_sentence = []\r\n x = list(\"abcdefghijklmnopqrstuvwxyz\")\r\n l = len(x)\r\n y = pad \r\n z = list(zip(x, y))\r\n z.append((\" \", \" \"))\r\n for i in sentence:\r\n if i.lower() not in x:\r\n decrypted_sentence.append(i)\r\n continue \r\n for a in z:\r\n if i.lower() == a[1]:\r\n if i.isupper():\r\n decrypted_sentence.append(a[0].upper())\r\n else:\r\n decrypted_sentence.append(a[0])\r\n decrypted_sentence = \"\".join(decrypted_sentence)\r\n return decrypted_sentence",
"def decryptStory():\n return applyShift(getStoryString(), findBestShift(loadWords(), getStoryString()))"
]
| [
"0.7052739",
"0.660296",
"0.6579963",
"0.64074355",
"0.63853496",
"0.6375597",
"0.6309714",
"0.62654096",
"0.6165342",
"0.6105011",
"0.6055026",
"0.6052835",
"0.5956748",
"0.5912489",
"0.5912052",
"0.5774893",
"0.5742476",
"0.5712354",
"0.57076085",
"0.56913203",
"0.5623541",
"0.55976546",
"0.5581645",
"0.55683434",
"0.551724",
"0.5512246",
"0.5506674",
"0.5505868",
"0.5501198",
"0.5498768"
]
| 0.7051418 | 1 |
Blame something. This can be anything. | async def blame(self, ctx, *, the_blame: str):
if len(the_blame) > 1100:
return await ctx.send("Max blame length is 1100. Sorry.")
async with ctx.typing():
image = await self.request(endpoint="generators/blame", params=f"?name={the_blame}")
await ctx.send(f"**{ctx.author.name}** just blamed something.", file=discord.File(image, filename="file.png")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DoMaliciousThings():\r\n\tprint(\"You are infected\")",
"def think(self):\n pass",
"def hire(name):\r\n print(\"A CEO cannot be hired outright\")",
"def silence(self):\n return 'Fine. Be that way!'",
"async def _bailout_heist(self, ctx, user: discord.Member=None):\r\n author = ctx.message.author\r\n theme = await self.thief.get_guild_theme(ctx.guild)\r\n\r\n t_bail = theme[\"Bail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if user is None:\r\n player = author\r\n else:\r\n player = user\r\n\r\n if await self.thief.get_member_status(player) != \"Apprehended\":\r\n return await ctx.send(\"{} is not in jail.\".format(player.display_name))\r\n\r\n cost = await self.thief.get_member_bailcost(player)\r\n if not await bank.get_balance(player) >= cost:\r\n await ctx.send(\"You do not have enough to afford the {} amount.\".format(t_bail))\r\n return\r\n\r\n if player.id == author.id:\r\n msg = (\"Do you want to make a {0} amount? It will cost {1} credits. If you are \"\r\n \"caught again, your next {2} and {0} amount will triple. \"\r\n \"Do you still wish to pay the {0} amount?\".format(t_bail, cost, t_sentence))\r\n else:\r\n msg = (\"You are about pay a {2} amount for {0} and it will cost you {1} credits. \"\r\n \"Are you sure you wish to pay {1} for {0}?\".format(player.name, cost, t_bail))\r\n\r\n await ctx.send(msg)\r\n response = await self.bot.wait_for('MESSAGE', timeout=15, check=lambda x: x.author == author)\r\n\r\n if response is None:\r\n await ctx.send(\"You took too long. canceling transaction.\")\r\n return\r\n\r\n if \"yes\" in response.content.lower():\r\n msg = (\"Congratulations {}, you are free! Enjoy your freedom while it \"\r\n \"lasts...\".format(player.display_name))\r\n await bank.withdraw_credits(author, cost)\r\n await self.thief.set_member_free(author)\r\n await self.thief.set_member_oob(author, False)\r\n elif \"no\" in response.content.lower():\r\n msg = \"Canceling transaction.\"\r\n else:\r\n msg = \"Incorrect response, canceling transaction.\"\r\n\r\n await ctx.send(msg)",
"def hey(self, msg):\n if issilence(msg):\n return \"Fine. Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"",
"def FlashBang(self):\t\t\n\t\tprint(self.name.Title() + \"FlashBang!\")",
"async def say(self, ctx, *args):\n if not args:\n await ctx.send('did you want me to say something?')\n return\n message = ' '.join(args)\n message = profanity_filter(message)\n await ctx.send(message)",
"def absorb(user):",
"async def ball(self, ctx, question):\r\n if ctx.message.author == self.bot.user:\r\n return\r\n answers = ['It is certain.', 'It is decidedly so.', 'Without a doubt.', 'Yes, definitely.', 'As I see it, yes.', 'Most likely.', 'Outlook good.', 'Yes.', 'Signs point to yes.',\r\n 'Reply hazy, try again.', 'Ask again later.', 'Better not tell you know.', 'Cannot predict now.', 'Concentrate and try again.',\r\n 'Don\\'t count on it.', 'My reply is no.', 'My sources say no.', 'Outlook not so good.', 'Very doubtful.', 'The chances are the same as you buying every pack, so not likely.']\r\n await self.bot.say('{}, {}'.format(ctx.message.author.mention, random.choice(answers).lower()))",
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()",
"def fire(name):\r\n print('A CEO cannot be fired')",
"def brain_status(self):\r\n return 'thinking...'",
"def say_meow(self):\n\n pylog.info('My master calls me {} and meow!'.format(self.name))",
"def notice(self):\n if(self.data[1][0:18:] == \"*** You are banned\"):\n username = SOCKET_TO_USERID[self.target]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(username)\n network = self.source[1]\n BANHANDLER.add_ban(10080, user_pseudonym, network, self.data[0], 1)\n self.message = self.message + \"\\r\\n :orcbot!@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You've been banned from this server\"\n\n self.send()",
"def beware_msg(msg):\n print(\"\\n\\n\\n************************************************************\")\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\\n\\n\")\n print(msg)\n print(\"\\n\\n\\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"************************************************************\\n\\n\\n\")",
"def warn():\n pass",
"async def bait(self, ctx:commands.Context):\r\n\r\n bait = ''\r\n member_bait = await self.config.member(ctx.message.author).bait()\r\n for i in member_bait.keys():\r\n if not member_bait[i] == 0:\r\n bait += f'{i}{\" \" * (25 - len(i))}{member_bait[i]}\\n'\r\n await ctx.send(f'You have:\\n```{bait[:-1]}```')",
"def bark(self):\n print(f\"{self.name} is now barking\")",
"def use(target, name):\n out = target.damage() + \"\\n\"\n return out + \"You swing the \" + name + \" at \" + target.name",
"def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")",
"def bad(self):\n raise NotImplementedError",
"def bad(self):\n raise NotImplementedError",
"def help_me():\n print(\"i'm trapped\")",
"async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))",
"async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950",
"def healthcare():",
"def hey(self, string):\n\n if self.nothing(string):\n return \"Fine. Be that way!\"\n\n if self.yelling(string):\n return \"Woah, chill out!\"\n\n if self.question(string):\n return \"Sure.\"\n\n if self.anything(string):\n return \"Whatever.\"",
"def malicious(self):\n return self.probably_malicious",
"def bot_see(self, mess, args):\n try:\n return \"%s is %s\" % (args, bc.__getattribute__(args))\n except AttributeError:\n return \"No such attribute\""
]
| [
"0.6400579",
"0.6223384",
"0.61093575",
"0.61012876",
"0.6063428",
"0.6029938",
"0.60248995",
"0.5914384",
"0.5907781",
"0.58853453",
"0.5845043",
"0.5832998",
"0.5768127",
"0.5746215",
"0.57017595",
"0.5700348",
"0.5680856",
"0.5654781",
"0.56456023",
"0.56436473",
"0.5607548",
"0.56049097",
"0.56049097",
"0.56037605",
"0.5602665",
"0.5591477",
"0.5587699",
"0.5568981",
"0.5560724",
"0.5530255"
]
| 0.67705065 | 0 |
You get a bob ross makeover. | async def bobross(self, ctx, member: discord.Member = None):
member = member if member else ctx.author
async with ctx.typing():
image = await self.request(endpoint="generators/bobross", params=f"?avatar={member.avatar_url_as(format='png')}")
await ctx.send(f"**{member.name}** just got a bob ross makeover.", file=discord.File(image, filename="file.png")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rop():\n return",
"def make_move(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game.game_over:\n raise endpoints.ForbiddenException(\n 'Illegal action: Game is already over.')\n\n HANDS = ['gun', 'water', 'snake']\n if request.hand.lower() not in HANDS:\n raise endpoints.BadRequestException(\n 'Select one of Snake Water Gun')\n\n player_hand = request.hand.lower()\n computer_hand = random.choice(HANDS)\n msg = 'Player \"' + player_hand + '\" vs ' + \\\n 'Computer \"' + computer_hand + '\", '\n\n if computer_hand == player_hand:\n result = 'Tie'\n elif computer_hand == 'water':\n if player_hand == 'snake':\n result = 'Player'\n elif player_hand == 'gun':\n result = 'Computer'\n elif computer_hand == 'snake':\n if player_hand == 'water':\n result = 'Computer'\n elif player_hand == 'gun':\n result = 'Player'\n elif computer_hand == 'gun':\n if player_hand == 'water':\n result = 'Player'\n elif player_hand == 'snake':\n result = 'Computer'\n\n if result == 'Player':\n message = msg + 'You win!'\n game.history.append(message)\n game.put()\n game.end_game(game=request.urlsafe_game_key, message=message,\n player_hand=player_hand, computer_hand=computer_hand,\n won=True)\n return game.to_form(message)\n elif result == 'Computer':\n message = msg + 'You lose!'\n game.history.append(message)\n game.put()\n game.end_game(game=request.urlsafe_game_key, message=message,\n player_hand=player_hand, computer_hand=computer_hand,\n won=False)\n return game.to_form(message)\n elif result == 'Tie':\n message = msg + 'Tie! You can\\'t beat me. I am challenging you!'\n game.history.append(message)\n game.put()\n game.end_game(game=request.urlsafe_game_key, message=message,\n player_hand=player_hand, computer_hand=computer_hand,\n won=False)\n return game.to_form(message)",
"def makeMove(self, move, player):",
"def generate_random_toy() -> Toy:\n dimensions = round(uniform(5, 100), 2)\n rooms_number = randint(1, 5)\n return SantaWorkShop(dimensions, rooms_number, 5)",
"def crossover(first_chromosome: str, second_chromosome: str, nurses_number: int = 10) -> (str, str):\n\n # calculate the number of genes\n genes = 21 * nurses_number\n # generated a position to crossover\n position = randrange(0, genes)\n\n # Calculate two new chromosomes\n new_first_chromosome = first_chromosome[:position] + second_chromosome[position:]\n new_second_chromosome = second_chromosome[:position] + first_chromosome[position:]\n\n # calculates the fitness of the new chromosomes generated\n new_first_chromosome_fitness = fitness_function(individual=new_first_chromosome, nurses_number=nurses_number)\n new_second_chromosome_fitness = fitness_function(individual=new_second_chromosome, nurses_number=nurses_number)\n\n # return the best chromosome generated\n if new_first_chromosome_fitness < new_second_chromosome_fitness:\n return new_first_chromosome\n\n return new_second_chromosome",
"def get_red():\n # return name of actor, movement speed\n zombies = ['Zombie-1','Zombie-2','Zombie-3']\n return choice(zombies), randint(1,4)",
"def crossover(p1, p2):\n genotype = []\n \n #Your code here\n \n return {'genotype': genotype, 'fitness': None}",
"def main():\n k = int(input(\"Enter ply (level from 0 to 5): \"))\n px = \"human\"\n po = Player(\"O\", \"LEFT\", k)\n b = Board(7, 6)\n playGame(b, px, po)",
"def Thunder(robot):\n robot_return = Random_Actuation.myopic(\"Thunder\",robot,0.5,20) # with a %20 chance myopic will be triggered\n robot_return = Random_Actuation.hobbler(\"Thunder\",robot,0.5,20) # with a %20 change hobbler will be triggered\n return robot_return",
"def play(self):\n board = Board()\n print(\"Let's play tic-tac-toe against computer!\")\n print(\"Here is your board!\")\n count = 1\n print(board)\n while True:\n board.person_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n board.make_computer_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n print(f\"Board after {count} action.\")\n count += 1\n print(board)",
"def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())",
"async def rock_paper_scissors(self, ctx):\n authorize(ctx, \"mentions\") # check for a mentioned user\n\n p1 = Player.get(ctx.author.id)\n p2 = Player.get(ctx.message.mentions[0].id)\n\n # Ensure player is someone else\n if p1 == p2:\n raise UserInputError(\"You can't play against yourself\")\n\n # Create new game\n embed = discord.Embed(title=\"Rock Paper Scissors\",\n description=f\"{p1.name} **VS** {p2.name}\\n\\nCheck DMs for how to play\")\n await ctx.send(embed=embed)\n game = RPS(ctx.channel, p1, p2)\n await game.send_dms()",
"def game_over(self):\n raise NotImplementedError(\"Abstract method\") # no mercy for stooges",
"def house ():\n\n poly (3,300,\"red\")\n penup()\n setposition(0,-300)\n pendown()\n poly (4,300,\"brown\")\n penup()\n setposition(100,-300)\n pendown()\n poly(4,100,\"green\") \n\n return None",
"def pony(var, wrapper, message):\n\n wrapper.send(messages[\"pony_toss\"].format(wrapper.source))\n # 59/29/7/5 split\n rnd = random.random()\n if rnd < 0.59:\n pony = messages.get(\"pony_land\", 0)\n elif rnd < 0.88:\n pony = messages.get(\"pony_land\", 1)\n elif rnd < 0.95:\n pony = messages.get(\"pony_land\", 2)\n else:\n pony = messages.get(\"pony_land\", 3)\n wrapper.send(pony.format(nick=wrapper.source))",
"def take_turn(self, opponent):\n\n # --------- BEGIN YOUR CODE ----------\n\n # 1.) Guess a random space that has not been guessed (or be more clever!)\n\n # Steps 2-4 are the same as Human.take_turn\n\n # 2.) Call opponent.guess() to check whether the guess is a hit or miss\n\n # 3.) Update my_hits, my_misses, and sunk_ships accordingly\n\n # 4.) If the sunk_ships array has 5 ships in it set self.complete to True\n\n # --------- END YOUR CODE ----------\n\n # enforce a short delay to make the computer appear to \"think\" about its guess\n time.sleep(0.5)",
"def test_get_battle(self):\n battle = self.battle\n\n s1 = battle.create_skirmish(self.alice, 1)\n s2 = battle.create_skirmish(self.bob, 1)\n\n s3 = s2.react(self.alice, 1)\n\n self.assertEqual(battle, s1.get_battle())\n self.assertEqual(battle, s3.get_battle())",
"def won(s,n):",
"def light_rollout(self, game):\n game = game.copy()\n while True:\n legal_player_moves = game.get_legal_moves()\n if not legal_player_moves:\n break\n game.apply_move(random.choice(legal_player_moves))\n return game",
"def hobbler(actuation_name,robot,speed_multiplier,possibility):\n goodluck = random.randint(0,100)\n \n if goodluck < possibility:\n robot.speed = robot.speed * speed_multiplier\n print(\"{0} is affected by {2}. Its speed multiplied by {1}!\".format(robot.name,speed_multiplier,actuation_name))\n return robot\n else:\n print(\"{1} effect is not succesful on {0}!\".format(robot.name,actuation_name))\n return robot",
"def make_move(self, move: Any) -> \"StonehengeState\":\n new_state = StonehengeState(self.p1_turn, self.length,\n self.letters[:], self.claim[:])\n state = new_state\n if new_state.length == 1:\n state = self.move_length_1(move, new_state)\n if new_state.length == 2:\n state = self.move_length_2(move, new_state)\n if new_state.length == 3:\n state = self.move_length_3(move, new_state)\n if new_state.length == 4:\n if move in [\"A\", \"B\", \"J\", \"O\", \"N\", \"R\",\n \"C\", \"F\", \"E\", \"I\", \"P\", \"Q\"]:\n state = self.move_length_4(move, new_state)\n else:\n state = self.move_length_41(move, new_state)\n if new_state.length == 5:\n if move in [\"A\", \"B\", \"U\", \"O\", \"T\", \"Y\",\n \"C\", \"J\", \"E\", \"N\", \"V\", \"X\"]:\n state = self.move_length_5(move, new_state)\n elif move in [\"F\", \"I\", \"W\"]:\n state = self.move_length_51(move, new_state)\n else:\n state = self.move_length_52(move, new_state)\n return state",
"def test_simple():\n game = Game(3, [0, 0], -1, 5, -5, 10, 1, [[0, 1]], [0.0])\n\n print(f\"Check the baby exists\\n{game.baby}\")\n\n print(\"\\nCheck the berry exists\")\n for berry in game.get_berries():\n print(berry)\n\n print(f\"\\nHere is the board\\n{game.get_board()}\")\n\n print(\"First let's perform an illegal move Northwards\")\n board, reward, done = game.step(\"N\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"E\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow we will move back to the original place and then eat the berry\")\n board, reward, done = game.step(\"W\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"S\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")",
"def getLoseActor(self):\n if (self.loseActor == None):\n if not self.isSkeleton:\n # standard cog\n filePrefix, phase = TutorialModelDict[self.style.body]\n loseModel = \"phase_\" + str(phase) + filePrefix + \"lose-mod\"\n loseAnim = \"phase_\" + str(phase) + filePrefix + \"lose\"\n \n # make the actor\n self.loseActor = Actor.Actor(loseModel, {\"lose\":loseAnim})\n \n # copy the current head to the lose actor\n loseNeck = self.loseActor.find(\"**/joint_head\")\n for part in self.headParts:\n part.instanceTo(loseNeck)\n\n # put the appropriate textures on the suit\n if self.isWaiter:\n self.makeWaiter(self.loseActor)\n else:\n self.setSuitClothes(self.loseActor)\n else:\n # skelecog\n loseModel = \"phase_5/models/char/cog\" + string.upper(self.style.body) + \"_robot-lose-mod\"\n filePrefix, phase = TutorialModelDict[self.style.body]\n loseAnim = \"phase_\" + str(phase) + filePrefix + \"lose\"\n \n # make the actor\n self.loseActor = Actor.Actor(loseModel, {\"lose\":loseAnim})\n\n # set the appropriate tie texture\n self.generateCorporateTie(self.loseActor)\n\n # set the scale on the lose actor\n self.loseActor.setScale(self.scale)\n\n # put lose actor where actor is\n self.loseActor.setPos(self.getPos())\n self.loseActor.setHpr(self.getHpr())\n\n # put a shadow under the lose actor\n shadowJoint = self.loseActor.find(\"**/joint_shadow\")\n dropShadow = loader.loadModel(\"phase_3/models/props/drop_shadow\")\n dropShadow.setScale(0.45)\n dropShadow.setColor(0.0, 0.0, 0.0, 0.5)\n dropShadow.reparentTo(shadowJoint)\n \n return(self.loseActor)",
"def war_instruction(self):\n\n goal = \"Try to win over all of the opponents cards\"\n\n rules = \"The number of cards in each hand as well as the \" \\\n \"cards played are displayed.\\nBoth cards are added to the\" \\\n \" side that has the higher Rank card.\\n\" \\\n \"If both cards have the same rank, 4 additional cards are \" \\\n \"removed from each side. \\nThe last card is used to \" \\\n \"determine the winner and gets all 10 cards.\\n\" \\\n \"If both cards have the same rank, but a side has less \" \\\n \"then 4 cards in their hand they lose\"\n\n instruct = \"Take Guess: Enter the guess sequence\" \\\n \"New Game: Play another round \\n\" \\\n \"Reset: Restart the round \\n\" \\\n \"Clear: Clear all game history \\n\" \\\n \"Back: Goes back to game start menu\"\n\n self.display.display_instruction(goal, rules, instruct)",
"def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)",
"def opp_turn(player, river, round_players):\r\n rank, hand = scan_cards(player, river)\r\n player.best_rank = rank\r\n rand = random.random()\r\n\r\n if player.cash == 0:\r\n player.check()\r\n else:\r\n if rank == 0:\r\n if rand <= .5: player.check()\r\n else: player.fold(round_players)\r\n elif rank == 1:\r\n if rand <= .33: player.check()\r\n elif rand <= .66 and rand > .33: player.opp_raise(rank)\r\n else: player.fold(round_players)\r\n elif rank == 2:\r\n if rand <= .33: player.check()\r\n elif rand <= .66 and rand > .33: player.opp_raise(rank)\r\n else: player.fold(round_players)\r\n elif rank == 3:\r\n if rand <= .5: player.check()\r\n elif rand <= .95 and rand > .5: player.opp_raise(rank)\r\n else: player.fold(round_players)\r\n else:\r\n player.opp_raise(rank)\r\n\r\n print ('')\r\n if player.action == 'raise':\r\n print (player.name, 'chooses to raise $', player.ante)\r\n else: print (player.name, 'chooses to ', player.action)\r\n print ('')",
"def randomly_spawn_mothership(self) -> None:\n return",
"def roof(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None, makeroof=True, makeceiling=True):\r\n global wallnum\r\n\r\n roof = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, y+height+self.ceilingthickness / 2, z), 0)\r\n self.walls.append(roof)\r\n roofmodel = Plane(w=length, h=width, name=name+str(wallnum))\r\n mergeshape.add(roofmodel,x,y+height+self.ceilingthickness,z,rx=90.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1",
"def lobby():\n return UncheckedPlace(\"Lobby\")",
"def launchGame(): \n # On rejoint la partie\n game.join()\n\n #On affecte le nom\n game.player.setName(options.name)\n\n #On créer une nouvelle fenetre\n win = createNewWin(curses)\n\n #On creer notre premiere pomme...\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\n #On indique la direction par defaut du serpent, il ira par defaut a droite\n key = curses.KEY_RIGHT\n\n #On effectue une boucle infinie tant que la touche Echap (27) n'est pas\n #pressée.\n while key != 27:\n #On ajoute le score a la ligne 0, colonne 2\n #Le score est calcule en recuperant la longueur du serpent actuel\n #et en retirant 2 (sa valeur initiale)\t\n win.addstr(0,2,' Joueur : %s Score : %s ' %(game.player.name, str(game.player.score)), curses.color_pair(1))\n\n #On calcul un mouvement de ralentissement dependant de la longueur du\n #serpent\n win.timeout(180+ ( (len(game.snake.oSnake)-2) % 10- (len(game.snake.oSnake)-2) ) * 3 )\n\n #On 'hook' les touches\n getkey = win.getch()\n\n #On recupere la valeur de la touche par defaut\n key = key if getkey==-1 else getkey\n\n #Suivant la touche pressée, on modifie les positions de notre serpent\n game.snake.move(key)\n\n #On supprime les derniers elements sur lequel le Snake passe\n win.addch(game.snake.oSnake[len(game.snake.oSnake)-1][1],\n game.snake.oSnake[len(game.snake.oSnake)-1][0],' ')\n\n #On supprime un element du snake pour eviter la collision\n if win.inch(game.snake.oSnake[0][1], game.snake.oSnake[0][0]) & 255 == 32:\n game.snake.oSnake.pop()\n\n #Si on passe sur un element O\t\n elif win.inch(game.snake.oSnake[0][1],game.snake.oSnake[0][0]) & 255 == ord('O'):\n #On ajoute 1 point a notre Joueur\n game.player.addPoint()\n\n #On recalcule des nouvelles coordonnees pour la pomme\n game.apple.newApple()\n #On verifie les nouvelles coordonnees\n while game.apple.checkApple(game.snake.oSnake) != True:\n game.apple.newApple()\n\n #On l'affiche a l'ecran\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\t\t\n else:\n break\n\n #On affiche une partie de notre Snake\n win.addch(game.snake.oSnake[0][1],game.snake.oSnake[0][0],'X', curses.color_pair(2))\n\n\n #Si on sort de la boucle (GameOver), alors on\n #détruit les fenetres\n destroyWin()\n\n #A la fin de la partie (game over), on affiche l'écran \n showGameOver()"
]
| [
"0.58722734",
"0.55447125",
"0.5503272",
"0.5502197",
"0.5462816",
"0.5372951",
"0.53554696",
"0.5328905",
"0.532275",
"0.5309029",
"0.52666605",
"0.52522546",
"0.5248764",
"0.52413",
"0.5210267",
"0.52007526",
"0.518793",
"0.51843506",
"0.51795036",
"0.5164575",
"0.51561105",
"0.51449263",
"0.5140038",
"0.5127919",
"0.5126633",
"0.5113476",
"0.50994134",
"0.5095197",
"0.5087138",
"0.50717396"
]
| 0.5549154 | 1 |
You can make anyone become a challenger. | async def challenger(self, ctx, member: discord.Member = None):
member = member if member else None
async with ctx.typing():
image = await self.request(endpoint="generators/challenger", params=f"?avatar={member.avatar_url_as(format='png')}")
await ctx.send(f"**{member.name}** is now the challenger.", file=discord.File(image, filename="file.png")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def plaguedoctor(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"Doctor\")\n await self.notify_user(ctx=ctx, user=ctx.author, notificationType=\"doctor\")\n await ctx.send(f\"{ctx.author} has spent 10,000 {currency} and become a Doctor.\")",
"async def plaguebearer(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"Plaguebearer\")\n await self.notify_user(ctx=ctx, user=ctx.author, notificationType=\"plaguebearer\")\n await ctx.send(f\"{ctx.author} has spent 10,000 {currency} and become a Plaguebearer.\")",
"async def setcoachchannel(self, ctx, channel: int):\r\n if ctx.guild.id == 445092370006933505:\r\n await self.config.guild(ctx.guild).coachchannel.set(int(channel))\r\n await ctx.send(\"You set {} as the coaching channel\".format(channel))\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")",
"async def new(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n host = ctx.message.author\n if room not in tod_games:\n tod_games[room] = {'host': host.name, 'host_id': host.name, 'participants': {}, 'last': None}\n tod_games[room]['current'] = host.name\n tod_games[room]['last'] = host.name\n tod_games[room]['participants'][host.name.lower()] = {'spins': 0}\n await amor_manager.say(\"New Game of Truth Or Dare started in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Truth or Dare already in progress in {}. Game host: {}\".format(room, host))",
"def invite(self):\n pass",
"async def CelebrityMasterChef(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)",
"async def set_channel(self, ctx, channel):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n self.stream_channel = channel\n await self.bot.say(\"Channel sucessfully assigned.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")",
"def talk(self):\r\n super().talk()\r\n print(\"meow\")",
"def talk(self):\r\n super().talk()\r\n print(\"Good day, darling\")",
"def whisper(self,name):\n\n self.sendCommand(\"global /join\",name+self.userName+\" private\")\n self.master.after(300,self.sendCommand,name+self.userName+\" /invite\",name)",
"def talk(self):\n print('Meow!')",
"def actor():\n return Actor()",
"def connect(self):\n\n label = self.scope[\"url_route\"][\"kwargs\"][\"label\"]\n self.user = self.scope[\"user\"]\n\n try:\n room = Relationship.objects.get(label=label)\n except Relationship.DoesNotExist:\n log.warning('No relationship have this label=%s', label)\n self.close()\n return\n except Exception as error:\n log.error(\"建立聊天室channel時發生錯誤: %s\" % error)\n self.close()\n return\n\n if not (room.client == self.user or room.performer == self.user):\n log.warning(\n '%s try to connect to the relationship that not belog to him', self.user)\n self.close()\n return\n\n self.scope[\"room\"] = room\n # Accept the incoming connection\n self.accept()\n\n async_to_sync(self.channel_layer.group_add)(\n \"chat\" + str(label), self.channel_name)",
"async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))",
"def talk(self):\r\n super().talk()\r\n print(\"tsssss\")",
"def process(self, car):\n super(NewSupervisorMessage, self).process(car)\n if car.get_name() == self.get_new_supervisor_name():\n car.make_supervisor(self)",
"async def vouch(ctx, *, member_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n server = ctx.message.server\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n member = discord.utils.find(lambda c: c.name.lower() == member_name.lower(), server.members)\n roles = member.roles\n new_role = discord.utils.find(lambda r: r.name.lower() == required_role, server.roles)\n roles.append(new_role)\n await amor_manager.replace_roles(member, *roles)\n await amor_manager.say('{0} granted citizenship'.format(member.name))",
"async def setcoachrole(self, ctx, role: discord.Role):\r\n if ctx.guild.id == 445092370006933505:\r\n id = role.id\r\n await self.config.guild(ctx.guild).coachid.set(int(id))\r\n await ctx.send(\"You set {} as the coach role id\".format(role.id))\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")",
"async def auto(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'Still working on integration with the election results. Maybe have a command to link to an elections '\n 'database?')\n else:\n await ctx.message.channel.send('Hey! You do not have permission to do that.')",
"async def games(ctx):\n games_channel = discord.utils.get(ctx.message.author.guild.text_channels, name=CHANNEL_GAMES)\n member = ctx.message.author\n role = discord.utils.get(member.guild.roles, name=ROLE_GAMES)\n if role in member.roles:\n await member.remove_roles(role)\n await ctx.send(\"Removed you from the games club... feel free to come back anytime!\")\n await games_channel.send(f\"{member.mention} left the party.\")\n else:\n await member.add_roles(role)\n await ctx.send(f\"You are now in the channel. Come and have fun in {games_channel.mention}! :tada:\")\n await games_channel.send(f\"Please welcome {member.mention} to the party!!\")",
"async def tod_join(self, ctx, *args):\n if ctx.author not in self.players:\n self.players.append(ctx.author)\n message = f\"{ctx.author.mention} has been added to the game!\"\n await ctx.send(message)\n else:\n message = f\"{ctx.author.mention} has already joined!\"\n await ctx.send(message)\n\n # Updates the role if channel exists\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)\n return\n\n # Creates the channel if it doesn't exist\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n bots = discord.utils.get(ctx.guild.roles, name=\"Bots\")\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False),\n bots: discord.PermissionOverwrite(read_messages=True, send_messages=True),\n role: discord.PermissionOverwrite(read_messages=True, send_messages=True, connect=True, speak=True)\n }\n await ctx.guild.create_text_channel('truth-or-dare', overwrites=overwrites)\n await ctx.guild.create_voice_channel('secret-voice', overwrites=overwrites)\n\n # Adds the role\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)",
"def participate(self):\n if self.allow_reco():\n self.config[self.id] = self.chs_config()",
"async def coach(ctx):\n await ctx.send(\"If you would like to apply for the `Coach` role, please fill out the form here: <https://forms.gle/UBKpWgqCr9Hjw9sa6>.\")",
"def start(update, context):\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=\"Hi human\")",
"def talk(self):\r\n super().talk()\r\n print(\"woof woof\")",
"async def cuddle(self, ctx, member: discord.Member=None):\n if member is None:\n await ctx.send('You need to tell who to cuddle with.')\n return\n if member.id == ctx.author.id:\n await ctx.send('How is that even possible?')\n return\n e = discord.Embed(title=\"{} has cuddled with {}.\".format(member.name, ctx.author.name), color=discord.Color.magenta())\n e.set_image(url=nekos.img('cuddle'))\n await ctx.send(embed=e)",
"async def assign_clan(self, ctx, user : discord.Member, *, clanname=\"\"):\r\n nickname = '[{}] {}'.format(clanname.strip(), user.name)\r\n if clanname == \"\":\r\n nickname = None\r\n try:\r\n await self.bot.change_nickname(user, nickname)\r\n await self.bot.say(\"Done.\")\r\n except discord.Forbidden:\r\n await self.bot.say(\"I cannot do that, I lack the \"\r\n \"\\\"Manage Nicknames\\\" permission.\")",
"def send_letter_everyone(d):\n print(\"Letters have been sent to all the donors!!!\")\n d.send_letter_everyone()",
"async def _new_player_greeter(self, connection):\n await asyncio.sleep(1.3)\n send_message(connection,\n \"{}\".format(self.greeting),\n mode=ChatReceiveMode.RADIO_MESSAGE)\n return",
"async def devox(self, ctx):\n member = discord.utils.find(lambda m: m.id == 250865328194715658, ctx.channel.guild.members)\n await ctx.send(\"{} The great man who created this bot some people say he has too much power, but the truth is he doesnt have enough\".format(member.mention))"
]
| [
"0.61620516",
"0.5957869",
"0.5880904",
"0.58242786",
"0.56902057",
"0.56367546",
"0.55845785",
"0.5557291",
"0.5493999",
"0.5475206",
"0.5438497",
"0.54368716",
"0.5435145",
"0.5413625",
"0.540432",
"0.53667754",
"0.5364103",
"0.53528106",
"0.53496534",
"0.5336778",
"0.5326689",
"0.53254724",
"0.5313996",
"0.5313209",
"0.5307746",
"0.53076464",
"0.5307616",
"0.5301814",
"0.52965695",
"0.5279262"
]
| 0.5974989 | 1 |
Test the conversion of a dummy wheel foobar | def test_conversion(tmp_path, wheel_path):
os.chdir(str(tmp_path))
# convert wheel to debian source package
with patch.object(sys, 'argv', ['', '-x', str(wheel_path.parent)]):
with patch.object(wheel2deb.sys, "exit") as mock_exit:
wheel2deb.main()
assert mock_exit.call_args[0][0] == 0
unpack_path = tmp_path / 'output/python3-foobar_0.1.0-1~w2d0_all'
assert unpack_path.exists()
# build source package
with patch.object(sys, 'argv', ['', 'build']):
with patch.object(wheel2deb.sys, "exit") as mock_exit:
wheel2deb.main()
assert mock_exit.call_args[0][0] == 0
# output dir should contain a .deb
package_list = list((tmp_path / 'output').glob('*.deb'))
assert package_list
package_path = package_list[0]
assert package_path.name.startswith('python3-foobar_0.1.0-1')
package_hash = digests(package_list[0])
# check that the entrypoint will be installed in /usr/bin
entrypoint = (unpack_path / 'debian/python3-foobar/usr/bin/entrypoint')
assert entrypoint.exists()
# check shebang
with open(str(entrypoint), 'r') as f:
shebang = f.readline()
assert shebang.startswith('#!/usr/bin')
# idempotence: delete package, rerun build command
# and check that both packages have the same hash
package_list[0].unlink()
with patch.object(sys, 'argv', ['', 'build']):
with patch.object(wheel2deb.sys, "exit") as mock_exit:
wheel2deb.main()
assert mock_exit.call_args[0][0] == 0
assert digests(package_path) == package_hash | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wheel(string = None):\n None",
"def test_three_disemvowel_carson():\n from disemvowel_trolls import disemvowel\n try:\n assert disemvowel(0b1101) == TypeError\n except TypeError:\n print('Cannot take binary numbers!')",
"def test_dummy(self, data):\r\n source, expected = data\r\n result = self.converter.convert(source)\r\n self.assertUnicodeEquals(result, expected)",
"def test_to_celcius():\n\tassert to_celcius(32) == 0\n\tpass",
"def describe_a_library_of_units_converters_that():\n def blows_smoke():\n assert True\n\n def can_convert_psi_to_kpa():\n assert psi2kpa(32) == 220.631712 # 32 PSI == 220.631712 KPa; average car tire pressure\n assert psi2kpa(8.5) == 58.6052985 # 8.5 PSI == 58.6052985 KPa; basketball pressure\n\n # def can_convert_kpa_to_psi():\n # assert kpa2psi(101.325) == 14.695952495133 # KPa => PSI; average air pressure at sea level\n # assert kpa2psi(220.631712) == 31.999932479367043 # KPa => PSI; average car tire pressure\n\n # def can_convert_mpg_to_lp100k():\n # assert mpg2lp100k(40) == 5.8803694563 # miles-per-gallon => liters per 100km\n # assert mpg2lp100k(25) == 9.408591130080001 # miles-per-gallon => liters per 100km\n\n # def can_convert_lp100k_to_mpg():\n # assert lp100k2mpg(9.4) == 25.022895167663442 # liters per 100km => mpg\n # assert lp100k2mpg(5.1) == 46.12063030902673 # liters per 100km => mpg",
"def test_active_inference_SPM_1b(self):",
"async def test_sensor_defaults_binary(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"binary_sensor.test_monitored\",\n \"state_characteristic\": \"count\",\n \"sampling_size\": 20,\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_BINARY:\n hass.states.async_set(\n \"binary_sensor.test_monitored\",\n value,\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == str(len(VALUES_BINARY))\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None\n assert state.attributes.get(ATTR_STATE_CLASS) is SensorStateClass.MEASUREMENT\n assert state.attributes.get(\"buffer_usage_ratio\") == round(9 / 20, 2)\n assert state.attributes.get(\"source_value_valid\") is True\n assert \"age_coverage_ratio\" not in state.attributes",
"def test_dummy():",
"def test_output_exists():\n assert song_decoder(\"WUWUBUBWUBUWUB\") is not None",
"def test_convert():",
"def test_convert_logical():",
"def test_convert_invalid_unit():\n with pytest.raises(ValueError):\n pressure_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)\n\n with pytest.raises(ValueError):\n pressure_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)",
"def test_chao1_bias_corrected(self):\n obs = chao1_bias_corrected(*osd(self.TestData))\n self.assertEqual(obs, 9.75)",
"def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5",
"def test_firmware_version(self):\n self._verify_firmware_version()",
"def test_init():\n rng = NonRandom()\n seed = 5\n rng.setSeed(seed)\n wheel = Wheel(rng)\n assert len(wheel.bins) == 38\n assert wheel.rng.value == seed\n assert wheel.rng.choice(range(0, 38)) == range(\n 0, 38)[wheel.rng.value] # == seed",
"def test_find_wheel_supported(\n self, data: TestData, monkeypatch: pytest.MonkeyPatch\n ) -> None:\n monkeypatch.setattr(\n pip._internal.utils.compatibility_tags,\n \"get_supported\",\n lambda **kw: [(\"py2\", \"none\", \"any\")],\n )\n\n req = install_req_from_line(\"simple.dist\")\n finder = make_test_finder(find_links=[data.find_links])\n found = finder.find_requirement(req, True)\n assert found is not None\n assert found.link.url.endswith(\"simple.dist-0.1-py2.py3-none-any.whl\"), found",
"async def test_no_binary_sensors(hass, aioclient_mock):\n await setup_deconz_integration(hass, aioclient_mock)\n assert len(hass.states.async_all()) == 0",
"def _dummy(*args, **kwargs):\n pass",
"def test_double_sharp_2():\n assert note_to_frequency(\"Ax4\") == note_to_frequency(\"B4\")",
"def test_find_microbit_unknown_os():\n with mock.patch('os.name', 'foo'):\n with pytest.raises(NotImplementedError) as ex:\n uflash.find_microbit()\n assert ex.value.args[0] == 'OS \"foo\" not supported.'",
"def test_not_find_wheel_not_supported(self, data: TestData) -> None:\n req = install_req_from_line(\"simple.dist\")\n target_python = TargetPython()\n # Make sure no tags will match.\n target_python._valid_tags = []\n finder = make_test_finder(\n find_links=[data.find_links],\n target_python=target_python,\n )\n\n with pytest.raises(DistributionNotFound):\n finder.find_requirement(req, True)",
"def test_measurement(lasco):\n assert lasco.measurement == \"white-light\"",
"def test_f2c():\n assert temperatura.f2c(32) == 0",
"def test_temperature_to_homekit():\n assert temperature_to_homekit(20.46, TEMP_CELSIUS) == 20.5\n assert temperature_to_homekit(92.1, TEMP_FAHRENHEIT) == 33.4",
"def test_chao1_uncorrected(self):\n obs = chao1_uncorrected(*osd(self.TestData))\n self.assertEqual(obs, 10.5)",
"def dummy(self):\n pass",
"def test_defaults(self):\n o = ofed()\n self.assertEqual(str(o),\nr'''# OFED\nRUN apt-get update -y && \\\n apt-get install -y --no-install-recommends \\\n dapl2-utils \\\n ibutils \\\n ibverbs-utils \\\n infiniband-diags \\\n libdapl-dev \\\n libibcm-dev \\\n libibmad5 \\\n libibmad-dev \\\n libibverbs1 \\\n libibverbs-dev \\\n libmlx4-1 \\\n libmlx4-dev \\\n libmlx5-1 \\\n libmlx5-dev \\\n librdmacm1 \\\n librdmacm-dev \\\n opensm \\\n rdmacm-utils && \\\n rm -rf /var/lib/apt/lists/*''')",
"def test_binary_helpers(self, number, expected):\n self.assertEqual(positional.from_binary(expected), number)\n self.assertEqual(positional.to_binary(number), expected)\n self.assertEqual(positional.to_binary(str(number)), expected)",
"def test_download_package__prefers_wheels(bucket_and_keys):\n\n bucket, keys = bucket_and_keys\n\n with mock.patch.object(download, \"write_key\") as patched_write:\n download.download_package(bucket, parse_package(\"package_two==0.0.1\"))\n\n patched_write.assert_called_once_with(keys[6])"
]
| [
"0.61710703",
"0.5704849",
"0.56919855",
"0.56591934",
"0.561975",
"0.5601396",
"0.55967706",
"0.5548598",
"0.5532267",
"0.55302954",
"0.54968655",
"0.54931015",
"0.5457107",
"0.5455384",
"0.54299057",
"0.54211265",
"0.5367547",
"0.5317658",
"0.52506894",
"0.52394545",
"0.52268595",
"0.5202266",
"0.5184322",
"0.51558083",
"0.514856",
"0.51470757",
"0.5116291",
"0.5114063",
"0.5110587",
"0.5108835"
]
| 0.58035195 | 1 |
Test starting aggregation when already running. | def test_start_already_running(self, mock_add_job, mock_get_job):
mock_get_job.return_value = MagicMock()
result = self.aggregator.start(self.node_id)
self.assertFalse(result)
self.assertFalse(mock_add_job.called) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_run_started(self):",
"def pytest_started_handling_group(session, worker):",
"def start_monitoring(self):\n pass",
"def test_001_start(self):\n HEADING()\n self.db.start()\n up = self.db.isup()\n result = up\n assert result",
"def process_test_start(self, config, results, result_id, db):\n pass",
"def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()",
"def startTestRun(self):",
"def test_main(self):\n agg_list = generate_aggregation_list(self.config, self.files)\n evaluate_aggregation_list(self.config, agg_list, self.file)\n\n with nc.Dataset(self.file) as nc_in:\n status = nc_in.variables[\"status\"]\n # there should be no fill values...\n # before ncagg v0.8.5 vlen types like string incorrectly aggregated to all fill values.\n self.assertFalse(any(status[:] == status._FillValue))",
"def test_startService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the process\r\n self.reactor.advance(0)\r\n self.assertTrue(\"foo\" in self.pm.protocols)",
"def test_start_scan(self):\n pass",
"def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))",
"def isstarted():",
"def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)",
"def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)",
"def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())",
"def started(self):\n return False",
"def setUp(self):\r\n self.reactor = DummyProcessReactor()\r\n self.pm = ProcessMonitor(reactor=self.reactor)\r\n self.pm.minRestartDelay = 2\r\n self.pm.maxRestartDelay = 10\r\n self.pm.threshold = 10",
"def test_base(self):\n self.render_config_template(\n path=os.path.abspath(self.working_dir) + \"/log/*\"\n )\n\n sqsbeat_proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"sqsbeat is running\"))\n exit_code = sqsbeat_proc.kill_and_wait()\n assert exit_code == 0",
"def start_check(self):\n pass",
"def test_spawn(self):\n self.grid.spawn()\n # import sys\n # sys.stderr.write(\"\\nrooms: \" + repr(xyzroom.XYZRoom.objects.all()))\n # sys.stderr.write(\"\\n\\nexits: \" + repr(xyzroom.XYZExit.objects.all()) + \"\\n\")\n\n self.assertEqual(xyzroom.XYZRoom.objects.all().count(), 4)\n self.assertEqual(xyzroom.XYZExit.objects.all().count(), 8)",
"def test_aggregate(self, mock_async):\n from blotter.core.aggregation import AGGREGATION_QUEUE\n from blotter.core.aggregation.trends import aggregate\n from blotter.core.api.controller import aggregate_trends\n\n mock_async.return_value = mock_async\n\n _, status = aggregate_trends()\n\n self.assertEqual(200, status)\n mock_async.assert_called_once_with(target=aggregate,\n queue=AGGREGATION_QUEUE)\n mock_async.start.assert_called_once_with()",
"def test_starting_measure_no_measure_enqueued(processor):\n processor.start_measure(None)\n process_and_join_thread(processor._thread)\n assert not processor.active",
"def IsStarted(self) :\n\t\t...",
"def assert_pipeline_running(self, request):\r\n self.assertTrue(pipeline.running(request))",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def test_start(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n container.stop()\n\n from dockerdb.commands.start import start\n\n start()\n\n assert container.status == \"running\"",
"def test_ipam_aggregates_create(self):\n pass",
"async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)"
]
| [
"0.620224",
"0.6192483",
"0.61083657",
"0.6017111",
"0.59069383",
"0.5893984",
"0.58389163",
"0.5824268",
"0.5810762",
"0.5804959",
"0.5804544",
"0.5776305",
"0.57740265",
"0.576617",
"0.5732756",
"0.5713319",
"0.5702192",
"0.56798035",
"0.56587154",
"0.5643842",
"0.5602294",
"0.5598075",
"0.5597066",
"0.55853355",
"0.55842566",
"0.55842566",
"0.55842566",
"0.55685717",
"0.5568318",
"0.5562045"
]
| 0.6298067 | 0 |
Test stopping aggregation with nothing running. | def test_stop_nothing(self, mock_get_job):
mock_get_job.return_value = None
result = self.aggregator.stop(self.node_id)
self.assertFalse(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopTestRun(self):",
"def test_terminate_run(self):\n pass",
"def test_999_stop(self):\n HEADING()\n self.db.stop()\n result = True\n assert result",
"def stopTest(self, test):",
"def stopTest(self, test):\n self.complete_output()",
"def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)",
"def stop(self):\n self._should_run = False",
"def test_teardown(self):\n assert self.search_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)",
"def stopCond(self):\n\t\treturn False",
"def stop_check(self):\n pass",
"def _run(self):\n logging.warning('-> perform EMPTY experiment...')",
"def test_run_ended(self):",
"def stop() -> None:",
"def test_stopped_already_have_result(self):\n registry = ResultRegistry()\n er = EventualResult(succeed(123), None)\n registry.register(er)\n registry.stop()\n self.assertEqual(er.wait(0.1), 123)\n self.assertEqual(er.wait(0.1), 123)\n self.assertEqual(er.wait(0.1), 123)",
"def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()",
"def stop_run(arn=None):\n pass",
"def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()",
"def stop(self):\n self._run = False",
"def stop(self):\n pub = rospy.Publisher('robot/set_super_stop', Empty, queue_size=10)\n baxter_dataflow.wait_for(\n test=lambda: self._state.stopped == True,\n timeout=3.0,\n timeout_msg=\"Failed to stop the robot\",\n body=pub.publish,\n )",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def process_test_stop(self, config, results, result_id, db):\n pass",
"def test_alerts_when_no_breath(app, events, data):\n time_intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / time_intervals)\n app.run_iterations(num_of_samples)\n assert alerts.AlertCodes.NO_BREATH in events.alerts_queue.active_alerts, \\\n f\"NO_BREATH missing from: {events.alerts_queue.active_alerts}\"",
"def stopProducing(self):\n pass",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()"
]
| [
"0.65075177",
"0.6409028",
"0.63663286",
"0.6074536",
"0.60525125",
"0.6051991",
"0.60442203",
"0.6043363",
"0.6023016",
"0.59951484",
"0.5951202",
"0.594287",
"0.59113306",
"0.58873826",
"0.5834736",
"0.58129764",
"0.5767303",
"0.5767132",
"0.57477456",
"0.57437325",
"0.57437325",
"0.572013",
"0.5685167",
"0.56832296",
"0.56801206",
"0.56801206",
"0.56801206",
"0.56801206",
"0.56801206",
"0.56801206"
]
| 0.700267 | 0 |
Test storing data with an API error. | def test_store_data_api_error(self, mock_contact):
message = b'Something went wrong. Our team has been notified.'
mock_contact.side_effect = ApiException(
HTTPStatus.INTERNAL_SERVER_ERROR,
message
)
self.aggregator._store_data(self.node_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_error_logging(self):\n # Verify nothing in the journal\n assert len(Record.objects.recent('heartbeat')) == 0\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': 'foosurvey',\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n errors = json.loads(resp.content)['errors']\n assert len(errors) > 0\n\n # Verify there's one entry now.\n assert len(Record.objects.recent('heartbeat')) == 1",
"def test_storing_wrong_data(self):\n data = {\"employer\": \"Trading Ltd\", \"jobTitle\": \"Assistant\", \"jobLocation\": \"5th street\",\n \"fromMonth\": \"something\",\n \"fromYear\": 2007, \"toMonth\": \"another\", \"toYear\": 2010, \"stillWorkHere\": False}\n response = self.client.post(self.url, data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())",
"def test_api_key_error(api):\n\twith pytest.raises(top_stories.APIKeyError):\n\t\tmissingAPI = top_stories.TopStoriesAPI()",
"def test_error1(self):\n try:\n api = self.load_api_description('error1.json')\n self.fail('No error thrown for undefined type')\n except APIDescriptionException:\n pass",
"def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)",
"def test_storing_missing_data(self):\n data = {\"employer\": \"Trading Ltd\", \"jobTitle\": \"Assistant\", \"jobLocation\": \"5th street\",\n \"fromMonth\": \"january\", \"fromYear\": 2007}\n response = self.client.post(self.url, data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())",
"def test_model_details_failure(self):\n\n # GIVEN invalid model ID\n model_id = 300\n\n # WHEN model details are retrieved\n response = self.api.dataid(self.app_label, self.model_name2, model_id)\n\n # THEN it should fail\n self.assertTrue(response.error)",
"def test_server_error(self):\n self._error_test(fitbit_exceptions.HTTPServerError)",
"async def test_api_state_change_with_bad_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.post(\n \"/api/states/test_entity.that_does_not_exist\", json={}\n )\n\n assert resp.status == HTTPStatus.BAD_REQUEST",
"def test_update_unexpected_error(self, data_update, requests_mock, capsys):\n requests_mock.put(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.update(data_url, data=data_update)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out",
"def test_manual_entry_error(self):\r\n self._login_admin()\r\n # no url entered\r\n res = self.app.post(\r\n '/admin/new_error',\r\n params={\r\n 'url': '',\r\n 'description': '',\r\n 'extended': '',\r\n 'tags': ''\r\n })\r\n self.assertIn('not valid', res.body)",
"def test_api_for_invalid_ticker(self):\n ticker = \"xxx\"\n name = \"Julian\"\n data = {'name': name, 'ticker': ticker}\n # pylint: disable=broad-except\n req = self.client.post('/stocks/addstock/', data, follow=True, secure=True)\n # pylint: enable=broad-except\n self.assertEqual(req.status_code, 500)\n data = DailyStockQuote.objects.all()\n self.assertEqual(len(data), 0)",
"def test_error3(self):\n try:\n api = self.load_api_description('error3.json')\n self.fail('No error thrown for undefined input segment')\n except APIDescriptionException:\n pass",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_get_error_parameters(self):\n with app.app_context():\n data_github = {\n \"version_control\": \"github\",\n \"scm_repo\": \"BB\",\n \"scm_branch\": \"BB\",\n \"scm_commit\": \"BB\",\n \"repo\": \"BB1\",\n \"branch\": \"BB1\",\n \"enabled\": True\n }\n\n data_git = {\n \"version_control\": \"github\",\n \"scm_repo\": \"BB\",\n \"scm_branch\": \"BB\",\n \"scm_commit\": \"BB\",\n \"repo\": \"BB2\",\n \"branch\": \"BB2\",\n \"enabled\": True\n }\n\n for data_insert in [data_github, data_git]:\n create_tracking(data_insert)\n\n resp = self.client.get(\"/tracking?oper=B&chcnsrb=B\")\n\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.INPUT_PARAMETERS_ERROR, resp_dict.get(\"code\"), msg=\"Error in status code return\"\n )\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.INPUT_PARAMETERS_ERROR),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(resp_dict.get(\"data\"), None, msg=\"Error in data information return\")",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)",
"def test_create_with_bad_backend(self):\n # Count the number of records before the save\n post_data = {\n 'source_type': 'test',\n 'source_id': '4bCOAuhvjsxbVBM5MM8oik',\n }\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')",
"def test_unknown_api_key(self, app, data_queues, redis, metricsmock, logs):\n res = self._call(app, api_key=\"abcdefg\", ip=self.test_ip, status=400)\n self.check_response(data_queues, res, \"invalid_key\")\n metricsmock.assert_incr_once(\n self.metric_type + \".request\", tags=[self.metric_path, \"key:invalid\"]\n )\n assert redis.keys(\"apiuser:*\") == []\n assert logs.only_entry[\"api_key\"] == \"invalid\"\n assert logs.only_entry[\"invalid_api_key\"] == \"abcdefg\"",
"def test_ping_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"Ping failed.\"\n }\n self.test_ping.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_update_with_invalid_data(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_invalid_data2, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_missing_data(self):\n survey = SurveyFactory.create()\n\n orig_data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n for key in orig_data.keys():\n data = dict(orig_data)\n del data[key]\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n resp_data = json.loads(resp.content)\n assert key in resp_data['errors']",
"def test_error(cls, err, data):\n do_error_test(cls, err, data)",
"def test_http_error(self):\n self.assertEqual(-1, self.__uft.failed_tests('raise'))\n self.assertEqual(-1, self.__uft.passed_tests('raise'))\n self.assertEqual(-1, self.__uft.skipped_tests('raise'))",
"async def test_unknown_error(hass: HomeAssistant, api_error: Mock) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}\n )\n assert result.get(\"type\") == data_entry_flow.FlowResultType.FORM\n assert result.get(\"step_id\") == \"user\"\n\n # Test filling in API key\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER},\n data={CONF_API_TOKEN: \"psk_123456789\"},\n )\n assert result.get(\"type\") == data_entry_flow.FlowResultType.FORM\n # Goes back to the user step\n assert result.get(\"step_id\") == \"user\"\n assert result.get(\"errors\") == {\"api_token\": \"unknown_error\"}",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test_fitbit_error(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPServerError)\n self._check_response(response, 106)",
"def test_error_data_order(client):\n data = dict(product_name=\"Latte\")\n response = client.post(\"/api/order\", headers=HEADERS, json=data)\n assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY",
"def testInvalidData(self):\n data = {\n \"title\": 32,\n \"rent\": 700\n }\n\n response = self.client.post(\"/api/posts\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n self.assertEqual(response.status_code, 422)\n\n data = json.loads(response.data)\n\n self.assertEqual(data[\"message\"], \"32 is not of type 'string'\")",
"def test_api_response_data(self):"
]
| [
"0.6994985",
"0.6812882",
"0.68098664",
"0.67812014",
"0.6707243",
"0.66378",
"0.6635431",
"0.66223747",
"0.6614883",
"0.66063714",
"0.66034055",
"0.65716434",
"0.6560661",
"0.6547544",
"0.652576",
"0.6511458",
"0.65089375",
"0.6506325",
"0.6502515",
"0.6500235",
"0.64576375",
"0.64489335",
"0.6445863",
"0.6439109",
"0.6432374",
"0.6423047",
"0.6418353",
"0.64149505",
"0.6403709",
"0.6394698"
]
| 0.727082 | 0 |
Read response data from RS485, extract direction, convert it to text. finally print the direction text. | def read_response():
global rs485
# Response in 7 bytes
buf = bytearray( 7 )
rs485.read( buf )
#print( 'Buffer: ', hex(buf[0]), hex(buf[1]), hex(buf[2]), hex(buf[3]), hex(buf[4]), hex(buf[5]), hex(buf[6]) )
# Decode the response
# 0 & 1 are the slave addr + function code
if not( (buf[0]==0x02) and (buf[1]==0x03) ):
raise Exception( 'Invalid Slave/function' )
if buf[2] != 0x02:
raise Exception( 'Invalid response length' )
# bytes 3 & 4 are the data. With value from 0 to 15, we do only need the
# lower byte value (higher byte will always be 0)
# print the direction label
label = dir_as_text( buf[4] )
print( 'Direction:', label )
# bytes 5 & 6 are CRC (not checked here) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readAndRespond(self):\n\n if self.ser.isOpen():\n try:\n #Try to read\n self.ser.flushOutput()\n response = self.ser.readline()\n self.parseString(response)\n print response\n #if response.strip() == \"up\":\n # self.moveArmUp()\n # print \"Moving Up!\"\n #elif response.strip() == \"down\":\n # self.moveArmDown()\n # print \"Moving Down!\"\n except Exception, e:\n print \"Error: \" + str(e)",
"def read_and_print(serial):\r\n resp = read_buffer(serial)\r\n if resp != \"\":\r\n print(resp)",
"def read_response(self):\n counter = 0\n rx_pkt_done = 0\n while not rx_pkt_done:\n uart_status, tx_buff_full, tx_buff_empty, rx_buff_full, rx_buff_empty, rx_pkt_done = self.x10g_rdma.read_uart_status()\n counter += 1\n if counter == 15001:\n print(\"\\n\\t read_response() timed out waiting for uart!\\n\")\n break\n response = self.x10g_rdma.uart_rx(0x0)\n # print(\"R: {}. {}\".format(response, counter))\n # print(\"... receiving: {} ({})\".format(' '.join(\"0x{0:02X}\".format(x) for x in response), counter))\n return response\n # return self.x10g_rdma.uart_rx(0x0)",
"def __get_response(serial_port):\n read_data = \"\"\n while not read_data.endswith(\"\\n>> \"):\n ready = select.select([serial_port], [], [], 25)[0]\n if ready:\n read_data += serial_port.read(serial_port.inWaiting()).decode(\n \"utf-8\", \"replace\")\n else:\n raise errors.DeviceError(\n \"Device cambrionix get response failed. \"\n \"Read timeout on serial port: {}\".format(serial_port))\n\n return read_data.splitlines()",
"def get_summary(self):\n self.rs485.write_command('#{}q0'.format(self.address),EOL='\\r\\n')\n response = self.rs485.read_until_byte_string('\\r\\n')\n return response",
"def reader(self):\n while self.alive:\n try:\n data = self.serial.read_until(b'~')[:-1]\n packet = ethernet.Ethernet(data)\n if packet[icmp.ICMP]:\n packet[ethernet.Ethernet].dst_s = \"dc:a6:32:00:a7:8b\"\n packet[ip.IP].dst_s = \"192.168.1.35\"\n packet[icmp.ICMP].sum = b'0x1783'\n print(\"\\n\\n__________________RESPONSE FROM VISIBLE PI__________________\")\n print(packet)\n if data:\n self.write(packet.bin())\n except socket.error as msg:\n break\n self.alive = False",
"def read_and_response(self, vsr, address_h, address_l):\n # time.sleep(0.2)\n self.send_cmd([vsr, 0x41, address_h, address_l])\n # time.sleep(0.2)\n resp = self.read_response() # ie resp = [42, 144, 48, 49, 13]\n reply = resp[2:-1] # Omit start char, vsr address and end char\n reply = \"{}\".format(''.join([chr(x) for x in reply])) # Turn list of integers into ASCII string\n # print(\" RR. reply: {} (resp: {})\".format(reply, resp)) # ie reply = '01'\n return resp, reply",
"def send_cmd_rd_response ( self,\r\r\n cmd_str=r'AT',\r\r\n rsp_str ='ok'):\r\r\n loggerModem = logging.getLogger(__name__ + 'send_cmd_rd_response')\r\r\n text_str = \"AT command\"\r\r\n loggerModem.debug(\"%-15s:\\t%s\" %(text_str, cmd_str))\r\r\n cmd_str = cmd_str + '\\r\\n'\r\r\n\r\r\n self.serObj.write(cmd_str) # write a string\r\r\n\r\r\n timeout_sec = 30\r\r\n remaining_time = timeout_sec\r\r\n poll_time_sec=2\r\r\n response = \"\"\r\r\n\r\r\n while remaining_time > 0:\r\r\n response = self.serObj.read(2048)\r\r\n time.sleep(poll_time_sec)\r\r\n remaining_time -= poll_time_sec\r\r\n loggerModem.debug(\"remaining time %s\" %remaining_time)\r\r\n reg_expr = r'\\b' + re.escape(rsp_str) + r'\\b'\r\r\n matchObj = re.search (reg_expr, response, re.M|re.I)\r\r\n if matchObj:\r\r\n break\r\r\n\r\r\n if matchObj:\r\r\n text_str = \"Response\"\r\r\n loggerModem.debug (\"%-15s:\\t%s\" %(text_str, matchObj.group()))\r\r\n return (0, response)\r\r\n else:\r\r\n loggerModem.debug(\"Ok, string not found in the response message\")\r\r\n return (1, response)",
"def response(self, data, response_type = \"terminal\"):\n if (response_type == \"terminal\"):\n print(data, end=\"\\n\")",
"def getData(self,cmd):\n self.ser.write(cmd.encode()+END.encode())\n out = self.ser.readline()\n\n if(out == \"\"):\n raise IOError(\"communication failed\")\n return out",
"def readResponseCommand(self, numElements=1):\n while 1:\n resp = self.serial.readline().decode() #Get the line\n if len(resp) > 5 and resp[0:5] == \"ERROR\": #If Error\n self._waiting_response = 0\n raise Exception(resp)\n _LOGGER.debug(\"Response %s\" % resp)\n if resp.find('#') > -1:\n self._waiting_response = 0\n break\n if resp == '':\n # nothing coming, have read too many lines\n return None, None\n respitems = resp.split(\"#\",maxsplit=1)[1].split()\n command = respitems[1]\n print(str(command))\n if numElements == 1:\n return respitems[-1], command\n else:\n return respitems[-numElements:], command",
"def TestResponse(port):\n\tcommandString = \"F\"\n\tport.write(commandString)\n\tcommandString = \"PM3,C,I1M500,I3M-500,I3M500,I1M-500,R\"\n\tport.write(commandString)\n\tWaitUntilReady(port)\n\tport.write(\"R\")\n\tresp=WaitUntilReady(port)\n\tcount=0\n\tprint(\"starting loop:\")\n\twhile('^' in resp):\n \tport.write(\"X\")\n\t\txpos=port.read(9)\n\t\tprint(xpos)\n\t\tport.write(\"R\")\n\t\ttime.sleep(5)\n\t\tresp=WaitUntilReady(port)\n\t\tcount = count+1\n\t\tprint(count)",
"def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None",
"def ser_command(str, ser, responses=['OK\\r\\n']):\n ser.write(str)\n msg = ''\n while (msg not in responses):\n try:\n msg = ser.readline()\n except OSError, serial.SerialException:\n if DEBUG:\n print 'Unable to read. Is something else using the port?'\n return msg",
"def __read_response(self, nblines=-1):\n resp, code, data = (b\"\", None, None)\n cpt = 0\n while True:\n try:\n line = self.__read_line()\n except Response as inst:\n code = inst.code\n data = inst.data\n break\n except Literal as inst:\n resp += self.__read_block(inst.value)\n if not resp.endswith(CRLF):\n resp += self.__read_line() + CRLF\n continue\n if not len(line):\n continue\n resp += line + CRLF\n cpt += 1\n if nblines != -1 and cpt == nblines:\n break\n\n return (code, data, resp)",
"def _GetResponseFrame(self):\n self.write([self.SERIAL_IO])\n time.sleep(.1)\n resp = self.read(3)\n [ack, txcount, rxcount] = self.decode(resp)\n if ack != 0xFF:\n raise ValueError('Serial - GetResponseFrame - NACK received - Transmissionerror')\n if rxcount > 0:\n time.sleep(.01)\n com = self._serial_read(rxcount)\n #Add to buffer\n self.buffer += com\n return [ack, txcount, len(self.buffer)]",
"def get_response():\n line = FROMPIPE.readline()\n result = \"\"\n while True:\n result += line\n line = FROMPIPE.readline()\n # print(f\"Line read: [{line}]\")\n if line == '\\n':\n return result",
"def read_ascii_response(self):\n str = ''\n empties = 0\n while(empties < 5 and str[-3:] != '}\\r\\n'):\n time.sleep(.1)\n newdata = self.read()\n str += newdata\n if newdata:\n empties = 0\n else:\n empties += 1\n if empties: # last result must have gotten data, so empties should be zero\n raise LabProTimeout(\n 'timeout getting ascii data, current result: ' + repr(str))\n goodstart = str.find('{')\n if goodstart < 0:\n raise LabProDataError('bad ascii data: ' + repr(str))\n return map(eval, str[goodstart + 1:-3].split(','))",
"def readCommand(self):\n while (True):\n time.sleep(1)\n # At least a package of 4 bytes (minimum)\n # [ Head | Length | Address | Data[0…N] | Check ]\n if (self._serial.inWaiting()>=4):\n # Gets only the first byte of the packet (it should be HEAD)\n packet_header = self._serial.read(1)\n if (packet_header != Ind903Packet.PACKET_HEAD):\n # the next one is the length of the packet\n packet_length_bytes = self._serial.read(1)\n packet_length = int.from_bytes(packet_length_bytes, byteorder='big')\n if (packet_length > 0):\n raw_packet = b\"\".join([packet_header, packet_length_bytes, self._serial.read(packet_length)]) \n result_packet = Ind903Packet.parsePacket(raw_packet)\n return (result_packet)",
"def Read_Response(self, expected = bytes([0x01])):\r\n data = self.Port.read(1)\r\n if data == expected: return True\r\n return False",
"def read_inverter(inverter_addr, serial_port):\n # Flush the inputs and outputs\n serial_port.flushInput()\n serial_port.flushOutput()\n\n # Kaco inverter expects a '#{inv_Numb}\\r' command to get data\n inv_command = '#' + inverter_addr + '0\\r\\n'\n\n # Encode the command\n enc_cmd = inv_command.encode()\n if DEBUG:\n print(\"Sending the command to the RS485 port: {} encoded as: {}\".format(inv_command.replace('\\r', ''), enc_cmd))\n serial_port.write(enc_cmd)\n\n # wait 1 second. Do not make it less than that\n time.sleep(1)\n\n # read answer line\n response = ''\n while serial_port.in_waiting > 0:\n part = serial_port.read(serial_port.in_waiting)\n response += part.decode(\"iso-8859-1\")\n\n if DEBUG > 2:\n print(\"Received the following data: {}\".format(response.replace('\\r', '\\n')))\n\n return response",
"def readresp(self, cmd):\n\t\tdata = self.read(22)\n\t\tresponse = data[0]\n\t\t#print \"laser response\", self.mylaser, response\n\t\tgstt.lstt_dacanswers[self.mylaser] = response\n\t\tcmdR = data[1]\n\t\tstatus = Status(data[2:])\n\t\tr.set('/lack/'+str(self.mylaser), response)\n\n\t\tif cmdR != cmd:\n\t\t\traise ProtocolError(\"expected resp for %r, got %r\"\n\t\t\t\t% (cmd, cmdR))\n\n\t\tif response != \"a\":\n\t\t\traise ProtocolError(\"expected ACK, got %r\"\n\t\t\t\t% (response, ))\n\n\t\tself.last_status = status\n\t\treturn status",
"def _read(self):\n # because protocol has no termination chars the read reads the number\n # of bytes in the buffer\n bytes_in_buffer = self.visa_handle.bytes_in_buffer\n # a workaround for a timeout error in the pyvsia read_raw() function\n with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):\n mes = self.visa_handle.visalib.read(\n self.visa_handle.session, bytes_in_buffer)\n mes = str(mes[0].decode()) # cannot be done on same line for some reason\n # if mes[1] != 0:\n # # see protocol descriptor for error codes\n # raise Exception('IVVI rack exception \"%s\"' % mes[1])\n return mes",
"def reader(self):\n try:\n line = ''\n while self.alive:\n data = self.serial.read(1)\n if data == '\\r':\n continue\n\n line += data\n if data == '\\n':\n self.log.print_distant(datetime.now().strftime(\n \"%d/%m/%Y %H:%M:%S> \"))\n if line.startswith('ALARM:'):\n self.log.alert(line)\n elif line.startswith('EVENT:') or line.startswith('INFO'):\n self.log.warn(line)\n else:\n self.log.print_distant(line)\n self.parse(line.strip())\n line = ''\n\n sys.stdout.flush()\n\n except serial.SerialException:\n self.alive = False\n # would be nice if the console reader could be interruptted at this\n # point...\n raise",
"def readline(self):\n returnIndex = self._RX_buf.index(\"\\n\") # \\r\\n technically\n if returnIndex != -1:\n s = self._RX_buf[0:returnIndex + 1]\n self._RX_buf = self._RX_buf[returnIndex + 1:]\n return s # bytes(s, encoding='ascii') # s\n else:\n return 0x04 # ''",
"def read():\n # TODO",
"def read_ir_char(self, dir):\n data = self._read_packet(dir, Dock.DATA_BYTES)\n\n if len(data) == Dock.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return byte\n else:\n return 0",
"def __read(self, bytes=31):\n raw_data = self.file_read.read(bytes)\n response = self.__get_response(raw_data)\n is_valid, error_code = self.__is_response_valid(response)\n if is_valid:\n char_list = self.__handle_raspi_glitch(response[1:])\n return str(''.join(char_list)), is_valid\n else:\n return error_code, is_valid",
"def read_dir():\n request()\n sleep_ms( 100 ) # give sometime for the buffer to get data\n try:\n read_response()\n except Exception as err:\n print( 'Error decoding response' )\n print( '[ERROR]', err )\n sleep_ms( 1000 )",
"async def _read_reply_async(self) -> str:\n reply_string = await self._serial.readline_async()\n logger.debug(f\"Reply received: {reply_string}\")\n return reply_string.decode(\"ascii\")"
]
| [
"0.6458579",
"0.61260074",
"0.60657144",
"0.5817488",
"0.57004523",
"0.55808157",
"0.55629456",
"0.55170035",
"0.55125105",
"0.54584724",
"0.54464424",
"0.54396635",
"0.53551924",
"0.53175896",
"0.5272487",
"0.5263698",
"0.52636516",
"0.5250304",
"0.52358776",
"0.5232269",
"0.5218171",
"0.52169955",
"0.5201425",
"0.51799095",
"0.5120975",
"0.51206887",
"0.51165414",
"0.509976",
"0.50953853",
"0.5079725"
]
| 0.74575657 | 0 |
get the available computation devices (CPU & GPUs) Get the computation devices for deep learning experiments with given preferred list of GPU and flag for multiGPU computation. | def get_computation_devices(
preferred_gpu_list: Optional[List[int]],
multi_gpu_flag: bool,
) -> List[Device]:
# use CPU when GPUs are not preferred or not available
if (preferred_gpu_list is None) \
or (len(preferred_gpu_list) == 0) \
or (not torch.cuda.is_available()):
return [Device('cpu'), ]
# else GPUs are preferred and available
# get all available GPU indexes
_available_gpu_list: List[int]
if getAvailable:
# by default, use GPU utility package with load and memory usage
# specification so that the 'available' GPUs are actually ready
# for deep learning runs (https://github.com/anderskm/gputil)
_available_gpu_list = getAvailable(
limit=_MAX_NUM_GPUS,
maxLoad=_MAX_GPU_LOAD,
maxMemory=_MAX_GPU_MEM_USED,
)
else:
# assume all GPUs are good to use without GPUtil package
_available_gpu_list = list(range(torch.cuda.device_count()))
_warning_msg = \
f'GPUtil (https://github.com/anderskm/gputil) not installed.' \
f'Assuming all GPUs ({_available_gpu_list}) are available ' \
f'and ready for training ... '
_LOGGER.warning(_warning_msg)
# get the overlap between the preferred and the available GPUs
_gpus = \
[_g for _g in _available_gpu_list if _g in preferred_gpu_list]
# use CPU if there is no preferred GPUs that are available
if len(_gpus) == 0:
return [Device('cpu'), ]
# otherwise return one or all GPUs depending on the multi-GPU flag
return [Device(f'cuda:{_g}') for _g in _gpus] \
if multi_gpu_flag else [Device(f'cuda:{_gpus[0]}'), ] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_all_gpus(): #@save\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]",
"def detect_gpus():\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n if is_tensorflow_version_higher_or_equal('1.8.0'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n else:\n devices = list(device_lib.list_local_devices())\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()",
"def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def _get_available_gpus():\r\n #global _LOCAL_DEVICES\r\n if tfback._LOCAL_DEVICES is None:\r\n devices = tf.config.list_logical_devices()\r\n tfback._LOCAL_DEVICES = [x.name for x in devices]\r\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def _get_available_gpus():\n global _LOCAL_DEVICES\n if _LOCAL_DEVICES is None:\n if _is_tf_1():\n devices = get_session().list_devices()\n _LOCAL_DEVICES = [x.name for x in devices]\n else:\n _LOCAL_DEVICES = tf.config.experimental_list_devices()\n return [x for x in _LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tf_back._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tf_back._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tf_back._LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]",
"def get_cl_devices():\n\n _devices = {'CPU':[], 'GPU':[]}\n\n platforms = cl.get_platforms()\n for platform in platforms:\n devices = platform.get_devices()\n for device in devices:\n if device.type == cl.device_type.CPU:\n _devices['CPU'].append(device)\n elif device.type == cl.device_type.GPU:\n _devices['GPU'].append(device)\n \n \n return _devices",
"def get_test_devices():\n\n # Assumption: CPU is always available\n devices = ['cpu']\n\n if torch.cuda.is_available():\n devices.append('cuda')\n\n return devices",
"def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")",
"def get_test_devices():\n devices = [\"cpu\"]\n if torch.cuda.is_available():\n devices.append(\"cuda\")\n return devices",
"def get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return [0]\n\n FNULL = open(os.devnull, 'w')\n\n available_devices = []\n for i in range(num_devices):\n try:\n if b\"NVIDIA\" in subprocess.check_output(\n [\"{}/test_device\".format(executable_path),\n str(i)], stderr=FNULL):\n available_devices.append(i)\n logging.info('Device {} is available for rendering'.format(i))\n except subprocess.CalledProcessError as e:\n logging.info(e)\n logging.info('Device {} is not available for rendering'.format(i))\n FNULL.close()\n\n return available_devices",
"def _get_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, \"\n f\"but only {n_gpu} are available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n self.logger.info(f'Using device: {device}, {list_ids}')\n return device, list_ids",
"def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')",
"def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids",
"def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids",
"def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids",
"def gpu_devices(self):\n return self._gpu_devices",
"def get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == \"GPU\"]",
"def get_devices(needs: int = None):\n\n num_gpus = torch.cuda.device_count()\n\n if num_gpus == 0:\n devices = [torch.device(\"cpu\")]\n if needs is None:\n return devices\n return devices * needs\n\n devices = [torch.device(f\"cuda:{index:d}\") for index in range(num_gpus)]\n if needs is None:\n return devices\n return [device for _, device in zip(range(needs), itertools.cycle(devices))]",
"def try_all_gpus():\n ctx_list = []\n try:\n for i in range(16):\n ctx = mx.gpu(i)\n _ = nd.array([0], ctx=ctx)\n ctx_list.append(ctx)\n except:\n pass\n if not ctx_list:\n ctx_list = [mx.cpu()]\n return ctx_list",
"def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")",
"def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_device(devices[0])\n else:\n devices = [torch.device('cpu')]",
"def return_free_GPU():\r\n if torch.cuda.is_available():\r\n gpu_num = torch.cuda.device_count()\r\n device = torch.device('cuda:{}'.format(gpu_num-1))\r\n print('Using GPU:[{}]/[{}] for training...'.format(gpu_num-1,gpu_num-1))\r\n return device\r\n \r\n raise ValueError('GPU not available for training. Check CUDA env with function \"check_cuda_env\"')",
"def _prepare_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\n \"Warning: There\\'s no GPU available on this machine, training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(\n \"Warning: The number of GPU\\'s configured to use is {}, but only {} are available on this machine.\".format(\n n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids",
"def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')",
"def setup_CL():\n\n #Set up openCL platform\n NAME = 'NVIDIA CUDA'\n platforms = cl.get_platforms()\n\n dev = None\n for p in platforms:\n #Easy switching for local vs remote machine\n if p.name == 'Apple':\n NAME = 'Apple'\n if p.name == NAME:\n dev = p.get_devices()\n\n # Command queue, enable GPU profiling\n ctx = cl.Context(dev)\n queue = cl.CommandQueue(ctx,properties=cl.command_queue_properties.PROFILING_ENABLE)\n\n return [dev,ctx,queue]",
"def getGpus():\n nvmlInit()\n gpu_list = []\n for i in range(0, nvmlDeviceGetCount()):\n handle = nvmlDeviceGetHandleByIndex(i)\n gpu_list.append(NvidiaGPU(handle))\n return gpu_list"
]
| [
"0.7351089",
"0.711227",
"0.7013773",
"0.7013773",
"0.69962704",
"0.6990039",
"0.69724643",
"0.69648427",
"0.69502217",
"0.6850669",
"0.6835772",
"0.678422",
"0.67773485",
"0.675242",
"0.6687484",
"0.6627573",
"0.6611311",
"0.6611311",
"0.6591264",
"0.64701104",
"0.6435652",
"0.6288411",
"0.6285713",
"0.6281467",
"0.62523884",
"0.6245533",
"0.6242748",
"0.62409663",
"0.6232231",
"0.61961323"
]
| 0.8674712 | 0 |
Test that you can add a text plugin | def test_add_edit_plugin(self):
# add a new text plugin
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type':"TextPlugin",
'language':settings.LANGUAGES[0][0],
'placeholder':page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content + "/"
response = self.client.get(edit_url)
self.assertEquals(response.status_code, 200)
data = {
"body":"Hello World"
}
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEquals("Hello World", txt.body) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gameAddText(self):\n # this is tested graphically, it is UI\n pass",
"def test_plugins():\n assert plugins.template.plugin_test() == True\n assert plugin_test() == True",
"def test_copy_textplugin(self):\n page = create_page(\"page\", \"nav_playground.html\", \"en\")\n \n placeholder = page.placeholders.get(slot='body')\n\n plugin_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=1,\n language=self.FIRST_LANG)\n plugin_base.insert_at(None, position='last-child', save=False)\n\n plugin = Text(body='')\n plugin_base.set_base_attr(plugin)\n plugin.save()\n\n plugin_ref_1_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=1,\n language=self.FIRST_LANG)\n plugin_ref_1_base.insert_at(plugin_base, position='last-child', save=False)\n\n plugin_ref_1 = Text(body='')\n plugin_ref_1_base.set_base_attr(plugin_ref_1)\n plugin_ref_1.save()\n\n plugin_ref_2_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=2,\n language=self.FIRST_LANG)\n plugin_ref_2_base.insert_at(plugin_base, position='last-child', save=False)\n\n plugin_ref_2 = Text(body='')\n plugin_ref_2_base.set_base_attr(plugin_ref_2)\n\n plugin_ref_2.save()\n\n plugin.body = plugin_tags_to_admin_html(' {{ plugin_object %s }} {{ plugin_object %s }} ' % (str(plugin_ref_1.pk), str(plugin_ref_2.pk)))\n plugin.save()\n self.assertEquals(plugin.pk, 1)\n page_data = self.get_new_page_data()\n\n #create 2nd language page\n page_data.update({\n 'language': self.SECOND_LANG,\n 'title': \"%s %s\" % (page.get_title(), self.SECOND_LANG),\n })\n response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + \"?language=%s\" % self.SECOND_LANG, page_data)\n self.assertRedirects(response, URL_CMS_PAGE)\n\n self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n self.assertEquals(Page.objects.all().count(), 1)\n\n copy_data = {\n 'placeholder': placeholder.pk,\n 'language': self.SECOND_LANG,\n 'copy_from': self.FIRST_LANG,\n }\n response = self.client.post(URL_CMS_PAGE + \"copy-plugins/\", copy_data)\n self.assertEquals(response.status_code, 200)\n self.assertEqual(response.content.count('<li '), 3)\n # assert copy success\n self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.count(), 6)\n\n new_plugin = Text.objects.get(pk=6)\n self.assertEquals(plugin_tags_to_id_list(new_plugin.body), [u'4', u'5'])",
"def test_plugin_initialize(self):\n p = PluginCustom()\n self.assertEqual('youpie', p.toto)",
"def test(self, plugin):\n plug = plugin_source.load_plugin(plugin)\n plug.test()",
"def test_add_text(self):\n text = 'test'\n info = self.api.add_text(text, tags=['asd'])\n self.assertEqual(info['value'], text)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_text_editor():\n assert chap2.text_editor()",
"def test_register_dynamic_plugin(self):\n pass",
"def test_plugin_initialize_from_args(self):\n sys.argv.append('-t')\n p = PluginCustom()\n self.assertEqual('yourah', p.toto)",
"def test_register_dynamic_plugin1(self):\n pass",
"def test_text(self):\n args = [\"hello world\"]\n namespace = self.parser.parse_args(args)\n self.assertEqual(namespace.text, \"hello world\")",
"def test_addplugin(self):\n app = QApplication(sys.argv)\n data = (np.random.rand(30, 31, 32) * 100).astype(np.int)\n data[15:40, 13:20, 10:18] += 50\n se = seededitorqt.QTSeedEditor(data)\n wg0 = seededitorqt.plugin.SampleThresholdPlugin()\n se.addPlugin(wg0)\n # se.exec_()\n # self.assertTrue(False)",
"def test_PlainText(self):\n quotation = zope.component.createObject('plaintext', u\"Hello World!\")\n from quotationtool.renderer.plaintext import PlainText\n self.assertTrue(isinstance(quotation, PlainText))",
"def add_cloud_plugin_content(self, content):",
"def register_plugin(self):\n self.edit_goto.connect(self.main.editor.load)\n self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)\n self.main.add_dockwidget(self)\n\n unittesting_act = create_action(self, _(\"Run unit tests\"),\n icon=get_icon('profiler.png'),\n triggered=self.run_unittesting)\n unittesting_act.setEnabled(is_unittesting_installed())\n fixed_shortcut(\"Ctrl+Shift+F11\", self.main,\n self.run_unittesting)\n\n self.main.run_menu_actions += [unittesting_act]\n self.main.editor.pythonfile_dependent_actions += [unittesting_act]",
"def test_textlines_field():",
"def newTestTxt(self):\n self.newTab( extension = TestTxt.TYPE, repoDest=UCI.REPO_UNDEFINED )",
"def test_text_field():",
"def fixture_plugin(mkdocs_conf):\n plugin = mkdocs_conf[\"plugins\"][\"mkdocstrings\"]\n plugin.md = Markdown(extensions=mkdocs_conf[\"markdown_extensions\"], extension_configs=mkdocs_conf[\"mdx_configs\"])\n return plugin",
"def test_custom_plugin(self):\n plugin_name = 'Druptest'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['name'] == plugin_name",
"def test_register_dynamic_plugin_manager(self):\n pass",
"def addText(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def addContent(text):",
"def testText(self):\n lc = self.CreateConsole()\n contents = \"\"\n self.assertEqual(contents, lc.GetText())\n for str in ('a', 'foo', '\\n\\n\\n', 'bar\\nbaz\\n choke choke zapf'):\n contents += str\n lc.AppendText(str)\n self.assertEqual(contents, lc.GetText())",
"def test_register_dynamic_plugin_manager1(self):\n pass",
"def get_about(self):\n return \"\"\"This test plug-in exists to verify that SBTools behaves correctly when it encounters a plug-in that has an incorrectly defined entry point.\"\"\"",
"def add_text(self, text):\n text_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/text.html')\n text_output = text_template.render(text=text)\n self.contents.append(text_output)",
"def plugin_two():\n return \"two\"",
"def test_nested_plugin_on_page(self):\n with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False):\n # setup page 1\n page_one = create_page(u\"Three Placeholder\", u\"col_three.html\", u\"en\",\n position=u\"last-child\", published=True, in_navigation=True)\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n \n ###\n # add a plugin\n ###\n pre_nesting_body = u\"<p>the nested text plugin with a link inside</p>\"\n text_plugin = add_plugin(page_one_ph_two, u\"TextPlugin\", u\"en\", body=pre_nesting_body)\n # prepare nestin plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin = self.reload(text_plugin)\n link_plugin = add_plugin(page_one_ph_two, u\"LinkPlugin\", u\"en\", target=text_plugin)\n link_plugin.name = u\"django-cms Link\"\n link_plugin.url = u\"https://www.django-cms.org\" \n \n # as for some reason mptt does not \n # update the parent child relationship \n # in the add_plugin method when a target present\n # but this is not the topic of the test\n link_plugin.parent = text_plugin\n link_plugin.save()\n # reloading needs to be done after every save\n link_plugin = self.reload(link_plugin)\n text_plugin = self.reload(text_plugin)\n \n # mptt related insertion correct?\n msg = u\"parent plugin right is not updated, child not inserted correctly\"\n self.assertTrue(text_plugin.rght > link_plugin.rght, msg=msg)\n msg = u\"link has no parent\"\n self.assertFalse(link_plugin.parent == None, msg=msg)\n msg = u\"parent plugin left is not updated, child not inserted correctly\"\n self.assertTrue(text_plugin.lft < link_plugin.lft, msg=msg)\n msg = u\"child level is not bigger than parent level\"\n self.assertTrue(text_plugin.level < link_plugin.level , msg=msg)\n \n # add the link plugin to the body\n # emulate the editor in admin that adds some txt for the nested plugin\n in_txt = u\"\"\"<img id=\"plugin_obj_%s\" title=\"Link\" alt=\"Link\" src=\"/static/cms/images/plugins/link.png\">\"\"\"\n nesting_body = u\"%s<p>%s</p>\" % (text_plugin.body, (in_txt % (link_plugin.id)))\n text_plugin.body = nesting_body\n text_plugin.save()\n \n text_plugin = self.reload(text_plugin)\n # none of the descendants should have a placeholder other then my own one\n self.assertEquals(text_plugin.get_descendants().exclude(placeholder=text_plugin.placeholder).count(), 0)\n post_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(post_add_plugin_count, 2)",
"def test_register():\n repobee.try_register_plugin(\n sanitizer, sanitizer.SanitizeRepo, sanitizer.SanitizeFile\n )"
]
| [
"0.7142208",
"0.6978258",
"0.69407517",
"0.69305396",
"0.6869483",
"0.68114066",
"0.6739826",
"0.668169",
"0.6654635",
"0.65116435",
"0.6424587",
"0.6378008",
"0.62111646",
"0.6171407",
"0.6164539",
"0.6146264",
"0.60773647",
"0.6075129",
"0.6025148",
"0.602333",
"0.60176533",
"0.6016968",
"0.60128504",
"0.597354",
"0.59358835",
"0.5934541",
"0.5884938",
"0.58787674",
"0.5874615",
"0.58473444"
]
| 0.73295635 | 0 |
Test that copying plugins works as expected. | def test_copy_plugins(self):
# create some objects
page_en = create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEquals(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
# the call above to add a child makes a plugin reload required here.
text_plugin_en = self.reload(text_plugin_en)
# check the relations
self.assertEquals(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
# just sanity check that so far everything went well
self.assertEqual(CMSPlugin.objects.count(), 2)
# copy the plugins to the german placeholder
copy_plugins_to(ph_en.cmsplugin_set.all(), ph_de, 'de')
self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
self.assertEqual(text_plugin_de.get_children().count(), 1)
link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
# check we have twice as many plugins as before
self.assertEqual(CMSPlugin.objects.count(), 4)
# check language plugins
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
text_plugin_en = self.reload(text_plugin_en)
link_plugin_en = self.reload(link_plugin_en)
# check the relations in english didn't change
self.assertEquals(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
self.assertEqual(link_plugin_de.name, link_plugin_en.name)
self.assertEqual(link_plugin_de.url, link_plugin_en.url)
self.assertEqual(text_plugin_de.body, text_plugin_en.body) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_custom_plugin(self):\n plugin_name = 'Druptest'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['name'] == plugin_name",
"def test_copy_page_nested_plugin(self):\n with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False):\n templates = []\n # setup page 1\n page_one = create_page(u\"Three Placeholder\", u\"col_three.html\", u\"en\",\n position=u\"last-child\", published=True, in_navigation=True)\n page_one_ph_one = page_one.placeholders.get(slot=u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=u\"col_right\")\n # add the text plugin to placeholder one\n text_plugin_en = add_plugin(page_one_ph_one, u\"TextPlugin\", u\"en\", body=\"Hello World\")\n self.assertEquals(text_plugin_en.id, CMSPlugin.objects.all()[0].id)\n self.assertEquals(text_plugin_en.get_children().count(), 0)\n pre_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(pre_add_plugin_count, 1)\n ###\n # add a plugin to placeholder two\n ###\n pre_nesting_body = u\"<p>the nested text plugin with a link inside</p>\"\n text_plugin_two = add_plugin(page_one_ph_two, u\"TextPlugin\", u\"en\", body=pre_nesting_body)\n text_plugin_two = self.reload(text_plugin_two)\n # prepare nesting plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin_two = self.reload(text_plugin_two)\n link_plugin = add_plugin(page_one_ph_two, u\"LinkPlugin\", u\"en\", target=text_plugin_two)\n link_plugin.name = u\"django-cms Link\"\n link_plugin.url = u\"https://www.django-cms.org\" \n link_plugin.parent = text_plugin_two\n link_plugin.save()\n \n link_plugin = self.reload(link_plugin)\n text_plugin_two = self.reload(text_plugin_two)\n in_txt = \"\"\"<img id=\"plugin_obj_%s\" title=\"Link\" alt=\"Link\" src=\"/static/cms/images/plugins/link.png\">\"\"\"\n nesting_body = \"%s<p>%s</p>\" % (text_plugin_two.body, (in_txt % (link_plugin.id)))\n # emulate the editor in admin that adds some txt for the nested plugin\n text_plugin_two.body = nesting_body\n text_plugin_two.save()\n text_plugin_two = self.reload(text_plugin_two)\n # the link is attached as a child?\n self.assertEquals(text_plugin_two.get_children().count(), 1)\n post_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(post_add_plugin_count, 3)\n page_one.save()\n # get the plugins from the original page\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n # verifiy the plugins got created\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 0)\n self.assertEquals(page_one.placeholders.count(), 3)\n placeholder_count = Placeholder.objects.count()\n self.assertEquals(placeholder_count, 3)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n page_one_plugins = CMSPlugin.objects.all()\n ##\n # setup page_copy_target page\n ##\n page_copy_target = create_page(\"Three Placeholder - page copy target\", \"col_three.html\", \"en\",\n position=\"last-child\", published=True, in_navigation=True)\n all_page_count = Page.objects.all().count()\n pre_copy_placeholder_count = Placeholder.objects.count()\n self.assertEquals(pre_copy_placeholder_count, 6)\n # copy the page\n superuser = self.get_superuser()\n with 
self.login_user_context(superuser):\n page_two = self.copy_page(page_one, page_copy_target)\n # validate the expected pages,placeholders,plugins,pluginbodies\n after_copy_page_plugin_count = CMSPlugin.objects.count()\n self.assertEquals(after_copy_page_plugin_count, 6)\n # check the amount of copied stuff\n after_copy_page_count = Page.objects.all().count()\n after_copy_placeholder_count = Placeholder.objects.count()\n self.assertTrue((after_copy_page_count > all_page_count), msg = u\"no new page after copy\")\n self.assertTrue((after_copy_page_plugin_count > post_add_plugin_count), msg = u\"plugin count is not grown\")\n self.assertTrue((after_copy_placeholder_count > pre_copy_placeholder_count), msg = u\"placeholder count is not grown\") \n self.assertTrue((after_copy_page_count == 3), msg = u\"no new page after copy\")\n # orginal placeholder\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_one_ph_one.page if page_one_ph_one else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_two.page if page_one_ph_two else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_three.page if page_one_ph_three else None\n self.assertEqual(found_page, page_one)\n \n page_two = self.reload(page_two)\n page_two_ph_one = page_two.placeholders.get(slot = u\"col_sidebar\")\n page_two_ph_two = page_two.placeholders.get(slot = u\"col_left\")\n page_two_ph_three = page_two.placeholders.get(slot = u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_two_ph_one.page if page_two_ph_one else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_two.page if page_two_ph_two else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_three.page if page_two_ph_three else None\n self.assertEqual(found_page, page_two)\n # check the stored placeholders org vs copy\n msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_one.pk, page_one_ph_one.pk, msg)\n msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_two.pk, page_one_ph_two.pk, msg)\n msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_three.pk, page_one_ph_three.pk, msg)\n # get the plugins from the original page\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 0)\n # get the plugins from the copied page\n copied_placeholder_one_plugins = page_two_ph_one.get_plugins()\n self.assertEquals(len(copied_placeholder_one_plugins), 1)\n copied_placeholder_two_plugins = page_two_ph_two.get_plugins()\n self.assertEquals(len(copied_placeholder_two_plugins), 2)\n copied_placeholder_three_plugins = 
page_two_ph_three.get_plugins()\n self.assertEquals(len(copied_placeholder_three_plugins), 0)\n # verify the plugins got copied\n # placeholder 1\n count_plugins_copied = len(copied_placeholder_one_plugins)\n count_plugins_org = len(org_placeholder_one_plugins)\n msg = u\"plugin count %s %s for placeholder one not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 2\n count_plugins_copied = len(copied_placeholder_two_plugins)\n count_plugins_org = len(org_placeholder_two_plugins)\n msg = u\"plugin count %s %s for placeholder two not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 3\n count_plugins_copied = len(copied_placeholder_three_plugins)\n count_plugins_org = len(org_placeholder_three_plugins)\n msg = u\"plugin count %s %s for placeholder three not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg)\n # verify the body of text plugin with nested link plugin\n # org to copied \n org_nested_text_plugin = None\n # do this iteration to find the real text plugin with the attached link\n # the inheritance mechanism for the cmsplugins works through \n # (tuple)get_plugin_instance()\n for x in org_placeholder_two_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n org_nested_text_plugin = instance\n break\n copied_nested_text_plugin = None\n for x in copied_placeholder_two_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n copied_nested_text_plugin = instance\n break\n msg = u\"orginal nested text plugin not found\"\n self.assertNotEquals(org_nested_text_plugin, None, msg=msg)\n msg = u\"copied nested text plugin not found\"\n self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)\n # get the children ids of the texplugin with a nested link\n # to check if the body of the text is genrated correctly\n org_link_child_plugin = org_nested_text_plugin.get_children()[0]\n copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]\n # validate the textplugin body texts\n msg = u\"org plugin and copied plugin are the same\"\n self.assertTrue(org_link_child_plugin.id != copied_link_child_plugin.id, msg)\n needle = u\"plugin_obj_%s\"\n msg = u\"child plugin id differs to parent in body plugin_obj_id\"\n # linked child is in body\n self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)\n msg = u\"copy: child plugin id differs to parent in body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)\n # really nothing else\n msg = u\"child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)\n msg = u\"copy: child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)\n # now reverse lookup the placeholders from the plugins\n org_placeholder = org_link_child_plugin.placeholder\n copied_placeholder = copied_link_child_plugin.placeholder\n msg = u\"placeholder of the orginal plugin and copied plugin are the same\"\n ok = ((org_placeholder.id != copied_placeholder.id))\n 
self.assertTrue(ok, msg)",
"def test_clone_system(self):\n pass",
"def test_copy_page_nested_plugin_moved_parent_plugin(self):\n with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False):\n templates = []\n # setup page 1\n page_one = create_page(u\"Three Placeholder\", u\"col_three.html\", u\"en\",\n position=u\"last-child\", published=True, in_navigation=True)\n page_one_ph_one = page_one.placeholders.get(slot=u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=u\"col_right\")\n # add the text plugin to placeholder one\n text_plugin_en = add_plugin(page_one_ph_one, u\"TextPlugin\", u\"en\", body=u\"Hello World\")\n self.assertEquals(text_plugin_en.id, CMSPlugin.objects.all()[0].id)\n self.assertEquals(text_plugin_en.get_children().count(), 0)\n pre_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(pre_add_plugin_count, 1)\n # add a plugin to placeholder twho\n pre_nesting_body = u\"<p>the nested text plugin with a link inside</p>\"\n text_plugin_two = add_plugin(page_one_ph_two, u\"TextPlugin\", u\"en\", body=pre_nesting_body)\n text_plugin_two = self.reload(text_plugin_two)\n # prepare nestin plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin_two = self.reload(text_plugin_two)\n link_plugin = add_plugin(page_one_ph_two, u\"LinkPlugin\", u\"en\", target=text_plugin_two)\n link_plugin.name = u\"django-cms Link\"\n link_plugin.url = u\"https://www.django-cms.org\" \n link_plugin.parent = text_plugin_two\n link_plugin.save()\n # reload after every save\n link_plugin = self.reload(link_plugin)\n text_plugin_two = self.reload(text_plugin_two)\n in_txt = u\"\"\"<img id=\"plugin_obj_%s\" title=\"Link\" alt=\"Link\" src=\"/static/cms/images/plugins/link.png\">\"\"\"\n nesting_body = \"%s<p>%s</p>\" % (text_plugin_two.body, (in_txt % (link_plugin.id)))\n # emulate the editor in admin that adds some txt for the nested plugin\n text_plugin_two.body = nesting_body\n text_plugin_two.save()\n text_plugin_two = self.reload(text_plugin_two)\n # the link is attached as a child?\n self.assertEquals(text_plugin_two.get_children().count(), 1)\n post_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(post_add_plugin_count, 3)\n page_one.save()\n # get the plugins from the original page\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n # verify the plugins got created\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 0)\n self.assertEquals(page_one.placeholders.count(), 3)\n \n placeholder_count = Placeholder.objects.count()\n self.assertEquals(placeholder_count, 3)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n page_one_plugins = CMSPlugin.objects.all()\n # setup page_copy_target\n page_copy_target = create_page(\"Three Placeholder - page copy target\", \"col_three.html\", \"en\",\n position=\"last-child\", published=True, in_navigation=True)\n all_page_count = Page.objects.all().count()\n pre_copy_placeholder_count = Placeholder.objects.count()\n self.assertEquals(pre_copy_placeholder_count, 6)\n superuser = self.get_superuser()\n 
with self.login_user_context(superuser):\n # now move the parent text plugin to another placeholder\n post_data = {\n u'placeholder': u\"col_right\",\n u'placeholder_id': u\"%s\" % (page_one_ph_three.id),\n u'ids': u\"%s\" % (text_plugin_two.id),\n u'plugin_id': u\"%s\" % (text_plugin_two.id),\n }\n edit_url = URL_CMS_MOVE_PLUGIN % (page_one.id)\n response = self.client.post(edit_url, post_data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content, u'ok')\n # check if the plugin got moved\n page_one = self.reload(page_one)\n text_plugin_two = self.reload(text_plugin_two)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n \n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n # the plugin got moved and child got moved\n self.assertEquals(len(org_placeholder_two_plugins), 0)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 2)\n # copy the page\n page_two = self.copy_page(page_one, page_copy_target)\n # validate the expected pages,placeholders,plugins,pluginbodies\n after_copy_page_plugin_count = CMSPlugin.objects.count()\n self.assertEquals(after_copy_page_plugin_count, 6)\n after_copy_page_count = Page.objects.all().count()\n after_copy_placeholder_count = Placeholder.objects.count()\n self.assertTrue((after_copy_page_count > all_page_count), msg = u\"no new page after copy\")\n self.assertTrue((after_copy_page_plugin_count > post_add_plugin_count), msg = u\"plugin count is not grown\")\n self.assertTrue((after_copy_placeholder_count > pre_copy_placeholder_count), msg = u\"placeholder count is not grown\") \n self.assertTrue((after_copy_page_count == 3), msg = u\"no new page after copy\")\n # validate the structure\n # orginal placeholder\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot=u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_one_ph_one.page if page_one_ph_one else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_two.page if page_one_ph_two else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_three.page if page_one_ph_three else None\n self.assertEqual(found_page, page_one)\n page_two = self.reload(page_two)\n page_two_ph_one = page_two.placeholders.get(slot = u\"col_sidebar\")\n page_two_ph_two = page_two.placeholders.get(slot = u\"col_left\")\n page_two_ph_three = page_two.placeholders.get(slot = u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_two_ph_one.page if page_two_ph_one else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_two.page if page_two_ph_two else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_three.page if page_two_ph_three else None\n self.assertEqual(found_page, page_two)\n # check the stored placeholders org vs copy\n msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_one.pk, 
page_one_ph_one.pk, msg)\n msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_two.pk, page_one_ph_two.pk, msg)\n msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_three.pk, page_one_ph_three.pk, msg)\n # get the plugins from the original page\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 0)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 2)\n # get the plugins from the copied page\n copied_placeholder_one_plugins = page_two_ph_one.get_plugins()\n self.assertEquals(len(copied_placeholder_one_plugins), 1)\n copied_placeholder_two_plugins = page_two_ph_two.get_plugins()\n self.assertEquals(len(copied_placeholder_two_plugins), 0)\n copied_placeholder_three_plugins = page_two_ph_three.get_plugins()\n self.assertEquals(len(copied_placeholder_three_plugins), 2)\n # verify the plugins got copied\n # placeholder 1\n count_plugins_copied = len(copied_placeholder_one_plugins)\n count_plugins_org = len(org_placeholder_one_plugins)\n msg = u\"plugin count %s %s for placeholder one not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 2\n count_plugins_copied = len(copied_placeholder_two_plugins)\n count_plugins_org = len(org_placeholder_two_plugins)\n msg = u\"plugin count %s %s for placeholder two not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 3\n count_plugins_copied = len(copied_placeholder_three_plugins)\n count_plugins_org = len(org_placeholder_three_plugins)\n msg = u\"plugin count %s %s for placeholder three not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg)\n # verify the body of text plugin with nested link plugin\n # org to copied \n org_nested_text_plugin = None\n # do this iteration to find the real text plugin with the attached link\n # the inheritance mechanism for the cmsplugins works through \n # (tuple)get_plugin_instance()\n for x in org_placeholder_three_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n org_nested_text_plugin = instance\n break\n copied_nested_text_plugin = None\n for x in copied_placeholder_three_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n copied_nested_text_plugin = instance\n break\n msg = u\"orginal nested text plugin not found\"\n self.assertNotEquals(org_nested_text_plugin, None, msg=msg)\n msg = u\"copied nested text plugin not found\"\n self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)\n # get the children ids of the texplugin with a nested link\n # to check if the body of the text is generated correctly\n org_link_child_plugin = org_nested_text_plugin.get_children()[0]\n copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]\n # validate the textplugin body texts\n msg = u\"org plugin and copied plugin 
are the same\"\n self.assertTrue(org_link_child_plugin.id != copied_link_child_plugin.id, msg)\n needle = u\"plugin_obj_%s\"\n msg = u\"child plugin id differs to parent in body plugin_obj_id\"\n # linked child is in body\n self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)\n msg = u\"copy: child plugin id differs to parent in body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)\n # really nothing else\n msg = u\"child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)\n msg = u\"copy: child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)\n # now reverse lookup the placeholders from the plugins\n org_placeholder = org_link_child_plugin.placeholder\n copied_placeholder = copied_link_child_plugin.placeholder\n msg = u\"placeholder of the orginal plugin and copied plugin are the same\"\n ok = ((org_placeholder.id != copied_placeholder.id))\n self.assertTrue(ok, msg)",
"def test_override_plugin(self):\n plugin_name = 'Stdout'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')",
"def test_clone_deployment(self):\n pass",
"def test_copy_textplugin(self):\n page = create_page(\"page\", \"nav_playground.html\", \"en\")\n \n placeholder = page.placeholders.get(slot='body')\n\n plugin_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=1,\n language=self.FIRST_LANG)\n plugin_base.insert_at(None, position='last-child', save=False)\n\n plugin = Text(body='')\n plugin_base.set_base_attr(plugin)\n plugin.save()\n\n plugin_ref_1_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=1,\n language=self.FIRST_LANG)\n plugin_ref_1_base.insert_at(plugin_base, position='last-child', save=False)\n\n plugin_ref_1 = Text(body='')\n plugin_ref_1_base.set_base_attr(plugin_ref_1)\n plugin_ref_1.save()\n\n plugin_ref_2_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=2,\n language=self.FIRST_LANG)\n plugin_ref_2_base.insert_at(plugin_base, position='last-child', save=False)\n\n plugin_ref_2 = Text(body='')\n plugin_ref_2_base.set_base_attr(plugin_ref_2)\n\n plugin_ref_2.save()\n\n plugin.body = plugin_tags_to_admin_html(' {{ plugin_object %s }} {{ plugin_object %s }} ' % (str(plugin_ref_1.pk), str(plugin_ref_2.pk)))\n plugin.save()\n self.assertEquals(plugin.pk, 1)\n page_data = self.get_new_page_data()\n\n #create 2nd language page\n page_data.update({\n 'language': self.SECOND_LANG,\n 'title': \"%s %s\" % (page.get_title(), self.SECOND_LANG),\n })\n response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + \"?language=%s\" % self.SECOND_LANG, page_data)\n self.assertRedirects(response, URL_CMS_PAGE)\n\n self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n self.assertEquals(Page.objects.all().count(), 1)\n\n copy_data = {\n 'placeholder': placeholder.pk,\n 'language': self.SECOND_LANG,\n 'copy_from': self.FIRST_LANG,\n }\n response = self.client.post(URL_CMS_PAGE + \"copy-plugins/\", copy_data)\n self.assertEquals(response.status_code, 200)\n self.assertEqual(response.content.count('<li '), 3)\n # assert copy success\n self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.count(), 6)\n\n new_plugin = Text.objects.get(pk=6)\n self.assertEquals(plugin_tags_to_id_list(new_plugin.body), [u'4', u'5'])",
"def test_copy(self):\n\n tempdir = tempfile.mkdtemp()\n include_example = os.path.join(here, 'include-example.ini')\n manifest = ManifestParser(manifests=(include_example,))\n manifest.copy(tempdir)\n self.assertEqual(sorted(os.listdir(tempdir)),\n ['fleem', 'include', 'include-example.ini'])\n self.assertEqual(sorted(os.listdir(os.path.join(tempdir, 'include'))),\n ['bar.ini', 'crash-handling', 'flowers', 'foo.ini'])\n from_manifest = ManifestParser(manifests=(include_example,))\n to_manifest = os.path.join(tempdir, 'include-example.ini')\n to_manifest = ManifestParser(manifests=(to_manifest,))\n self.assertEqual(to_manifest.get('name'), from_manifest.get('name'))\n shutil.rmtree(tempdir)",
"def test_plugins():\n assert plugins.template.plugin_test() == True\n assert plugin_test() == True",
"def test_get_plugins_with_search_args(self):\n response = self.client.get_plugins({'name_exact': \"pl-dircopy\"})\n self.assertEqual(response['data'][0]['name'], \"pl-dircopy\")",
"def test_clone_scenario(self):\n pass",
"def load_plugin():\n return HostTestPluginCopyMethod_Shell()",
"def test_remote_plugin(self):\n plugin_name = 'Slack'\n Plugin.download_plugin(plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')",
"def test_install_plugin_again_is_ok(self):\n raise NotImplementedError()",
"def test_install_terraform_plugin_1(monkeypatch):\n monkeypatch.setattr(os, 'listdir', lambda path: [\n 'terraform-provider-terraform_v0.11.2_x4'])\n monkeypatch.setattr(os, 'remove', lambda path: True)\n monkeypatch.setattr(os, 'chmod', lambda path, permissions: True)\n\n monkeypatch.setattr(shutil, 'copy2', lambda src, dest: True)\n\n def mp_check_output(cmd):\n if cmd == ['which', 'terraform']:\n return b'/usr/local/bin/terraform\\n'\n\n if cmd == ['terraform', '-v']:\n return b'Terraform v0.11.3\\n\\n'\n\n raise Exception('Unmocked command: %s' % cmd)\n\n monkeypatch.setattr(subprocess, 'check_output', mp_check_output)\n\n install_terraform_plugin('/tmp/stone-burner_plugins')",
"def test_register_dynamic_plugin(self):\n pass",
"def test_register_dynamic_plugin1(self):\n pass",
"def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)",
"def test_discover_and_install_1(monkeypatch):\n monkeypatch.setattr(subprocess, 'check_call', lambda cmd: True)\n monkeypatch.setattr(os, 'getcwd', lambda: '/tmp')\n monkeypatch.setattr(os, 'chdir', lambda path: True)\n monkeypatch.setattr(os, 'walk', lambda path: [\n ('root', ['d1', 'd2'], ['f1', 'f2']), ])\n monkeypatch.setattr(tempfile, 'mkdtemp', lambda: '/tmp/tempdir')\n monkeypatch.setattr(shutil, 'move', lambda src, dest: True)\n monkeypatch.setattr(shutil, 'rmtree', lambda path: True)\n\n discover_and_install(\n '/tmp/stone-burner_plugins',\n 'p1',\n ['c1', 'c2', 'c3'],\n SAMPLE_CONFIG,\n ['c1', 'c2'],\n )",
"def test_plugin_initialize(self):\n p = PluginCustom()\n self.assertEqual('youpie', p.toto)",
"def test_bogus_plugin_autoremove_no_path(conf):\n plugin_name = \"giraffehorse\"\n conf.config[\"INSTALLED_APPS\"].append(plugin_name)\n conf.save()\n conf.autoremove_unavailable_plugins()\n assert plugin_name not in conf.config[\"INSTALLED_APPS\"]",
"def test_register_dynamic_plugin_manager(self):\n pass",
"def test_args_copy():\n args = cli.parse_args(['-c'])\n assert args.copy\n args = cli.parse_args(['--copy'])\n assert args.copy",
"def test_register_dynamic_plugin_manager1(self):\n pass",
"def test_specific_plugin_installed(self):\n self._add_plugin(self.jigconfig, 'plugin01')\n set_jigconfig(self.gitrepodir, config=self.jigconfig)\n\n # Create staged\n self.commit(self.gitrepodir, 'a.txt', 'a')\n self.stage(self.gitrepodir, 'b.txt', 'b')\n\n with nested(\n patch('jig.runner.sys'),\n self.assertRaises(SystemExit)\n ) as (r_sys, ec):\n # Raise the error to halt execution like the real sys.exit would\n r_sys.exit.side_effect = SystemExit\n\n self.run_command('--plugin plugin01 {0}'.format(self.gitrepodir))\n\n self.assertResults(u\"\"\"\n ▾ plugin01\n\n ⚠ line 1: b.txt\n b is +\n\n {0} Jig ran 1 plugin\n Info 0 Warn 1 Stop 0\n \"\"\".format(ATTENTION), self.output)",
"def test_plugin_with_no_plugin_class(conf):\n # For fun, we pass in a system library\n installed_apps_before = conf.config[\"INSTALLED_APPS\"][:]\n cli.plugin(\"os.path\")\n assert installed_apps_before == conf.config[\"INSTALLED_APPS\"]",
"def test_plugin_urls(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n self.assertEqual(plugin.urls, urlpatterns)",
"def test_plugin_command(node_factory):\n n = node_factory.get_node()\n\n # Make sure that the 'hello' command from the helloworld.py plugin\n # is not available.\n cmd = [hlp for hlp in n.rpc.help()[\"help\"] if \"hello\" in hlp[\"command\"]]\n assert(len(cmd) == 0)\n\n # Add the 'contrib/plugins' test dir\n n.rpc.plugin_startdir(directory=os.path.join(os.getcwd(), \"contrib/plugins\"))\n # Make sure that the 'hello' command from the helloworld.py plugin\n # is now available.\n cmd = [hlp for hlp in n.rpc.help()[\"help\"] if \"hello\" in hlp[\"command\"]]\n assert(len(cmd) == 1)\n\n # Make sure 'rescan' and 'list' subcommands dont crash\n n.rpc.plugin_rescan()\n n.rpc.plugin_list()\n\n # Make sure the plugin behaves normally after stop and restart\n assert(\"Successfully stopped helloworld.py.\" == n.rpc.plugin_stop(plugin=\"helloworld.py\")[''])\n n.daemon.wait_for_log(r\"Killing plugin: helloworld.py\")\n n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), \"contrib/plugins/helloworld.py\"))\n n.daemon.wait_for_log(r\"Plugin helloworld.py initialized\")\n assert(\"Hello world\" == n.rpc.call(method=\"hello\"))\n\n # Now stop the helloworld plugin\n assert(\"Successfully stopped helloworld.py.\" == n.rpc.plugin_stop(plugin=\"helloworld.py\")[''])\n n.daemon.wait_for_log(r\"Killing plugin: helloworld.py\")\n # Make sure that the 'hello' command from the helloworld.py plugin\n # is not available anymore.\n cmd = [hlp for hlp in n.rpc.help()[\"help\"] if \"hello\" in hlp[\"command\"]]\n assert(len(cmd) == 0)\n\n # Test that we cannot start a plugin with 'dynamic' set to False in\n # getmanifest\n with pytest.raises(RpcError, match=r\"Not a dynamic plugin\"):\n n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), \"tests/plugins/static.py\"))\n\n # Test that we cannot stop a started plugin with 'dynamic' flag set to\n # False\n n2 = node_factory.get_node(options={\n \"plugin\": os.path.join(os.getcwd(), \"tests/plugins/static.py\")\n })\n with pytest.raises(RpcError, match=r\"static.py cannot be managed when lightningd is up\"):\n n2.rpc.plugin_stop(plugin=\"static.py\")\n\n # Test that we don't crash when starting a broken plugin\n with pytest.raises(RpcError, match=r\"Timed out while waiting for plugin response\"):\n n2.rpc.plugin_start(plugin=os.path.join(os.getcwd(), \"tests/plugins/broken.py\"))",
"def test_existing_multiple(self, tmp_path, loader, registry, plugin_1, bg_instance):\n\n instance1 = Instance(name=\"instance1\", id=\"58542eb571afd47ead90beef\")\n instance2 = Instance(name=\"instance2\", id=\"58542eb571afd47ead90beee\")\n create_system(\n System(name=\"foo\", version=\"1.0\", instances=[instance1, instance2])\n )\n\n plugin = tmp_path / \"plugin\"\n plugin.mkdir()\n\n write_file(\n plugin,\n textwrap.dedent(\n \"\"\"\n NAME='foo'\n VERSION='1.0'\n PLUGIN_ENTRY='entry.py'\n INSTANCES=[\"instance2\", \"instance3\"]\n \"\"\"\n ),\n )\n\n plugin_runners = loader.load_plugin(plugin)\n assert len(plugin_runners) == 2\n\n assert db.query_unique(Instance, name=\"instance1\") is None\n assert db.query_unique(Instance, name=\"instance3\") is not None\n\n instance2_db = db.query_unique(Instance, name=\"instance2\")\n assert instance2_db is not None\n assert instance2_db.id == instance2.id",
"def test_load_config(self):\n config = copyclipper.LoadConfig()\n self.assertTrue(len(config) > 0)"
]
| [
"0.72379047",
"0.68762565",
"0.68040013",
"0.679464",
"0.6739639",
"0.6720053",
"0.671467",
"0.6638799",
"0.65787935",
"0.64952445",
"0.6469627",
"0.6386378",
"0.6312222",
"0.6177669",
"0.6136663",
"0.6053952",
"0.60498655",
"0.601105",
"0.59868664",
"0.588027",
"0.58792764",
"0.58769757",
"0.5838749",
"0.58180755",
"0.58109015",
"0.5805623",
"0.5797585",
"0.5784643",
"0.57615423",
"0.573802"
]
| 0.75676566 | 0 |
When removing a draft plugin, we would expect the public copy of the plugin to also be removed | def test_remove_plugin_before_published(self):
# add a page
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type':"TextPlugin",
'language':settings.LANGUAGES[0][0],
'placeholder':page.placeholders.get(slot="body").pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEquals(response.status_code, 200)
self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEquals(CMSPlugin.objects.all().count(), 1)
# delete the plugin
plugin_data = {
'plugin_id': int(response.content)
}
remove_url = URL_CMS_PLUGIN_REMOVE
response = self.client.post(remove_url, plugin_data)
self.assertEquals(response.status_code, 200)
# there should be no plugins
self.assertEquals(0, CMSPlugin.objects.all().count()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_plugin_data(self):",
"def unpublish(self, location):\r\n self.convert_to_draft(location)\r\n super(DraftModuleStore, self).delete_item(location)",
"def _delete_plugin_data(self):\n try:\n self.delete_plugin_data()\n except Exception as err:\n logging.debug(str(err))",
"def unload_plugin(self):\n pass",
"def test_remove_plugin_not_associated_to_page(self):\n page_data = self.get_new_page_data()\n response = self.client.post(URL_CMS_PAGE_ADD, page_data)\n page = Page.objects.all()[0]\n\n # add a plugin\n plugin_data = {\n 'plugin_type':\"TextPlugin\",\n 'language':settings.LANGUAGES[0][0],\n 'placeholder':page.placeholders.get(slot=\"body\").pk,\n }\n response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)\n\n # there should be only 1 plugin\n self.assertEquals(CMSPlugin.objects.all().count(), 1)\n\n ph = Placeholder(slot=\"subplugin\")\n ph.save()\n plugin_data = {\n 'plugin_type':\"TextPlugin\",\n 'language':settings.LANGUAGES[0][0],\n 'placeholder': ph.pk,\n 'parent': int(response.content)\n }\n response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)\n # no longer allowed for security reasons\n self.assertEqual(response.status_code, 404)",
"def _remove(self):\n pass",
"def remove():",
"def remove(self):",
"def project_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()",
"def remove():\n pass",
"def delete_version(self):\n pass",
"def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)",
"def delete_underhanded(self,key):\n for plugin in self.server.plugins:\n if isinstance(plugin,MemcachedPlugin) and not plugin is self:\n plugin._delete_data(key)\n return None",
"def _is_plugin_deletable(cls, plugin):\n return not ClusterPlugin.is_plugin_used(plugin.id)",
"def dvs_uninstall(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_install\")\n\n self.show_step(2)\n cmd = 'fuel plugins --remove {0}=={1}'.format(\n plugin.plugin_name, plugin.DVS_PLUGIN_VERSION)\n\n assert_true(\n self.env.d_env.get_admin_remote().execute(cmd)['exit_code'] == 0,\n 'Can not remove plugin.')\n\n self.show_step(3)\n cmd = 'fuel plugins list'\n output = list(self.env.d_env.get_admin_remote().execute(\n cmd)['stdout']).pop().split(' ')\n\n assert_true(\n plugin.plugin_name not in output,\n \"Plugin is not removed {}\".format(plugin.plugin_name)\n )",
"def test_publish_draft_delete(self):\r\n location = self.old_course_key.make_usage_key('vertical', name='Vert1')\r\n item = self.draft_mongo.get_item(location, 2)\r\n self._xmodule_recurse(\r\n item,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n # verify status\r\n item = self.draft_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Item was published. Draft should not exist\")\r\n # however, children are still draft, but I'm not sure that's by design\r\n\r\n # convert back to draft\r\n self.draft_mongo.convert_to_draft(location)\r\n # both draft and published should exist\r\n draft_vert = self.draft_mongo.get_item(location, 0)\r\n self.assertTrue(getattr(draft_vert, 'is_draft', False), \"Item was converted to draft but doesn't say so\")\r\n item = self.old_mongo.get_item(location, 0)\r\n self.assertFalse(getattr(item, 'is_draft', False), \"Published item doesn't say so\")\r\n\r\n # delete the discussion (which oddly is not in draft mode)\r\n location = self.old_course_key.make_usage_key('discussion', name='Discussion1')\r\n self.draft_mongo.delete_item(location)\r\n # remove pointer from draft vertical (verify presence first to ensure process is valid)\r\n self.assertIn(location, draft_vert.children)\r\n draft_vert.children.remove(location)\r\n # move the other child\r\n other_child_loc = self.old_course_key.make_usage_key('html', name='Html2')\r\n draft_vert.children.remove(other_child_loc)\r\n other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', name='Vert2'), 0)\r\n other_vert.children.append(other_child_loc)\r\n self.draft_mongo.update_item(draft_vert, self.userid)\r\n self.draft_mongo.update_item(other_vert, self.userid)\r\n # publish\r\n self._xmodule_recurse(\r\n draft_vert,\r\n lambda i: self.draft_mongo.publish(i.location, self.userid)\r\n )\r\n item = self.old_mongo.get_item(draft_vert.location, 0)\r\n self.assertNotIn(location, item.children)\r\n with self.assertRaises(ItemNotFoundError):\r\n self.draft_mongo.get_item(location)\r\n self.assertNotIn(other_child_loc, item.children)\r\n self.assertTrue(self.draft_mongo.has_item(other_child_loc), \"Oops, lost moved item\")",
"def remove(self, egg):",
"def PluginUninstall(self, packageName):\n pass",
"def _removePlugins(portal):\n uf = getToolByName(portal, \"acl_users\")\n existing = uf.objectIds()\n\n if \"membrane\" in existing:\n uf.manage_delObjects(\n [\n \"membrane\",\n ]\n )\n\n if \"membrane_users\" in existing:\n uf.manage_delObjects(\n [\n \"membrane_users\",\n ]\n )\n\n if \"membrane_groups\" in existing:\n uf.manage_delObjects(\n [\n \"membrane_groups\",\n ]\n )\n\n if \"membrane_roles\" in existing:\n uf.manage_delObjects(\n [\n \"membrane_roles\",\n ]\n )\n\n if \"membrane_properties\" in existing:\n uf.manage_delObjects(\n [\n \"membrane_properties\",\n ]\n )\n\n if \"membrane_user_factory\" in existing:\n uf.manage_delObjects(\n [\n \"membrane_user_factory\",\n ]\n )",
"def __del__(self):\r\n del self.addons",
"def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()",
"def prepareToRemove( self ):\n self.emitRemoved()\n return True",
"def remove_export(self, context, volume):\n pass",
"def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return HttpTextResponse('OK')",
"def delete(self):\n self.package = None",
"def otherfiles_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def test_remove_submission_service_from_project(self):\n pass",
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def remove_plugin(self, plugin: str):\r\n with PluginStore.mutex:\r\n if self._plugins.get(plugin) is not None:\r\n self._plugins.pop(plugin, None).shutdown()"
]
| [
"0.71801907",
"0.6379829",
"0.63617396",
"0.6255437",
"0.61235017",
"0.6070682",
"0.60078716",
"0.5940099",
"0.588655",
"0.5859026",
"0.58552694",
"0.58488226",
"0.5808737",
"0.57944655",
"0.57893765",
"0.57401067",
"0.5739087",
"0.56985503",
"0.56859654",
"0.56834507",
"0.5673921",
"0.56661177",
"0.5665369",
"0.56629395",
"0.5660336",
"0.56074154",
"0.56055224",
"0.55962354",
"0.5593601",
"0.5581728"
]
| 0.6627623 | 1 |
Test that copying of textplugins replaces references to copied plugins | def test_copy_textplugin(self):
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin_base.insert_at(None, position='last-child', save=False)
plugin = Text(body='')
plugin_base.set_base_attr(plugin)
plugin.save()
plugin_ref_1_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin_ref_1_base.insert_at(plugin_base, position='last-child', save=False)
plugin_ref_1 = Text(body='')
plugin_ref_1_base.set_base_attr(plugin_ref_1)
plugin_ref_1.save()
plugin_ref_2_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=2,
language=self.FIRST_LANG)
plugin_ref_2_base.insert_at(plugin_base, position='last-child', save=False)
plugin_ref_2 = Text(body='')
plugin_ref_2_base.set_base_attr(plugin_ref_2)
plugin_ref_2.save()
plugin.body = plugin_tags_to_admin_html(' {{ plugin_object %s }} {{ plugin_object %s }} ' % (str(plugin_ref_1.pk), str(plugin_ref_2.pk)))
plugin.save()
self.assertEquals(plugin.pk, 1)
page_data = self.get_new_page_data()
#create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEquals(CMSPlugin.objects.count(), 3)
self.assertEquals(Page.objects.all().count(), 1)
copy_data = {
'placeholder': placeholder.pk,
'language': self.SECOND_LANG,
'copy_from': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.content.count('<li '), 3)
# assert copy success
self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)
self.assertEquals(CMSPlugin.objects.count(), 6)
new_plugin = Text.objects.get(pk=6)
self.assertEquals(plugin_tags_to_id_list(new_plugin.body), [u'4', u'5']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_copy_plugins(self):\n # create some objects\n page_en = create_page(\"CopyPluginTestPage (EN)\", \"nav_playground.html\", \"en\")\n page_de = create_page(\"CopyPluginTestPage (DE)\", \"nav_playground.html\", \"de\")\n ph_en = page_en.placeholders.get(slot=\"body\")\n ph_de = page_de.placeholders.get(slot=\"body\")\n \n # add the text plugin\n text_plugin_en = add_plugin(ph_en, \"TextPlugin\", \"en\", body=\"Hello World\")\n self.assertEquals(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)\n \n # add a *nested* link plugin\n link_plugin_en = add_plugin(ph_en, \"LinkPlugin\", \"en\", target=text_plugin_en,\n name=\"A Link\", url=\"https://www.django-cms.org\")\n \n # the call above to add a child makes a plugin reload required here.\n text_plugin_en = self.reload(text_plugin_en)\n \n # check the relations\n self.assertEquals(text_plugin_en.get_children().count(), 1)\n self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)\n \n # just sanity check that so far everything went well\n self.assertEqual(CMSPlugin.objects.count(), 2)\n \n # copy the plugins to the german placeholder\n copy_plugins_to(ph_en.cmsplugin_set.all(), ph_de, 'de')\n \n self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)\n text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]\n self.assertEqual(text_plugin_de.get_children().count(), 1)\n link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]\n \n \n # check we have twice as many plugins as before\n self.assertEqual(CMSPlugin.objects.count(), 4)\n \n # check language plugins\n self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)\n self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)\n \n \n text_plugin_en = self.reload(text_plugin_en)\n link_plugin_en = self.reload(link_plugin_en)\n \n # check the relations in english didn't change\n self.assertEquals(text_plugin_en.get_children().count(), 1)\n self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)\n \n self.assertEqual(link_plugin_de.name, link_plugin_en.name)\n self.assertEqual(link_plugin_de.url, link_plugin_en.url)\n \n self.assertEqual(text_plugin_de.body, text_plugin_en.body)",
"def test_copy_page_nested_plugin(self):\n with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False):\n templates = []\n # setup page 1\n page_one = create_page(u\"Three Placeholder\", u\"col_three.html\", u\"en\",\n position=u\"last-child\", published=True, in_navigation=True)\n page_one_ph_one = page_one.placeholders.get(slot=u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=u\"col_right\")\n # add the text plugin to placeholder one\n text_plugin_en = add_plugin(page_one_ph_one, u\"TextPlugin\", u\"en\", body=\"Hello World\")\n self.assertEquals(text_plugin_en.id, CMSPlugin.objects.all()[0].id)\n self.assertEquals(text_plugin_en.get_children().count(), 0)\n pre_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(pre_add_plugin_count, 1)\n ###\n # add a plugin to placeholder two\n ###\n pre_nesting_body = u\"<p>the nested text plugin with a link inside</p>\"\n text_plugin_two = add_plugin(page_one_ph_two, u\"TextPlugin\", u\"en\", body=pre_nesting_body)\n text_plugin_two = self.reload(text_plugin_two)\n # prepare nesting plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin_two = self.reload(text_plugin_two)\n link_plugin = add_plugin(page_one_ph_two, u\"LinkPlugin\", u\"en\", target=text_plugin_two)\n link_plugin.name = u\"django-cms Link\"\n link_plugin.url = u\"https://www.django-cms.org\" \n link_plugin.parent = text_plugin_two\n link_plugin.save()\n \n link_plugin = self.reload(link_plugin)\n text_plugin_two = self.reload(text_plugin_two)\n in_txt = \"\"\"<img id=\"plugin_obj_%s\" title=\"Link\" alt=\"Link\" src=\"/static/cms/images/plugins/link.png\">\"\"\"\n nesting_body = \"%s<p>%s</p>\" % (text_plugin_two.body, (in_txt % (link_plugin.id)))\n # emulate the editor in admin that adds some txt for the nested plugin\n text_plugin_two.body = nesting_body\n text_plugin_two.save()\n text_plugin_two = self.reload(text_plugin_two)\n # the link is attached as a child?\n self.assertEquals(text_plugin_two.get_children().count(), 1)\n post_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(post_add_plugin_count, 3)\n page_one.save()\n # get the plugins from the original page\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n # verifiy the plugins got created\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 0)\n self.assertEquals(page_one.placeholders.count(), 3)\n placeholder_count = Placeholder.objects.count()\n self.assertEquals(placeholder_count, 3)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n page_one_plugins = CMSPlugin.objects.all()\n ##\n # setup page_copy_target page\n ##\n page_copy_target = create_page(\"Three Placeholder - page copy target\", \"col_three.html\", \"en\",\n position=\"last-child\", published=True, in_navigation=True)\n all_page_count = Page.objects.all().count()\n pre_copy_placeholder_count = Placeholder.objects.count()\n self.assertEquals(pre_copy_placeholder_count, 6)\n # copy the page\n superuser = self.get_superuser()\n with 
self.login_user_context(superuser):\n page_two = self.copy_page(page_one, page_copy_target)\n # validate the expected pages,placeholders,plugins,pluginbodies\n after_copy_page_plugin_count = CMSPlugin.objects.count()\n self.assertEquals(after_copy_page_plugin_count, 6)\n # check the amount of copied stuff\n after_copy_page_count = Page.objects.all().count()\n after_copy_placeholder_count = Placeholder.objects.count()\n self.assertTrue((after_copy_page_count > all_page_count), msg = u\"no new page after copy\")\n self.assertTrue((after_copy_page_plugin_count > post_add_plugin_count), msg = u\"plugin count is not grown\")\n self.assertTrue((after_copy_placeholder_count > pre_copy_placeholder_count), msg = u\"placeholder count is not grown\") \n self.assertTrue((after_copy_page_count == 3), msg = u\"no new page after copy\")\n # orginal placeholder\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_one_ph_one.page if page_one_ph_one else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_two.page if page_one_ph_two else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_three.page if page_one_ph_three else None\n self.assertEqual(found_page, page_one)\n \n page_two = self.reload(page_two)\n page_two_ph_one = page_two.placeholders.get(slot = u\"col_sidebar\")\n page_two_ph_two = page_two.placeholders.get(slot = u\"col_left\")\n page_two_ph_three = page_two.placeholders.get(slot = u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_two_ph_one.page if page_two_ph_one else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_two.page if page_two_ph_two else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_three.page if page_two_ph_three else None\n self.assertEqual(found_page, page_two)\n # check the stored placeholders org vs copy\n msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_one.pk, page_one_ph_one.pk, msg)\n msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_two.pk, page_one_ph_two.pk, msg)\n msg = 'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_three.pk, page_one_ph_three.pk, msg)\n # get the plugins from the original page\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 0)\n # get the plugins from the copied page\n copied_placeholder_one_plugins = page_two_ph_one.get_plugins()\n self.assertEquals(len(copied_placeholder_one_plugins), 1)\n copied_placeholder_two_plugins = page_two_ph_two.get_plugins()\n self.assertEquals(len(copied_placeholder_two_plugins), 2)\n copied_placeholder_three_plugins = 
page_two_ph_three.get_plugins()\n self.assertEquals(len(copied_placeholder_three_plugins), 0)\n # verify the plugins got copied\n # placeholder 1\n count_plugins_copied = len(copied_placeholder_one_plugins)\n count_plugins_org = len(org_placeholder_one_plugins)\n msg = u\"plugin count %s %s for placeholder one not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 2\n count_plugins_copied = len(copied_placeholder_two_plugins)\n count_plugins_org = len(org_placeholder_two_plugins)\n msg = u\"plugin count %s %s for placeholder two not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 3\n count_plugins_copied = len(copied_placeholder_three_plugins)\n count_plugins_org = len(org_placeholder_three_plugins)\n msg = u\"plugin count %s %s for placeholder three not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg)\n # verify the body of text plugin with nested link plugin\n # org to copied \n org_nested_text_plugin = None\n # do this iteration to find the real text plugin with the attached link\n # the inheritance mechanism for the cmsplugins works through \n # (tuple)get_plugin_instance()\n for x in org_placeholder_two_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n org_nested_text_plugin = instance\n break\n copied_nested_text_plugin = None\n for x in copied_placeholder_two_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n copied_nested_text_plugin = instance\n break\n msg = u\"orginal nested text plugin not found\"\n self.assertNotEquals(org_nested_text_plugin, None, msg=msg)\n msg = u\"copied nested text plugin not found\"\n self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)\n # get the children ids of the texplugin with a nested link\n # to check if the body of the text is genrated correctly\n org_link_child_plugin = org_nested_text_plugin.get_children()[0]\n copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]\n # validate the textplugin body texts\n msg = u\"org plugin and copied plugin are the same\"\n self.assertTrue(org_link_child_plugin.id != copied_link_child_plugin.id, msg)\n needle = u\"plugin_obj_%s\"\n msg = u\"child plugin id differs to parent in body plugin_obj_id\"\n # linked child is in body\n self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)\n msg = u\"copy: child plugin id differs to parent in body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)\n # really nothing else\n msg = u\"child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)\n msg = u\"copy: child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)\n # now reverse lookup the placeholders from the plugins\n org_placeholder = org_link_child_plugin.placeholder\n copied_placeholder = copied_link_child_plugin.placeholder\n msg = u\"placeholder of the orginal plugin and copied plugin are the same\"\n ok = ((org_placeholder.id != copied_placeholder.id))\n 
self.assertTrue(ok, msg)",
"def test_copy_page_nested_plugin_moved_parent_plugin(self):\n with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False):\n templates = []\n # setup page 1\n page_one = create_page(u\"Three Placeholder\", u\"col_three.html\", u\"en\",\n position=u\"last-child\", published=True, in_navigation=True)\n page_one_ph_one = page_one.placeholders.get(slot=u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=u\"col_right\")\n # add the text plugin to placeholder one\n text_plugin_en = add_plugin(page_one_ph_one, u\"TextPlugin\", u\"en\", body=u\"Hello World\")\n self.assertEquals(text_plugin_en.id, CMSPlugin.objects.all()[0].id)\n self.assertEquals(text_plugin_en.get_children().count(), 0)\n pre_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(pre_add_plugin_count, 1)\n # add a plugin to placeholder twho\n pre_nesting_body = u\"<p>the nested text plugin with a link inside</p>\"\n text_plugin_two = add_plugin(page_one_ph_two, u\"TextPlugin\", u\"en\", body=pre_nesting_body)\n text_plugin_two = self.reload(text_plugin_two)\n # prepare nestin plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin_two = self.reload(text_plugin_two)\n link_plugin = add_plugin(page_one_ph_two, u\"LinkPlugin\", u\"en\", target=text_plugin_two)\n link_plugin.name = u\"django-cms Link\"\n link_plugin.url = u\"https://www.django-cms.org\" \n link_plugin.parent = text_plugin_two\n link_plugin.save()\n # reload after every save\n link_plugin = self.reload(link_plugin)\n text_plugin_two = self.reload(text_plugin_two)\n in_txt = u\"\"\"<img id=\"plugin_obj_%s\" title=\"Link\" alt=\"Link\" src=\"/static/cms/images/plugins/link.png\">\"\"\"\n nesting_body = \"%s<p>%s</p>\" % (text_plugin_two.body, (in_txt % (link_plugin.id)))\n # emulate the editor in admin that adds some txt for the nested plugin\n text_plugin_two.body = nesting_body\n text_plugin_two.save()\n text_plugin_two = self.reload(text_plugin_two)\n # the link is attached as a child?\n self.assertEquals(text_plugin_two.get_children().count(), 1)\n post_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(post_add_plugin_count, 3)\n page_one.save()\n # get the plugins from the original page\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n # verify the plugins got created\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 0)\n self.assertEquals(page_one.placeholders.count(), 3)\n \n placeholder_count = Placeholder.objects.count()\n self.assertEquals(placeholder_count, 3)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n page_one_plugins = CMSPlugin.objects.all()\n # setup page_copy_target\n page_copy_target = create_page(\"Three Placeholder - page copy target\", \"col_three.html\", \"en\",\n position=\"last-child\", published=True, in_navigation=True)\n all_page_count = Page.objects.all().count()\n pre_copy_placeholder_count = Placeholder.objects.count()\n self.assertEquals(pre_copy_placeholder_count, 6)\n superuser = self.get_superuser()\n 
with self.login_user_context(superuser):\n # now move the parent text plugin to another placeholder\n post_data = {\n u'placeholder': u\"col_right\",\n u'placeholder_id': u\"%s\" % (page_one_ph_three.id),\n u'ids': u\"%s\" % (text_plugin_two.id),\n u'plugin_id': u\"%s\" % (text_plugin_two.id),\n }\n edit_url = URL_CMS_MOVE_PLUGIN % (page_one.id)\n response = self.client.post(edit_url, post_data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content, u'ok')\n # check if the plugin got moved\n page_one = self.reload(page_one)\n text_plugin_two = self.reload(text_plugin_two)\n page_one_ph_one = page_one.placeholders.get(slot = u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot = u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot = u\"col_right\")\n \n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n # the plugin got moved and child got moved\n self.assertEquals(len(org_placeholder_two_plugins), 0)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 2)\n # copy the page\n page_two = self.copy_page(page_one, page_copy_target)\n # validate the expected pages,placeholders,plugins,pluginbodies\n after_copy_page_plugin_count = CMSPlugin.objects.count()\n self.assertEquals(after_copy_page_plugin_count, 6)\n after_copy_page_count = Page.objects.all().count()\n after_copy_placeholder_count = Placeholder.objects.count()\n self.assertTrue((after_copy_page_count > all_page_count), msg = u\"no new page after copy\")\n self.assertTrue((after_copy_page_plugin_count > post_add_plugin_count), msg = u\"plugin count is not grown\")\n self.assertTrue((after_copy_placeholder_count > pre_copy_placeholder_count), msg = u\"placeholder count is not grown\") \n self.assertTrue((after_copy_page_count == 3), msg = u\"no new page after copy\")\n # validate the structure\n # orginal placeholder\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot=u\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_one_ph_one.page if page_one_ph_one else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_two.page if page_one_ph_two else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_three.page if page_one_ph_three else None\n self.assertEqual(found_page, page_one)\n page_two = self.reload(page_two)\n page_two_ph_one = page_two.placeholders.get(slot = u\"col_sidebar\")\n page_two_ph_two = page_two.placeholders.get(slot = u\"col_left\")\n page_two_ph_three = page_two.placeholders.get(slot = u\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_two_ph_one.page if page_two_ph_one else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_two.page if page_two_ph_two else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_three.page if page_two_ph_three else None\n self.assertEqual(found_page, page_two)\n # check the stored placeholders org vs copy\n msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_one.pk, 
page_one_ph_one.pk, msg)\n msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_two.pk, page_one_ph_two.pk, msg)\n msg = u'placehoder ids copy:%s org:%s copied page %s are identical - tree broken' % (page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)\n self.assertNotEquals(page_two_ph_three.pk, page_one_ph_three.pk, msg)\n # get the plugins from the original page\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEquals(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEquals(len(org_placeholder_two_plugins), 0)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEquals(len(org_placeholder_three_plugins), 2)\n # get the plugins from the copied page\n copied_placeholder_one_plugins = page_two_ph_one.get_plugins()\n self.assertEquals(len(copied_placeholder_one_plugins), 1)\n copied_placeholder_two_plugins = page_two_ph_two.get_plugins()\n self.assertEquals(len(copied_placeholder_two_plugins), 0)\n copied_placeholder_three_plugins = page_two_ph_three.get_plugins()\n self.assertEquals(len(copied_placeholder_three_plugins), 2)\n # verify the plugins got copied\n # placeholder 1\n count_plugins_copied = len(copied_placeholder_one_plugins)\n count_plugins_org = len(org_placeholder_one_plugins)\n msg = u\"plugin count %s %s for placeholder one not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 2\n count_plugins_copied = len(copied_placeholder_two_plugins)\n count_plugins_org = len(org_placeholder_two_plugins)\n msg = u\"plugin count %s %s for placeholder two not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg) \n # placeholder 3\n count_plugins_copied = len(copied_placeholder_three_plugins)\n count_plugins_org = len(org_placeholder_three_plugins)\n msg = u\"plugin count %s %s for placeholder three not equal\" % (count_plugins_copied, count_plugins_org)\n self.assertEquals(count_plugins_copied, count_plugins_org, msg)\n # verify the body of text plugin with nested link plugin\n # org to copied \n org_nested_text_plugin = None\n # do this iteration to find the real text plugin with the attached link\n # the inheritance mechanism for the cmsplugins works through \n # (tuple)get_plugin_instance()\n for x in org_placeholder_three_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n org_nested_text_plugin = instance\n break\n copied_nested_text_plugin = None\n for x in copied_placeholder_three_plugins: \n if x.plugin_type == u\"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n copied_nested_text_plugin = instance\n break\n msg = u\"orginal nested text plugin not found\"\n self.assertNotEquals(org_nested_text_plugin, None, msg=msg)\n msg = u\"copied nested text plugin not found\"\n self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)\n # get the children ids of the texplugin with a nested link\n # to check if the body of the text is generated correctly\n org_link_child_plugin = org_nested_text_plugin.get_children()[0]\n copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]\n # validate the textplugin body texts\n msg = u\"org plugin and copied plugin 
are the same\"\n self.assertTrue(org_link_child_plugin.id != copied_link_child_plugin.id, msg)\n needle = u\"plugin_obj_%s\"\n msg = u\"child plugin id differs to parent in body plugin_obj_id\"\n # linked child is in body\n self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)\n msg = u\"copy: child plugin id differs to parent in body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)\n # really nothing else\n msg = u\"child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)\n msg = u\"copy: child link plugin id differs to parent body plugin_obj_id\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)\n # now reverse lookup the placeholders from the plugins\n org_placeholder = org_link_child_plugin.placeholder\n copied_placeholder = copied_link_child_plugin.placeholder\n msg = u\"placeholder of the orginal plugin and copied plugin are the same\"\n ok = ((org_placeholder.id != copied_placeholder.id))\n self.assertTrue(ok, msg)",
"def test_override_plugin(self):\n plugin_name = 'Stdout'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')",
"def test_custom_plugin(self):\n plugin_name = 'Druptest'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['name'] == plugin_name",
"def test_plugins():\n assert plugins.template.plugin_test() == True\n assert plugin_test() == True",
"def test_replacement3(engine_contents, engine_locations):\n file_name = 'Triangle.java.xml'\n new_contents = copy.deepcopy(engine_contents)\n new_locations = copy.deepcopy(engine_locations)\n target1 = (file_name, 'expr_stmt', 0)\n target2 = (file_name, 'comment', 0)\n assert not XmlEngine.do_replace(engine_contents, engine_locations, new_contents, new_locations, target1, target1)\n assert XmlEngine.do_replace(engine_contents, engine_locations, new_contents, new_locations, target1, target2)\n assert not XmlEngine.do_replace(engine_contents, engine_locations, new_contents, new_locations, target1, target2)",
"def test_install_terraform_plugin_1(monkeypatch):\n monkeypatch.setattr(os, 'listdir', lambda path: [\n 'terraform-provider-terraform_v0.11.2_x4'])\n monkeypatch.setattr(os, 'remove', lambda path: True)\n monkeypatch.setattr(os, 'chmod', lambda path, permissions: True)\n\n monkeypatch.setattr(shutil, 'copy2', lambda src, dest: True)\n\n def mp_check_output(cmd):\n if cmd == ['which', 'terraform']:\n return b'/usr/local/bin/terraform\\n'\n\n if cmd == ['terraform', '-v']:\n return b'Terraform v0.11.3\\n\\n'\n\n raise Exception('Unmocked command: %s' % cmd)\n\n monkeypatch.setattr(subprocess, 'check_output', mp_check_output)\n\n install_terraform_plugin('/tmp/stone-burner_plugins')",
"def test_replacement1(engine_contents, engine_locations):\n file_name = 'Triangle.java.xml'\n new_contents = copy.deepcopy(engine_contents)\n new_locations = copy.deepcopy(engine_locations)\n target1 = (file_name, 'expr_stmt', 3)\n assert not XmlEngine.do_replace(engine_contents, engine_locations, new_contents, new_locations, target1, target1)",
"def clone_plugin_data(self, entry):",
"def test_add_edit_plugin(self):\n # add a new text plugin\n page_data = self.get_new_page_data()\n response = self.client.post(URL_CMS_PAGE_ADD, page_data)\n page = Page.objects.all()[0]\n plugin_data = {\n 'plugin_type':\"TextPlugin\",\n 'language':settings.LANGUAGES[0][0],\n 'placeholder':page.placeholders.get(slot=\"body\").pk,\n }\n response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)\n self.assertEquals(response.status_code, 200)\n self.assertEquals(int(response.content), CMSPlugin.objects.all()[0].pk)\n # now edit the plugin\n edit_url = URL_CMS_PLUGIN_EDIT + response.content + \"/\"\n response = self.client.get(edit_url)\n self.assertEquals(response.status_code, 200)\n data = {\n \"body\":\"Hello World\"\n }\n response = self.client.post(edit_url, data)\n self.assertEquals(response.status_code, 200)\n txt = Text.objects.all()[0]\n self.assertEquals(\"Hello World\", txt.body)",
"def test_register_dynamic_plugin1(self):\n pass",
"def test_nested_plugin_on_page(self):\n with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False):\n # setup page 1\n page_one = create_page(u\"Three Placeholder\", u\"col_three.html\", u\"en\",\n position=u\"last-child\", published=True, in_navigation=True)\n page_one_ph_two = page_one.placeholders.get(slot=u\"col_left\")\n \n ###\n # add a plugin\n ###\n pre_nesting_body = u\"<p>the nested text plugin with a link inside</p>\"\n text_plugin = add_plugin(page_one_ph_two, u\"TextPlugin\", u\"en\", body=pre_nesting_body)\n # prepare nestin plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin = self.reload(text_plugin)\n link_plugin = add_plugin(page_one_ph_two, u\"LinkPlugin\", u\"en\", target=text_plugin)\n link_plugin.name = u\"django-cms Link\"\n link_plugin.url = u\"https://www.django-cms.org\" \n \n # as for some reason mptt does not \n # update the parent child relationship \n # in the add_plugin method when a target present\n # but this is not the topic of the test\n link_plugin.parent = text_plugin\n link_plugin.save()\n # reloading needs to be done after every save\n link_plugin = self.reload(link_plugin)\n text_plugin = self.reload(text_plugin)\n \n # mptt related insertion correct?\n msg = u\"parent plugin right is not updated, child not inserted correctly\"\n self.assertTrue(text_plugin.rght > link_plugin.rght, msg=msg)\n msg = u\"link has no parent\"\n self.assertFalse(link_plugin.parent == None, msg=msg)\n msg = u\"parent plugin left is not updated, child not inserted correctly\"\n self.assertTrue(text_plugin.lft < link_plugin.lft, msg=msg)\n msg = u\"child level is not bigger than parent level\"\n self.assertTrue(text_plugin.level < link_plugin.level , msg=msg)\n \n # add the link plugin to the body\n # emulate the editor in admin that adds some txt for the nested plugin\n in_txt = u\"\"\"<img id=\"plugin_obj_%s\" title=\"Link\" alt=\"Link\" src=\"/static/cms/images/plugins/link.png\">\"\"\"\n nesting_body = u\"%s<p>%s</p>\" % (text_plugin.body, (in_txt % (link_plugin.id)))\n text_plugin.body = nesting_body\n text_plugin.save()\n \n text_plugin = self.reload(text_plugin)\n # none of the descendants should have a placeholder other then my own one\n self.assertEquals(text_plugin.get_descendants().exclude(placeholder=text_plugin.placeholder).count(), 0)\n post_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(post_add_plugin_count, 2)",
"def test_register_dynamic_plugin(self):\n pass",
"def test_replace_project(self):\n pass",
"def test_update_content_copy(self):\n # add same content copy twise, there should be no duplication\n fpath_1 = self.temp_f_1.name\n fm_1 = content.Format.objects.using(self.the_channel_id).get(format_size=102)\n fm_3 = content.Format.objects.using(self.the_channel_id).get(format_size=46)\n file_1 = content.File.objects.using(self.the_channel_id).get(format=fm_1)\n api.update_content_copy(file_1, fpath_1)\n file_3 = content.File.objects.using(self.the_channel_id).filter(format=fm_3)[1]\n api.update_content_copy(file_3, fpath_1)\n self.assertEqual(1, len(os.listdir(settings.CONTENT_COPY_DIR+'/0/9/')))\n\n # swap the content copy in file_3\n fpath_2 = self.temp_f_2.name\n self.assertEqual(file_3.extension, '.pdf')\n api.update_content_copy(file_3, fpath_2)\n self.assertEqual(file_3.extension, '.mp4')\n\n # because file_3 and file_2 all have reference pointing to this content copy,\n # erase the reference from file_2 won't delete the content copy\n fm_2 = content.Format.objects.using(self.the_channel_id).get(format_size=51)\n file_2 = content.File.objects.using(self.the_channel_id).get(format=fm_2)\n api.update_content_copy(file_2, fpath_2)\n self.assertTrue(file_2.content_copy)\n api.update_content_copy(file_2, None)\n self.assertFalse(file_2.content_copy)\n content_copy_path = settings.CONTENT_COPY_DIR+'/3/3/335782204c8215e0061516c6b3b80271.mp4'\n self.assertTrue(os.path.isfile(content_copy_path))\n\n # all reference pointing to this content copy is gone,\n # the content copy should be deleted\n api.update_content_copy(file_3, None)\n self.assertFalse(os.path.isfile(content_copy_path))\n self.assertFalse(file_2.content_copy)\n self.assertFalse(file_2.checksum)\n\n # update None content copy on empty File object should be silent and have no effect\n api.update_content_copy(file_2, None)\n\n # test File __str__ method\n self.assertEqual(file_1.__str__(), '09293abba61d4fcfa4e3bd804bcaba43.pdf')\n\n # test MimeType __str__ method\n self.assertEqual(fm_1.mimetype.__str__(), 'video_high')\n\n # test for non File object exception\n with self.assertRaises(TypeError):\n api.update_content_copy(None, None)",
"def test_register():\n repobee.try_register_plugin(\n sanitizer, sanitizer.SanitizeRepo, sanitizer.SanitizeFile\n )",
"def replace_plugins_with_calls(nb):\n for cell in nb['cells']:\n cell['source'] = '\\n'.join(replace_plugins(get_source(cell)))\n \n return nb",
"def test_clipboard(self):\n def compare_text(clipboard, text, expected_text):\n self.compare_result = False\n self.compare_result = text == expected_text\n name = self.vimiv.get_pos(True)\n basename = os.path.basename(name)\n abspath = os.path.abspath(name)\n clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n primary = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)\n # Copy basename and abspath to clipboard\n self.vimiv[\"fileextras\"].copy_name(False)\n # Check if the info message is displayed correctly\n self.check_statusbar(\"INFO: Copied \" + basename + \" to clipboard\")\n clipboard.request_text(compare_text, basename)\n self.assertTrue(self.compare_result)\n self.vimiv[\"fileextras\"].copy_name(True)\n clipboard.request_text(compare_text, abspath)\n self.assertTrue(self.compare_result)\n # Toggle to primary and copy basename\n self.vimiv[\"fileextras\"].toggle_clipboard()\n self.vimiv[\"fileextras\"].copy_name(False)\n primary.request_text(compare_text, basename)\n self.assertTrue(self.compare_result)\n # Toggle back to clipboard and copy basename\n self.vimiv[\"fileextras\"].toggle_clipboard()\n self.vimiv[\"fileextras\"].copy_name(False)\n clipboard.request_text(compare_text, basename)\n self.assertTrue(self.compare_result)",
"def copySpecial():\n depNode = nuke.dependencies(nuke.selectedNode())\n dependNode = nuke.dependentNodes(nuke.INPUTS or nuke.HIDDEN_INPUTS or nuke.EXPRESSIONS, [nuke.selectedNode()])\n i = 0\n if dependNode[0].Class() in ['Scene', 'MergeGeo']:\n i = nuke.inputs(dependNode[0])+1\n\n nuke.nodeCopy(nukescripts.cut_paste_file())\n\n for node in nuke.allNodes():\n node['selected'].setValue(0)\n\n nuke.nodePaste(nukescripts.cut_paste_file())\n\n newNode = nuke.selectedNode()\n newNode.setInput(0, depNode[0])\n dependNode[0].setInput(i+1, newNode)",
"def test_extra_substitutions(modpath):\n retcode, out = flake8(\n join(modpath, \"RST305/sphinx-substitutions\"),\n substitutions=\"bar\",\n )\n assert not retcode, out",
"def replace_plugins(lines):\n starts, ends = [], []\n stripped = [[]]\n exports = []\n plugin = False\n for i, line in enumerate(lines):\n if line.rstrip().endswith(END):\n assert plugin, f\"END PLUGIN without BEGIN PLUGIN found in {lines}\"\n plugin = False\n ends.append(i)\n stripped.append([])\n \n elif line.rstrip().endswith(BEGIN):\n assert not plugin, f\"Nested plugins found in {lines}\"\n starts.append(i)\n exports.append(False)\n plugin = True\n \n elif line.rstrip().endswith(BEGIN_EXPORT):\n assert not plugin, f\"Nested plugins found in {lines}\"\n starts.append(i)\n exports.append(True)\n plugin = True\n\n elif plugin:\n stripped[len(starts) - 1].append(line)\n\n assert len(stripped) == len(starts) + 1 == len(ends) + 1 == len(exports) + 1, f\"Error processing plugins in {lines}\"\n assert all(s < e for s, e in zip(starts, ends))\n\n starts.reverse()\n ends.reverse()\n stripped.reverse()\n stripped = stripped[1:]\n\n lines = lines.copy()\n\n for i, (s, e) in enumerate(zip(starts, ends)):\n config = yaml.full_load(\"\\n\".join(stripped[i]))\n export = exports[i]\n pg = config[\"plugin\"]\n args = \", \".join(config.get(\"args\", []))\n kwargs = \", \".join([f\"{k}={v}\" for k, v in config.get(\"kwargs\", {}).items()])\n\n call = (\"run_plugin\", \"add_plugin_files\")[export]\n\n call = f'grader.{call}(\"{pg}\"'\n if args:\n call += f', {args}'\n if kwargs:\n call += f', {kwargs}'\n call += ')'\n\n del lines[s:e+1]\n lines.insert(s, call)\n\n return lines",
"def substitute_plugins(topconstruct):\n #pylint: disable=too-many-locals\n def generate_method(namecode):\n event_name, code = namecode\n method_name = syntax.Construct(syntax.VAR_NAME, event_name.args[0])\n method_name.resolution = RESOLUTION_NAKED\n fcn = syntax.Construct(syntax.FUNCTION_DEF, method_name, [], code)\n method = syntax.Construct(syntax.STRUCT_MEMBER_METHOD, fcn)\n return method\n\n def generate_handler_method(ondohandler):\n return generate_method(ondohandler.args)\n for _, plugin_it in enumerate(\n query(\n [is_layering([syntax.PLUGIN_DEF])],\n TreeItem(topconstruct))\n ):\n plugin = plugin_it.construct\n vname = plugin.args[0]\n label = plugin.args[1]\n #vnop = plugin.args[2]\n clauses = [] #plugin.args[3]\n # we need the upper \"program\" construct for inserting\n # statements\n\n # create a class for the macroscript\n sname = f\"Plugin_{vname.args[0]}\"\n snamec = syntax.Construct(syntax.VAR_NAME, sname)\n snamec.resolution = RESOLUTION_NAKED\n\n # vname is really a name that we give to the plugin\n vname.resolution = RESOLUTION_NAKED\n\n plugin_items = list(filter(lambda c: c.construct == syntax.ROLLOUT_ITEM, clauses))\n on_do_handlers = list(filter(lambda c: c.construct == syntax.ON_DO_HANDLER, clauses))\n other_decls = list(filter(lambda c: c.construct != syntax.ON_DO_HANDLER, clauses))\n event_members = list(map(generate_handler_method, on_do_handlers))\n events = list(map(lambda c: c.args[0].args[0], on_do_handlers))\n #decl_class = syntax.Construct(syntax.STRUCT_DEF, snamec, members)\n\n decl_class = syntax.Construct(syntax.PY_PLUGIN_CLASS, snamec, vname, label, event_members, events, other_decls, plugin_items)\n\n # and substitute the ROLLOUT_DEF by a call to the struct\n # constructor\n snamec = syntax.Construct(syntax.VAR_NAME, sname)\n snamec.resolution = RESOLUTION_NAKED\n # ---------- this vnop thing does not work\n cnstr_call = syntax.Construct(syntax.CALL, snamec, []) #vnop) <<<< problem here\n plugin_it.replace_construct(cnstr_call)\n\n # we now want to add this class declaration in the top level of the\n # program\n # (note: we do this after because this breaks indices... could cause other\n # problems?)\n function_program = find_first_parent([is_function_program], plugin_it)\n function_program_construct = function_program.construct\n function_program_construct.args[0].insert(0, decl_class)",
"def _add_conversion(self, plugin, pbt):\n assert self.shape == pbt.shape\n assert len(self.inserts) == len(pbt.inserts)\n for (i, o) in zip(self.inserts, pbt.inserts):\n assert i.shape == o.shape\n assert i.kind == o.kind\n assert i.unevaluated == o.unevaluated\n if plugin not in self._plugins:\n self._plugins[plugin] = pbt",
"async def test_restored_overwrites_text(self):\n await self.cog._unsilence(self.text_channel)\n self.text_channel.set_permissions.assert_awaited_once_with(\n self.cog._everyone_role,\n overwrite=self.text_overwrite,\n )\n\n # Recall that these values are determined by the fixture.\n self.assertTrue(self.text_overwrite.send_messages)\n self.assertFalse(self.text_overwrite.add_reactions)",
"def test_pre_migration_modifies_answers(\n tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n src, dst = map(tmp_path_factory.mktemp, (\"src\", \"dst\"))\n\n # v1 of template asks for a favourite song and writes it to songs.json\n with local.cwd(src):\n build_file_tree(\n {\n \"[[ _copier_conf.answers_file ]].jinja\": (\n \"[[ _copier_answers|tojson ]]\"\n ),\n \"copier.yml\": (\n f\"\"\"\\\n _envops: {BRACKET_ENVOPS_JSON}\n best_song: la vie en rose\n \"\"\"\n ),\n \"songs.json.jinja\": \"[ [[ best_song|tojson ]] ]\",\n }\n )\n git(\"init\")\n git(\"add\", \".\")\n git(\"commit\", \"-m1\")\n git(\"tag\", \"v1\")\n # User copies v1 template into subproject\n with local.cwd(dst):\n run_copy(src_path=str(src), defaults=True, overwrite=True)\n answers = json.loads(Path(\".copier-answers.yml\").read_text())\n assert answers[\"_commit\"] == \"v1\"\n assert answers[\"best_song\"] == \"la vie en rose\"\n assert json.loads(Path(\"songs.json\").read_text()) == [\"la vie en rose\"]\n git(\"init\")\n git(\"add\", \".\")\n git(\"commit\", \"-m1\")\n with local.cwd(src):\n build_file_tree(\n {\n # v2 of template supports multiple songs, has a different default\n # and includes a data format migration script\n \"copier.yml\": (\n f\"\"\"\\\n _envops: {BRACKET_ENVOPS_JSON}\n best_song_list:\n default: [paranoid android]\n _migrations:\n - version: v2\n before:\n - - python\n - -c\n - |\n import sys, json, pathlib\n answers_path = pathlib.Path(*sys.argv[1:])\n answers = json.loads(answers_path.read_text())\n answers[\"best_song_list\"] = [answers.pop(\"best_song\")]\n answers_path.write_text(json.dumps(answers))\n - \"[[ _copier_conf.dst_path ]]\"\n - \"[[ _copier_conf.answers_file ]]\"\n \"\"\"\n ),\n \"songs.json.jinja\": \"[[ best_song_list|tojson ]]\",\n }\n )\n git(\"add\", \".\")\n git(\"commit\", \"-m2\")\n git(\"tag\", \"v2\")\n # User updates subproject to v2 template\n with local.cwd(dst):\n run_update(defaults=True, overwrite=True, unsafe=True)\n answers = json.loads(Path(\".copier-answers.yml\").read_text())\n assert answers[\"_commit\"] == \"v2\"\n assert \"best_song\" not in answers\n assert answers[\"best_song_list\"] == [\"la vie en rose\"]\n assert json.loads(Path(\"songs.json\").read_text()) == [\"la vie en rose\"]",
"def load_plugin():\n return HostTestPluginCopyMethod_Shell()",
"def test_text_classifier_update_testing_samples(self):\n pass",
"def test_args_copy():\n args = cli.parse_args(['-c'])\n assert args.copy\n args = cli.parse_args(['--copy'])\n assert args.copy",
"def test_specific_plugin_installed(self):\n self._add_plugin(self.jigconfig, 'plugin01')\n set_jigconfig(self.gitrepodir, config=self.jigconfig)\n\n # Create staged\n self.commit(self.gitrepodir, 'a.txt', 'a')\n self.stage(self.gitrepodir, 'b.txt', 'b')\n\n with nested(\n patch('jig.runner.sys'),\n self.assertRaises(SystemExit)\n ) as (r_sys, ec):\n # Raise the error to halt execution like the real sys.exit would\n r_sys.exit.side_effect = SystemExit\n\n self.run_command('--plugin plugin01 {0}'.format(self.gitrepodir))\n\n self.assertResults(u\"\"\"\n ▾ plugin01\n\n ⚠ line 1: b.txt\n b is +\n\n {0} Jig ran 1 plugin\n Info 0 Warn 1 Stop 0\n \"\"\".format(ATTENTION), self.output)"
]
| [
"0.70641947",
"0.6443434",
"0.64426214",
"0.6301122",
"0.61512595",
"0.58257747",
"0.5588954",
"0.55469763",
"0.55395544",
"0.54618496",
"0.5419054",
"0.5336528",
"0.5318562",
"0.52748966",
"0.52744967",
"0.52645534",
"0.52374196",
"0.5197857",
"0.5186564",
"0.5158541",
"0.51315707",
"0.51276076",
"0.51197547",
"0.5116862",
"0.5114455",
"0.50983226",
"0.50936294",
"0.50901455",
"0.5075781",
"0.5073935"
]
| 0.76817626 | 0 |
Return an Elasticsearch filter for filtering out NIPSA'd annotations. The returned filter is suitable for inserting into an Es query dict. | def nipsa_filter(userid=None):
# If any one of these "should" clauses is true then the annotation will
# get through the filter.
should_clauses = [{"not": {"term": {"nipsa": True}}}]
if userid is not None:
# Always show the logged-in user's annotations even if they have nipsa.
should_clauses.append({"term": {"user": userid.lower()}})
return {"bool": {"should": should_clauses}} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def not_nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"not\": {\"term\": {\"nipsa\": True}}})\n return query",
"def nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"term\": {\"nipsa\": True}})\n return query",
"def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")",
"def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")",
"def filter_criteria(self) -> Optional[pulumi.Input['EventSourceMappingFilterCriteriaArgs']]:\n return pulumi.get(self, \"filter_criteria\")",
"def filter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"filter\")",
"def filter_criteria(self) -> pulumi.Output[Optional['outputs.EventSourceMappingFilterCriteria']]:\n return pulumi.get(self, \"filter_criteria\")",
"def filter(self) -> Optional[str]:\n return pulumi.get(self, \"filter\")",
"def filter(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"filter\")",
"def __filterIndices(self, indexFilter):\n if '*' in indexFilter:\n self.__index.filterIndices(indexFilter, indexFilter)\n else:\n self.__index.filterIndices(indexFilter)",
"def filter_non_running_NFs (self, nffg, log=logging.getLogger(\"FILTER\")):\n # TODO implement\n pass",
"def create_filter(args: dict) -> dict | None:\n if 'ip' in args:\n args['networkInterfaces.ipv4'] = args.pop('ip')\n expression_list = []\n for arg in args:\n value = args.get(arg)\n if arg == 'riskScore':\n restriction = \"GREATER_THAN_OR_EQUAL_TO\"\n values_list = [arg_to_number(value)]\n else:\n restriction = \"IN\"\n values_list = argToList(value)\n\n values_res = [{\"value\": val} for val in values_list]\n expression = {\n \"propertyName\": arg,\n \"restrictionType\": restriction,\n \"propertyValues\": values_res\n }\n expression_list.append(expression)\n if expression_list:\n return {\"criteria\": {\"criteriaList\": [{\"expressionList\": expression_list}], \"predicateType\": \"AND\"}}\n else:\n return None",
"def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])",
"def filter(self):\n return self._filter",
"def filter(self, *args, **kwargs):\n return FilteredQuery(self, F(*args, **kwargs))",
"def filter(self, filter_dict):\n pass",
"def not_image_filter(*args, **kwargs):\n import itk\n instance = itk.NotImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()",
"def filter(self, *args):\n return _libsbml.ElementFilter_filter(self, *args)",
"def __invert__(self):\n not_filter = proto.FilterExpression()\n not_filter.filter_not.filter_expression.MergeFrom(self.filter)\n self.filter = not_filter\n return self",
"def _all_user_annotations_query(request, user):\n userid = util.user.userid_from_username(user.username, request)\n return {\n 'filtered': {\n 'filter': {'term': {'user': userid.lower()}},\n 'query': {'match_all': {}}\n }\n }",
"def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))",
"def custom_filter(image: Image) -> Image:\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image",
"def filter_denoise(self, x):\n b, a = self.c_notch\n return filtfilt(b, a, x)",
"def _custom_filter(self, query):\r\n return query",
"def query_for_users_annotations(userid):\n return {\n \"query\": {\n \"filtered\": {\n \"filter\": {\n \"bool\": {\n \"must\": [{\"term\": {\"user\": userid.lower()}}]\n }\n }\n }\n }\n }",
"def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)",
"def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")",
"def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")",
"def _adj_filt(self, ndims):\n\n # inner filter, that is 3x3x...\n filt_inner = np.zeros([3] * ndims)\n for j in range(ndims):\n o = [[1]] * ndims\n o[j] = [0, 2]\n filt_inner[np.ix_(*o)] = 1\n\n # full filter, that makes sure the inner filter is applied \n # ith feature to ith feature\n filt = np.zeros([3] * ndims + [ndims, ndims])\n for i in range(ndims):\n filt[..., i, i] = filt_inner\n \n return filt",
"def search_filter(query_params, query):\n if query_params.get('type') is not None:\n query = query.filter(search.c.kind == query_params.get('type'))\n return query"
]
| [
"0.63167226",
"0.63064855",
"0.54988974",
"0.54932624",
"0.52538085",
"0.51746196",
"0.51345307",
"0.5108755",
"0.50105524",
"0.49214038",
"0.49028167",
"0.48722836",
"0.4862893",
"0.48593944",
"0.48433056",
"0.48307765",
"0.47609994",
"0.47577628",
"0.47521484",
"0.47493073",
"0.47489008",
"0.4743355",
"0.47360024",
"0.47170806",
"0.47068587",
"0.4704797",
"0.46046412",
"0.46046412",
"0.4590706",
"0.4580151"
]
| 0.72393566 | 0 |
Return an Elasticsearch query for all the given user's annotations. | def query_for_users_annotations(userid):
return {
"query": {
"filtered": {
"filter": {
"bool": {
"must": [{"term": {"user": userid.lower()}}]
}
}
}
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _all_user_annotations_query(request, user):\n userid = util.user.userid_from_username(user.username, request)\n return {\n 'filtered': {\n 'filter': {'term': {'user': userid.lower()}},\n 'query': {'match_all': {}}\n }\n }",
"def nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"term\": {\"nipsa\": True}})\n return query",
"def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )",
"def GetAnnotationsQS(self):\n return self._costly_annotations_qs",
"def annotate(self, **annotations):\n return AnnotatedQuery(self, annotations)",
"def nipsa_filter(userid=None):\n # If any one of these \"should\" clauses is true then the annotation will\n # get through the filter.\n should_clauses = [{\"not\": {\"term\": {\"nipsa\": True}}}]\n\n if userid is not None:\n # Always show the logged-in user's annotations even if they have nipsa.\n should_clauses.append({\"term\": {\"user\": userid.lower()}})\n\n return {\"bool\": {\"should\": should_clauses}}",
"def not_nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"not\": {\"term\": {\"nipsa\": True}}})\n return query",
"def query_all_users():\n ddb = boto3.resource(\"dynamodb\")\n tb = ddb.Table(os.environ.get(\"TABLE_NAME\"))\n return tb.scan()",
"def query(self, *args, **kwargs) -> List[str]:\r\n self.logger.info(\"Returning Manual Users\")\r\n\r\n return kwargs['users']",
"def users(self, predicate=None):\n \n if predicate is None:\n return self._get(\"users\").json()\n else:\n return self._get(\"users/search\", params={\"predicate\":predicate}).json()",
"def create_searchqryset(cls, user, **kwargs):\n return cls._meta.model.published_where_is_examiner(user)",
"def _query_by_es_query(self, body: str, **kwargs) -> typing.Optional[typing.List[dict]]:\n return self.qm.search(body=body, **kwargs)",
"def get(self, request, search_string=None):\n query = SearchQuery(search_string)\n\n username_vector = SearchVector('username', weight='A')\n first_name_vector = SearchVector('first_name', weight='B')\n last_name_vector = SearchVector('last_name', weight='B')\n email_vector = SearchVector('email', weight='B')\n vectors = username_vector + first_name_vector + last_name_vector + email_vector\n qs = User.objects\n qs = qs.annotate(search=vectors).filter(search=query)\n qs = qs.annotate(rank=SearchRank(vectors, query)).order_by('-rank')\n print(qs)\n return Response(UserSerializer(qs, many=True).data)",
"def resources_for_index_query(self, search_text, session):\n query = session.query(self.User).order_by(self.User.name)\n if search_text:\n query = query.filter(self.User.name.ilike(\"%%%s%%\" % search_text))\n\n return query",
"def get_aggs(es, query, args):\n return es.search(index=args.index,\n body=query,\n timeout=float(args.timeout),\n filter_path=['aggregations'])",
"def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)",
"def getAllAuthorQueries(self):\n return self.queries[\"au\"]",
"def create_searchqryset(cls, user):\n return cls._meta.model.published_where_is_examiner(user).annotate(latest_delivery_id=Max('deadlines__deliveries__id'),\n latest_deadline_id=Max('deadlines__id'),\n latest_deadline_deadline=Max('deadlines__deadline'),\n number_of_deliveries=Count('deadlines__deliveries'))",
"def annotations(self):\n annotations = {\"date\": self.date_trunc(\"usage_start\")}\n # { query_param: database_field_name }\n fields = self._mapper.provider_map.get(\"annotations\")\n for q_param, db_field in fields.items():\n annotations[q_param] = F(db_field)\n if (\n \"project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"and:project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"or:project\" in self.parameters.parameters.get(\"group_by\", {})\n ):\n annotations[\"project\"] = F(\"namespace\")\n\n return annotations",
"def get_all_by_user_and_template(user_id, template_id):\n return SavedQuery.get_all_by_user_and_template(user_id, template_id)",
"def declareds(self, user=None, action=None):\n\n kw = { 'is_anonymous' : False }\n if user:\n kw.update({ 'user' : user })\n if action:\n kw.update({ 'action' : action })\n\n return self.filter(**kw)",
"def search_users(self, q):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/User/SearchUsers/\"))",
"def ig_users_in(body):\n import re\n\n try:\n user_re = Formatter.USER_TAG_REGEX\n except AttributeError:\n # escape special characters so we can format the user-tag into a\n # regex pattern\n escaped_user_tag = re.sub(\n # match any '[', ']', '(', or ')'\n r'([\\[\\]\\(\\)])',\n # escape the matched character\n r'\\\\\\1',\n Formatter.USER_TAG\n )\n pattern = escaped_user_tag.format(\n user_raw='({0})'.format(instagram.USERNAME_PTN)\n )\n user_re = re.compile(pattern)\n Formatter.USER_TAG_REGEX = user_re\n\n return user_re.findall(body)",
"def get_all_users():\n return User.query.all()",
"def search(self, query, count = None):\n\n url = \"https://api.instagram.com/v1/users/search?q={0}&access_token={1}\".format(query, self.access_token)\n\n if count != None:\n url += \"&count=\" + str(count)\n\n request = requests.get(url)\n return request.json()",
"def searchUsers(self,conds,_from,to,order_by,desc,admin_obj):\n self.__searchUsersCheckInput(conds,_from,to,order_by,desc,admin_obj)\n search_helper=user_main.getAttributeManager().runAttrSearchers(conds,admin_obj)\n return search_helper.getUserIDs(_from,to,order_by,desc)",
"def get(self):\n queries = {\"wildcard_properties\": []}\n\n fullname_query = request.args.get(\"fullName\", None)\n email_query = request.args.get(\"email\", None)\n\n if fullname_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{fullname_query}')\"\n queries[\"wildcard_properties\"].append(\"fullName\")\n if email_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{email_query}')\"\n queries[\"wildcard_properties\"].append(\"email\")\n\n users = User.filter(limit=10, **queries)\n response = UserListSchema(many=True).dumps(users).data\n\n return jsonify_response(json.loads(response), 200)",
"def get_users_autocomplete(q: str, **params) -> JsonResponse:\n response = get(f'{API_V1}/users/autocomplete', q=q, **params)\n users = response.json()\n users['results'] = convert_all_timestamps(users['results'])\n return users",
"def get_all_users():",
"def queryuser(q, limit=10):\n _, idx1 = idquery.query(q)\n _, idx2 = nicknamequery.query(q)\n idx = list(set(idx1 + idx2))\n if len(idx)>999:\n idx = idx[:999]\n rst = db_session.query(User.id, User.nickname).filter(User.index.in_(idx)).\\\n order_by(User.score.desc(), User.active.asc()).limit(limit).all()\n return [{'id':itm[0], 'name':itm[1]} for itm in rst]"
]
| [
"0.84776706",
"0.6552474",
"0.63611656",
"0.60318583",
"0.59738034",
"0.5916854",
"0.55968183",
"0.5592595",
"0.5590119",
"0.5431266",
"0.52867997",
"0.5285357",
"0.5262147",
"0.5254336",
"0.5243139",
"0.51481014",
"0.51313215",
"0.51166993",
"0.51010364",
"0.50775105",
"0.505981",
"0.49875653",
"0.49833444",
"0.49811915",
"0.49809778",
"0.4974542",
"0.49651733",
"0.49539956",
"0.49332878",
"0.49320853"
]
| 0.7565154 | 1 |
Return an Elasticsearch query for the user's NIPSA'd annotations. | def nipsad_annotations(userid):
query = query_for_users_annotations(userid)
query["query"]["filtered"]["filter"]["bool"]["must"].append(
{"term": {"nipsa": True}})
return query | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nipsa_filter(userid=None):\n # If any one of these \"should\" clauses is true then the annotation will\n # get through the filter.\n should_clauses = [{\"not\": {\"term\": {\"nipsa\": True}}}]\n\n if userid is not None:\n # Always show the logged-in user's annotations even if they have nipsa.\n should_clauses.append({\"term\": {\"user\": userid.lower()}})\n\n return {\"bool\": {\"should\": should_clauses}}",
"def not_nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"not\": {\"term\": {\"nipsa\": True}}})\n return query",
"def _all_user_annotations_query(request, user):\n userid = util.user.userid_from_username(user.username, request)\n return {\n 'filtered': {\n 'filter': {'term': {'user': userid.lower()}},\n 'query': {'match_all': {}}\n }\n }",
"def query_for_users_annotations(userid):\n return {\n \"query\": {\n \"filtered\": {\n \"filter\": {\n \"bool\": {\n \"must\": [{\"term\": {\"user\": userid.lower()}}]\n }\n }\n }\n }\n }",
"def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )",
"def GetAnnotationsQS(self):\n return self._costly_annotations_qs",
"def annotate(self, **annotations):\n return AnnotatedQuery(self, annotations)",
"def annotations(self):\n annotations = {\"date\": self.date_trunc(\"usage_start\")}\n # { query_param: database_field_name }\n fields = self._mapper.provider_map.get(\"annotations\")\n for q_param, db_field in fields.items():\n annotations[q_param] = F(db_field)\n if (\n \"project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"and:project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"or:project\" in self.parameters.parameters.get(\"group_by\", {})\n ):\n annotations[\"project\"] = F(\"namespace\")\n\n return annotations",
"def get(self, pid, sid):\n helpers.abort_if_invalid_parameters(pid, sid)\n project = Project.query.get(pid)\n annotations = UserAnnotationModel.query.filter_by(session_id=sid).all()\n annotations = UserAnnotationSchema(many=True).dump(annotations)\n if project.is_public:\n return custom_response(200, data=annotations)\n helpers.abort_if_unauthorized(project)\n return custom_response(200, data=annotations)",
"def create_searchqryset(cls, user, **kwargs):\n return cls._meta.model.published_where_is_examiner(user)",
"def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"annotations\")",
"def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"annotations\")",
"def get_analysis_annotations():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/analysis/annotations')\n\n annotations = []\n context_path = 'ThreatGrid.AnalysisResults.Sample.Id.Annotations'\n ec = {context_path: []} # type: ignore\n ips = demisto.get(r.json(), 'data.items.network') # type: ignore\n if ips:\n for k in ips:\n annotation = {\n 'IP': k,\n 'IP.Asn': ips[k].get('asn'),\n 'IP.City': ips[k].get('city'),\n 'IP.Country': ips[k].get('country'),\n 'IP.Org': ips[k].get('org'),\n 'IP.Region': ips[k].get('region'),\n 'IP.Timestamp': ips[k].get('ts')\n }\n annotations.append(annotation)\n ec[context_path].append(annotation)\n\n demisto.results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': r.json(),\n 'EntryContext': ec,\n 'HumanReadable': tableToMarkdown('ThreatGrid - Analysis Annotations', annotations, [\n 'IP', 'IP.Asn', 'IP.City', 'IP.Country', 'IP.Org', 'IP.Region', 'IP.Timestamp'\n ])\n })",
"def query(self, *args, **kwargs) -> List[str]:\r\n self.logger.info(\"Returning Manual Users\")\r\n\r\n return kwargs['users']",
"def annotations(self) -> Mapping[str, str]:\n return pulumi.get(self, \"annotations\")",
"def annotations(self) -> Mapping[str, str]:\n return pulumi.get(self, \"annotations\")",
"def get(self, request, search_string=None):\n query = SearchQuery(search_string)\n\n username_vector = SearchVector('username', weight='A')\n first_name_vector = SearchVector('first_name', weight='B')\n last_name_vector = SearchVector('last_name', weight='B')\n email_vector = SearchVector('email', weight='B')\n vectors = username_vector + first_name_vector + last_name_vector + email_vector\n qs = User.objects\n qs = qs.annotate(search=vectors).filter(search=query)\n qs = qs.annotate(rank=SearchRank(vectors, query)).order_by('-rank')\n print(qs)\n return Response(UserSerializer(qs, many=True).data)",
"def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)",
"def _query_by_es_query(self, body: str, **kwargs) -> typing.Optional[typing.List[dict]]:\n return self.qm.search(body=body, **kwargs)",
"def annotations(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"annotations\")",
"def extract_nps(text, annotation):\n np_starts = [i for i in range(len(annotation)) if annotation[i] == 'B-NP']\n np_indexes = []\n for s in np_starts:\n i = 1\n while s+i < len(annotation) and annotation[s + i] == 'I-NP':\n i += 1\n np_indexes.append((s, s + i))\n return [' '.join(text[s:e]) for s, e in np_indexes]",
"def resources_for_index_query(self, search_text, session):\n query = session.query(self.User).order_by(self.User.name)\n if search_text:\n query = query.filter(self.User.name.ilike(\"%%%s%%\" % search_text))\n\n return query",
"def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")",
"def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")",
"def inspect_ann(node):\n if node.annotation is not None:\n return [{\"name\": \"annotation\", \"line\": node.annotation.lineno - 1, \"end_line\": node.annotation.end_lineno - 1,\n \"col_offset\": node.annotation.col_offset, \"end_col_offset\": node.annotation.end_col_offset,\n \"var_line\": node.lineno - 1, \"var_end_line\": node.end_lineno - 1, \"var_col_offset\": node.col_offset,\n \"var_end_col_offset\": node.end_col_offset}]\n else:\n return []",
"def GetQNoAnnotations(cls):\n return models.Q(annotationstable__isnull=True)",
"def ncbi_egquery(keyword):\n\tif request.args.has_key('callback'):\n\t\tkwargs = {\"term\": keyword}\n\t\tprint \"term:\",keyword\n\t\treturn EUtils.egquery(request.args['callback'], request.args['email'], **kwargs)\n\telse:\n\t\tkwargs = {\"term\": keyword}\n\t\treturn EUtils.egquery(None, ENTREZ_EMAIL, **kwargs)",
"def annotation_request():\n resp = make_response([])\n return jsonify(resp)",
"def parseRDFAnnotation(*args):\n return _libsbml.RDFAnnotationParser_parseRDFAnnotation(*args)",
"def getAllAuthorQueries(self):\n return self.queries[\"au\"]"
]
| [
"0.7206935",
"0.70296365",
"0.6918794",
"0.66117877",
"0.57555217",
"0.56842613",
"0.52800405",
"0.519476",
"0.5101369",
"0.5007688",
"0.49125174",
"0.49125174",
"0.48912767",
"0.47703254",
"0.46712175",
"0.46712175",
"0.46326005",
"0.46066797",
"0.46025413",
"0.45956907",
"0.45896646",
"0.45538187",
"0.45535743",
"0.45535743",
"0.4536024",
"0.45338044",
"0.45229295",
"0.44982132",
"0.44844398",
"0.44525057"
]
| 0.81125087 | 0 |
Return an Elasticsearch query for the user's non-NIPSA'd annotations. | def not_nipsad_annotations(userid):
query = query_for_users_annotations(userid)
query["query"]["filtered"]["filter"]["bool"]["must"].append(
{"not": {"term": {"nipsa": True}}})
return query | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nipsad_annotations(userid):\n query = query_for_users_annotations(userid)\n query[\"query\"][\"filtered\"][\"filter\"][\"bool\"][\"must\"].append(\n {\"term\": {\"nipsa\": True}})\n return query",
"def nipsa_filter(userid=None):\n # If any one of these \"should\" clauses is true then the annotation will\n # get through the filter.\n should_clauses = [{\"not\": {\"term\": {\"nipsa\": True}}}]\n\n if userid is not None:\n # Always show the logged-in user's annotations even if they have nipsa.\n should_clauses.append({\"term\": {\"user\": userid.lower()}})\n\n return {\"bool\": {\"should\": should_clauses}}",
"def _all_user_annotations_query(request, user):\n userid = util.user.userid_from_username(user.username, request)\n return {\n 'filtered': {\n 'filter': {'term': {'user': userid.lower()}},\n 'query': {'match_all': {}}\n }\n }",
"def query_for_users_annotations(userid):\n return {\n \"query\": {\n \"filtered\": {\n \"filter\": {\n \"bool\": {\n \"must\": [{\"term\": {\"user\": userid.lower()}}]\n }\n }\n }\n }\n }",
"def GetQNoAnnotations(cls):\n return models.Q(annotationstable__isnull=True)",
"def users_excludeds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"users_excludeds\")",
"def users_excludeds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"users_excludeds\")",
"def users_excludeds(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"users_excludeds\")",
"def visit_not_query(self, query):\n return query",
"def GetAnnotationsQS(self):\n return self._costly_annotations_qs",
"def get_unlabelled_tweets_reannotation():\n conn = get_connection()\n c = conn.cursor()\n #res = c.execute('SELECT * FROM tweets WHERE tweets.is_about_depression is null AND tweets.username IN (SELECT username FROM users WHERE mentions_depr=1)').fetchall()\n res = c.execute('SELECT * FROM tweets WHERE tweets.is_about_depression IN (0, 1, 2) AND tweets.is_about_depression2 IS NULL ORDER BY random()').fetchall()\n conn.close()\n return np.array(res)",
"def test_index_excludes_users(self):\n self.admin_api.create(\n 'User', email='',\n auth_id='',\n last_name='Doe', first_name='John')\n self.cron.index()\n result_dicts = [util.search_document_to_dict(doc)\n for doc in self.search_index.get_range()]\n result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]\n self.assertNotIn('User', result_kinds)",
"def filter_for_user(self, user):\n query = Q(visibility=Document.PUBLIC) | \\\n Q(visibility=Document.PRIVATE, created_by=user) | \\\n Q(visibility=Document.ORG_ONLY,\n organization__memberships__user=user)\n\n if not user.external:\n query = query | Q(visibility=Document.ORG_ONLY_NO_EXTERNAL,\n organization__memberships__user=user)\n\n return super(DocumentManager, self).get_query_set().filter(query) \\\n .distinct()",
"def get_unused_annotations(graph):\n return graph.defined_annotation_keywords - get_annotations(graph)",
"def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains",
"def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains",
"def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )",
"def create_searchqryset(cls, user, **kwargs):\n return cls._meta.model.published_where_is_examiner(user)",
"def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))",
"def annotations(self):\n annotations = {\"date\": self.date_trunc(\"usage_start\")}\n # { query_param: database_field_name }\n fields = self._mapper.provider_map.get(\"annotations\")\n for q_param, db_field in fields.items():\n annotations[q_param] = F(db_field)\n if (\n \"project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"and:project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"or:project\" in self.parameters.parameters.get(\"group_by\", {})\n ):\n annotations[\"project\"] = F(\"namespace\")\n\n return annotations",
"def anonymous(self, user=None, action=None):\n\n kw = { 'is_anonymous' : True }\n if user:\n kw.update({ 'user' : user })\n if action:\n kw.update({ 'action' : action })\n\n return self.filter(**kw)",
"def test_queryUnkeywordFlag(self):\n self._keywordFilteringTest(\"unkeyword\")",
"async def ign_whois(self, user: discord.Member):\n igns = self.names.get(user.mention)\n if not igns:\n await self.bot.say(\"{0} has not yet entered any IGN info. :cry:\".format(user.mention))\n else:\n await self.bot.say(self.format_igns(user, igns))",
"def get_query_without_request(obj_type, username, searchTerm, search_type=\"global\"):\n from crits.core.handlers import gen_global_query\n \n query = {}\n response = {}\n qdict = gen_global_query(obj_type, username, searchTerm, search_type, force_full=False)\n if not qdict.get('success', True):\n if qdict.get('ignore', False):\n response['Result'] = \"IGNORE\"\n else:\n response['Result'] = \"ERROR\"\n response['Message'] = qdict.get('error', 'Unable to process query')\n return response\n query.update(qdict)\n results = {}\n results['Result'] = \"OK\"\n results['query'] = query\n results['term'] = searchTerm\n return results",
"def _restricted_search_mentions(val: str):\n try:\n val = str(val)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n if not val.startswith('@'):\n return '@' + val\n return val",
"def ncbi_egquery(keyword):\n\tif request.args.has_key('callback'):\n\t\tkwargs = {\"term\": keyword}\n\t\tprint \"term:\",keyword\n\t\treturn EUtils.egquery(request.args['callback'], request.args['email'], **kwargs)\n\telse:\n\t\tkwargs = {\"term\": keyword}\n\t\treturn EUtils.egquery(None, ENTREZ_EMAIL, **kwargs)",
"def show_nan(df):\n nan_df = df[(~df['tweet_user_location'].str.lower().isin(\n [x.lower() for x in LOCATION_DISCARD])) & df['geonameid'].isnull()]\n print(f'Number of NaNs: {len(nan_df.index)}')\n return nan_df",
"def username_not_starts_with(self, username_not_starts_with):\n\n self._username_not_starts_with = username_not_starts_with",
"def username_not_starts_with(self, username_not_starts_with):\n\n self._username_not_starts_with = username_not_starts_with",
"def filter_is_not_null(self, queryobject):\n raise NotImplementedError()"
]
| [
"0.7912646",
"0.73360676",
"0.66646075",
"0.637159",
"0.59995407",
"0.5719342",
"0.5719342",
"0.5572679",
"0.52060187",
"0.51259637",
"0.509457",
"0.5075936",
"0.50047857",
"0.49667862",
"0.4957798",
"0.4957798",
"0.484385",
"0.48353693",
"0.47016844",
"0.46956438",
"0.46931598",
"0.4687918",
"0.4675372",
"0.46591747",
"0.46332934",
"0.46152598",
"0.4598088",
"0.4573694",
"0.4573694",
"0.4571756"
]
| 0.83199334 | 0 |
Check whether a string is a pangram or not | def is_pangram(string):
a_pos = ord('a')
letters = [0] * 26
for char in string:
if char.isalpha():
letters[ord(char.lower()) - a_pos] += 1
return all(letters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_pangram(sentence):\n\n result = set()\n \n for char in sentence:\n\n if char.lower().isalpha():\n\n result.add(char.lower())\n\n\n if len(result) == 26:\n\n return True\n\n else:\n\n return False",
"def is_pangram(sentence):\n\n alpha = set()\n is_alpha = False\n\n for character in sentence:\n alpha.add(character)\n\n if len(alpha) == 26:\n is_alpha = True\n\n return is_alpha",
"def is_pangram(sentence):\n\n list = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p',\\\n 'q','r','s','t','u','v','x','z']\n count = 0\n sentence = sentence.lower()\n\n for i in range(0, len(list)):\n for j in sentence:\n if list[i] == j:\n count = count + 1\n list[i] = '>'\n\n if count == 24:\n return True\n\n elif count < 24 or count > 24:\n return False",
"def is_palindrome_ingoring_case_and_non_letter_chars(text):",
"def verificar_pangrama(cadena):\n for i in range(len(ascii_lowercase)):\n if ascii_lowercase[i] in cadena.lower():\n continue\n else:\n return False\n return True",
"def is_valid_ngram(ngram):\n for char in ngram:\n if char not in LETTERS:\n return False\n\n return True",
"def is_palindromic(phrase):\n\n val = str(phrase).lower().replace(\" \", \"\")\n if val == val[::-1]: # Reverse order\n return True\n else:\n return False",
"def is_palindrome(word: str) -> bool:\n\n # Todo\n return False",
"def is_palindrome(phrase):\n # remove spaces\n phrase = phrase.replace(' ', '').lower()\n\n # reverse phrase\n ans = ''\n for index in range(len(phrase)-1, -1, -1):\n ans += phrase[index]\n\n return True if ans == phrase else False",
"def string_palidrome(word):\n if word == string_reverse(word):\n return True\n else:\n return False",
"def is_pandigital_str(s):\n tot = 0\n zer = ord('0')\n for c in [c for c in s if c.isdigit()]:\n tot |= (1<<(ord(c) - zer))\n return tot == (1<<10)-2",
"def password_validator(password):\n if list(PUNCTUATIONS) in password:\n \"\"\"\n >>> list(string.punctuation)\n ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.',\n '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`',\n '{', '|', '}', '~']\n >>>\n \"\"\"\n return False\n else:\n return True",
"def valid_anagram(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(''.join(sorted(series_of_words.pop())))\n for word in series_of_words:\n word = ''.join(sorted(word))\n if word in words:\n return False\n words.append(word)\n return True",
"def isPalindrome(self, s: str) -> bool:\n left, right = 0, len(s) - 1\n while left < right:\n while left < right and not s[left].isalnum(): # 注意while loop的嵌套\n left += 1\n while left < right and not s[right].isalnum():\n right -= 1\n if s[left].lower() != s[right].lower():\n return False\n left += 1\n right -= 1\n return True",
"def is_palindrome(s):\n\n def to_chars(s):\n s = s.lower()\n letters = ''\n for char in s:\n if char in 'abcdefghijklmnopqrstuvwxyz':\n letters += char\n return letters\n\n def is_pal(s):\n if len(s) <= 1:\n return True\n else:\n return s[0] == s[-1] and is_pal(s[1:-1])\n\n return is_pal(to_chars(s))",
"def is_permutation_palindrome(str):\n for s in permutations(str): # loop through all permutations of str\n if is_palindrome(s):\n return True # successfully find a palindrome permutation\n return False # reach this, then no possible permutation is palindrome",
"def is_english(s):\n return s.isascii()",
"def is_well_formed_gtp_word(s):\n if not isinstance(s, str):\n return False\n if not _gtp_word_characters_re.search(s):\n return False\n return True",
"def is_anagram_of_palindrome(word):\n # palindrome has either exactly 2 of each letter in the word\n # or two of each letter revolving around one in the middle\n # An anagram rescrambles the letters\n chars = []\n\n # loop over the word\n # append chars to the list\n # if we see the char in list again, remove it.\n # if there is only one char or no chars in list\n # return True\n # else, return false\n\n for char in word:\n if char in chars:\n chars.remove(char)\n else:\n chars.append(char)\n if len(chars) >= 2:\n return False\n else:\n return True",
"def isValid(text):\n\n\n return any(word in text for word in [u\"我好看么\", u\"称赞\"])",
"def isPalindrome(self, s: str) -> bool:\n if not s:\n return True\n # process the string\n s_processed = \"\"\n for i in s:\n if i.isalnum():\n s_processed += i.lower()\n # Check if palindrome\n return s_processed == s_processed[::-1] # String[::-1], O(n)",
"def is_palindrome3(word):\n\n i = 0\n j = -1\n\n word = word.lower()\n\n if not word.isalnum():\n word = ''.join(character for character in word if character.isalnum())\n\n if word == \"\":\n return True\n\n while len(word) > 1:\n\n if word[i] == word[j]:\n\n i += 1\n j += 1\n\n else:\n return False\n\n return True",
"def alphanumeric(s: str) -> bool:\n return len(re.findall(r'[^A-Za-z0-9]', s)) == 0",
"def isPalindrome(s):\r\n return isPal(toChars(s))",
"def isPalindrome(word):\n\n input_str = IGNORE_NON_ALPHA_CHARACTER.sub(\"\", str(word)).casefold()\n return input_str == input_str[::-1]",
"def is_palindrome(text):\n\n # Property of a palindrome:\n # There be a maximum of only one letter that sums to an odd number\n \n char_count = {}\n # edge cases\n # Consider empty text as palindrome\n \n for char in text:\n if char in char_count:\n char_count[char] += 1\n else:\n char_count[char] = 1\n \n odd_count = 0\n for count in char_count.values():\n if count % 2 == 1:\n odd_count += 1\n if odd_count > 1:\n return False\n \n return True",
"def is_american_english_term(word: str) -> bool:\n word = process_word(word)\n return word in AMERICAN_ENGLISH_ONLY_TERMS",
"def is_english (self, testing_string): \n try:\n self.testing_string.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n return False\n else:\n return True",
"def is_palindrome(string):\n return",
"def is_palindrome2(word):\n\n word = word.lower()\n\n if not word.isalnum():\n word = ''.join(character for character in word if character.isalnum())\n\n if word == \"\":\n return True\n\n while len(word) > 1:\n\n if word[0] == word[-1]:\n word = word[1:-1]\n\n else:\n return False\n\n return True"
]
| [
"0.765777",
"0.76229423",
"0.7067474",
"0.7052863",
"0.70356065",
"0.67905444",
"0.6626908",
"0.6356645",
"0.6289239",
"0.6203266",
"0.61825436",
"0.6097621",
"0.606496",
"0.6038649",
"0.600517",
"0.6004086",
"0.59945667",
"0.5993878",
"0.59906876",
"0.59893674",
"0.5980515",
"0.596226",
"0.5959494",
"0.5942759",
"0.5935181",
"0.59349775",
"0.5924281",
"0.5872953",
"0.58723634",
"0.5871506"
]
| 0.83000135 | 0 |
Test instructor creation with minimal data (email, password, role, birthday) | def test_create_instructor(self):
response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Instructor.objects.count(), self.qty + 1)
self.assertTrue(User.objects.filter(email=self.payload['email']).exists())
self.assertTrue(User.objects.filter(username=self.payload['email']).exists())
user_id = User.objects.get(username=self.payload['email']).id
self.assertTrue(Instructor.objects.filter(user_id=user_id).exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_automatic_default_public_username_role_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": \"Instructor\",\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def setUp(self):\r\n instructor = AdminFactory.create()\r\n self.client.login(username=instructor.username, password='test')\r\n self.student = UserFactory.create(username='test', email='[email protected]')\r\n self.course = CourseFactory.create()\r\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)",
"def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')",
"def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_automatic_default_public_username_role_administrator_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": (\n \"Administrator,Instructor,urn:lti:sysrole:ims/lis/Administrator,\"\n \"urn:lti:instrole:ims/lis/Administrator\"\n ),\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_fail_add_instructor():\n assert add_instructor('sophia', 'smith', 'student') == False",
"def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"def test_create_instructor_twice(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)",
"def test_openedx_studio_launch_request_existing_user_instructor_admin_empty_username(\n self,\n ):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n role = random.choice([\"Instructor\", \"Administrator\"])\n public_username = (\n \"Educational team\" if role == \"Instructor\" else \"Administrator\"\n )\n params = {\n \"context_id\": \"course-v1:TEST1+0001+2020_T1\",\n \"context_label\": \"TEST1\",\n \"context_title\": \"test course 1\",\n \"custom_component_display_name\": \"Forum\",\n \"launch_presentation_return_url\": \"\",\n \"lis_result_sourcedid\": \"course-v1%3ATEST1%2B0001%2B2020_T1:-c7b2c44b1d\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"-c7b2c44b1d\",\n \"roles\": role,\n \"user_id\": \"student\",\n }\n # User 1 is using ashley from openedx studio in the course \"TEST1\"\n user1 = self._authenticate(params, passport)\n\n # A new ashley user should have been created\n self.assertEqual(1, get_user_model().objects.count())\n self.assertEqual(public_username, user1.public_username)\n\n # We set public_username to an empty value for the test\n user1.public_username = \"\"\n user1.save()\n self.assertEqual(\"\", user1.public_username)\n # Authenticate with the same params\n user1 = self._authenticate(params, passport)\n\n # No new user have been created\n self.assertEqual(1, get_user_model().objects.count())\n # Confirm that public_username is reset to the default value\n self.assertEqual(public_username, user1.public_username)",
"def test_init(self):\n self.assertEqual(self.new_user.first_name, \"Danlon\")\n self.assertEqual(self.new_user.last_name, \"Situma\")\n self.assertEqual(self.new_user.user_name, \"Dasi202\")\n self.assertEqual(self.new_user.password, \"passcode\")",
"def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")",
"def test_create_instructor_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_referred_instructor['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_referred_instructor),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_referred_instructor['email']),\n user_origin=referring_user,\n ).exists())",
"def test_new_user():\n user = User(email = '[email protected]', password = '12345678ba', first_name='Jack',vCancer='YES',vTreatment='YES',vSymptoms='YES',result='50%')\n assert user.email == '[email protected]'\n assert user.password == '12345678ba'\n assert user.first_name == 'Jack'\n assert user.vSymptoms == 'YES'\n assert user.vCancer == 'YES'\n assert user.vTreatment == 'YES'\n assert user.result == '50%'\n print(\"Test passed\")",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def make_instructor(course, user_email):\r\n CourseStaffRole(course.id).add_users(User.objects.get(email=user_email))",
"def setUp(self):\r\n super(TestRawGradeCSV, self).setUp()\r\n\r\n self.instructor = '[email protected]'\r\n self.create_account('u2', self.instructor, self.password)\r\n self.activate_user(self.instructor)\r\n CourseStaffRole(self.course.id).add_users(User.objects.get(email=self.instructor))\r\n self.logout()\r\n self.login(self.instructor, self.password)\r\n self.enroll(self.course)\r\n\r\n # set up a simple course with four problems\r\n self.homework = self.add_graded_section_to_course('homework', late=False, reset=False, showanswer=False)\r\n self.add_dropdown_to_section(self.homework.location, 'p1', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p2', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p3', 1)\r\n self.refresh_course()",
"def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')",
"def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def setUp(self):\n BaseUser.setUp(self)\n self.login()\n new_course = dict(course=\"maths\")\n test_post_request(self, \"/course/new\", new_course, models.Course, 1)",
"def setUp(self):\r\n self.uname = 'testuser'\r\n self.email = '[email protected]'\r\n self.password = 'foo'\r\n\r\n # Create the use so we can log them in.\r\n self.user = User.objects.create_user(self.uname, self.email, self.password)\r\n\r\n # Note that we do not actually need to do anything\r\n # for registration if we directly mark them active.\r\n self.user.is_active = True\r\n # Staff has access to view all courses\r\n self.user.is_staff = True\r\n self.user.save()\r\n\r\n self.course_data = {\r\n 'org': 'MITx',\r\n 'number': '999',\r\n 'display_name': 'Robot Super Course',\r\n }",
"def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def create_instructor(self, username):\r\n return self._create_user(username, is_staff=True)",
"def setUp(self):\n self.user_count = CustomUser.objects.count()\n self.new_student_user = CustomUser.objects.create(**self.Student)\n self.new_instructor_user = CustomUser.objects.create(**self.Instructor)\n self.new_student_user.set_password(\"student12345\")\n self.new_student_user.save()\n self.new_instructor_user.set_password(\"instructor12345\")\n self.new_instructor_user.save()",
"def test_valid_account_create(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident2\")\n form_data = {\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'BamBam',\n 'last_name': 'Rubble',\n 'identification_choice': str(ident_choice.pk),\n }\n response = self.client.post(self.url, form_data, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Please check your email')\n\n # verify username is lowercase\n User = get_user_model()\n u = User.objects.get(email=\"[email protected]\")\n self.assertEqual(u.username, \"[email protected]\")\n self.assertEqual(u.email, \"[email protected]\")\n\n # Ensure developer account does not have a crosswalk entry.\n self.assertEqual(Crosswalk.objects.filter(user=u).exists(), False)\n\n # verify user has identification label chosen\n exist = User.objects.filter(useridentificationlabel__users=u).filter(useridentificationlabel__slug='ident2').exists()\n self.assertEqual(exist, True)",
"def test_create_course(self):\r\n self.assert_created_course()",
"def setUp(self):\r\n uname = 'testuser'\r\n email = '[email protected]'\r\n password = 'foo'\r\n\r\n # Create the use so we can log them in.\r\n self.user = User.objects.create_user(uname, email, password)\r\n\r\n # Note that we do not actually need to do anything\r\n # for registration if we directly mark them active.\r\n self.user.is_active = True\r\n # Staff has access to view all courses\r\n self.user.is_staff = True\r\n self.user.save()\r\n\r\n self.client = AjaxEnabledTestClient()\r\n self.client.login(username=uname, password=password)\r\n\r\n self.course_data = {\r\n 'org': 'MITx',\r\n 'number': '999',\r\n 'display_name': 'Robot Super Course',\r\n 'run': '2013_Spring'\r\n }"
]
| [
"0.77973855",
"0.73921734",
"0.7269152",
"0.72092843",
"0.7193019",
"0.719033",
"0.7080006",
"0.70755994",
"0.69094527",
"0.6894123",
"0.68893015",
"0.67820835",
"0.67605054",
"0.6695033",
"0.6694981",
"0.6692216",
"0.6692216",
"0.6692216",
"0.6691131",
"0.66754854",
"0.6644642",
"0.65784174",
"0.65601915",
"0.6554544",
"0.6534889",
"0.6533424",
"0.6521904",
"0.6519248",
"0.6512825",
"0.65110326"
]
| 0.8086992 | 0 |
Test instructor creation with complete data (email, password, role, birthday, gender) | def test_create_instructor_complete_data(self):
response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Instructor.objects.count(), self.qty + 1)
self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())
self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())
user_id = User.objects.get(username=self.payload_all['email']).id
self.assertTrue(Instructor.objects.filter(user_id=user_id).exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def setUp(self):\r\n instructor = AdminFactory.create()\r\n self.client.login(username=instructor.username, password='test')\r\n self.student = UserFactory.create(username='test', email='[email protected]')\r\n self.course = CourseFactory.create()\r\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)",
"def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')",
"def test_automatic_default_public_username_role_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": \"Instructor\",\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"def test_automatic_default_public_username_role_administrator_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": (\n \"Administrator,Instructor,urn:lti:sysrole:ims/lis/Administrator,\"\n \"urn:lti:instrole:ims/lis/Administrator\"\n ),\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def make_instructor(course, user_email):\r\n CourseStaffRole(course.id).add_users(User.objects.get(email=user_email))",
"def test_create_instructor_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_referred_instructor['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_referred_instructor),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_referred_instructor['email']),\n user_origin=referring_user,\n ).exists())",
"def test_fail_add_instructor():\n assert add_instructor('sophia', 'smith', 'student') == False",
"def setUp(self):\r\n super(TestRawGradeCSV, self).setUp()\r\n\r\n self.instructor = '[email protected]'\r\n self.create_account('u2', self.instructor, self.password)\r\n self.activate_user(self.instructor)\r\n CourseStaffRole(self.course.id).add_users(User.objects.get(email=self.instructor))\r\n self.logout()\r\n self.login(self.instructor, self.password)\r\n self.enroll(self.course)\r\n\r\n # set up a simple course with four problems\r\n self.homework = self.add_graded_section_to_course('homework', late=False, reset=False, showanswer=False)\r\n self.add_dropdown_to_section(self.homework.location, 'p1', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p2', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p3', 1)\r\n self.refresh_course()",
"def test_create_instructor_twice(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)",
"def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_new_user():\n user = User(email = '[email protected]', password = '12345678ba', first_name='Jack',vCancer='YES',vTreatment='YES',vSymptoms='YES',result='50%')\n assert user.email == '[email protected]'\n assert user.password == '12345678ba'\n assert user.first_name == 'Jack'\n assert user.vSymptoms == 'YES'\n assert user.vCancer == 'YES'\n assert user.vTreatment == 'YES'\n assert user.result == '50%'\n print(\"Test passed\")",
"def setUp(self):\r\n self.uname = 'testuser'\r\n self.email = '[email protected]'\r\n self.password = 'foo'\r\n\r\n # Create the use so we can log them in.\r\n self.user = User.objects.create_user(self.uname, self.email, self.password)\r\n\r\n # Note that we do not actually need to do anything\r\n # for registration if we directly mark them active.\r\n self.user.is_active = True\r\n # Staff has access to view all courses\r\n self.user.is_staff = True\r\n self.user.save()\r\n\r\n self.course_data = {\r\n 'org': 'MITx',\r\n 'number': '999',\r\n 'display_name': 'Robot Super Course',\r\n }",
"def setUp(self):\n BaseUser.setUp(self)\n self.login()\n new_course = dict(course=\"maths\")\n test_post_request(self, \"/course/new\", new_course, models.Course, 1)",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def setUp(self):\r\n uname = 'testuser'\r\n email = '[email protected]'\r\n password = 'foo'\r\n\r\n # Create the use so we can log them in.\r\n self.user = User.objects.create_user(uname, email, password)\r\n\r\n # Note that we do not actually need to do anything\r\n # for registration if we directly mark them active.\r\n self.user.is_active = True\r\n # Staff has access to view all courses\r\n self.user.is_staff = True\r\n self.user.save()\r\n\r\n self.client = AjaxEnabledTestClient()\r\n self.client.login(username=uname, password=password)\r\n\r\n self.course_data = {\r\n 'org': 'MITx',\r\n 'number': '999',\r\n 'display_name': 'Robot Super Course',\r\n 'run': '2013_Spring'\r\n }",
"def create_instructor(self, username):\r\n return self._create_user(username, is_staff=True)",
"def test_init(self):\n self.assertEqual(self.new_user.first_name, \"Danlon\")\n self.assertEqual(self.new_user.last_name, \"Situma\")\n self.assertEqual(self.new_user.user_name, \"Dasi202\")\n self.assertEqual(self.new_user.password, \"passcode\")",
"def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")",
"def setUp(self):\n self.user_count = CustomUser.objects.count()\n self.new_student_user = CustomUser.objects.create(**self.Student)\n self.new_instructor_user = CustomUser.objects.create(**self.Instructor)\n self.new_student_user.set_password(\"student12345\")\n self.new_student_user.save()\n self.new_instructor_user.set_password(\"instructor12345\")\n self.new_instructor_user.save()",
"def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_valid_account_create(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident2\")\n form_data = {\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'BamBam',\n 'last_name': 'Rubble',\n 'identification_choice': str(ident_choice.pk),\n }\n response = self.client.post(self.url, form_data, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Please check your email')\n\n # verify username is lowercase\n User = get_user_model()\n u = User.objects.get(email=\"[email protected]\")\n self.assertEqual(u.username, \"[email protected]\")\n self.assertEqual(u.email, \"[email protected]\")\n\n # Ensure developer account does not have a crosswalk entry.\n self.assertEqual(Crosswalk.objects.filter(user=u).exists(), False)\n\n # verify user has identification label chosen\n exist = User.objects.filter(useridentificationlabel__users=u).filter(useridentificationlabel__slug='ident2').exists()\n self.assertEqual(exist, True)",
"def setUp(self):\r\n\r\n # Get the Flask test client\r\n self.client = app.test_client()\r\n app.config['TESTING'] = True\r\n\r\n # Connect to test database\r\n connect_to_db(app, \"postgresql:///test_db\")\r\n\r\n # Create tables and add sample data\r\n db.create_all()\r\n \r\n self.user = crud.create_user(email='[email protected]', password = 'K9#n*Hs73', fname = 'Mary', lname = 'Crews', job = 'Night Auditor',\r\n current_location = 'Florida', place_of_birth = 'Iowa', dob ='1977-11-03', isAdmin =False)",
"def test_create_new_student_user(self):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n 'name': \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertEqual(1, len(activation_token))",
"def setUp(self):\n self.college = self.setup_college()\n self.university = self.setup_university()\n self.faculty = self.setup_faculty()\n self.url = reverse('account:user-register')\n self.login_url = reverse('account:user-login')\n self.education = EducationSerializer(self.setup_education()).data\n self.data = {\n \"username\": \"testUser\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Test\",\n \"last_name\": \"User\",\n \"password\": \"1234\",\n \"confirm_password\": \"1234\",\n \"profile\": {\n \"user\": 1,\n \"contact_number\": \"9860476499\",\n \"address\": \"kapan\",\n \"education\": self.education\n }\n }\n self.response = self.client.post(self.url, data=self.data, format='json')\n user = User.objects.get()\n user.is_active = True\n user.save()\n self.login_data = {\n \"email\": \"[email protected]\",\n \"password\": \"1234\"\n }"
]
| [
"0.805841",
"0.7266817",
"0.71541744",
"0.705944",
"0.68980414",
"0.6877036",
"0.68710303",
"0.68508226",
"0.6850209",
"0.6784927",
"0.6781266",
"0.67804754",
"0.6769502",
"0.6686649",
"0.6650992",
"0.66394365",
"0.6631385",
"0.6631385",
"0.6631385",
"0.6592075",
"0.65870756",
"0.6576392",
"0.65647405",
"0.65541255",
"0.65422755",
"0.6534895",
"0.65119946",
"0.651022",
"0.6486349",
"0.6483575"
]
| 0.786218 | 1 |
Test instructor creation twice (same data) | def test_create_instructor_twice(self):
response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Instructor.objects.count(), self.qty + 1)
self.assertEqual(User.objects.count(), self.qty_users + 1)
response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Instructor.objects.count(), self.qty + 1)
self.assertEqual(User.objects.count(), self.qty_users + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')",
"def test_fail_add_instructor():\n assert add_instructor('sophia', 'smith', 'student') == False",
"def setUp(self):\r\n instructor = AdminFactory.create()\r\n self.client.login(username=instructor.username, password='test')\r\n self.student = UserFactory.create(username='test', email='[email protected]')\r\n self.course = CourseFactory.create()\r\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)",
"def setUp(self):\n super().setUp()\n\n # create course with cohorts\n self.manual_cohort_name = \"ManualCohort1\"\n self.auto_cohort_name = \"AutoCohort1\"\n self.course_fixture = CourseFixture(**self.course_info).install()\n self.setup_cohort_config(self.course_fixture, auto_cohort_groups=[self.auto_cohort_name])\n self.manual_cohort_id = self.add_manual_cohort(self.course_fixture, self.manual_cohort_name)\n\n # create a non-instructor who will be registered for the course and in the manual cohort.\n self.student_name, self.student_email = self._generate_unique_user_data()\n self.student_id = AutoAuthPage(\n self.browser, username=self.student_name, email=self.student_email,\n course_id=self.course_id, staff=False\n ).visit().get_user_id()\n self.add_user_to_cohort(self.course_fixture, self.student_name, self.manual_cohort_id)\n\n # create a second student user\n self.other_student_name, self.other_student_email = self._generate_unique_user_data()\n self.other_student_id = AutoAuthPage(\n self.browser, username=self.other_student_name, email=self.other_student_email,\n course_id=self.course_id, staff=False\n ).visit().get_user_id()\n\n # login as an instructor\n self.instructor_name, self.instructor_email = self._generate_unique_user_data()\n self.instructor_id = AutoAuthPage(\n self.browser, username=self.instructor_name, email=self.instructor_email,\n course_id=self.course_id, staff=True\n ).visit().get_user_id()\n\n # go to the membership page on the instructor dashboard\n self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)\n self.instructor_dashboard_page.visit()\n self.cohort_management_page = self.instructor_dashboard_page.select_cohort_management()",
"def test_instructors(has_page):\n faculty_names = [\"Teacher One\", \"Teacher Two\"]\n course_run = CourseRunFactory.create(course__page=None)\n if has_page:\n course_page = CoursePageFactory.create(course=course_run.course)\n FacultyMembersPageFactory.create(\n parent=course_page,\n **{\n f\"members__{idx}__member__name\": name\n for idx, name in enumerate(faculty_names)\n },\n )\n\n assert course_run.instructors == (\n [{\"name\": name} for name in faculty_names] if has_page else []\n )",
"def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_automatic_default_public_username_role_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": \"Instructor\",\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_user_role_instructor(self):\r\n self.assertEqual(\r\n 'instructor',\r\n access.get_user_role(self.course_instructor, self.course_key)\r\n )\r\n # Masquerade instructor\r\n self.course_instructor.masquerade_as_student = True\r\n self.assertEqual(\r\n 'student',\r\n access.get_user_role(self.course_instructor, self.course_key)\r\n )",
"def test_can_create_multiple_education_instances_for_one_user(self):\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t\tend_date=self.end_date,\n\t\t\tgrade_obtained=self.grade_obtained\n\t\t)\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t\tend_date=self.end_date,\n\t\t\tgrade_obtained=self.grade_obtained\n\t\t)\n\n\t\teducation_instances = Education.objects.all()\n\t\tself.assertEqual(\n\t\t\t2,\n\t\t\teducation_instances.count(),\n\t\t\t'Expected 2 education instances, got {} instead.'.format(education_instances.count())\n\t\t)\n\t\tself.assertEqual(\n\t\t\teducation_instances.first().user,\n\t\t\tself.user,\n\t\t\t'Users don\\'t match'\n\t\t)\n\t\tself.assertEqual(\n\t\t\teducation_instances.last().user,\n\t\t\tself.user,\n\t\t\t'Users don\\'t match'\n\t\t)",
"def test_create_instructor_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_referred_instructor['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_referred_instructor),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_referred_instructor['email']),\n user_origin=referring_user,\n ).exists())",
"def test_api_document_create_instructor(self):\n playlist = core_factories.PlaylistFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(playlist=playlist)\n\n self.assertEqual(MarkdownDocument.objects.count(), 0)\n\n response = self.client.post(\n \"/api/markdown-documents/\",\n {\n \"lti_id\": \"document_one\",\n \"playlist\": str(playlist.id),\n \"title\": \"Some document\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(MarkdownDocument.objects.count(), 1)\n self.assertEqual(response.status_code, 201)\n document = MarkdownDocument.objects.first()\n self.assertEqual(\n response.json(),\n {\n \"id\": str(document.id),\n \"images\": [],\n \"is_draft\": True,\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"position\": 0,\n \"rendering_options\": {},\n \"translations\": [\n {\n \"content\": \"\",\n \"language_code\": \"en\",\n \"rendered_content\": \"\",\n \"title\": \"Some document\",\n }\n ],\n },\n )",
"def setUp(self):\r\n super(TestRawGradeCSV, self).setUp()\r\n\r\n self.instructor = '[email protected]'\r\n self.create_account('u2', self.instructor, self.password)\r\n self.activate_user(self.instructor)\r\n CourseStaffRole(self.course.id).add_users(User.objects.get(email=self.instructor))\r\n self.logout()\r\n self.login(self.instructor, self.password)\r\n self.enroll(self.course)\r\n\r\n # set up a simple course with four problems\r\n self.homework = self.add_graded_section_to_course('homework', late=False, reset=False, showanswer=False)\r\n self.add_dropdown_to_section(self.homework.location, 'p1', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p2', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p3', 1)\r\n self.refresh_course()",
"def test_automatic_default_public_username_role_administrator_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": (\n \"Administrator,Instructor,urn:lti:sysrole:ims/lis/Administrator,\"\n \"urn:lti:instrole:ims/lis/Administrator\"\n ),\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_get_user_role_instructor(self):\r\n add_users(self.global_admin, CourseInstructorRole(self.course_key), self.instructor)\r\n self.assertEqual(\r\n 'instructor',\r\n get_user_role(self.instructor, self.course_key)\r\n )\r\n add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)\r\n self.assertEqual(\r\n 'instructor',\r\n get_user_role(self.instructor, self.course_key)\r\n )",
"def test_org_instructor_access(self):\r\n self.login(self.org_instructor_user)\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 404, url)",
"def test_api_document_create_instructor_no_title(self):\n playlist = core_factories.PlaylistFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(playlist=playlist)\n\n self.assertEqual(MarkdownDocument.objects.count(), 0)\n\n response = self.client.post(\n \"/api/markdown-documents/\",\n {\n \"lti_id\": \"document_one\",\n \"playlist\": str(playlist.id),\n \"title\": \"\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(MarkdownDocument.objects.count(), 1)\n self.assertEqual(response.status_code, 201)\n document = MarkdownDocument.objects.first()\n self.assertEqual(\n response.json(),\n {\n \"id\": str(document.id),\n \"images\": [],\n \"is_draft\": True,\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"position\": 0,\n \"rendering_options\": {},\n \"translations\": [\n {\n \"content\": \"\",\n \"language_code\": \"en\",\n \"rendered_content\": \"\",\n \"title\": \"\",\n }\n ],\n },\n )",
"def test_create_course(self):\r\n self.assert_created_course()",
"def test_education_instance_created(self):\n\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t\tend_date=self.end_date,\n\t\t\tgrade_obtained=self.grade_obtained\n\t\t)\n\n\t\teducation = Education.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\teducation.user,\n\t\t\t\"Users don't match.\")\n\t\tself.assertEqual(\n\t\t\tself.school_name,\n\t\t\teducation.school_name,\n\t\t\t\"School names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.course_name,\n\t\t\teducation.course_name,\n\t\t\t\"Course names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\teducation.start_date,\n\t\t\t\"Start dates don't match\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.end_date,\n\t\t\teducation.end_date,\n\t\t\t\"End dates don't match\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.grade_obtained,\n\t\t\teducation.grade_obtained,\n\t\t\t\"Grade obtained don't match\"\n\t\t)",
"def __init__(self, *args, **kwargs):\r\n super(UniqueCourseTest, self).__init__(*args, **kwargs)",
"def setUp(self):\n program = program_utils.seedProgram()\n self.profile = profile_utils.seedSOCStudent(program)",
"def test_initialization_of_homework_result_author():\n assert result_1.author == good_student",
"def setUp(self):\r\n self.course = CourseFactory.create(\r\n display_name=self.COURSE_NAME,\r\n number=self.COURSE_NUM\r\n )\r\n self.students = [\r\n UserFactory.create(username='student1'),\r\n UserFactory.create(username='student2'),\r\n UserFactory.create(username='student3'),\r\n UserFactory.create(username='student4'),\r\n UserFactory.create(username='student5'),\r\n ]",
"def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_instructor_assessment(self):\r\n\r\n # Navigate to the AI-assessment problem and submit an essay\r\n # We have configured the stub to simulate that this essay will be staff-graded\r\n self.course_nav.go_to_sequential('AI-Assessed')\r\n self.submit_essay('ai', 'Censorship in the Libraries')\r\n\r\n # Refresh the page to get the updated feedback\r\n # then verify that we get the feedback sent by our stub XQueue implementation\r\n self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])\r\n\r\n # Verify the progress page\r\n self.progress_page.visit()\r\n scores = self.progress_page.scores('Test Section', 'Test Subsection')\r\n\r\n # First score is the self-assessment score, which we haven't answered, so it's 0/2\r\n # Second score is the AI-assessment score, which we have answered, so it's 1/2\r\n # Third score is peer-assessment, which we haven't answered, so it's 0/2\r\n self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])",
"def test_openedx_studio_launch_request_existing_user_instructor_admin_empty_username(\n self,\n ):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n role = random.choice([\"Instructor\", \"Administrator\"])\n public_username = (\n \"Educational team\" if role == \"Instructor\" else \"Administrator\"\n )\n params = {\n \"context_id\": \"course-v1:TEST1+0001+2020_T1\",\n \"context_label\": \"TEST1\",\n \"context_title\": \"test course 1\",\n \"custom_component_display_name\": \"Forum\",\n \"launch_presentation_return_url\": \"\",\n \"lis_result_sourcedid\": \"course-v1%3ATEST1%2B0001%2B2020_T1:-c7b2c44b1d\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"-c7b2c44b1d\",\n \"roles\": role,\n \"user_id\": \"student\",\n }\n # User 1 is using ashley from openedx studio in the course \"TEST1\"\n user1 = self._authenticate(params, passport)\n\n # A new ashley user should have been created\n self.assertEqual(1, get_user_model().objects.count())\n self.assertEqual(public_username, user1.public_username)\n\n # We set public_username to an empty value for the test\n user1.public_username = \"\"\n user1.save()\n self.assertEqual(\"\", user1.public_username)\n # Authenticate with the same params\n user1 = self._authenticate(params, passport)\n\n # No new user have been created\n self.assertEqual(1, get_user_model().objects.count())\n # Confirm that public_username is reset to the default value\n self.assertEqual(public_username, user1.public_username)",
"def test14_add_new_student_with_teacher(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). \\\n enter_student_data(data['third_new_student']).\\\n enter_name_approved_by_custom(data['third_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['third_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)\n return self.students_page",
"def test_initialization_of_teacher_first_name():\n assert opp_teacher.first_name == \"Daniil\"",
"def make_instructor(course, user_email):\r\n CourseStaffRole(course.id).add_users(User.objects.get(email=user_email))"
]
| [
"0.769729",
"0.7488967",
"0.7205441",
"0.71792346",
"0.6808503",
"0.6688776",
"0.667985",
"0.6660535",
"0.6659116",
"0.6634877",
"0.65139055",
"0.650547",
"0.6502173",
"0.6455965",
"0.6444356",
"0.6410022",
"0.6375959",
"0.63648933",
"0.6356673",
"0.6341334",
"0.6340871",
"0.6310494",
"0.6288275",
"0.62824494",
"0.6264759",
"0.6263563",
"0.6261357",
"0.62454027",
"0.62198645",
"0.6212938"
]
| 0.7693801 | 1 |
Test instructor creation with missing birthday info | def test_create_instructor_missing_birthday(self):
response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Instructor.objects.count(), self.qty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_birth_validation(self):",
"def test_create_student_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty)",
"def test_fail_add_instructor():\n assert add_instructor('sophia', 'smith', 'student') == False",
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))",
"def test_init_invalid_birth_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, 'invalid',\n retirement_date=self.retirement_date)",
"def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')",
"def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_education_instance_created_without_required_arguments(self):\n\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t)\n\n\t\teducation = Education.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\teducation.user,\n\t\t\t\"Users don't match.\")\n\n\t\tself.assertEqual(\n\t\t\tself.school_name,\n\t\t\teducation.school_name,\n\t\t\t\"School names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.course_name,\n\t\t\teducation.course_name,\n\t\t\t\"Course names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\teducation.start_date,\n\t\t\t\"Start dates don't match\"\n\t\t)",
"def test_creation_profile_3():\n assert tuple_NT[0][2] == LIST_dict[0]['birthdate'], \"birthdate of profile is not getting stored properly\"",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')",
"def test_education_instance_created(self):\n\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t\tend_date=self.end_date,\n\t\t\tgrade_obtained=self.grade_obtained\n\t\t)\n\n\t\teducation = Education.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\teducation.user,\n\t\t\t\"Users don't match.\")\n\t\tself.assertEqual(\n\t\t\tself.school_name,\n\t\t\teducation.school_name,\n\t\t\t\"School names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.course_name,\n\t\t\teducation.course_name,\n\t\t\t\"Course names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\teducation.start_date,\n\t\t\t\"Start dates don't match\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.end_date,\n\t\t\teducation.end_date,\n\t\t\t\"End dates don't match\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.grade_obtained,\n\t\t\teducation.grade_obtained,\n\t\t\t\"Grade obtained don't match\"\n\t\t)",
"def test_init_basic(self):\n person = Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.retirement_date)\n self.assertEqual(person.name, self.name)\n self.assertEqual(person.birth_date, self.birth_date)\n self.assertEqual(person.retirement_date, self.retirement_date)\n self.assertIsInstance(person.name, str)\n self.assertIsInstance(person.birth_date, datetime)\n self.assertIsInstance(person.retirement_date, datetime)\n self.assertIsNone(person.spouse)\n self.assertIsNone(person.tax_treatment)",
"def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.careers(student_id)\n self.assertFalse(result)",
"def test_create_instructor_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_referred_instructor['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_referred_instructor),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_referred_instructor['email']),\n user_origin=referring_user,\n ).exists())",
"def test_can_not_create_education_instance_without_start_date(self):\n\t\twith self.assertRaises(\n\t\t\tIntegrityError,\n\t\t\tmsg = 'Should raise IntegrityError if start_date not provided.'\n\t\t\t):\n\n\t\t\tEducation.objects.create(\n\t\t\t\tuser=self.user,\n\t\t\t\tschool_name=self.school_name,\n\t\t\t\tcourse_name=self.course_name,\n\t\t\t)",
"def test_required_year_of_birth_missing(self):\r\n self.url_params['year_of_birth'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'Your year of birth is required',\r\n )",
"def test_meeting_registrant_create(self):\n pass",
"def test_createItinerary(self):\n rv = self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = '2015-08-21T00:00:00.000Z'\n ))\n itinHash = str('alex' + \"_\" + '2015-08-21T00:00:00.000Z')\n assert itinHash in str(rv.data)\n\n rv = self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date= '2015-08-21T00:00:00.000Z'\n ))\n assert 'Itinerary date already in use' in str(rv.data)\n\n rv = self.json_post('/createItinerary/bbbb', dict(\n name = 'New Day',\n date= '2015-08-21T00:00:00.000Z'\n ))\n assert 'Invalid username' in str(rv.data)",
"def test_future_birth_date_import():\n _curr_date = datetime.utcnow()\n _later_date = _curr_date + relativedelta(days=1)\n later_date = _later_date.strftime(\"%d.%m.%Y\")\n\n citizen_with_birth_date_later_than_current = deepcopy(CITIZEN_EXAMPLE)\n citizen_with_birth_date_later_than_current[\"birth_date\"] = later_date\n with TestClient(app) as client:\n response = client.post(\n \"/imports\",\n json={\n \"citizens\": [\n citizen_with_birth_date_later_than_current\n ]}\n )\n\n assert response.status_code == 400",
"def test_init_invalid_retire_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date='invalid')",
"def test_date_of_birth(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n }}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)",
"def test_initialization_of_teacher_first_name():\n assert opp_teacher.first_name == \"Daniil\"",
"def test_meeting_create(self):\n pass",
"def test_initialization_of_teacher_last_name():\n assert opp_teacher.last_name == \"Shadrin\"",
"def setUp(self):\n self.initial_year = 2020\n self.name = \"Testy McTesterson\"\n self.birth_date = datetime(2000, 2, 1) # 1 February 2000\n self.retirement_date = datetime(2065, 6, 26) # 26 June 2065\n self.gross_income = Money(100000) # $100000\n self.raise_rate = Decimal(1) # 100%\n self.tax_treatment = Tax(\n {self.initial_year: {\n Money(0): Decimal('0.1'),\n Money(1000): Decimal('0.2'),\n Money(100000): Decimal('0.3')}\n },\n inflation_adjust={\n year: Decimal(1 + (year - self.initial_year) / 16)\n for year in range(self.initial_year, self.initial_year + 100)\n },\n personal_deduction={self.initial_year: Money(100)},\n credit_rate={self.initial_year: Decimal('0.15')})\n self.spouse = Person(\n initial_year=self.initial_year,\n name=\"Spouse\",\n birth_date=1998,\n retirement_date=2063,\n gross_income=Money(50000),\n raise_rate=self.raise_rate,\n spouse=None,\n tax_treatment=self.tax_treatment)\n self.owner = Person(\n initial_year=self.initial_year,\n name=self.name,\n birth_date=self.birth_date,\n retirement_date=self.retirement_date,\n gross_income=self.gross_income,\n raise_rate=self.raise_rate,\n spouse=self.spouse,\n tax_treatment=self.tax_treatment)",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)"
]
| [
"0.7039679",
"0.6868125",
"0.6761741",
"0.6733674",
"0.6697205",
"0.6690531",
"0.6585243",
"0.65621763",
"0.65392435",
"0.6535695",
"0.64939094",
"0.64026636",
"0.6383979",
"0.63214123",
"0.6286624",
"0.6255898",
"0.62543947",
"0.62521315",
"0.6212229",
"0.61938",
"0.6165529",
"0.616431",
"0.61091024",
"0.6081292",
"0.60686004",
"0.6055134",
"0.60448515",
"0.60420567",
"0.6034187",
"0.60146433"
]
| 0.8111842 | 0 |
Test instructor creation with missing role info | def test_create_instructor_missing_role(self):
response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Instructor.objects.count(), self.qty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_automatic_default_public_username_role_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": \"Instructor\",\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_fail_add_instructor():\n assert add_instructor('sophia', 'smith', 'student') == False",
"def test_user_role_instructor(self):\r\n self.assertEqual(\r\n 'instructor',\r\n access.get_user_role(self.course_instructor, self.course_key)\r\n )\r\n # Masquerade instructor\r\n self.course_instructor.masquerade_as_student = True\r\n self.assertEqual(\r\n 'student',\r\n access.get_user_role(self.course_instructor, self.course_key)\r\n )",
"def test_automatic_default_public_username_role_administrator_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": (\n \"Administrator,Instructor,urn:lti:sysrole:ims/lis/Administrator,\"\n \"urn:lti:instrole:ims/lis/Administrator\"\n ),\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)",
"def test_add_role(self):\n pass",
"def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_get_user_role_instructor(self):\r\n add_users(self.global_admin, CourseInstructorRole(self.course_key), self.instructor)\r\n self.assertEqual(\r\n 'instructor',\r\n get_user_role(self.instructor, self.course_key)\r\n )\r\n add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)\r\n self.assertEqual(\r\n 'instructor',\r\n get_user_role(self.instructor, self.course_key)\r\n )",
"def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')",
"def test_ipam_roles_create(self):\n pass",
"def test_add_role_simple(self):\n pass",
"def test_openedx_studio_launch_request_existing_user_instructor_admin_empty_username(\n self,\n ):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n role = random.choice([\"Instructor\", \"Administrator\"])\n public_username = (\n \"Educational team\" if role == \"Instructor\" else \"Administrator\"\n )\n params = {\n \"context_id\": \"course-v1:TEST1+0001+2020_T1\",\n \"context_label\": \"TEST1\",\n \"context_title\": \"test course 1\",\n \"custom_component_display_name\": \"Forum\",\n \"launch_presentation_return_url\": \"\",\n \"lis_result_sourcedid\": \"course-v1%3ATEST1%2B0001%2B2020_T1:-c7b2c44b1d\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"-c7b2c44b1d\",\n \"roles\": role,\n \"user_id\": \"student\",\n }\n # User 1 is using ashley from openedx studio in the course \"TEST1\"\n user1 = self._authenticate(params, passport)\n\n # A new ashley user should have been created\n self.assertEqual(1, get_user_model().objects.count())\n self.assertEqual(public_username, user1.public_username)\n\n # We set public_username to an empty value for the test\n user1.public_username = \"\"\n user1.save()\n self.assertEqual(\"\", user1.public_username)\n # Authenticate with the same params\n user1 = self._authenticate(params, passport)\n\n # No new user have been created\n self.assertEqual(1, get_user_model().objects.count())\n # Confirm that public_username is reset to the default value\n self.assertEqual(public_username, user1.public_username)",
"def test_create_student_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty)",
"def setUp(self):\r\n instructor = AdminFactory.create()\r\n self.client.login(username=instructor.username, password='test')\r\n self.student = UserFactory.create(username='test', email='[email protected]')\r\n self.course = CourseFactory.create()\r\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)",
"def make_instructor(course, user_email):\r\n CourseStaffRole(course.id).add_users(User.objects.get(email=user_email))",
"def test_add_role_simple_post(self):\n pass",
"def test_non_existent_course_role(self):\n self._login_as_staff()\n path = self.path(role='A')\n response = self.client.get(path)\n\n assert response.status_code == 400\n\n response = self.client.post(path)\n assert response.status_code == 400",
"def test_org_instructor_access(self):\r\n self.login(self.org_instructor_user)\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 404, url)",
"def test_unique(self):\n with pytest.raises(IntegrityError):\n CourseAccessRoleAssignmentFactory(\n enrollment=self.program_course_enrollment,\n role=ProgramCourseEnrollmentRoles.COURSE_STAFF,\n )",
"def create_instructor(self, username):\r\n return self._create_user(username, is_staff=True)",
"def test_instructors(has_page):\n faculty_names = [\"Teacher One\", \"Teacher Two\"]\n course_run = CourseRunFactory.create(course__page=None)\n if has_page:\n course_page = CoursePageFactory.create(course=course_run.course)\n FacultyMembersPageFactory.create(\n parent=course_page,\n **{\n f\"members__{idx}__member__name\": name\n for idx, name in enumerate(faculty_names)\n },\n )\n\n assert course_run.instructors == (\n [{\"name\": name} for name in faculty_names] if has_page else []\n )",
"def test_create_course(self):\r\n self.assert_created_course()",
"def test_create_instructor_twice(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)",
"def test_list_role(self):\n pass",
"def test_api_document_create_instructor_no_title(self):\n playlist = core_factories.PlaylistFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(playlist=playlist)\n\n self.assertEqual(MarkdownDocument.objects.count(), 0)\n\n response = self.client.post(\n \"/api/markdown-documents/\",\n {\n \"lti_id\": \"document_one\",\n \"playlist\": str(playlist.id),\n \"title\": \"\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(MarkdownDocument.objects.count(), 1)\n self.assertEqual(response.status_code, 201)\n document = MarkdownDocument.objects.first()\n self.assertEqual(\n response.json(),\n {\n \"id\": str(document.id),\n \"images\": [],\n \"is_draft\": True,\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"position\": 0,\n \"rendering_options\": {},\n \"translations\": [\n {\n \"content\": \"\",\n \"language_code\": \"en\",\n \"rendered_content\": \"\",\n \"title\": \"\",\n }\n ],\n },\n )",
"def test_has_role(self):\n self.make_assignment(self.project, self.user_bob, self.role_contributor)\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))",
"def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))",
"def test_create_cluster_role(self):\n pass",
"def test_read_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n role=INSTRUCTOR,\n organization=self.organization,\n )\n\n self.assert_user_cannot_read(organization_access.user, self.live)",
"def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)"
]
| [
"0.7794621",
"0.7467939",
"0.7449668",
"0.7408569",
"0.7357018",
"0.7348701",
"0.7240623",
"0.7208329",
"0.71653557",
"0.7107521",
"0.705882",
"0.6785346",
"0.67461926",
"0.6733531",
"0.66950613",
"0.66404426",
"0.66112673",
"0.6588831",
"0.65816295",
"0.6567953",
"0.6553653",
"0.6525567",
"0.651887",
"0.6503453",
"0.6423362",
"0.6417708",
"0.6413159",
"0.6404099",
"0.63896996",
"0.6378215"
]
| 0.8201606 | 0 |
Test parent creation with minimal data (email, password, role, birthday) | def test_create_parent(self):
response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Parent.objects.count(), self.qty + 1)
self.assertTrue(User.objects.filter(email=self.payload['email']).exists())
self.assertTrue(User.objects.filter(username=self.payload['email']).exists())
user_id = User.objects.get(username=self.payload['email']).id
self.assertTrue(Parent.objects.filter(user_id=user_id).exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_parent_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def test_create_parent_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty)",
"def test_create_parent_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty)",
"def test_create(self):\n pass",
"def test_create_project_unknown_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': INVALID_UUID,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_create_parent_twice(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)",
"def test_create_project_invalid_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.project.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def test_create_parent_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_referred_parent['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_referred_parent),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_referred_parent['email']),\n user_origin=referring_user,\n ).exists())",
"def test_parent_str(self):\n parent = models.Parent.objects.create(\n user=sample_user(),\n address=\"test add 1\",\n age=34,\n job=\"test 1\"\n )\n self.assertEqual(str(parent), parent.name)",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create__normal_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': False, 'isSiteEditor': False}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertFalse(actual_json['is_site_editor'])\n self.assertFalse(actual_json['is_admin'])\n\n new_appuser = (user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get())\n result_email = new_appuser.email\n result_is_admin = new_appuser.is_admin\n new_appuser.key.delete()\n self.assertEqual('[email protected]', result_email)\n self.assertFalse(result_is_admin)",
"def test_lacking_parent(self):\n pass",
"def test_create_record(self):\n pass",
"def test_validate_parent(self):\n with self.assertRaises(ValidationError):\n self.project.parent = self.project\n self.project.save()",
"def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1",
"def test_init(self):\n self.assertTrue(self.new_user.profile.bio == \"Hi!\")",
"def test_creating_new_patient(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n patient_id = create_new_patient_account(form_data)\n\n self.assertEqual(3, patient_id)",
"def test_create_another_parent_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_another_referred_parent['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_another_referred_parent),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_another_referred_parent['email']),\n user_origin=referring_user,\n ).exists())",
"def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")",
"def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )",
"def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')",
"def test_0_0_create(self):\n\n self.assertTrue(self.b1)",
"def test_client_create(self):\n pass",
"def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)",
"def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)",
"def test_create(self):\n\n res = self.metadata.create_or_update(data=self.create)\n\n self.assertEqual(res.name, self.entity.name)\n self.assertEqual(res.service.id, self.entity.service.id)\n self.assertEqual(res.owner, None)",
"def test_createperson(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p.id, p2.id)\n self.assertEqual(p.firstname, p2.firstname)\n self.assertEqual(p.lastname, p2.lastname)\n self.assertEqual(p.email, p2.email)\n self.assertEqual(p.hobbies, p2.hobbies)"
]
| [
"0.7717782",
"0.7016609",
"0.6893099",
"0.67854035",
"0.67156076",
"0.668006",
"0.6613055",
"0.66108304",
"0.65245146",
"0.65000707",
"0.64178497",
"0.64178497",
"0.64178497",
"0.6329158",
"0.63214636",
"0.6318875",
"0.62952214",
"0.6155355",
"0.61476",
"0.61422133",
"0.6121838",
"0.61025697",
"0.6055645",
"0.60486346",
"0.6048584",
"0.60476434",
"0.6042903",
"0.60340494",
"0.6006656",
"0.59988785"
]
| 0.8012531 | 0 |
Test parent creation with complete data (email, password, role, birthday, gender) | def test_create_parent_complete_data(self):
response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Parent.objects.count(), self.qty + 1)
self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())
self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())
user_id = User.objects.get(username=self.payload_all['email']).id
self.assertTrue(Parent.objects.filter(user_id=user_id).exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def test_create(self):\n pass",
"def test_create_parent_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_referred_parent['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_referred_parent),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_referred_parent['email']),\n user_origin=referring_user,\n ).exists())",
"def test_create_parent_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty)",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def test_create_parent_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty)",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_parent_twice(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)",
"def test_creating_new_patient(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n patient_id = create_new_patient_account(form_data)\n\n self.assertEqual(3, patient_id)",
"def test_parent_str(self):\n parent = models.Parent.objects.create(\n user=sample_user(),\n address=\"test add 1\",\n age=34,\n job=\"test 1\"\n )\n self.assertEqual(str(parent), parent.name)",
"def test_create_another_parent_referred(self):\n referring_user = User.objects.get(email='[email protected]')\n self.payload_another_referred_parent['referringCode'] = referring_user.referral_token\n response = self.client.post('{}/v1/register/'.format(settings.HOSTNAME_PROTOCOL),\n data=json.dumps(self.payload_another_referred_parent),\n content_type='application/json',)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertTrue(UserBenefits.objects.filter(\n user=User.objects.get(email=self.payload_another_referred_parent['email']),\n user_origin=referring_user,\n ).exists())",
"def test_create_project_unknown_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': INVALID_UUID,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_create_record(self):\n pass",
"def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1",
"def test_create__normal_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': False, 'isSiteEditor': False}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertFalse(actual_json['is_site_editor'])\n self.assertFalse(actual_json['is_admin'])\n\n new_appuser = (user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get())\n result_email = new_appuser.email\n result_is_admin = new_appuser.is_admin\n new_appuser.key.delete()\n self.assertEqual('[email protected]', result_email)\n self.assertFalse(result_is_admin)",
"def test_init(self):\n self.assertTrue(self.new_user.profile.bio == \"Hi!\")",
"def test_create_project_invalid_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.project.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"def create(self, validated_data):",
"def create_person(self):",
"def test_creating_new_dietitian(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n dietitian_id = create_new_dietitian_account(form_data)\n\n self.assertEqual(2, dietitian_id)",
"def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")",
"def test_create(km_user_factory):\n models.Profile.objects.create(\n is_private=True, km_user=km_user_factory(), name=\"My Profile\"\n )",
"def setUpTestData(cls):\n cls.test_resource = Resource(name='Test', slug='test', description='')\n cls.test_resource.full_clean()\n cls.test_resource.save()\n cls.test_faculty = Faculty(name='Test', slug='test')\n cls.test_faculty.full_clean()\n cls.test_faculty.save()\n cls.test_department = Department(name='Test', slug='test', faculty=cls.test_faculty)\n cls.test_department.full_clean()\n cls.test_department.save()\n cls.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=cls.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n cls.test_agreement.full_clean()\n cls.test_agreement.save()\n cls.test_user = get_user_model().objects.create_user(username='test',\n first_name='test',\n last_name='test',\n email='[email protected]',\n password='testtesttest')",
"def create(self):\n ...",
"def test_createperson(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p.id, p2.id)\n self.assertEqual(p.firstname, p2.firstname)\n self.assertEqual(p.lastname, p2.lastname)\n self.assertEqual(p.email, p2.email)\n self.assertEqual(p.hobbies, p2.hobbies)",
"def setUp(self):\n #Inheriting the base class functionality\n super(CreateTaskAPITestCase, self).setUp()\n # Create the org using serializer\n create_org_data = {\n 'name': 'Ecell NITRR Open Source',\n 'tagline': 'We love open source.'\n }\n serializer = CreateOrgSerializer(data=create_org_data)\n if serializer.is_valid():\n self.org = serializer.save()[0]",
"def test_adding_new_patient(self):\n\n data = {\"dietitian_id\": 1, \"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n result = self.client.post(\"/patient/new-patient\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"registered new patient\", result.data)\n\n data = {\"dietitian_id\": 1, \"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n result = self.client.post(\"/patient/new-patient\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"email address already exists\", result.data)"
]
| [
"0.78743446",
"0.6740956",
"0.6684322",
"0.6632757",
"0.6584399",
"0.6516206",
"0.6486546",
"0.6486546",
"0.6486546",
"0.644505",
"0.6380854",
"0.6376411",
"0.6320839",
"0.63088953",
"0.62781596",
"0.62768555",
"0.62551355",
"0.6219303",
"0.62023044",
"0.6163518",
"0.61479264",
"0.613246",
"0.61288244",
"0.607802",
"0.6041339",
"0.6034564",
"0.6033799",
"0.60295236",
"0.60191685",
"0.60130703"
]
| 0.76904285 | 1 |
Test parent creation twice (same data) | def test_create_parent_twice(self):
response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Parent.objects.count(), self.qty + 1)
self.assertEqual(User.objects.count(), self.qty_users + 1)
response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Parent.objects.count(), self.qty + 1)
self.assertEqual(User.objects.count(), self.qty_users + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lacking_parent(self):\n pass",
"def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )",
"def test_create_parent_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def test_create_parented_item(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='GreekHero', branch='draft'),\r\n 'chapter', block_id='chapter2'\r\n )\r\n original = modulestore().get_item(locator)\r\n\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='wonderful', branch='draft'), 'course', 'head23456'\r\n )\r\n premod_course = modulestore().get_course(locator.course_key)\r\n category = 'chapter'\r\n new_module = modulestore().create_item(\r\n locator, category, 'user123',\r\n fields={'display_name': 'new chapter'},\r\n definition_locator=original.definition_locator\r\n )\r\n # check that course version changed and course's previous is the other one\r\n self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)\r\n parent = modulestore().get_item(locator)\r\n self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertEqual(new_module.definition_locator.definition_id, original.definition_locator.definition_id)",
"def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )",
"def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )",
"def testParentage(self):\n self.assertEqual(\n self.cd,\n self.media_ref.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )",
"def test_validate_parent(self):\n with self.assertRaises(ValidationError):\n self.project.parent = self.project\n self.project.save()",
"def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)",
"def test_create_project_unknown_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': INVALID_UUID,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def testSetParent(self):\n for child in self.color_corrections + self.color_decisions:\n self.assertEqual(\n None,\n child.parent\n )\n\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )\n child.parent = 'banana'\n self.assertEqual(\n 'banana',\n child.parent\n )\n\n self.node.set_parentage()\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )",
"def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)",
"def test_create_project_invalid_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.project.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_set_category_parent(self):\n pass",
"def test_0_0_create(self):\n\n self.assertTrue(self.b1)",
"def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')",
"def test_add_parent(self):\n _, _, groupa, groupb = create_objects()\n groupa.add_parent(groupb)\n assert groupb in groupa.parents\n assert groupa in groupb.children\n return (groupa, groupb)",
"def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)",
"def testSetParentage(self):\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )\n\n self.cc.parent = 'bob'\n self.media_ref.parent = 'joe'\n\n self.cd.set_parentage()\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )",
"def test_parent_str(self):\n parent = models.Parent.objects.create(\n user=sample_user(),\n address=\"test add 1\",\n age=34,\n job=\"test 1\"\n )\n self.assertEqual(str(parent), parent.name)",
"def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)",
"def test_create(self):\n pass",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def test_create_parent_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty)",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )"
]
| [
"0.7327622",
"0.7291284",
"0.71750873",
"0.70575345",
"0.683957",
"0.68384427",
"0.68384427",
"0.6751094",
"0.6615504",
"0.66008794",
"0.65949416",
"0.65104157",
"0.65016395",
"0.64667857",
"0.63932925",
"0.63917375",
"0.6368658",
"0.63437235",
"0.6332753",
"0.6326015",
"0.6301355",
"0.62887424",
"0.6286573",
"0.6278083",
"0.62713414",
"0.62468904",
"0.62468904",
"0.62468904",
"0.62468904",
"0.62468904"
]
| 0.7512109 | 0 |
Test parent creation with missing birthday info | def test_create_parent_missing_birthday(self):
response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Parent.objects.count(), self.qty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_date_of_birth(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n }}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)",
"def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))",
"def test_create_student_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty)",
"def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_init_invalid_birth_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, 'invalid',\n retirement_date=self.retirement_date)",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def test_create_project_unknown_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': INVALID_UUID,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_lacking_parent(self):\n pass",
"def test_create_parent_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def test_birth_validation(self):",
"def test_validate_parent(self):\n with self.assertRaises(ValidationError):\n self.project.parent = self.project\n self.project.save()",
"def test_init_invalid_retire_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date='invalid')",
"def test_create_project_invalid_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.project.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')",
"def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')",
"def test_create_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)",
"def test_meeting_registrant_create(self):\n pass",
"def test_new_object(self):\n now = datetime.now()\n eod = end_of_day(now)\n self.assertNotEqual(now, eod)",
"def test_basecreated(self):\n self.assertEqual(datetime, type(BaseModel().created_at))",
"def testParentage(self):\n self.assertEqual(\n self.cd,\n self.media_ref.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )",
"def test_parent_update_child(self):\n today = datetime.date.today()\n business = BUSINESS_FACTORY.create_business()\n slot = Slot.objects.create(site_id=2, business_id=business.id,\n start_date=today, end_date=today + datetime.timedelta(1))\n child_slot = Slot.objects.create(site_id=2, business_id=business.id,\n start_date=today, end_date=today + datetime.timedelta(1),\n parent_slot=slot)\n slot.end_date = today + datetime.timedelta(2)\n slot.save()\n child_slot = Slot.objects.get(id=child_slot.id)\n self.assertEqual(slot.end_date, child_slot.end_date)",
"def test_relative_date(self):\n self.assertEqual(self.show.relative_date, None)",
"def test_meeting_create(self):\n pass"
]
| [
"0.66133535",
"0.6401738",
"0.6379002",
"0.63506204",
"0.626649",
"0.62616646",
"0.6258136",
"0.62263054",
"0.62263054",
"0.62263054",
"0.62263054",
"0.62263054",
"0.61226845",
"0.60851943",
"0.60197324",
"0.5982935",
"0.5950324",
"0.59245646",
"0.59155875",
"0.5864435",
"0.5858001",
"0.5843902",
"0.5808836",
"0.58018005",
"0.5798628",
"0.57943857",
"0.5716452",
"0.57111603",
"0.569863",
"0.5686338"
]
| 0.7909044 | 0 |
Test parent creation with missing role info | def test_create_parent_missing_role(self):
response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Parent.objects.count(), self.qty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lacking_parent(self):\n pass",
"def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )",
"def test_create_project_unknown_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': INVALID_UUID,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )",
"def test_create_parent_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def test_create_project_invalid_parent(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.project.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')",
"def test_get_parent_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n role = children[-1]\n parent = role_middleware.get_parent(role.id)\n print(parent.name, parent.id)",
"def test_create_inherited_promote(self):\n # Create category role for user\n self.make_assignment(self.category, self.assign_user, self.role_guest)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.category).count(), 2\n )\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.category).count(), 2\n )\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n role_as = RoleAssignment.objects.filter(\n project=self.project,\n role=self.role_contributor,\n user=self.assign_user,\n ).first()\n self.assertIsNotNone(role_as)",
"def test_add_role(self):\n pass",
"def test_validate_parent(self):\n with self.assertRaises(ValidationError):\n self.project.parent = self.project\n self.project.save()",
"def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )",
"def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )",
"def test_create_namespaced_role(self):\n pass",
"def test_ipam_roles_create(self):\n pass",
"def test_create_inherited_equal(self):\n self.make_assignment(\n self.category, self.assign_user, self.role_contributor\n )\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.category).count(), 2\n )\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.category).count(), 2\n )\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n role_as = RoleAssignment.objects.filter(\n project=self.project,\n role=self.role_contributor,\n user=self.assign_user,\n ).first()\n self.assertIsNotNone(role_as)",
"def test_create_role_existing(self):\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n post_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )",
"def test_create_cluster_role(self):\n pass",
"def test_fetcher_parent_assignation(self):\n user = User()\n self.assertEquals(user.fetcher_for_rest_name(\"group\").parent_object, user)",
"def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)",
"def test_node_bad_parent(self):\n pod_id = '1'\n self.assertRaises(TypeError, Node, '1', '2', 'Spine1', role='leaf', parent=pod_id)",
"def test_create_config_nodes(self):\n with self.override_role():\n self._create_config_node()",
"def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))",
"def test_add_role_simple(self):\n pass",
"def test_has_role_inherit(self):\n self.make_assignment(self.category, self.user_bob, self.role_delegate)\n self.make_assignment(\n self.category, self.user_carol, self.role_contributor\n )\n self.make_assignment(self.category, self.user_dan, self.role_guest)\n self.assertTrue(self.project.has_role(self.user_alice))\n self.assertTrue(self.project.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_carol))\n self.assertTrue(self.project.has_role(self.user_dan))"
]
| [
"0.7173321",
"0.71174806",
"0.68135136",
"0.67152756",
"0.6665044",
"0.6665044",
"0.6665044",
"0.6665044",
"0.6665044",
"0.66351545",
"0.6623037",
"0.66228104",
"0.6481688",
"0.64460397",
"0.6426748",
"0.64100564",
"0.63996893",
"0.63996893",
"0.6395939",
"0.6361072",
"0.6360539",
"0.633423",
"0.63257915",
"0.6283947",
"0.62794226",
"0.6230676",
"0.62053597",
"0.61972904",
"0.6167067",
"0.61667866"
]
| 0.78568757 | 0 |
Test student creation with minimal data (email, password, role, birthday) | def test_create_student(self):
response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Student.objects.count(), self.qty + 1)
self.assertTrue(User.objects.filter(email=self.payload['email']).exists())
self.assertTrue(User.objects.filter(username=self.payload['email']).exists())
user_id = User.objects.get(username=self.payload['email']).id
self.assertTrue(Student.objects.filter(user_id=user_id).exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_create_new_student_user_missing_field(self):\n data = {\n 'email': '[email protected]',\n 'password': 'test123!',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_student_signup(self):\n post = {'email': '[email protected]', 'first_name': 'Tom',\n 'last_name': 'Student', 'user_type': 'student',\n 'password': '1234'}\n response = self.client.post(self.signup_student_url, post)\n self.assertRedirects(response, reverse('home'))\n SchoolUser.objects.get(username='[email protected]')",
"def test_create_new_student_user(self):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n 'name': \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertEqual(1, len(activation_token))",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def test_new_user():\n user = User(email = '[email protected]', password = '12345678ba', first_name='Jack',vCancer='YES',vTreatment='YES',vSymptoms='YES',result='50%')\n assert user.email == '[email protected]'\n assert user.password == '12345678ba'\n assert user.first_name == 'Jack'\n assert user.vSymptoms == 'YES'\n assert user.vCancer == 'YES'\n assert user.vTreatment == 'YES'\n assert user.result == '50%'\n print(\"Test passed\")",
"def test_create_Student(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Invalid School\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': 'aaaa'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')",
"def test_create(self):\n pass",
"def test_create_Student_full(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n for i in range(20):\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['school'][0].code, 'invalid')\n self.assertEqual(str(response.data['school'][0]), 'School Triamudomsuksa already has maximum number of students')",
"def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")",
"def test_create_Student_missing_param(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n\n \"\"\"Normal request\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"Missing first_name\"\"\"\n data = {'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n\n \"\"\"Missing all\"\"\"\n data = {}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n self.assertEqual(response.data['last_name'][0].code, 'required')\n self.assertEqual(response.data['age'][0].code, 'required')\n self.assertEqual(response.data['nationality'][0].code, 'required')\n self.assertEqual(response.data['school'][0].code, 'required')",
"def test_init(self):\n self.assertEqual(self.new_user.first_name, \"Danlon\")\n self.assertEqual(self.new_user.last_name, \"Situma\")\n self.assertEqual(self.new_user.user_name, \"Dasi202\")\n self.assertEqual(self.new_user.password, \"passcode\")",
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_create_Student_data_type(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n \"\"\"String age\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': '20', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Name, nationality, type number\"\"\"\n data = {'first_name':123, 'last_name': 123,'age': 20, 'nationality': 123, 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"String age but non convertable\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 'AAA', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_student_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty)",
"def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)",
"def test_create_record(self):\n pass",
"def test_create_user_only_lastname(self):\n data = {\"lastname\": \"Doe\"}\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n user = User.query.filter_by(id=6).first()\n self.assertEqual(user.firstname, None)\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)",
"def setUp(self):\n self.user_count = CustomUser.objects.count()\n self.new_student_user = CustomUser.objects.create(**self.Student)\n self.new_instructor_user = CustomUser.objects.create(**self.Instructor)\n self.new_student_user.set_password(\"student12345\")\n self.new_student_user.save()\n self.new_instructor_user.set_password(\"instructor12345\")\n self.new_instructor_user.save()",
"def test_create_user(self):\n user = User(\"Gideon Bamuleseyo\", \"[email protected]\", \"secret\")\n self.assertEqual(user.name, \"Gideon Bamuleseyo\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.password, \"secret\")",
"def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()",
"def test_create(self):\n \n name=\"mytest\"\n email=\"[email protected]\"\n \n #test user can be created successfully when given correct values\n user = users.create(Request(name, email)) \n self.assertIsInstance(user, User)\n self.assertEquals(user.name, name)\n self.assertEquals(user.email, email)\n \n #ensure that an error is raised when essential attributes are missed\n self.assertRaises(datastore_errors.BadValueError, users.create, None)",
"def test__init__(self):\n self.assertEqual(self.new_users.first_name, 'Dennis')\n self.assertEqual(self.new_users.last_name, 'Kiplangat')\n self.assertEqual(self.new_users.password, 'kiplangat18')",
"def test_init(self) :\n self.assertEqual(self.new_user.user_name,\"Juma\")\n self.assertEqual(self.new_user.password,\"12345\")",
"def test_create_user(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertEqual(user.email, \"[email protected]\")\n self.assertNotEqual(user.password, \"testpassword\")\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n self.assertIsNotNone(user.created_at)\n self.assertIsNotNone(user.confirmation_token)",
"def test_create_Student_param_validate(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n\n \"\"\"Normal request\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"first_name, last_name more than 20 characters and nationality more than 30 character\"\"\"\n data = {'first_name': 'A'*21, 'last_name': 'A'*21,'age': 20, 'nationality': 'A'*31, 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'max_length')\n self.assertEqual(str(response.data['first_name'][0]), 'Ensure this field has no more than 20 characters.')\n self.assertEqual(response.data['last_name'][0].code, 'max_length')\n self.assertEqual(str(response.data['last_name'][0]), 'Ensure this field has no more than 20 characters.')\n self.assertEqual(response.data['nationality'][0].code, 'max_length')\n self.assertEqual(str(response.data['nationality'][0]), 'Ensure this field has no more than 30 characters.')\n\n \"\"\"-1,0,150,151 age\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 0, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 150, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': -1, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['age'][0].code, 'min_value')\n self.assertEqual(str(response.data['age'][0]), 'Ensure this value is greater than or equal to 0.')\n\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 151, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['age'][0].code, 'max_value')\n self.assertEqual(str(response.data['age'][0]), 'Ensure this value is less than or equal to 150.')"
]
| [
"0.75032836",
"0.7184321",
"0.7168117",
"0.71637297",
"0.7106433",
"0.7106433",
"0.7106433",
"0.7096027",
"0.708405",
"0.7001411",
"0.6965496",
"0.6905284",
"0.6900171",
"0.6865108",
"0.6859173",
"0.6856256",
"0.6814214",
"0.6801656",
"0.6801538",
"0.67724085",
"0.67493975",
"0.6707578",
"0.669278",
"0.6641784",
"0.66354495",
"0.66119474",
"0.66112477",
"0.66067547",
"0.6590803",
"0.6588374"
]
| 0.77380747 | 0 |
Test student creation with complete data (email, password, role, birthday, gender) | def test_create_student_complete_data(self):
response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Student.objects.count(), self.qty + 1)
self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())
self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())
user_id = User.objects.get(username=self.payload_all['email']).id
self.assertTrue(Student.objects.filter(user_id=user_id).exists()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_create_new_student_user(self):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n 'name': \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertEqual(1, len(activation_token))",
"def test_student_signup(self):\n post = {'email': '[email protected]', 'first_name': 'Tom',\n 'last_name': 'Student', 'user_type': 'student',\n 'password': '1234'}\n response = self.client.post(self.signup_student_url, post)\n self.assertRedirects(response, reverse('home'))\n SchoolUser.objects.get(username='[email protected]')",
"def test_new_user():\n user = User(email = '[email protected]', password = '12345678ba', first_name='Jack',vCancer='YES',vTreatment='YES',vSymptoms='YES',result='50%')\n assert user.email == '[email protected]'\n assert user.password == '12345678ba'\n assert user.first_name == 'Jack'\n assert user.vSymptoms == 'YES'\n assert user.vCancer == 'YES'\n assert user.vTreatment == 'YES'\n assert user.result == '50%'\n print(\"Test passed\")",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_new_student_user_missing_field(self):\n data = {\n 'email': '[email protected]',\n 'password': 'test123!',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_create_Student(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Invalid School\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': 'aaaa'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_Student_data_type(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n \"\"\"String age\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': '20', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Name, nationality, type number\"\"\"\n data = {'first_name':123, 'last_name': 123,'age': 20, 'nationality': 123, 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"String age but non convertable\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 'AAA', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create(self):\n pass",
"def test_create_Student_full(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n for i in range(20):\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['school'][0].code, 'invalid')\n self.assertEqual(str(response.data['school'][0]), 'School Triamudomsuksa already has maximum number of students')",
"def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')",
"def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()",
"def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)",
"def test_creating_new_patient(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n patient_id = create_new_patient_account(form_data)\n\n self.assertEqual(3, patient_id)",
"def setUp(self):\n self.user_count = CustomUser.objects.count()\n self.new_student_user = CustomUser.objects.create(**self.Student)\n self.new_instructor_user = CustomUser.objects.create(**self.Instructor)\n self.new_student_user.set_password(\"student12345\")\n self.new_student_user.save()\n self.new_instructor_user.set_password(\"instructor12345\")\n self.new_instructor_user.save()",
"def register_student(request):\n email = auth.check_login(request)\n if email:\n db = database.Database()\n db.add_student(email, \"\", \"\")\n return True\n return False",
"def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_create_record(self):\n pass",
"def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")",
"def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200",
"def test_init(self):\n self.assertEqual(self.new_user.first_name, \"Danlon\")\n self.assertEqual(self.new_user.last_name, \"Situma\")\n self.assertEqual(self.new_user.user_name, \"Dasi202\")\n self.assertEqual(self.new_user.password, \"passcode\")",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1",
"def test_create_doctor(self):\n test_password = 'ooooooooooooooooooooooo'\n username = faker.first_name()\n data = {'username': username, 'email': faker.email(), 'password1': test_password, 'password2': test_password, 'is_doctor': True}\n response = self.client.post(self.url, data, format='json')\n # import pudb; pudb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Doctor.objects.count(), 1)\n # self.assertEqual(Account.objects.get().name, 'DabApps')",
"def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")"
]
| [
"0.7699759",
"0.7331721",
"0.71936333",
"0.71245515",
"0.7116897",
"0.7116897",
"0.7116897",
"0.70259297",
"0.70055175",
"0.69289756",
"0.6923009",
"0.6889131",
"0.6862286",
"0.6843992",
"0.68248117",
"0.67905575",
"0.677062",
"0.6760585",
"0.6741597",
"0.67381847",
"0.67322767",
"0.6705431",
"0.67043877",
"0.66682744",
"0.6666384",
"0.66565126",
"0.6648793",
"0.6648044",
"0.6634538",
"0.6634484"
]
| 0.7493841 | 1 |
Test student creation twice (same data) | def test_create_student_twice(self):
response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())
self.assertEqual(Student.objects.count(), self.qty + 1)
self.assertEqual(User.objects.count(), self.qty_users + 1)
response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Student.objects.count(), self.qty + 1)
self.assertEqual(User.objects.count(), self.qty_users + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_create_Student_full(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n for i in range(20):\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['school'][0].code, 'invalid')\n self.assertEqual(str(response.data['school'][0]), 'School Triamudomsuksa already has maximum number of students')",
"def test14_add_new_student_with_teacher(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). \\\n enter_student_data(data['third_new_student']).\\\n enter_name_approved_by_custom(data['third_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['third_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)\n return self.students_page",
"def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_add_student():\n classroom = setup_for_test()\n student = Student(\"Andrew Tsukuda\")\n classroom.add_student(student)\n assert len(classroom.student_dir) == 1\n assert classroom.student_dir[0].ID == 1",
"def test_create_Student(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Invalid School\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': 'aaaa'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()",
"def test09_add_new_student_with_coordinator(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). \\\n enter_student_data(data['second_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['second_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)",
"def test_case18(self):\n\n result = self.graph1.studentExists(\"student1\")\n\n self.assertTrue(result)",
"def test_create_a_student(self):\n data_class = {\n 'code': 'CS101',\n 'title': 'Introduction to Programming',\n 'description': 'This is a basic yet exciting course',\n 'enrolled_students':[]\n }\n url_class = reverse('class-list')\n response_class = self.client.post(url_class, data_class, format='json')\n self.assertEqual(response_class.status_code, status.HTTP_201_CREATED)\n class_pk = Class.objects.get().pk\n\n data = {\n 'first_name': 'Santiago',\n 'last_name': 'Quiroga Turdera',\n 'enrolled_to': ['http://127.0.0.1:8000' + reverse('class-detail', kwargs={'pk':class_pk}),]\n }\n url = reverse('student-list')\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Santiago')\n self.assertEqual(Student.objects.get().last_name, 'Quiroga Turdera')\n self.assertEqual(Student.objects.get().enrolled_to.count(), 1)\n self.assertEqual(Student.objects.get().enrolled_to.get().code, 'CS101')",
"def test01_add_new_student_with_admin(self):\n students_list_with_new_student = self.students_page.\\\n click_edit_students_list_button().\\\n click_add_new_student_button().\\\n enter_student_data(data['first_new_student']).\\\n click_save_data_changes_button().\\\n click_exit_students_list_editor_button().\\\n students_table()\n student = data_student_for_check(data['first_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)",
"def test_create_Student_data_type(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n \"\"\"String age\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': '20', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Name, nationality, type number\"\"\"\n data = {'first_name':123, 'last_name': 123,'age': 20, 'nationality': 123, 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"String age but non convertable\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 'AAA', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_case17(self):\n\n result = self.graph1.studentExists(\"student5\")\n\n self.assertFalse(result)",
"def test_create_instructor_twice(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)\n response = self.client.post(self.url, data=json.dumps(self.payload_repeated), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertEqual(User.objects.count(), self.qty_users + 1)",
"def test_creation_profile_2():\n assert tuple_NT[0][1] == LIST_dict[0]['sex'], \"sex of profile is not getting stored properly\"",
"def test_create_record(self):\n pass",
"def test_createperson(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p.id, p2.id)\n self.assertEqual(p.firstname, p2.firstname)\n self.assertEqual(p.lastname, p2.lastname)\n self.assertEqual(p.email, p2.email)\n self.assertEqual(p.hobbies, p2.hobbies)",
"def test_create(self):\n pass",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def setUp(self):\n self.student = Student(first_name=\"Eva\", last_name=\"Maier\", id=123456)\n self.assessor = Assessor(first_name=\"Peter\", last_name=\"Müller\")\n self.supervisor = Supervisor(first_name=\"Thomas\",\n last_name=\"Smits\", id=\"t.smits\")\n\n self.assessor.save()\n self.supervisor.save()\n self.student.save()",
"def setUp(self):\n program = program_utils.seedProgram()\n self.profile = profile_utils.seedSOCStudent(program)",
"def test_create_Student_missing_param(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n\n \"\"\"Normal request\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"Missing first_name\"\"\"\n data = {'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n\n \"\"\"Missing all\"\"\"\n data = {}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n self.assertEqual(response.data['last_name'][0].code, 'required')\n self.assertEqual(response.data['age'][0].code, 'required')\n self.assertEqual(response.data['nationality'][0].code, 'required')\n self.assertEqual(response.data['school'][0].code, 'required')",
"def test15_edit_data_first_student_with_teacher(self):\n students_list_with_edit_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_edit_student_button(). \\\n enter_student_data(data['third_new_data_student']). \\\n enter_name_approved_by_custom(data['third_new_data_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student_with_changes = \\\n data_student_for_check(data['third_new_data_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student_with_changes,\n students_list_with_edit_student)\n return self.students_page",
"def test_create_study(self):\n study_spec = sample_study_spec()\n\n now = datetime.datetime.now()\n study_id = self.storage.create_study(study_spec)\n # Study ID must be a 32 char string.\n self.assertLen(study_id, 32)\n self.assertEqual(study_id, study_spec.id)\n self.assertGreaterEqual(study_spec.creation_time.ToDatetime(), now)\n\n # Read back the study and check that it is the same.\n self.assertEqual(self.storage.get_study(study_id), study_spec)",
"def test_initialization_of_student_first_name():\n assert lazy_student.first_name == \"Roman\"",
"def test_initialization_of_student_last_name():\n assert lazy_student.last_name == \"Petrov\"",
"def test_case12(self):\n\n self.graph1.transferStudent(\"student3\",\"supervisor1\",\"supervisor2\", self.supervisors)\n\n val1 = self.graph1.getSupervisorDegree(\"supervisor1\")\n val2 = self.graph1.getStudents(\"supervisor2\")\n val3 = self.graph1.getStudents(\"supervisor1\")\n\n expected = (2,['student4'],['student1','student2'])\n\n self.assertEqual((val1,val2,val3),expected)",
"def example(exam_name, question_set, student):\n\n exam = Exam(exam_name)\n for question in question_set:\n exam.add_question(question, question_set[question])\n student = Student(student['f_name'], student['l_name'], student['address'])\n take_test(exam, student)\n return student, exam",
"def __init__(self, student):\n pass",
"def _create_students(self):\n def mktime(str_date):\n return time.mktime(time.strptime(\n str_date, CountSkillCompletion.DATE_FORMAT))\n self.day1 = '2015-01-01'\n self.day2 = '2015-01-02'\n self.day3 = '2015-01-03'\n self.day4 = '2015-01-04'\n c = SkillCompletionTracker.COMPLETED\n p = SkillCompletionTracker.IN_PROGRESS\n # progress string for students\n students_progress = [\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day1)},\n self.skill2.id : {c: mktime(self.day4), p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day2)},\n self.skill2.id : {p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day1)}},\n {} # No progress\n ]\n for index, progress in enumerate(students_progress):\n student = models.Student(user_id=str(index))\n student.put()\n comp = models.StudentPropertyEntity.create(\n student=student,\n property_name=SkillCompletionTracker.PROPERTY_KEY)\n comp.value = transforms.dumps(progress)\n comp.put()"
]
| [
"0.7176237",
"0.7174548",
"0.71448815",
"0.704746",
"0.70384395",
"0.6947368",
"0.69430256",
"0.6755699",
"0.6726912",
"0.66720057",
"0.6644916",
"0.6610038",
"0.6580926",
"0.6543243",
"0.6506587",
"0.65057766",
"0.64726734",
"0.6429931",
"0.64217025",
"0.6405076",
"0.6374276",
"0.6348308",
"0.63387096",
"0.633577",
"0.63288015",
"0.63082856",
"0.62895626",
"0.62727576",
"0.62707955",
"0.6270401"
]
| 0.74358004 | 0 |
Test student creation with missing birthday info | def test_create_student_missing_birthday(self):
response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Student.objects.count(), self.qty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_instructor_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_birth_validation(self):",
"def test_init_invalid_birth_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, 'invalid',\n retirement_date=self.retirement_date)",
"def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def test_creation_profile_3():\n assert tuple_NT[0][2] == LIST_dict[0]['birthdate'], \"birthdate of profile is not getting stored properly\"",
"def test_date_of_birth(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n }}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)",
"def test_init_invalid_order(self):\n with self.assertRaises(ValueError):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.birth_date - relativedelta(days=1))",
"def test_init_basic(self):\n person = Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date=self.retirement_date)\n self.assertEqual(person.name, self.name)\n self.assertEqual(person.birth_date, self.birth_date)\n self.assertEqual(person.retirement_date, self.retirement_date)\n self.assertIsInstance(person.name, str)\n self.assertIsInstance(person.birth_date, datetime)\n self.assertIsInstance(person.retirement_date, datetime)\n self.assertIsNone(person.spouse)\n self.assertIsNone(person.tax_treatment)",
"def test_init_invalid_retire_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date='invalid')",
"def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)",
"def test_future_birth_date_import():\n _curr_date = datetime.utcnow()\n _later_date = _curr_date + relativedelta(days=1)\n later_date = _later_date.strftime(\"%d.%m.%Y\")\n\n citizen_with_birth_date_later_than_current = deepcopy(CITIZEN_EXAMPLE)\n citizen_with_birth_date_later_than_current[\"birth_date\"] = later_date\n with TestClient(app) as client:\n response = client.post(\n \"/imports\",\n json={\n \"citizens\": [\n citizen_with_birth_date_later_than_current\n ]}\n )\n\n assert response.status_code == 400",
"def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance",
"def test_create_Student_missing_param(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n\n \"\"\"Normal request\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"Missing first_name\"\"\"\n data = {'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n\n \"\"\"Missing all\"\"\"\n data = {}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n self.assertEqual(response.data['last_name'][0].code, 'required')\n self.assertEqual(response.data['age'][0].code, 'required')\n self.assertEqual(response.data['nationality'][0].code, 'required')\n self.assertEqual(response.data['school'][0].code, 'required')",
"def test_date_of_birth_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_date_of_birth(input_val)\n self.assertEqual(output_val, self.line.date_of_birth)",
"def test_create_Student(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Invalid School\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': 'aaaa'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.careers(student_id)\n self.assertFalse(result)",
"def test_invalid_birthdate(self):\n data = self.valid_payload\n data['birthdate'] = '2017/09/19'\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_education_instance_created_without_required_arguments(self):\n\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t)\n\n\t\teducation = Education.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\teducation.user,\n\t\t\t\"Users don't match.\")\n\n\t\tself.assertEqual(\n\t\t\tself.school_name,\n\t\t\teducation.school_name,\n\t\t\t\"School names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.course_name,\n\t\t\teducation.course_name,\n\t\t\t\"Course names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\teducation.start_date,\n\t\t\t\"Start dates don't match\"\n\t\t)",
"def test_create_parent_missing_birthday(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_birthday),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty)",
"def test_required_year_of_birth_missing(self):\r\n self.url_params['year_of_birth'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'Your year of birth is required',\r\n )",
"def test_employee_creation_bad_fields(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=None, department_id=None)\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], 0)\n self.assertIsNone(result['departmentId'])",
"def test_invalid_student(self):\n # request\n request_body = {\n 'wwuid': '123456789', # too long\n 'labgroup': self.labgroup.id,\n 'enroll_key': self.labgroup.enroll_key\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # test database\n self.assertEqual(len(Student.objects.all()), 0)",
"def staff_birthdays():\n try:\n staff_docs.birthday_list()\n\n except FileNotFoundError:\n popup_error(r\"\"\"Please Generate the file first from MYOB Payroll \n under the Employees reportand name it Birthday.csv.\nPlace in J:\\Quality Data\\Data Technician\\StaffDbases\\n\nThe File should have fields\n- Employee Code\n- Employee Full Name\n- Employee Status\n- Employee Occupation\n- Employee Start Date\n- Employee Birthdate\n- Employee Cost Centre Name\"\"\")",
"def test_date_of_birth_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_date_of_birth(val))",
"def test_initialization_of_student_last_name():\n assert lazy_student.last_name == \"Petrov\"",
"def test_create_new_student_user_missing_field(self):\n data = {\n 'email': '[email protected]',\n 'password': 'test123!',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_education_instance_created(self):\n\n\t\tEducation.objects.create(\n\t\t\tuser=self.user,\n\t\t\tschool_name=self.school_name,\n\t\t\tcourse_name=self.course_name,\n\t\t\tstart_date=self.start_date,\n\t\t\tend_date=self.end_date,\n\t\t\tgrade_obtained=self.grade_obtained\n\t\t)\n\n\t\teducation = Education.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.user,\n\t\t\teducation.user,\n\t\t\t\"Users don't match.\")\n\t\tself.assertEqual(\n\t\t\tself.school_name,\n\t\t\teducation.school_name,\n\t\t\t\"School names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.course_name,\n\t\t\teducation.course_name,\n\t\t\t\"Course names don't match.\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.start_date,\n\t\t\teducation.start_date,\n\t\t\t\"Start dates don't match\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.end_date,\n\t\t\teducation.end_date,\n\t\t\t\"End dates don't match\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tself.grade_obtained,\n\t\t\teducation.grade_obtained,\n\t\t\t\"Grade obtained don't match\"\n\t\t)",
"def test_creation_throws_error_on_missing_fields(self, test_domain):\n with pytest.raises(ValidationError) as err:\n test_domain.repository_for(Person)._dao.create(last_name=\"Doe\")\n\n assert err.value.messages == {\"first_name\": [\"is required\"]}",
"def test_no_birthdate(self):\n data = self.valid_payload\n data[\"birthdate\"] = \"\"\n response1 = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n del data[\"birthdate\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)"
]
| [
"0.7199732",
"0.7019023",
"0.6995882",
"0.69923717",
"0.6895691",
"0.6796359",
"0.67810917",
"0.66963357",
"0.644228",
"0.6409774",
"0.6269694",
"0.62652636",
"0.6264589",
"0.6253317",
"0.6228514",
"0.6179216",
"0.61280346",
"0.6105734",
"0.60952175",
"0.6064227",
"0.6063406",
"0.60521007",
"0.6050001",
"0.6011319",
"0.5997736",
"0.59706634",
"0.59638137",
"0.5962977",
"0.5962287",
"0.59450656"
]
| 0.780209 | 0 |
Test student creation with missing role info | def test_create_student_missing_role(self):
response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())
self.assertEqual(Student.objects.count(), self.qty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_role(self):\n pass",
"def test_create_instructor_missing_role(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_missing_role),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty)",
"def test_add_role_simple(self):\n pass",
"def test_with_no_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\"\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_create_new_person_w_no_role(self, selenium):\n expected_person = entities_factory.PeopleFactory().create(\n system_wide_role=roles.NO_ROLE)\n actual_person = admin_webui_service.PeopleAdminWebUiService(\n selenium).create_new_person(expected_person)\n self.general_equal_assert(expected_person, actual_person)",
"def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_with_unknown_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\n \"http://purl.imsglobal.org/vocab/lis/v2/membership#Learner\",\n \"http://purl.imsglobal.org/vocab/lis/v2/uknownrole/unknown#Unknown\",\n ]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_add_role_simple_post(self):\n pass",
"def test_ipam_roles_create(self):\n pass",
"def test_create_cluster_role(self):\n pass",
"def test_create_role_existing(self):\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n post_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )",
"def test_case17(self):\n\n result = self.graph1.studentExists(\"student5\")\n\n self.assertFalse(result)",
"def test_create_namespaced_role(self):\n pass",
"def test_create_new_student_user_missing_field(self):\n data = {\n 'email': '[email protected]',\n 'password': 'test123!',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_list_role(self):\n pass",
"def test_add_student():\n classroom = setup_for_test()\n student = Student(\"Andrew Tsukuda\")\n classroom.add_student(student)\n assert len(classroom.student_dir) == 1\n assert classroom.student_dir[0].ID == 1",
"def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_user_role_on_course_recreate(self):\r\n # check that user has enrollment and his default \"Student\" forum role for this course\r\n self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))\r\n self.assertTrue(self.user.roles.filter(name=\"Student\", course_id=self.course_key)) # pylint: disable=no-member\r\n\r\n # delete this course and recreate this course with same user\r\n delete_course_and_groups(self.course_key, commit=True)\r\n resp = self._create_course_with_given_location(self.course_key)\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # check that user has his enrollment for this course\r\n self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))\r\n\r\n # check that user has his default \"Student\" forum role for this course\r\n self.assertTrue(self.user.roles.filter(name=\"Student\", course_id=self.course_key)) # pylint: disable=no-member\r",
"def test_case18(self):\n\n result = self.graph1.studentExists(\"student1\")\n\n self.assertTrue(result)",
"def test_create_course_check_forum_seeding(self):\r\n test_course_data = self.assert_created_course(number_suffix=uuid4().hex)\r\n self.assertTrue(are_permissions_roles_seeded(_get_course_id(test_course_data)))",
"def test_create_role_success(self) -> None:\n\n role_code = \"TestRoleCode\"\n\n try:\n self.delete_role(role_code)\n except Exception as e:\n print(e)\n\n # Create a role creation request (using the access model)\n access_role_creation_request = create_access_role_creation_request(role_code)\n\n # Create a role using the LPT create_role method\n iam.roles.create_role(self.api_factory, access_role_creation_request)\n\n # Check that the role was correctly created through the access API\n access_role = self.get_access_role(role_code)\n self.assertEqual(first=access_role.id.code, second=role_code)\n\n # Check that the role was correctly created through the identity API\n identity_role = self.get_identity_role(role_code)\n self.assertEqual(first=identity_role.role_id.code, second=role_code)",
"def test14_add_new_student_with_teacher(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). \\\n enter_student_data(data['third_new_student']).\\\n enter_name_approved_by_custom(data['third_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['third_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)\n return self.students_page",
"def test_create_returns_errors_on_invalid(self):\n res = role_service.create('ad')\n self.assertIsInstance(res, Result)\n self.assertFalse(res)",
"def test_create_scenario(self):\n pass",
"def test_non_existent_course_role(self):\n self._login_as_staff()\n path = self.path(role='A')\n response = self.client.get(path)\n\n assert response.status_code == 400\n\n response = self.client.post(path)\n assert response.status_code == 400",
"def test_create_instructor(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_with_short_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\"Learner\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test01_add_new_student_with_admin(self):\n students_list_with_new_student = self.students_page.\\\n click_edit_students_list_button().\\\n click_add_new_student_button().\\\n enter_student_data(data['first_new_student']).\\\n click_save_data_changes_button().\\\n click_exit_students_list_editor_button().\\\n students_table()\n student = data_student_for_check(data['first_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)",
"def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))",
"def test_invalid_student(self):\n # request\n request_body = {\n 'wwuid': '123456789', # too long\n 'labgroup': self.labgroup.id,\n 'enroll_key': self.labgroup.enroll_key\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # test database\n self.assertEqual(len(Student.objects.all()), 0)"
]
| [
"0.7202376",
"0.7189899",
"0.7092232",
"0.70697355",
"0.70014966",
"0.69190806",
"0.6832253",
"0.6723964",
"0.66927207",
"0.6688905",
"0.66311604",
"0.65885717",
"0.65753794",
"0.65148205",
"0.65141356",
"0.6511439",
"0.65093863",
"0.64881116",
"0.6481572",
"0.64582074",
"0.64576346",
"0.6413818",
"0.64136744",
"0.6393454",
"0.6380549",
"0.6374707",
"0.637435",
"0.6366393",
"0.6352222",
"0.6323425"
]
| 0.7844897 | 0 |
Read a toml string, but extends it by parsing $include to include other Toml files. The next Toml file is inserted in place. | def _flatten_toml_string(self, content: str, local_search_path: str) -> str:
output_str = ""
for line in content:
if line.startswith(self.include_cmd + " "):
# import the next file in line
next_file = line.strip(self.include_cmd).strip()
output_str += self._flatten_toml_file(next_file, local_search_path)
return output_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_toml(content):\n from toml import loads\n return loads(content)",
"def load_toml(self, toml_str): # type: (str) -> None\n self._toml = tomlkit.loads(toml_str)\n self._load_dict(self._toml.value)",
"def loads(xtext):\n\n class XmlLoads(object):\n \"\"\"\n XMLLOADS implements 'YZXml.loads' functionality\n \"\"\"\n\n def __init__(self, xtext):\n self.index = 0\n self.xtext = xtext\n self.length = len(xtext)\n self.stack = []\n\n def loads(self):\n \"\"\"\n LOADS converts xml-string to object-string\n\n Returns\n -------\n - dict\n object-xml contains {'tag': str, 'attribs': dict, 'elements': list}\n \"\"\"\n\n self.ignore_whitespaces()\n\n if self.index >= self.length:\n return\n\n if self.xtext[self.index] == '<':\n self.index += 1\n\n if self.xtext[self.index] == '/':\n self.index += 1\n\n tag = self.read_until('>')\n self.index += 1\n\n elements = []\n while len(self.stack) > 0 and\\\n (isinstance(self.stack[-1], str) or self.stack[-1]['tag'] != tag):\n elements.append(self.stack.pop())\n\n assert len(self.stack) > 0\n\n self.stack[-1]['elements'].extend(reversed(elements))\n\n else:\n self.ignore_whitespaces()\n tag = self.read_until(' >')\n\n attribs = {}\n if self.xtext[self.index] != '>':\n attribs = self.read_attribs()\n\n self.index += 1\n self.stack.append({'tag': tag, 'attribs': attribs, 'elements': []})\n else:\n self.stack.append(self.read_until('<').strip())\n\n self.loads()\n\n\n def ignore_whitespaces(self):\n \"\"\"\n IGNORE_WHITESPACES reads whitespaces and advances self.index\n \"\"\"\n\n whitespaces = [' ', '\\t', '\\n', '\\r']\n while self.index < self.length and self.xtext[self.index] in whitespaces:\n self.index += 1\n\n def read_until(self, chars):\n \"\"\"\n READ_UNTIL reads charaters and advances self.index\n unitl reaches any character in 'cahrs'\n\n Parameters\n ----------\n - chars: str\n stoping characters\n \"\"\"\n\n start_index = self.index\n\n while self.index < self.length and self.xtext[self.index] not in chars:\n self.index += 1\n\n assert self.index < self.length\n\n return self.xtext[start_index:self.index]\n\n def read_attribs(self):\n \"\"\"\n READ_ATTRIBS reads attributes of an elements\n \"\"\"\n\n attribs = {}\n while self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs\n\n xmlloads = XmlLoads(xtext)\n xmlloads.loads()\n return xmlloads.stack.pop()",
"def include_string(parser, token):\n\tbits = token.split_contents()\n\tif len(bits) != 2:\n\t\traise TemplateSyntaxError(\"%r tag takes one argument: the template string to be included\" % bits[0])\n \tstring = parser.compile_filter(bits[1])\n\treturn IncludeStringNode(string)",
"def read_file(tp, ignoreincludes):\n ret = []\n filename, f = tp\n\n accumulate = \"\"\n for lineno, line in enumerate(f):\n lineno = lineno + 1 # number from 1\n line = line.strip()\n if not line: # preserve blanks\n ret.append((line, filename, lineno))\n continue\n if line.endswith(\"\\\\\"):\n accumulate += line[0:-1]\n continue\n elif accumulate:\n line = accumulate + line\n accumulate = \"\"\n\n if line:\n line = apply_macros(line)\n\n line = line.strip()\n\n if not line:\n continue\n\n try:\n if line.startswith(\"<\") and line.endswith(\">\"):\n if line.startswith(\"<include\"):\n if not ignoreincludes:\n line = line[1:-1]\n line = line[7:].strip()\n line = line.strip('\"')\n ret.extend(read_file(\n searching_open(line),\n ignoreincludes))\n else:\n ret.append((line, filename, lineno))\n elif line.startswith(\"<transform\"):\n line = line[1:-1]\n add_transform(line, filename, lineno)\n else:\n raise RuntimeError(_(\"unknown command {0}\").format(\n line))\n else:\n ret.append((line, filename, lineno))\n except RuntimeError as e:\n error(_(\"File {file}, line {line:d}: {exception}\").format(\n file=filename,\n line=lineno,\n exception=e),\n exitcode=None)\n raise RuntimeError(\"<included from>\")\n\n return ret",
"def parse_toml_file(filepath: Union[str, Path]) -> Any:\n from tomlkit import loads\n\n with open(filepath, 'r') as fp:\n toml_dict = loads(fp.read())\n\n blocks = list()\n variables = list()\n values = list()\n comments = list()\n\n header = None\n date_time = None\n\n for key, item in toml_dict.items():\n if key in ['__header__', 'header']:\n header = item\n elif key in ['__datetime__', 'datetime']:\n date_time = item\n else:\n for var, val in item.items():\n if isinstance(val, str):\n if re.fullmatch(r'\\d\\d\\d:\\d\\d', val):\n val = val.split(':')\n val = datetime.timedelta(hours=int(val[0]), minutes=int(val[1]))\n variables.append(var)\n values.append(val)\n blocks.append(key)\n\n variable_comment = dict()\n for key in toml_dict.keys():\n lines = toml_dict[key].as_string().split('\\n')\n while '' in lines:\n lines.remove('')\n comment = list()\n for line in lines:\n if line.startswith('#'):\n comment.append(line[2:])\n else:\n variable_comment[line.split('=')[0].strip()] = '\\n'.join(comment)\n comment = list()\n\n for var in variables:\n if var in variable_comment:\n comments.append(variable_comment[var])\n else:\n comments.append('')\n\n block_names = list(toml_dict.keys())\n try:\n block_names.remove('__header__')\n except ValueError:\n pass\n try:\n block_names.remove('__datetime__')\n except ValueError:\n pass\n\n header = list()\n lines = toml_dict.as_string().split('\\n')\n for line in lines:\n if line.startswith('#'):\n header.append(line.strip().split('# ')[1])\n if line == '':\n break\n\n date_time = _get_datetime_from_header(header)\n\n return date_time, header, block_names, (variables, values, comments, blocks)",
"def construct_include(loader: Loader, node: yaml.Node) -> Any:\n\n filename = os.path.abspath(\n os.path.join(loader._root, loader.construct_scalar(node))\n )\n extension = os.path.splitext(filename)[1].lstrip(\".\")\n\n with open(filename, \"r\") as f:\n if extension in (\"yaml\", \"yml\"):\n return yaml.load(f, Loader)\n elif extension in (\"json\",):\n return json.load(f)\n else:\n return \"\".join(f.readlines())",
"def parse(self):\n\n if exists(self.filepath):\n content = open(self.filepath).read().decode(charset)\n else:\n content = \"\"\n\n try:\n config = toml.loads(content)\n except toml.TomlSyntaxError:\n raise ConfigSyntaxError\n\n return config",
"def loadText(self,inName):\n reComment = re.compile(r'\\s*\\#.*')\n ins = file(inName)\n for line in ins:\n #print line,\n #--Strip spaces and comments\n line = reComment.sub('',line)\n line = line.rstrip()\n #--Skip empty/comment lines\n if not line: continue\n #--Parse line\n (libId,srcId,altId) = line.split('\\t')[:3]\n self.libList.append(libId)\n self.libMap[libId] = (srcId,altId)\n #--Done\n ins.close()",
"def load_text_embed(filepath: Union[str, os.PathLike], load_dir: str = 'model') \\\n -> Tuple[TransformerEmbedding, Callable]:\n model_dir = Path(filepath).joinpath(load_dir)\n tokenizer = AutoTokenizer.from_pretrained(str(model_dir.resolve()))\n args = dill.load(open(model_dir.joinpath('embedding.dill'), 'rb'))\n emb = TransformerEmbedding(\n str(model_dir.resolve()), embedding_type=args['embedding_type'], layers=args['layers']\n )\n return emb, tokenizer",
"def load_toml_versions(toml_file: Path) -> Tuple[_TOMLDocument, _TOMLDocument]:\n\n def load(lines: Sequence[str]) -> _TOMLDocument: # noqa\n return tomlkit.loads(\"\".join(lines))\n\n with toml_file.open() as fp:\n ours, theirs = parser.parse(fp)\n return load(ours), load(theirs)",
"def _merge_ctm(self, iri, included=None):\n from mio.ctm import CTMDeserializer\n deser = CTMDeserializer(context=self._context, included_by=included)\n deser.handler = self._maphandler\n deser.subordinate = True\n if included:\n deser.wildcard_counter = self.wildcard_counter\n deser.parse(Source(iri))\n if included:\n self.wildcard_counter = deser.wildcard_counter\n for template in deser.environment.templates.itervalues():\n self.register_template(template.name, template)",
"def examplereader(path, lower=False):\n for line in filereader(path):\n line = line.lower() if lower else line\n tokens = tokens_from_treestring(line)\n tree = Tree.fromstring(line) # use NLTK's Tree\n label = int(line[1])\n trans = transitions_from_treestring(line)\n yield Example(tokens=tokens, tree=tree, label=label, transitions=trans)",
"def readstring(self, fstring):\n return self.parse(fstring)",
"def _parse_ml(self, line):\n # Parse the line\n fields = line.split('\\\\')\n if self.lang == ENGLISH:\n # pylint: disable=C0301\n # English sample:\n # 14\\abandonment\\94\\C\\\\1\\N\\N\\N\\N\\Y\\abandon+ment\\2x\\SA\\N\\N\\N\\#\\N\\N\\SA\\((abandon)[V],(ment)[N|V.])[N]\\N\\N\\N\n # From the README:\n # The eml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Cob\n # 4. MorphStatus\n # 5. Lang\n # 6. MorphCnt\n # 7. NVAffComp\n # 8. Der\n # 9. Comp\n # 10. DerComp\n # 11. Def\n # 12. Imm\n # 13. ImmSubCat\n # 14. ImmSA\n # 15. ImmAllo\n # 16. ImmSubst\n # 17. ImmOpac\n # 18. TransDer\n # 19. ImmInfix\n # 20. ImmRevers\n # 21 FlatSA\n # 22. StrucLab\n # 23. StrucAllo\n # 24. StrucSubst\n # 25. StrucOpac\n lemma = fields[0]\n word = fields[1]\n derivation = fields[21]\n elif self.lang == DUTCH:\n # pylint: disable=C0301\n # Dutch sample:\n # 19\\aalbessengelei\\7\\C\\1\\Y\\Y\\Y\\aalbes+en+gelei\\NxN\\N\\N\\(((aal)[N],(bes)[N])[N],(en)[N|N.N],(gelei)[N])[N]\\N\\N\\N\n # The dml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Inl\n # 4. MorphStatus\n # 5. MorphCnt\n # 6. DerComp\n # 7. Comp\n # 8. Def\n # 9. Imm\n # 10. ImmSubCat\n # 11. ImmAllo\n # 12. ImmSubst\n # 13. StrucLab\n # 14. StruAcAllo\n # 15. StrucSubst\n # 16. Sepa\n lemma = fields[0]\n word = fields[1]\n derivation = fields[12]\n\n # Skip multi-word entries for roots\n roots = self._get_root(derivation) if \" \" not in word else None\n return (lemma, word, roots)",
"def process_includes(fn):\r\n @wraps(fn)\r\n def from_xml(cls, xml_data, system, id_generator):\r\n xml_object = etree.fromstring(xml_data)\r\n next_include = xml_object.find('include')\r\n while next_include is not None:\r\n system.error_tracker(\"WARNING: the <include> tag is deprecated, and will go away.\")\r\n file = next_include.get('file')\r\n parent = next_include.getparent()\r\n\r\n if file is None:\r\n continue\r\n\r\n try:\r\n ifp = system.resources_fs.open(file)\r\n # read in and convert to XML\r\n incxml = etree.XML(ifp.read())\r\n\r\n # insert new XML into tree in place of include\r\n parent.insert(parent.index(next_include), incxml)\r\n except Exception:\r\n # Log error\r\n msg = \"Error in problem xml include: %s\" % (\r\n etree.tostring(next_include, pretty_print=True))\r\n # tell the tracker\r\n system.error_tracker(msg)\r\n\r\n # work around\r\n parent = next_include.getparent()\r\n errorxml = etree.Element('error')\r\n messagexml = etree.SubElement(errorxml, 'message')\r\n messagexml.text = msg\r\n stackxml = etree.SubElement(errorxml, 'stacktrace')\r\n stackxml.text = traceback.format_exc()\r\n # insert error XML in place of include\r\n parent.insert(parent.index(next_include), errorxml)\r\n\r\n parent.remove(next_include)\r\n\r\n next_include = xml_object.find('include')\r\n return fn(cls, etree.tostring(xml_object), system, id_generator)\r\n return from_xml",
"def parse(self, script_str):\n lines = script_str.split('\\n')\n for line in lines:\n self.parse_line(line.strip())",
"def _load_template(name: str) -> str:\n html_tpl = _read_text(name + '.html')\n import re\n\n # line breaks are not needed\n html_tpl = html_tpl.replace('\\n', '')\n # remove comments\n html_tpl = re.sub(r'<!--(.|\\s|\\n)*?-->', '', html_tpl)\n # remove space around special characters\n html_tpl = re.sub(r'\\s*([><])\\s*', r'\\1', html_tpl)\n return html_tpl",
"def from_content(cls, content: str) -> Any:\n cls._check_toml()\n return toml.loads(content)",
"def loadText(self,filePath):\n ins = file(filePath,'r')\n reComment = re.compile(r\"#.*\")\n reSection = re.compile(r'@ +(srcmod|replace)',re.M)\n reReplace = re.compile(r\"(\\w[-\\w ']+)\\s*:\\s*(.+)\")\n reNewIds = re.compile(r\",\\s*\")\n mode = None\n for line in ins:\n line = reComment.sub('',line.strip())\n maSection = reSection.match(line)\n if maSection:\n mode = maSection.group(1)\n elif not line: #--Empty/comment line\n pass\n elif mode == 'srcmod':\n self.srcModName = line\n elif mode == 'replace':\n maReplace = reReplace.match(line)\n if not maReplace: continue\n oldId = maReplace.group(1)\n self.newIds[oldId.lower()] = reNewIds.split(maReplace.group(2))\n ins.close()",
"def pre_process(in_path):\n in_string = open(in_path, 'r').read()\n multi_line = '/\\\\*[^*]*\\\\*+(?:[^/*][^*]*\\\\*+)*/'\n\n # header\n description = re.search(multi_line, in_string).group(0)\n unit = re.search('\\\\n\\\\s*// unit .*', in_string).group(0)\n imports = re.findall('\\\\n\\\\s*// import .*', in_string)\n import_string = ''\n for i in imports:\n import_string += resolve_import(i.strip()[10:], in_path.parent)\n\n use_string = ''\n uses = re.findall('\\\\n\\\\s*// uses .*', in_string)\n for u in uses:\n use_string += 'uses ' + u.strip()[8:] + ';\\n'\n if use_string != '':\n use_string = '\\n\\n' + use_string\n\n header = '{' + description[2:-2] + '}\\n\\nunit ' + unit.strip()[8:] + ';' + use_string + '\\n\\n'\n\n # main part\n in_string_list, delphi_string_list = split(import_string + '\\n\\n' + in_string)\n\n return header, in_string_list, delphi_string_list",
"def parse(self, stream, media_type=None, parser_context=None):\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n decoded_stream = codecs.getreader(encoding)(stream)\n raw_body = decoded_stream.read()\n request = parser_context.get('request')\n setattr(request, 'raw_body', raw_body)\n filename = self.get_filename(stream, media_type, parser_context)\n if filename and (not filename.endswith('.toml') and not filename.endswith('.tml')):\n filename = f'{filename}.toml'\n setattr(request, 'filename', filename)\n return toml.loads(raw_body)",
"def read_line(self, line):\n\t\tparts = line[:line.find(';')].split()\n\t\tfor directive in self.directives:\n\t\t\tif directive in map(str.upper, parts):\n\t\t\t\tinclude = os.path.join(self.path, parts[parts.index(directive) + 1].split('\"')[1])\n\t\t\t\tif include not in self.includes:\n\t\t\t\t\tself.includes.append(include)\n\t\t\t\t\tself.read(include)",
"def importKML(filepath):\n\tf = open(filepath, 'r')\n\tstr = f.read()\n\treturn etree.fromstring(str)",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def parse(input_str, file_path=True):\n\n tokens = _tokenize_glm(input_str, file_path)\n return _parse_token_list(tokens)",
"def parse_file(self, filepath: str) -> None:\n use_new, remove_old = self._check_path_actions(filepath)\n # Ensure that we have the latest Augeas DOM state on disk before\n # calling aug.load() which reloads the state from disk\n self.ensure_augeas_state()\n # Test if augeas included file for Httpd.lens\n # Note: This works for augeas globs, ie. *.conf\n if use_new:\n inc_test = self.aug.match(\n \"/augeas/load/Httpd['%s' =~ glob(incl)]\" % filepath)\n if not inc_test:\n # Load up files\n # This doesn't seem to work on TravisCI\n # self.aug.add_transform(\"Httpd.lns\", [filepath])\n if remove_old:\n self._remove_httpd_transform(filepath)\n self._add_httpd_transform(filepath)\n self.aug.load()",
"def run(self):\n\n # from sphynx Include Directive in https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/other.py\n # type: () -> List[nodes.Node]\n env = self.state.document.settings.env\n if self.arguments[0].startswith('<') and \\\n self.arguments[0].endswith('>'):\n # docutils \"standard\" includes, do not do path processing\n return BaseInclude.run(self)\n rel_filename, filename = env.relfn2path(self.arguments[0])\n self.arguments[0] = filename\n env.note_included(filename)\n #end\n\n if not self.state.document.settings.file_insertion_enabled:\n raise self.warning('\"%s\" directive disabled.' % self.name)\n source = self.state_machine.input_lines.source(\n self.lineno - self.state_machine.input_offset - 1)\n source_dir = os.path.dirname(os.path.abspath(source))\n path = directives.path(self.arguments[0])\n if path.startswith('<') and path.endswith('>'):\n path = os.path.join(self.standard_include_path, path[1:-1])\n path = os.path.normpath(os.path.join(source_dir, path))\n path = utils.relative_path(None, path)\n path = nodes.reprunicode(path)\n encoding = self.options.get(\n 'encoding', self.state.document.settings.input_encoding)\n e_handler=self.state.document.settings.input_encoding_error_handler\n tab_width = self.options.get(\n 'tab-width', self.state.document.settings.tab_width)\n try:\n self.state.document.settings.record_dependencies.add(path)\n include_file = io.FileInput(source_path=path,\n encoding=encoding,\n error_handler=e_handler)\n except UnicodeEncodeError as error:\n raise self.severe(u'Problems with \"%s\" directive path:\\n'\n 'Cannot encode input file path \"%s\" '\n '(wrong locale?).' %\n (self.name, SafeString(path)))\n except IOError as error:\n raise self.severe(u'Problems with \"%s\" directive path:\\n%s.' %\n (self.name, ErrorString(error)))\n startline = self.options.get('start-line', None)\n endline = self.options.get('end-line', None)\n try:\n if startline or (endline is not None):\n lines = include_file.readlines()\n rawtext = ''.join(lines[startline:endline])\n else:\n rawtext = include_file.read()\n except UnicodeError as error:\n raise self.severe(u'Problem with \"%s\" directive:\\n%s' %\n (self.name, ErrorString(error)))\n # start-after/end-before: no restrictions on newlines in match-text,\n # and no restrictions on matching inside lines vs. line boundaries\n after_text = self.options.get('start-after', None)\n if after_text:\n # skip content in rawtext before *and incl.* a matching text\n after_index = rawtext.find(after_text)\n if after_index < 0:\n raise self.severe('Problem with \"start-after\" option of \"%s\" '\n 'directive:\\nText not found.' % self.name)\n rawtext = rawtext[after_index + len(after_text):]\n before_text = self.options.get('end-before', None)\n if before_text:\n # skip content in rawtext after *and incl.* a matching text\n before_index = rawtext.find(before_text)\n if before_index < 0:\n raise self.severe('Problem with \"end-before\" option of \"%s\" '\n 'directive:\\nText not found.' 
% self.name)\n rawtext = rawtext[:before_index]\n\n # Handle alternate comment styles\n style = self.options.get('style', 'C-style')\n if style not in COMMENT_STYLES:\n raise self.severe('Cannot find comment style \"%s\", not in %s'\n % (style, COMMENT_STYLES.keys()))\n self.comment_options = COMMENT_STYLES[style]\n\n rawtext = self.filterText(rawtext)\n #if (path == \"../examples/neuropil_hydra.c\"):\n #raise self.severe('filterd text from %s:\\n%s' % (path, rawtext))\n\n include_lines = statemachine.string2lines(rawtext, tab_width,\n convert_whitespace=True)\n if 'literal' in self.options:\n # Convert tabs to spaces, if `tab_width` is positive.\n if tab_width >= 0:\n text = rawtext.expandtabs(tab_width)\n else:\n text = rawtext\n literal_block = nodes.literal_block(rawtext, source=path,\n classes=self.options.get('class', []))\n literal_block.line = 1\n self.add_name(literal_block)\n if 'number-lines' in self.options:\n try:\n startline = int(self.options['number-lines'] or 1)\n except ValueError:\n raise self.error(':number-lines: with non-integer '\n 'start value')\n endline = startline + len(include_lines)\n if text.endswith('\\n'):\n text = text[:-1]\n tokens = NumberLines([([], text)], startline, endline)\n for classes, value in tokens:\n if classes:\n literal_block += nodes.inline(value, value,\n classes=classes)\n else:\n literal_block += nodes.Text(value, value)\n else:\n literal_block += nodes.Text(text, text)\n return [literal_block]\n if 'code' in self.options:\n self.options['source'] = path\n codeblock = CodeBlock(self.name,\n [self.options.pop('code')], # arguments\n self.options,\n include_lines, # content\n self.lineno,\n self.content_offset,\n self.block_text,\n self.state,\n self.state_machine)\n return codeblock.run()\n\n self.state_machine.insert_input(include_lines, path)\n return []",
"def test_toml_example(checker):\n want = labeled.contents(label=\"generate-toml\")\n got = Path(\"tests/generate.toml\").read_text(encoding=\"utf-8\")\n checker(want, got)",
"def _add_httpd_transform(self, incl: str) -> None:\n last_include: str = self.aug.match(\"/augeas/load/Httpd/incl [last()]\")\n if last_include:\n # Insert a new node immediately after the last incl\n self.aug.insert(last_include[0], \"incl\", False)\n self.aug.set(\"/augeas/load/Httpd/incl[last()]\", incl)\n # On first use... must load lens and add file to incl\n else:\n # Augeas uses base 1 indexing... insert at beginning...\n self.aug.set(\"/augeas/load/Httpd/lens\", \"Httpd.lns\")\n self.aug.set(\"/augeas/load/Httpd/incl\", incl)\n # Add included path to paths dictionary\n try:\n self.parser_paths[os.path.dirname(incl)].append(\n os.path.basename(incl))\n except KeyError:\n self.parser_paths[os.path.dirname(incl)] = [\n os.path.basename(incl)]"
]
| [
"0.5985448",
"0.58067685",
"0.5192504",
"0.5112522",
"0.5033513",
"0.5017051",
"0.49736485",
"0.49417657",
"0.49356148",
"0.4931559",
"0.48842973",
"0.4881478",
"0.4876239",
"0.48670778",
"0.4865844",
"0.48562425",
"0.48481768",
"0.48401296",
"0.48378825",
"0.48222765",
"0.48217267",
"0.47751513",
"0.47142455",
"0.4691327",
"0.4684609",
"0.46663627",
"0.4658891",
"0.4650792",
"0.46265635",
"0.46141782"
]
| 0.5871916 | 1 |
Returns a tuple containing a set of all user permission names, and a set of all direct and inherited role names. | def allPermissionsRoles(self):
permSet = set()
for p in self.permissions:
permSet.add(p.name)
roleNameSet = set()
for role in self.roles:
rolePerms = set()
roleNameSet.add(role.name)
for perm in role.permissions:
rolePerms.add(perm.name)
if role.parents:
tempRoleNameSet = set()
for role in role.parents:
if role.name in roleNameSet:
continue
tempRolePerms, tempRoleNameSet = role.allPermissionsRoles(previousRoleNames=roleNameSet)
rolePerms = rolePerms | tempRolePerms
roleNameSet = roleNameSet | tempRoleNameSet
permSet = permSet | rolePerms
return permSet, roleNameSet | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def permissions(self):\n perms = set()\n for g in self.groups:\n perms = perms | set(g.permissions)\n return perms",
"def get_granted_roles(self):",
"def get_all_permissions(self) -> set[tuple[str, str]]:\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )",
"def allPermissionsRoles(self, previousRoleNames=None):\n rolePermsSet = set()\n if not previousRoleNames:\n previousRoleNames = set()\n\n # if role has already been counted, end recursion\n if self.id in previousRoleNames:\n return set(), set()\n\n previousRoleNames.add(self.name)\n\n for perm in self.permissions:\n rolePermsSet.add(perm.name)\n\n for subRole in self.parents:\n if subRole.name in previousRoleNames:\n continue\n tempRolePermSet, tempRoleNames = subRole.allPermissionsRoles(previousRoleNames=previousRoleNames)\n rolePermsSet = rolePermsSet | tempRolePermSet\n previousRoleNames = previousRoleNames | tempRoleNames\n\n return rolePermsSet, previousRoleNames",
"def NamedPermissionSets(self) -> _n_1_t_2:",
"def __dir__(self):\n return [\n 'permId', 'userId', 'firstName', 'lastName', 'email',\n 'registrator', 'registrationDate','space',\n 'get_roles()', 'assign_role(role, space)', 'revoke_role(role)',\n ]",
"def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]",
"def PermissionSet(self) -> _n_6_t_0:",
"def permissions(self):\n return self.get_permissions()",
"def getMemberships(self):\n\n extra_rights = {\n 'user': ['user'],\n 'public': ['anyone'],\n 'list': [],\n }\n\n return dicts.merge(extra_rights, self.rights)",
"def get_group_permissions (self):\n return [] # likewise with the other permission defs",
"def get_group_permissions (self):\n return [] # likewise with the other permission defs",
"def collect_all_perms(cls):\n permissions = filter(lambda perm: perm.startswith('biom_perm') or perm.startswith('entity_perm'), dir(cls))\n\n result = [{\n 'perm_name': perm,\n 'description': getattr(cls, perm).__doc__,\n 'perm_type': getattr(cls, perm).action_type if hasattr(getattr(cls, perm), 'action_type') else None,\n 'default_value': getattr(cls, perm).default_value if hasattr(getattr(cls, perm), 'default_value') else None,\n\n } for perm in permissions]\n return result",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]",
"def getRoles(self):",
"def _listAllowedRolesAndUsers(self, user):\n result = list(user.getRoles())\n if hasattr(aq_base(user), 'getGroups'):\n result = result + ['user:%s' % x for x in user.getGroups()]\n result.append('Anonymous')\n return result",
"def aws_permissions(self):\n perms = []\n for g in self.allowed_groups:\n perms.append({\"Group\": g})\n\n for i in self.allowed_users:\n perms.append({\"UserId\": i})\n\n return perms",
"def get_permissions(self):\n if self.action in ['create', 'retrieve', 'react', 'reactions']:\n permissions = [IsAuthenticated, IsFriendPostOwner]\n elif self.action in ['update', 'partial_update']:\n permissions = [IsAuthenticated, IsCommentOwner]\n elif self.action in ['destroy']:\n permissions = [IsAuthenticated, IsCommentOrPostOwner]\n else:\n permissions = [IsAuthenticated]\n return[p() for p in permissions]",
"def get_permissions(self):\n if self.action in ['signup', 'login', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['retrieve', 'update', 'partial_update', 'destroy', 'u', 'p']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action in ['list', 'create']:\n permission_classes = [IsStaffOrReadOnly]\n else:\n permission_classes = [IsAuthorOrReadOnly, IsStaffOrReadOnly]\n return [permission() for permission in permission_classes]",
"def has_permissions(self):\n perms = set(r.name for r in Permission.query.join(Permission.groups, Group.users).filter(User.id == self.id).all())\n return perms",
"def get_PermissionNames(test_case, # type: AnyMagpieTestCaseType\n permissions, # type: Union[AnyPermissionType, Collection[AnyPermissionType]]\n combinations=False, # type: bool\n ): # type: (...) -> List[Str]\n version = TestSetup.get_Version(test_case)\n if not isinstance(permissions, (list, set, tuple)):\n permissions = [permissions]\n if combinations and TestVersion(version) >= TestVersion(\"3.0\"):\n permissions = [PermissionSet(*perm_combo) for perm_combo in itertools.product(permissions, Access, Scope)]\n else:\n permissions = [PermissionSet(perm) for perm in permissions]\n perm_names = set()\n for permission in permissions:\n perm_impl = permission.implicit_permission\n if perm_impl is not None:\n perm_names.add(perm_impl)\n if TestVersion(version) >= TestVersion(\"3.0\"):\n perm_names.add(permission.explicit_permission)\n return list(perm_names)",
"def get_permissions(cls, user):\n try:\n user_perm = UserPermission.objects.get(user_id=user.id)\n return user_perm.permission_list.split(',')\n except UserPermission.DoesNotExist:\n return []",
"def get_permissions(self):\n if self.action in ['retrieve', 'list']:\n self.permission_classes = [permissions.ViewUserPermission,]\n elif self.action in ['update', 'partial_update']:\n self.permission_classes = [permissions.UpdateUserPermission]\n elif self.action in ['destroy']:\n self.permission_classes = [permissions.UpdateUserPermission]\n\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def PermissionSetName(self) -> str:",
"def permissions(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"permissions\")",
"def roleNames(self):\n return self._roles",
"def _get_all_roles_with_permissions(self) -> dict[str, Role]:\n return {\n r.name: r\n for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.permissions))\n ).unique()\n }"
]
| [
"0.6651237",
"0.658164",
"0.64076704",
"0.6305213",
"0.6285141",
"0.623088",
"0.6184744",
"0.61462337",
"0.60909164",
"0.5990684",
"0.5975998",
"0.5975998",
"0.59746265",
"0.595276",
"0.5930774",
"0.59035194",
"0.59011877",
"0.58839595",
"0.583101",
"0.5826767",
"0.5816657",
"0.5803663",
"0.5768999",
"0.57449335",
"0.5740807",
"0.5736379",
"0.5723793",
"0.5707586",
"0.5697307",
"0.5693867"
]
| 0.70529693 | 0 |
Add a role from which permissions will be inherited. Checks to make sure not trying to inherit itself, and itself is not inherited down the line. Priority to arguments goes role, roleName, then roleId. | def inheritRole(self, role=None, roleName=None, kvDict=None):
return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,
model=get_model('role'), db=db, action='add', modelType='role', inherit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)",
"def removeInheritedRole(self, role=None, roleName=None, kvDict=None):\n\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role', inherit=True)",
"def _overrideRole(self, newRole, args):\n oldRole = args.get('role', None)\n args['role'] = newRole\n return oldRole",
"def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})",
"def create_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover",
"async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")",
"def addRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role')",
"async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass",
"def add_role(self, principal, role):\n return permissions.utils.add_local_role(self, principal, role)",
"async def addrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.add_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"def manage_addRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n if not role_id:\n message = 'Please+provide+a+Role+ID'\n else:\n self.addRole(role_id, title, description)\n message = 'Role+added'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?manage_tabs_message=%s' %\n (self.absolute_url(), message))",
"def add_role(self, role):\n try:\n self.db_proxy.nameCheck(role.theName, 'role')\n except ARM.ARMException as ex:\n self.close()\n raise ARMHTTPError(ex)\n\n role_params = RoleParameters(\n name=role.theName,\n rType=role.theType,\n sCode=role.theShortCode,\n desc=role.theDescription,\n cProperties=[]\n )\n\n role_id = self.db_proxy.addRole(role_params)\n\n return role_id",
"def addRolePermission(self, role, _type):\n self._client.addRolePermission(role, _type)",
"async def addrole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n for excluded_role in excluded_roles:\n if excluded_role == role.id:\n await ctx.send(\"%s already added to role exclusion list\" % role.name)\n return\n\n excluded_roles.append(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n\n await ctx.send(\"%s added to role exclusion list\" % role.name)",
"async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! *;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")",
"def test_add_role(self):\n pass",
"def add_role(self, role_id: str, current_user_id=None):\n if RoleModel.is_valid_role(role_id) and not self.has_role(role_id):\n user_role = UserRoleModel(user_id=self.id, role_id=role_id, lastchange_by=current_user_id)\n self.roles.append(user_role)",
"def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser",
"def find_role(self, *args, **kwargs):\n raise NotImplementedError",
"def changeRole(self, node, role):",
"async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )",
"def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}",
"async def addroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.add_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"def create_role(self, **kwargs):\n role = self.role_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(role)",
"def define_role(self, role):\n\n self._db_manager.create_role(role)",
"def role_add(role, nodes, node, node_vars, host_vars, extra):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(\n node, node_vars, host_vars, extra)\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n added_nodes = role_manager.add_role(\n role, hosts_node_map=nodes, host_vars=host_vars,\n node_vars=node_vars, extra_args=extra_args)\n\n print(f\"{len(added_nodes)} nodes were added to role {role}: {', '.join(sorted(added_nodes))}\")\n return 0",
"async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)",
"def add_admin(self, uid, name, role=None):\n uid = self._check_uid(uid)\n self._router_request(\n self._make_request_data(\n 'addAdminRole',\n data=dict(\n params=dict(\n uid=uid,\n name=name,\n role=role,\n )\n )\n )\n )\n\n return self.get_admin_by_name(uid, name)",
"def add_role_to_user(self, user, role):\n user, role = self._prepare_role_modify_args(user, role)\n if role not in user.roles:\n user.roles.append(role)\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return False",
"def test_add_role_simple(self):\n pass"
]
| [
"0.74223286",
"0.6825309",
"0.6688563",
"0.6673416",
"0.6651328",
"0.6636815",
"0.65790045",
"0.64841187",
"0.64582",
"0.64572453",
"0.64176154",
"0.64043707",
"0.637732",
"0.6349665",
"0.6322872",
"0.6244851",
"0.624423",
"0.6229597",
"0.6223386",
"0.62183106",
"0.6210688",
"0.61905867",
"0.616813",
"0.6141925",
"0.6139559",
"0.6130572",
"0.61160696",
"0.6111072",
"0.61070734",
"0.60911644"
]
| 0.73552847 | 1 |
Remove a role from which permissions were inherited. Checks to make sure not trying to inherit itself, and itself is not inherited down the line. Priority to arguments goes role, roleName, then roleId. | def removeInheritedRole(self, role=None, roleName=None, kvDict=None):
return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,
model=get_model('role'), db=db, action='add', modelType='role', inherit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_role(self, role):\n if role.name in [r.name for r in self.roles]:\n remaining_if_any_roles = [r.to_python() for r in self.roles if not r.name == role.name]\n if remaining_if_any_roles:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$set': {'roles': remaining_if_any_roles}})\n else:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$unset': {'roles': 1}})",
"def remove_role(self, principal, role):\n return permissions.utils.remove_local_role(self, principal, role)",
"def delete_role(role):\n fallback = Role.load_cli_user()\n\n def _del(cls, col):\n pq = db.session.query(cls)\n pq = pq.filter(col == role.id)\n\n def _repo(cls, col):\n pq = db.session.query(cls).filter(col == role.id)\n pq.update({col: fallback.id}, synchronize_session=False)\n\n _del(Permission, Permission.role_id)\n db.session.delete(role)\n db.session.commit()",
"def removeRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='remove', modelType='role')",
"async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass",
"def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)",
"async def rolemenu_remove_role(self, interaction: discord.Interaction,\n name: str, role: str):\n try:\n role_id = int(role)\n except ValueError:\n return await interaction.response.send_message(\n \"The role provided \"\n \"is not valid. Make sure that you either select one from the \"\n \"options that the autocomplete provides, or that you \"\n \"provide the role's ID\",\n ephemeral=True)\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"No role menu with that name exists.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n for role_doc in doc[\"roles\"]:\n if role_doc[\"id\"] == role_id:\n break\n else:\n return await interaction.followup.send(\n \"Role not found in that menu\")\n await self.db.update_one({\"_id\": doc[\"_id\"]},\n {\"$pull\": {\n \"roles\": role_doc\n }})\n doc = await self.db.find_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role removed from the menu.\")\n menu = Menu(self, interaction.guild, doc)\n await menu.update()",
"async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"async def permissions_remove(\n self,\n ctx,\n type_: str.lower,\n name: str,\n *,\n user_or_role: Union[Role, utils.User, str] = None,\n ):\n if type_ not in {\"command\", \"level\", \"override\"} or (\n type_ != \"override\" and user_or_role is None\n ):\n return await ctx.send_help(ctx.command)\n\n if type_ == \"override\":\n extension = ctx.kwargs[\"user_or_role\"]\n if extension is not None:\n name += f\" {extension}\"\n name = name.lower()\n name = getattr(self.bot.get_command(name), \"qualified_name\", name)\n level = self.bot.config[\"override_command_level\"].get(name)\n if level is None:\n perm = self.bot.command_perm(name)\n embed = Embed(\n title=\"Error\",\n color=Color.red(),\n description=f\"The command permission level was never overridden: `{name}`, \"\n f\"current permission level is {perm.name}.\",\n )\n else:\n logger.info(\"Restored command permission level for `%s`.\", name)\n self.bot.config[\"override_command_level\"].pop(name)\n await self.bot.config.update()\n perm = self.bot.command_perm(name)\n embed = Embed(\n title=\"Success\",\n color=self.bot.main_color,\n description=f\"Command permission level for `{name}` was successfully restored to {perm.name}.\",\n )\n return await ctx.send(embed=embed)\n\n level = None\n if type_ == \"command\":\n name = name.lower()\n name = getattr(self.bot.get_command(name), \"qualified_name\", name)\n else:\n level = self._parse_level(name)\n if level is PermissionLevel.INVALID:\n embed = Embed(\n title=\"Error\",\n color=Color.red(),\n description=f\"The referenced level does not exist: `{name}`.\",\n )\n return await ctx.send(embed=embed)\n name = level.name\n\n value = self._verify_user_or_role(user_or_role)\n await self.bot.update_perms(level or name, value, add=False)\n\n if type_ == \"level\":\n if level > PermissionLevel.REGULAR:\n if value == -1:\n logger.info(\"Denying @everyone access to Modmail category.\")\n await self.bot.main_category.set_permissions(\n self.bot.modmail_guild.default_role, read_messages=False\n )\n elif isinstance(user_or_role, Role):\n logger.info(\n \"Denying %s access to Modmail category.\", user_or_role.name\n )\n await self.bot.main_category.set_permissions(\n user_or_role, overwrite=None\n )\n else:\n member = self.bot.modmail_guild.get_member(value)\n if member is not None and member != self.bot.modmail_guild.me:\n logger.info(\n \"Denying %s access to Modmail category.\", member.name\n )\n await self.bot.main_category.set_permissions(\n member, overwrite=None\n )\n\n embed = Embed(\n title=\"Success\",\n color=self.bot.main_color,\n description=f\"Permission for `{name}` was successfully updated.\",\n )\n return await ctx.send(embed=embed)",
"def revoke_role(self, role, principal_ids):",
"def remove_permission(self, role, permission):\n return permissions.utils.remove_permission(self, role, permission)",
"def delete_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover",
"def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))",
"def remove_role(self, name):\n role = Role.by_name(name)\n if not role:\n return\n if role in self.roles:\n self.roles.remove(role)",
"def removeRole(self, role_id, REQUEST=None):\n for principal_id in self._principal_roles.keys():\n self.removeRoleFromPrincipal(role_id, principal_id)\n\n del self._roles[role_id]",
"def remove_permission_from_bucket(bucket_name, role_type, member_type):\n\n # initialize client & get bucket\n _, bucket, _ = create_client(bucket_name)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n \n # get member type\n member_value = get_member_bucket_level(member_type)\n\n # get role type\n role_value = get_role_bucket_level(role_type)\n\n for binding in policy.bindings:\n # print(binding)\n if binding[\"role\"] == role_value and binding.get(\"condition\") is None:\n # revoke role from member\n binding[\"members\"].discard(member_value)\n\n bucket.set_iam_policy(policy)\n\n print(\"removed {} with role {} from {}\".format(member_value, role_value, bucket_name))",
"def delete_implied_role(self, prior_role_id, implied_role_id):\n raise exception.NotImplemented() # pragma: no cover",
"async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"async def removeRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.remove_roles(*roles)\n await ctx.send(f\"Removing {roles_str(person, roles)}\")",
"async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)",
"def deleteRolePermission(self, role, _type):\n self._client.deleteRolePermission(role, _type)",
"async def removeroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removeroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"async def command_unassign_role(self, context, role: str):\n try:\n await context.author.remove_roles(discord.utils.get(context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be unassigned')\n print(f'Errored in command_unassign_role.', e)",
"async def removerole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n if role.id in excluded_roles:\n excluded_roles.remove(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n await ctx.send(\"Removed %s from role exclusion list.\" % role.name)\n else:\n await ctx.send(\"%s is not an excluded role.\" % role.name)",
"def remove_permission_from_role(self, role: Role, permission: Permission) -> None:\n if permission in role.permissions:\n try:\n role.permissions.remove(permission)\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_DEL_PERMROLE.format(permission, role.name))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_DEL_PERMROLE.format(e))\n self.get_session.rollback()",
"def deleteRole(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_remove_role_from_project_member(self):\n pass",
"def role_remove(role, nodes, node):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(node, [], [], [])\n\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n if type(nodes) is list:\n d = defaultdict(list)\n for n in nodes:\n hosts = role_manager.get_role_node_hosts(role, n)\n if not hosts:\n raise NodeRoleError(n, role)\n for hname in hosts:\n d[hname].append(n)\n nodes = defaultdict_to_dict(d)\n else:\n nodes = nodes\n\n if not nodes:\n raise ValueError(f\"No nodes to remove from role {role}\")\n\n result = role_manager.remove_role(role, nodes)\n print(f\"{len(result)} nodes were removed from {role}: {', '.join(sorted(result))}\")\n return 0",
"def deleteRoleAccess(self, role, read, write, catalog='*', repository='*'):\n self._client.deleteRoleAccess(role, read, write, catalog, repository)",
"def unset(self, role, *permissions):\n for perm in permissions:\n for rec in self:\n if role is not None and rec[1] != role:\n continue\n\n if rec[2] is ALL_PERMISSIONS or perm is ALL_PERMISSIONS:\n rec[2] = set()\n else:\n if perm in rec[2]:\n rec[2].remove(perm)\n\n records = []\n for rec in self:\n if rec[2]:\n records.append(rec)\n self[:] = records"
]
| [
"0.72528136",
"0.6892453",
"0.68138254",
"0.6794808",
"0.6769324",
"0.6735325",
"0.6680242",
"0.66443104",
"0.6609181",
"0.65699977",
"0.6554231",
"0.6553536",
"0.65322053",
"0.65124947",
"0.64961034",
"0.6474033",
"0.64651",
"0.6438158",
"0.6403436",
"0.63929754",
"0.6341525",
"0.6330935",
"0.6330354",
"0.63172764",
"0.62894267",
"0.62450665",
"0.62417024",
"0.61760074",
"0.6158905",
"0.6155638"
]
| 0.77075964 | 0 |
Returns a tuple containing a set of all role permission names, and a set including the role name and all inherited role names. | def allPermissionsRoles(self, previousRoleNames=None):
rolePermsSet = set()
if not previousRoleNames:
previousRoleNames = set()
# if role has already been counted, end recursion
if self.id in previousRoleNames:
return set(), set()
previousRoleNames.add(self.name)
for perm in self.permissions:
rolePermsSet.add(perm.name)
for subRole in self.parents:
if subRole.name in previousRoleNames:
continue
tempRolePermSet, tempRoleNames = subRole.allPermissionsRoles(previousRoleNames=previousRoleNames)
rolePermsSet = rolePermsSet | tempRolePermSet
previousRoleNames = previousRoleNames | tempRoleNames
return rolePermsSet, previousRoleNames | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allPermissionsRoles(self):\n permSet = set()\n for p in self.permissions:\n permSet.add(p.name)\n roleNameSet = set()\n for role in self.roles:\n rolePerms = set()\n roleNameSet.add(role.name)\n for perm in role.permissions:\n rolePerms.add(perm.name)\n if role.parents:\n tempRoleNameSet = set()\n for role in role.parents:\n if role.name in roleNameSet:\n continue\n tempRolePerms, tempRoleNameSet = role.allPermissionsRoles(previousRoleNames=roleNameSet)\n rolePerms = rolePerms | tempRolePerms\n roleNameSet = roleNameSet | tempRoleNameSet\n permSet = permSet | rolePerms\n\n return permSet, roleNameSet",
"def get_granted_roles(self):",
"def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles",
"def getRoles(self):",
"def roleNames(self):\n return self._roles",
"def roles(self) -> List[str]:\n\n role_list = []\n for spec in self.specs.values():\n role = spec.role()\n if role not in role_list:\n role_list.append(role)\n return role_list",
"def permissions(self):\n perms = set()\n for g in self.groups:\n perms = perms | set(g.permissions)\n return perms",
"def get_roles(role):",
"def roles(self):\n # type: (...) -> Set[Role]\n return self._roles",
"def roles(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"roles\")",
"def roles(self):\n return self._roles",
"def roles(self):\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role",
"def get_all_permissions(self) -> set[tuple[str, str]]:\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )",
"def getRoles(self):\n return [self.getRole(), {\"roleName\":\"policajti\", \"roleTitle\":\"Svestky\"}]",
"def roles(self):\r\n return self._roles_str.split(\",\")",
"def _get_all_roles_with_permissions(self) -> dict[str, Role]:\n return {\n r.name: r\n for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.permissions))\n ).unique()\n }",
"def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)",
"def test_list_role_for_all_namespaces(self):\n pass",
"def roles(self):\n role_ids = self.role_ids\n if role_ids is None:\n roles = None\n else:\n roles = sorted(create_partial_role_from_id(role_id) for role_id in self.role_ids)\n \n return roles",
"def roles(self):\n db = self['__store'].db\n my_roles = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n groups.id\n from `groups`, subgroups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'U'\n \"\"\",\n self._id)\n }\n return my_roles",
"def NamedPermissionSets(self) -> _n_1_t_2:",
"def get_roles(self):\n return [role.role_id for role in self.roles if role]",
"def roles(self):\n roles = self.request.POST.get(\"roles\", \"\")\n # Remove all spaces from the string and extra trailing or leading commas\n roles = re.sub(r\"[\\s+]\", \"\", roles).strip(\",\")\n # Return a set of the roles mentioned in the request\n return set(roles.lower().split(\",\")) if roles else set()",
"def get_roles(self, **search_args):\n return self.openbis.get_role_assignments(person=self, **search_args)",
"def list(self):\n return self.client.find_all_roles()",
"def rolenames(self):\n try:\n return self.roles.split(',')\n except Exception:\n return []",
"def get_roles_setting() -> dict[str, set[type['Intent']]]:\n return {\n # the admin role has access to everything\n 'admin': {\n Public,\n Private,\n Personal,\n Secret\n },\n # the editor can do most things\n 'editor': {\n Public,\n Private,\n Personal,\n },\n # registered users can do a few things\n 'member': {\n Public,\n Personal,\n },\n # the public has some access\n 'anonymous': {\n Public,\n }\n }",
"def role_strings(self):\n return [s[RoleInfo.STRING] for s in [v for item in self.role_strings_info.values() for v in item] if s[RoleInfo.STRING]]",
"def manageableRoles(self):\n return roleinfo.AUTHOR_ROLES",
"def listRoleInfo(self):\n return self._roles.values()"
]
| [
"0.79383904",
"0.67280906",
"0.6560671",
"0.64984804",
"0.6487448",
"0.64560986",
"0.6447328",
"0.6397908",
"0.63527834",
"0.6341074",
"0.63302255",
"0.62972397",
"0.623607",
"0.6231561",
"0.62071687",
"0.6184152",
"0.61313784",
"0.6122978",
"0.61156124",
"0.6103487",
"0.60954285",
"0.60781986",
"0.6076905",
"0.6072807",
"0.6052047",
"0.6033641",
"0.600877",
"0.59856904",
"0.5972644",
"0.59725714"
]
| 0.73431504 | 1 |
Get the preassinged url to upload dna files | def get_dna_sequence_upload_url_view(request):
data = request.data
user = User.objects.get(username=request.user)
directory = Directory.objects.get(user=user)
directory_name = getattr(directory, 'name')
data["directory"] = getattr(directory, 'id')
dna_file_serializer = DNAFileSerializer(data=data)
# validate request
if dna_file_serializer.is_valid():
directory_full_path = directory_name + "/dna_files"
# get url using boto3
url = create_preassinged_url(
directory_name=directory_full_path, object_name=data['object_key'])
if not url is None:
dna_file_serializer.save()
return Response({"url": url}, status=HTTP_200_OK)
else:
return Response(dna_file_serializer.errors, status=HTTP_400_BAD_REQUEST) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def url(self):\n return self.get_upload_set().url(self.filename)",
"def file_url(self, fname):\n gs_url = f\"{self.gs_base_url}/{fname}\"\n return f\"{gs_url}\"",
"def action(self):\n return blobstore.create_upload_url(self.upload_url)",
"def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)",
"def _get_file_url_from_dropbox(dropbox_url, filename):\n return dropbox_url + '?dl=1'",
"def anon_upload(infile: str):\n if exists(infile):\n URL = upload(infile)\n return URL\n return 5",
"def get_redirect_url(self, *args, **kwargs):\n return self.document.file.url",
"def upload_and_get_url(file_to_upload, extension):\n\timport string, random\n\tfilename = binascii.b2a_hex(os.urandom(30))+extension\n\tput = client.put_file(filename, file_to_upload) \n\tshare = client.share(filename, short_url=False)\n\treturn {'url':share['url'].replace('https://www.dropbox.com/', 'https://dl.dropboxusercontent.com/'), 'filename':filename}",
"def get_url():\r\n content = get_creds(CREDS_FILE)\r\n url = content[0]\r\n # get rid of trailing slash\r\n if url[len(url) - 1] == \"/\":\r\n return url[:len(url) - 1]\r\n return url",
"def get_image_url():",
"def remote_url(self) -> str:\n return f\"https://api.figma.com/v1/files/{self.file_id}\"",
"def url(self):\n if not os.path.exists(self.path):\n self.save()\n return self.uset.url(os.path.join(self.folder, self.get_filename()))",
"def url(self):\n if not self.fid:\n raise exceptions.NotCreatedError(object=self)\n\n return self._file_url(self.fid)",
"def get_url(f):\n return f.replace(Enums.Paths.MEDIA_BASE, Enums.Paths.WEB_BASE)",
"def upload_file_and_return_url(self, file_name, name_on_storage, **additional_params):\n assets_bucket = self.storage_client.bucket(\"car_assets\")\n blob = assets_bucket.blob(name_on_storage)\n blob.upload_from_filename(file_name, **additional_params)\n return blob.public_url",
"def _getImagePath(self, link):\n return settings.WIKI_UPLOAD_URL + urlquote(link)",
"async def _landing_url(self, responses: SourceResponses) -> URL:\n if not responses:\n return await super()._landing_url(responses)\n web_url = (await responses[0].json())[\"web_url\"]\n branch = self._parameter(\"branch\", quote=True)\n file_path = self._parameter(\"file_path\", quote=True)\n return URL(f\"{web_url}/blob/{branch}/{file_path}\")",
"def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url",
"def get_public_url(self,project,filename):\n pass",
"def _transform_gdrive_url(self):\n fileid = self.parsed.path.replace('/file/d/', '').split('/')[0]\n self.url = self.GDRIVE_LINK_TEMPLATE.format(fileid=fileid)",
"def get_url(self):\n return staticfiles_storage.url(self._name)",
"def direct_url(self):\n #return '%s/getDownloadableFile' % self.absolute_url()\n return self.context.absolute_url()",
"def get_file_url(self):\n return ('/user-media/addons/3615/delicious_bookmarks-2.1.072-fx.xpi?'\n 'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'\n 'e09fef55f2673458bc31f')",
"def _get_file_url(path):\n return urlparse.urljoin(BASE_URL, path)",
"def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename",
"def image_url(self):\n return \"{}/mjpeg_read.php\".format(self.base_url)",
"def get_datafile_url(self):\n try:\n return self.datafile.url\n except ValueError:\n if core.utils.is_absolute_url(self.source):\n if self.source.startswith('s3://'):\n return None # file is in the UPLOAD_BUCKET\n return self.source\n logger.error(\"File not found at '%s'\", self.datafile.name)\n return None",
"def image_upload_url(reteta_id):\n return reverse('reteta:reteta-upload-image', args=[reteta_id])",
"def _file_url(self, fid):\n base = self.tq.threatq_host + '/files/'\n return base + str(fid) + '/details'",
"def get_url_from_path(post_path):\n url_start = post_path[:-len('data')]\n if url_start.endswith('/'):\n url_start = url_start[:-1]\n return url_start"
]
| [
"0.68955094",
"0.63371044",
"0.6274509",
"0.61494476",
"0.61346567",
"0.609408",
"0.60846543",
"0.6081793",
"0.60447085",
"0.5990297",
"0.5953082",
"0.5948043",
"0.59462446",
"0.59444606",
"0.5941417",
"0.59151006",
"0.58459884",
"0.5796251",
"0.57842004",
"0.57773983",
"0.5769783",
"0.5736538",
"0.57308555",
"0.5727582",
"0.5705286",
"0.5700546",
"0.56963086",
"0.5694603",
"0.5681008",
"0.5668186"
]
| 0.6438479 | 1 |
Request to get the default DNA Files. | def get_dna_bank_files(request):
user = User.objects.get(username=DEFAULT_USERNAME)
directory = Directory.objects.get(user=user)
dna_files = []
dna_files_query_set = DNAFile.objects.filter(
directory=directory, is_available=True)
for dna_file in dna_files_query_set:
dna_files.append(dna_file.get_file_details())
response = {"dna_files": dna_files}
return Response(response, status=HTTP_200_OK) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_users_dna_file_details(request):\n\n user = User.objects.get(username=request.user)\n directory = Directory.objects.get(user=user)\n\n dna_files = []\n\n dna_files_query_set = DNAFile.objects.filter(\n directory=directory, is_available=True)\n\n for dna_file in dna_files_query_set:\n dna_files.append(dna_file.get_file_details())\n\n response = {\"dna_files\": dna_files}\n\n return Response(response, status=HTTP_200_OK)",
"def get_nasadem_file_list():\n\n global nasadem_file_list\n if nasadem_file_list is None:\n nasadem_file = download_url(nasadem_file_index_url)\n with open(nasadem_file) as f:\n nasadem_file_list = f.readlines()\n nasadem_file_list = [f.strip() for f in nasadem_file_list]\n nasadem_file_list = [f for f in nasadem_file_list if \\\n f.endswith(nasadem_content_extension)]\n return nasadem_file_list",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')",
"def default_motifs():\n config = MotifConfig()\n d = config.get_motif_dir()\n m = config.get_default_params()['motif_db']\n\n if not d or not m:\n raise ValueError(\"default motif database not configured\")\n\n fname = os.path.join(d, m)\n with open(fname) as f:\n motifs = read_motifs(f)\n \n return motifs",
"def Files(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('_files', default)\n return [HEP.RecordFile(i) for i in tmp]",
"def find_default(self, fs_path):\n if os.path.isdir(fs_path):\n default = None\n for name in self.defaults:\n _path = os.path.join(fs_path, name)\n if os.path.isfile(_path):\n default = _path\n break\n if default is None:\n raise Response(403)\n fs_path = default\n return fs_path",
"def findMayaFiles(directory):\n\n pass",
"def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths",
"def get_nasadem_file_list(current_dem_list):\n if current_dem_list is None:\n nasadem_file = download_url(nasadem_file_index_url)\n with open(nasadem_file) as f:\n nasadem_file_list = f.readlines()\n nasadem_file_list = [f.strip() for f in nasadem_file_list]\n nasadem_file_list = [f for f in nasadem_file_list if \\\n f.endswith(nasadem_content_extension)]\n return nasadem_file_list",
"def getImageFileNamesToOpen(self, defaultDirectory):\n extensions = OpDataSelection.SupportedExtensions\n filt = \"Image files (\" + ' '.join('*.' + x for x in extensions) + ')'\n options = QFileDialog.Options()\n if ilastik_config.getboolean(\"ilastik\", \"debug\"):\n options |= QFileDialog.DontUseNativeDialog\n fileNames = QFileDialog.getOpenFileNames( self, \"Select Images\", \n defaultDirectory, filt, options=options )\n # Convert from QtString to python str\n fileNames = [str(s) for s in fileNames]\n return fileNames",
"def get_defaults():\r\n profile = settings.profile_manager.get(\"default\")\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig, storage_args=['Global'], read_only=True)\r\n return {\r\n \"video_directory\": config.videodir,\r\n \"oauth2_token\": os.path.join(settings.configdir, \"oauth2_token.json\"),\r\n \"client_secrets\": os.path.join(settings.configdir, \"client_secrets.json\")\r\n }",
"def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None",
"def list_default_paths():\n filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')\n if os.path.exists(filename):\n D = ConfigObj(filename)\n print('Default paths currently set to:\\n')\n for key in D:\n print('\\t%s = %s' % (key, D[key]))\n else:\n print('No default paths currently set\\n')",
"def list_default_paths():\n filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')\n if os.path.exists(filename):\n D = ConfigObj(filename)\n print('Default paths currently set:')\n for key in D:\n print('\\t%s = %s' % (key, D[key]))\n else:\n print('No default paths currently set\\n')",
"def read_all_files():\n paths = get_all_recording_paths()\n\n return read_by_paths(paths)",
"def grab_predict_label3d_file(defaultdir=\"\"):\n def_ep = os.path.join(\".\", defaultdir)\n label3d_files = os.listdir(def_ep)\n label3d_files = [\n os.path.join(def_ep, f) for f in label3d_files if \"dannce.mat\" in f\n ]\n label3d_files.sort()\n\n if len(label3d_files) == 0:\n raise Exception(\"Did not find any *dannce.mat file in {}\".format(def_ep))\n print(\"Using the following *dannce.mat files: {}\".format(label3d_files[0]))\n return label3d_files[0]",
"def download_key_files(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path = os.path.join(workpath, './static/BioC/linking.key')\n path1 = os.path.join(workpath, './static/BioC/mention.key')\n ment = request.GET.get('type_key',None)\n if ment == 'mentions':\n path = open(path1, 'r')\n return HttpResponse(path, content_type='text/plain')\n elif ment == 'linking':\n path1 = open(path, 'r')\n return HttpResponse(path1, content_type='text/plain')",
"def default():\n raise NotImplementedError(\"Pvwattsv7 default file no longer exists!\")",
"def getFileListDAS(dataset,blacklist=[ ]):\n dataset = dataset.replace('__','/')\n if dataset[0]!='/':\n dataset = '/'+dataset\n instance = 'prod/global'\n if 'USER' in dataset:\n instance = 'prod/phys03'\n #cmd='das_client --limit=0 --query=\"file dataset=%s instance=%s\"'%(dataset,instance)\n cmd = 'das_client --limit=0 --query=\"file dataset=%s instance=%s status=*\"'%(dataset,instance)\n if args.verbose:\n print \"Executing \",cmd\n cmd_out = getoutput( cmd )\n tmpList = cmd_out.split(os.linesep)\n filelist = [ ]\n for line in tmpList:\n if '.root' in line and line not in blacklist:\n #files.append(\"root://cms-xrd-global.cern.ch/\"+line) # global\n filelist.append(\"root://xrootd-cms.infn.it/\"+line) # Eurasia\n filelist.sort()\n return filelist",
"def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]",
"def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }",
"def default_downloader():\n default_spec = ('geoJSON', 'SPC', '{product}', '{year:4d}',\n 'day4fw_{year:4d}{month:02d}{day:02d}_1200_{hazard:s}.geojson')\n co_path_template = os.path.join('{config[data_dir]}', *default_spec)\n pre_path_template = os.path.join('{config[pre_existing_data_dir]}',\n *default_spec)\n\n return Day4FireDownloader(target_path_template=co_path_template,\n pre_downloaded_path_template=pre_path_template)",
"def getDefaultDataSearchPath():\n return FileSearchPath(os.path.dirname(__file__))",
"def filesFromBK(request):\n if not hasGridProxy():\n logger.error('filesFromBK: No Grig proxy!')\n return []\n\n if isinstance ( request , tuple ) : request = BKRequest ( *request )\n elif isinstance ( request , dict ) : request = BKRequest ( **request )\n\n path = request.path\n nmax = request.nmax\n first = request.first\n last = request.last\n grid = request.grid\n accessURL = request.accessURL\n SEs = request.SEs\n\n arguments = 'dirac-command %s' % which ( 'get_files_from_BK' ) \n\n arguments += \" %s \" % path\n\n if nmax < 0: nmax = 1000000\n if last < 0: last = 1000000\n if nmax < 1000000 : arguments += ' --Max %d' % nmax\n if 0 < first : arguments += ' --First %d' % first\n if last < 1000000 : arguments += ' --Last %d' % last\n if accessURL : arguments += ' -a True '\n #\n if grid and isinstance ( grid , str ) :\n arguments += ' --Sites %s ' % grid\n elif grid and 1 == len(grid):\n arguments += ' --Sites %s ' % grid[0]\n elif grid:\n sg = ','.join(grid)\n arguments += ' --Sites %s ' % sg\n\n if SEs and isinstance ( SEs , str):\n arguments += ' --SEs %s ' % grid\n elif SEs and 1 == len ( SEs ) :\n arguments += ' --SEs %s ' % SEs [ 0 ]\n elif SEs:\n sg = ','.join(SEs)\n arguments += ' --SEs %s ' % sg\n\n ## arguments += ' \"%s\" ' % path\n ## convert to DIRAC\n \n import os\n from subprocess import Popen, PIPE\n arguments = arguments.split()\n\n logger.verbose('filesFromBK:use Popen(%s)' % arguments)\n p = Popen(arguments, stdout=PIPE, stderr=PIPE)\n (cout, cerr) = p.communicate()\n\n \n if 0 != p.returncode :\n logger.error ( 'filesFromBK: error from Popen: %d/%s' % (p.returncode, cerr ) )\n return []\n\n if py3 :\n cout = cout.decode ( 'utf-8' ) if cout else cout \n cerr = cerr.decode ( 'utf-8' ) if cerr else cerr \n\n if cerr :\n logger.error ( 'filesFromBK: error from Popen: %d/%s' % (p.returncode, cerr ) )\n return []\n\n cout = cout.split('\\n')\n cout = cout [2:]\n cout = ' '.join ( cout )\n\n try:\n \n lst = eval ( cout )\n if not isinstance(lst, list):\n raise TypeError(\"Invalid list type\")\n logger.debug( 'filesFromBK: %s ' % lst )\n return lst\n\n except:\n logger.error (\"filesFromBK: can't interpret: %s\" % cout)\n\n return []",
"def getDefaultData(dmd):",
"def default_values():\n return pad_keys({}, default_basenames())",
"def download_nasdaq_public_data():\n url_nasdaq = \"http://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NASDAQ&render=download\"\n url_nyse = \"http://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NYSE&render=download\"\n\n if not os.path.exists(download_path):\n os.mkdir(download_path)\n\n downloaded_files = {}\n\n print download_path\n print 'Downloading Nasdaq'\n file_nasdaq = os.path.join(download_path, 'nasdaq_%s.csv' % common.file_safe_date(datetime.now()))\n r = requests.get(url_nasdaq)\n with open(os.path.join(download_path, file_nasdaq), 'wb') as code:\n code.write(r.content)\n downloaded_files['nasdaq'] = file_nasdaq\n\n print 'Downloading NYSE'\n file_nyse = os.path.join(download_path, 'nasdaq_%s.csv' % common.file_safe_date(datetime.now()))\n r = requests.get(url_nyse)\n with open(os.path.join(download_path, file_nyse), 'wb') as code:\n code.write(r.content)\n downloaded_files['nyse'] = file_nyse\n return downloaded_files",
"def OpenAnyFiles():\n \n wildcard = create_wildcard(\"All files\", ['*', '*.*'])\n\n files = []\n dlg = wx.FileDialog(None, \n \"Select file(s)\", \n paths.samples, \n \"\", \n wildcard, \n wx.FD_OPEN | wx.MULTIPLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n files = dlg.GetPaths()\n\n dlg.Destroy()\n return files",
"def getDefaults():\n return {\n 'minsize': 10, # minimum size in MB\n 'pattern': [], # file name patterns\n }",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)"
]
| [
"0.6044433",
"0.556103",
"0.5553544",
"0.5448227",
"0.5398903",
"0.5360307",
"0.5325434",
"0.522247",
"0.52170986",
"0.51599985",
"0.5155407",
"0.5117732",
"0.5115005",
"0.51137894",
"0.51118296",
"0.5083684",
"0.507383",
"0.50611573",
"0.505775",
"0.5035349",
"0.50209016",
"0.5020416",
"0.50165486",
"0.5014365",
"0.5006568",
"0.5003526",
"0.49930313",
"0.49814302",
"0.49663973",
"0.495718"
]
| 0.6477019 | 0 |
Request to get the DNA Files of the authenticated user. | def get_users_dna_file_details(request):
user = User.objects.get(username=request.user)
directory = Directory.objects.get(user=user)
dna_files = []
dna_files_query_set = DNAFile.objects.filter(
directory=directory, is_available=True)
for dna_file in dna_files_query_set:
dna_files.append(dna_file.get_file_details())
response = {"dna_files": dna_files}
return Response(response, status=HTTP_200_OK) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dna_bank_files(request):\n\n user = User.objects.get(username=DEFAULT_USERNAME)\n directory = Directory.objects.get(user=user)\n\n dna_files = []\n\n dna_files_query_set = DNAFile.objects.filter(\n directory=directory, is_available=True)\n\n for dna_file in dna_files_query_set:\n dna_files.append(dna_file.get_file_details())\n\n response = {\"dna_files\": dna_files}\n\n return Response(response, status=HTTP_200_OK)",
"def files(self):\r\n url = '{0}/files'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def get(self):\n data = []\n full_path = self.get_current_file()\n\n if self.is_allowed(full_path):\n data = self.get_file_info(full_path)\n else:\n abort(403, message=\"You are not allowed to this path\")\n\n result = {\"data\": data}\n return result",
"def search(self):\n try:\n log.debug('Search file for user ID: %s', self.current_user.id)\n\n sql = \"select fs.id, fs.name, fs.size, fs.type, fs.date_load, u.id shared_by_id, u.login shared_by_login, \"\\\n \"concat(u.first_name, ' ', COALESCE(u.second_name, '')) shared_by_name \"\\\n \"FROM file_store fs inner join file_share fsh on fsh.file_id = fs.id \"\\\n \"inner join user u on u.id = fsh.user_own_id where fsh.user_assigned_id ={0} \"\\\n \"union all(select fs.id, fs.name, fs.size, fs.type, fs.date_load, '' shared_by_id, \"\\\n \"'' shared_by_login, '' shared_by_name \" \\\n \"FROM file_store fs where fs.user_id ={0})\".format(self.current_user.id)\n\n files = self.db.execute(sql)\n\n response = {'files': files,\n 'used_quota': self.user_api.user_db.used_file_quota,\n 'quota': self.user_api.user_db.file_quota,\n 'total': len(files),\n 'extends': list(set(fl['type'] for fl in files))}\n\n return {'success': True, 'result': response}\n\n except StandardError:\n log.exception('Cannot search file')\n return SERVER_ERROR",
"def authenticated_files(request, referral_attachment_id):\n user = request.user\n\n # If the user is not logged-in, just bail out\n if not user.is_authenticated:\n return HttpResponse(status=401)\n\n # Get the related referral attachment object or return a 404\n try:\n referral_attachment = ReferralAttachment.objects.get(id=referral_attachment_id)\n except ReferralAttachment.DoesNotExist:\n return HttpResponse(status=404)\n\n # Get the actual filename from the referral attachment (ie. remove the UUID prefix and slash)\n filename = str(referral_attachment.file).rsplit(\"/\", 1)[-1]\n\n # Get the content type and encoding to serve the file as best we can\n content_type, encoding = mimetypes.guess_type(str(filename))\n content_type = content_type or \"application/octet-stream\"\n\n # Actually serve the file using Django's http facilities\n response = FileResponse(\n referral_attachment.file.open(\"rb\"), content_type=content_type\n )\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n if encoding:\n response[\"Content-Encoding\"] = encoding\n\n return response",
"def api_files():\n files = FileWrapper.get_files(g.user.id)\n return jsonify([f.serialize() for f in files])",
"def file_by_name(self, context, params):\n\n token = util.get_access_token(context['headers'])\n response, code = OnedriveApi.search(token, params['name'])\n\n if code == 400:\n return response\n\n result = []\n\n for item in response['value']:\n if item.get('file'):\n item_data = self.get_item_data(item)\n result.append(item_data)\n\n return result",
"def get_list_of_files_contoller(message):\n chat_id = message.chat.id\n user_id = message.from_user.id\n if db.files.count_documents({\"user_id\": user_id}) > 0:\n list_ = db.files.find({\"user_id\": user_id})\n markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)\n for file in list_:\n markup.add(\n telebot.types.KeyboardButton(\n text=f'/get_file {file[\"id\"]} {file[\"type\"]}'\n )\n )\n text = \"Please choose file:\"\n bot.reply_to(message, text, reply_markup=markup)\n else:\n text = \"Files not found\"\n bot.reply_to(message, text)",
"def get_files_by_accession(self, accession, query_filter, page_size, page, sort_direction, sort_conditions):\n request_url = self.api_base_url + \"projects/\" + accession + \"/files?\"\n\n if query_filter:\n request_url = request_url + \"filter=\" + query_filter + \"&\"\n\n request_url = request_url + \"pageSize=\" + str(page_size) + \"&page=\" + str(page) + \"&sortDirection=\" + sort_direction + \"&sortConditions=\" + sort_conditions\n\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()",
"def list_files(request, dist):\n dist = get_object_or_404(Distribution, pk=dist)\n if request.user.groups.exists():\n if not (request.user in dist.Project.Assistants.all() or request.user == dist.Project.ResponsibleStaff or get_grouptype('studyadvisors') in request.user.groups.all()):\n # staff\n raise PermissionDenied(\"You are not allowed to view these student files.\")\n elif request.user != dist.Student:\n # student\n raise PermissionDenied(\"You are not allowed to view this page.\")\n\n files = dist.files.all()\n return render(request, 'students/list_files.html', context={\n 'dist': dist,\n 'files': files,\n })",
"def list(self):\n\n base_url = ''.join((\n self.BASE_URL + '/users/',\n self.__user_data.get('login') + '/gists',\n ))\n\n response = requests.get(base_url, headers=self.__headers)\n\n if response.status_code == 200:\n return response.json()\n\n raise GistException(Gist.__get_response_error('It is not possible to list files', response))",
"def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def user(self, request):\n\t\treturn super(fileManager, self).get_queryset().filter(author=request.user)",
"def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files",
"def getAllFileRecords(user_id):\n files_db = userID = servers = None\n session = Queries.createSession()\n try:\n user = session.execute(sqlalchemy.select([Users])\n .where(Users.name == user_id)\n ).fetchone()\n userID = user.id\n catalog = session.execute(sqlalchemy.select([Catalog])\n .where(Catalog.fs_id == user.filespace_id)\n ).fetchone()\n files_db = session.query(FileTable).filter_by(catalog_id=catalog.id).all()\n servers = [file.server_id for file in files_db]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return files_db, servers, userID",
"def get_file(self, path):\n file = self.get('data_request?id=file¶meters=%s' % path)\n return file",
"def read_file(file_directory, file_path, login_request, user_id):\n\n data, headers, server_host, server_port = process_request_header(file_directory, file_path, login_request, user_id)\n\n request = requests.post(\"http://\" + server_host + \":\" + server_port + \"/fileOperations/readFile\",\n headers=headers)\n return request.text",
"def get_datafiles(self, url, survey_path):\n pass",
"def getUserImages(username):\n\n if not UserExists(username):\n print ('Got here!')\n return \"False\"\n elif not g.user:\n return redirect(url_for('login'))\n\n return download_blobs(username)\n #files = glob(os.path.join('static', 'UserPictures', username, '*'))\n #return json.dumps(files)",
"def download(self, net_id, request_id, request_date):\n current_user_roles = get_user_roles()\n\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n if self.make_pdf(net_id, request_id, request_date):\n try:\n with ZipFile(\"{0}/user_uploads/{1}/{2}/[{1}-{3}]_request.zip\".format(self.__APP_PATH__, net_id, request_id, request_date), mode=\"w\") as zip_archive:\n for user_file in scandir(\"{0}/user_uploads/{1}/{2}\".format(self.__APP_PATH__, net_id, request_id)):\n if \"_request.zip\" not in user_file.name and user_file.name not in self.__SPECIAL_FILES__:\n zip_archive.write(user_file.path, user_file.name, ZIP_DEFLATED)\n\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"[{0}-{1}]_request.zip\".format(net_id, request_date), mimetype=\"blob\")\n except Exception as e:\n print(e)\n return abort(404)\n return abort(403)",
"def get_all_mediafiles(user_id, params=None, fields=None):\n fields = fields or [MediaFiles.id, MediaFiles.user_id, MediaFiles.path, MediaFiles.duration,\n MediaFiles.size, MediaFiles.title, MediaFiles.comment, MediaFiles.tags,\n MediaFiles.description, MediaFiles.coords, MediaFiles.location_id,\n MediaFiles.year, MediaFiles.created, MediaFiles.imported,\n MediaFiles.updated, MediaFiles.accessed, MediaFiles.visits]\n query = MediaFiles.query \\\n .outerjoin(Locations, MediaFiles.location_id == Locations.id) \\\n .outerjoin(Users, MediaFiles.user_id == Users.id)\n\n year = params.get('year', 'any')\n if year != 'any' and re.match('[1-2]{1}[0-9]{3}', year) or year == '0':\n query = query.filter(MediaFiles.year == year)\n\n location = params.get('location', 'any')\n if location != 'any':\n query = query.filter(MediaFiles.location_id == location)\n\n if user_id != -1: # user_id = -1 will not apply filtering on ownership (used for statistics)\n public = params.get('ownership_public')\n private = params.get('ownership_private')\n if private or public:\n ownership = []\n if public:\n ownership.append(MediaFiles.user_id == 0)\n if private:\n ownership.append(MediaFiles.user_id == int(user_id))\n query = query.filter(or_(*ownership))\n else: # If none of Public/Private selected, then result must be empty\n query = query.filter(MediaFiles.user_id == -1)\n\n entry = params.get('search', '').strip()\n matching = params.get('tags_matching')\n if entry:\n other_matches = []\n if params.get('search_in_path'):\n other_matches.append(MediaFiles.path.contains(entry))\n if params.get('search_in_title'):\n other_matches.append(MediaFiles.title.contains(entry))\n if params.get('search_in_description'):\n other_matches.append(MediaFiles.description.contains(entry))\n if params.get('search_in_comment'):\n other_matches.append(MediaFiles.comment.contains(entry))\n query = query.filter(or_(*other_matches))\n # Note: tags filtering is applied if no other terms are selected\n if params.get('search_in_tags') and not other_matches:\n tag_matches = []\n for tag in entry.split():\n tag_matches.append(MediaFiles.tags.contains(tag))\n if matching == 'strict':\n query = query.filter(and_(*tag_matches))\n else:\n query = query.filter(or_(*tag_matches))\n query = query.add_columns(*fields).order_by(func.random())\n logging.debug('Query executed: %s' % query)\n return query",
"def get_queryset(self):\n queryset = MediaFile.objects.all()\n username = self.request.query_params.get('username', None)\n userqueryset = User.objects.all()\n users = userqueryset.filter(username=username)\n if len(users) and username is not None:\n queryset = queryset.filter(owner=users[0])\n return queryset",
"def filesFromBK(request):\n if not hasGridProxy():\n logger.error('filesFromBK: No Grig proxy!')\n return []\n\n if isinstance ( request , tuple ) : request = BKRequest ( *request )\n elif isinstance ( request , dict ) : request = BKRequest ( **request )\n\n path = request.path\n nmax = request.nmax\n first = request.first\n last = request.last\n grid = request.grid\n accessURL = request.accessURL\n SEs = request.SEs\n\n arguments = 'dirac-command %s' % which ( 'get_files_from_BK' ) \n\n arguments += \" %s \" % path\n\n if nmax < 0: nmax = 1000000\n if last < 0: last = 1000000\n if nmax < 1000000 : arguments += ' --Max %d' % nmax\n if 0 < first : arguments += ' --First %d' % first\n if last < 1000000 : arguments += ' --Last %d' % last\n if accessURL : arguments += ' -a True '\n #\n if grid and isinstance ( grid , str ) :\n arguments += ' --Sites %s ' % grid\n elif grid and 1 == len(grid):\n arguments += ' --Sites %s ' % grid[0]\n elif grid:\n sg = ','.join(grid)\n arguments += ' --Sites %s ' % sg\n\n if SEs and isinstance ( SEs , str):\n arguments += ' --SEs %s ' % grid\n elif SEs and 1 == len ( SEs ) :\n arguments += ' --SEs %s ' % SEs [ 0 ]\n elif SEs:\n sg = ','.join(SEs)\n arguments += ' --SEs %s ' % sg\n\n ## arguments += ' \"%s\" ' % path\n ## convert to DIRAC\n \n import os\n from subprocess import Popen, PIPE\n arguments = arguments.split()\n\n logger.verbose('filesFromBK:use Popen(%s)' % arguments)\n p = Popen(arguments, stdout=PIPE, stderr=PIPE)\n (cout, cerr) = p.communicate()\n\n \n if 0 != p.returncode :\n logger.error ( 'filesFromBK: error from Popen: %d/%s' % (p.returncode, cerr ) )\n return []\n\n if py3 :\n cout = cout.decode ( 'utf-8' ) if cout else cout \n cerr = cerr.decode ( 'utf-8' ) if cerr else cerr \n\n if cerr :\n logger.error ( 'filesFromBK: error from Popen: %d/%s' % (p.returncode, cerr ) )\n return []\n\n cout = cout.split('\\n')\n cout = cout [2:]\n cout = ' '.join ( cout )\n\n try:\n \n lst = eval ( cout )\n if not isinstance(lst, list):\n raise TypeError(\"Invalid list type\")\n logger.debug( 'filesFromBK: %s ' % lst )\n return lst\n\n except:\n logger.error (\"filesFromBK: can't interpret: %s\" % cout)\n\n return []",
"def get_directory_list(self, storage=None, user=None, for_user=None):\n calling_user = User.get_user_by_username(user) if user else None\n team = True if for_user == \"team\" else False\n for_user = None if for_user in [None, \"team\"] else User.get_user_by_username(for_user)\n if team:\n self.access_handler.check_read_rights(None, calling_user)\n users = calling_user.get_team_members()\n elif self.has_read_permission(calling_user, for_user):\n users = [for_user.get_username()] if for_user else None\n else:\n raise PermissionError(\"You have no access to this data!\")\n return search_class.get_directory_list(storage_alias=storage, usernames=users)",
"def get_files(self, sid):\n try:\n return self.datas.get(sid)\n except Exception as ex:\n raise ex",
"def download_key_files(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path = os.path.join(workpath, './static/BioC/linking.key')\n path1 = os.path.join(workpath, './static/BioC/mention.key')\n ment = request.GET.get('type_key',None)\n if ment == 'mentions':\n path = open(path1, 'r')\n return HttpResponse(path, content_type='text/plain')\n elif ment == 'linking':\n path1 = open(path, 'r')\n return HttpResponse(path1, content_type='text/plain')",
"def files(self, **kwargs) -> \"FileMetadataList\":\n return self._cognite_client.files.list(asset_ids=[self.id], **kwargs)",
"async def list(self, request):\n\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n request['slog'].debug('Camera list requested')\n\n response_js = {\n 'camera_files': await Camera.list(request, userid=userid, project_id=project.project_id)\n }\n\n return web.json_response(response_js)"
]
| [
"0.7363368",
"0.6090601",
"0.60854626",
"0.60225403",
"0.5861773",
"0.5856031",
"0.5812952",
"0.56845826",
"0.5641716",
"0.56278586",
"0.5582969",
"0.55632746",
"0.55632746",
"0.55632746",
"0.5561104",
"0.55126363",
"0.5475949",
"0.5418269",
"0.54156005",
"0.541434",
"0.54061687",
"0.540485",
"0.54029375",
"0.5394143",
"0.5390969",
"0.5345728",
"0.5332888",
"0.53323215",
"0.531037",
"0.5306632"
]
| 0.7595517 | 0 |
Load the BoW codebook and construct the kd search tree. Compute the nearest neighbor out of 3 for a 'new_data' sample by calling knn.find_nearest(new_data,3) | def getKNNClassifier():
codebook = loadCodebook()
args.nVisualWords = codebook.shape[0]
# find nearest neighbor in the codebook
knn = cv2.KNearest()
# construct kd-tree with labels from 0 - (nCodewords-1)
knn.train(codebook,np.arange(args.nVisualWords))
return knn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)",
"def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()",
"def main():\n # Initializing learning rate\n learning_rate = 0.0005\n # Initializing stopping criteria\n stopping_criteria = 0.01\n # load the data training data from a csv file with an url\n training_x,testing_x, training_y, testing_y,mean,sd= ai.store_data(\"https://github.com/santiagocantu98/K-Nearest-Neightbours/raw/master/diabetes.csv\",\"training\")\n normal_testing = np.copy(testing_x)\n\n # scalates the features of the testing data\n testing_data_scaled,mean,sd = ai.scale_features(testing_x,mean,sd)\n ai.print_scaled_data(testing_data_scaled,\"testing\")\n ai.calculate_euclidean_distance(training_x, training_y , testing_data_scaled, testing_y,normal_testing)",
"def _grid_search_wl_kernel(\n k: WeisfilerLehman,\n subtree_candidates,\n train_x: list,\n train_y: torch.Tensor,\n lik: float,\n subtree_prior=None, # pylint: disable=unused-argument\n lengthscales=None,\n lengthscales_prior=None, # pylint: disable=unused-argument\n):\n # lik = 1e-6\n assert len(train_x) == len(train_y)\n best_nlml = torch.tensor(np.inf)\n best_subtree_depth = None\n best_lengthscale = None\n best_K = None\n if lengthscales is not None and k.se is not None:\n candidates = [(h_, l_) for h_ in subtree_candidates for l_ in lengthscales]\n else:\n candidates = [(h_, None) for h_ in subtree_candidates]\n\n for i in candidates:\n if k.se is not None:\n k.change_se_params({\"lengthscale\": i[1]})\n k.change_kernel_params({\"h\": i[0]})\n K = k.fit_transform(train_x, rebuild_model=True, save_gram_matrix=True)\n # self.logger.debug(K)\n K_i, logDetK = compute_pd_inverse(K, lik)\n # self.logger.debug(train_y)\n nlml = -compute_log_marginal_likelihood(K_i, logDetK, train_y)\n # self.logger.debug(f\"{i} {nlml}\")\n if nlml < best_nlml:\n best_nlml = nlml\n best_subtree_depth, best_lengthscale = i\n best_K = torch.clone(K)\n # self.logger.debug(f\"h: {best_subtree_depth} theta: {best_lengthscale}\")\n # self.logger.debug(best_subtree_depth)\n k.change_kernel_params({\"h\": best_subtree_depth})\n if k.se is not None:\n k.change_se_params({\"lengthscale\": best_lengthscale})\n k._gram = best_K # pylint: disable=protected-access",
"def test_k_nearest(self):\n L = range(100)\n L = [(i, i, i, i) for i in L]\n tree = KdTree(L)\n # remove distance, only keep points from the result\n items = lambda items: [x for (d, x) in items] \n assert items(tree.k_nearest((-1, -1), 1)) == [(0, 0, 0, 0)]\n assert items(tree.k_nearest((100, 100), 1)) == [(99, 99, 99, 99)]\n assert items(tree.k_nearest((50, 50), 1)) == [(50, 50, 50, 50)]\n assert items(tree.k_nearest((-1, -1), 2)) == [(0, 0, 0, 0),\n (1, 1, 1, 1)]",
"def train_knn(training_data):\n return knnclassifier(training_data, keys, 3)",
"def generate_kdtree(self):\n if self.method==2:\n coordinates = self.unassigned_data[0:3,:]\n else:\n coordinates = self.unassigned_data[0:2,:]\n tree = cKDTree(coordinates.T)\n\n return tree",
"def __init__(self,data,pos,neg,bk,target,max_depth=2):\n\n self.data = data\n self.examples = {}\n self.pos = pos\n self.neg = neg\n #initial model assumed P(target|data)=0.5 to target=1,0\n for ex in pos+neg:\n self.examples[ex] = 0\n self.bk = bk\n self.target = target\n self.max_depth = max_depth\n self.boosted_trees = []",
"def build_classifier(self, n_neighbours, data_index):\n knn = KNeighborsClassifier(n_neighbors=n_neighbours)\n BayesianKneighborClassifier.update_current_data(self, data_index)\n X_train, X_test, y_train, y_test = BayesianKneighborClassifier.split_test_and_train_data\\\n (self, 0.3, data_index)\n knn.fit(X_train, y_train)\n y_predicted = knn.predict(X_test)\n print(\"KNN classifier built. Accuracy score: {} using K={} neighbours in view: {}\".format(\n metrics.accuracy_score(y_test, y_predicted), n_neighbours,\n BayesianKneighborClassifier.views[data_index]))\n return knn",
"def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(join(module_path, 'data', 'train2.csv')) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tglobal n_samples\n\t\tn_samples = int(temp[0])\n\t\tglobal n_features\n\t\tn_features = int(temp[1])\n\t\tprint \"n samples \" + str((n_samples))\n\t\tprint \"n_features\" + str((n_features))\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tdata[count] = np.asarray(value[:-1], dtype=np.float)\n\t\t\ttarget[count] = np.asarray(value[-1], dtype=np.int)\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\t\tprint \"Number of target records is \" + str(len(target))\n\t#with open(join(module_path, 'descr', 'train.rst')) as rst_file:\n\t#\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=None,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])",
"def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)",
"def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions",
"def KNeighborRegression(trainingObs,trainingParam,Obs,n_neighbors):\n #knn = nb.KNeighborsRegressor(algorithm='ball_tree',n_neighbors=n_neighbors,weights = 'distance')\n knn = nb.KNeighborsRegressor(algorithm='ball_tree',n_neighbors=n_neighbors)\n knn.fit(trainingObs,trainingParam)\n return knn.predict(Obs)",
"def kdtree( data, leafsize):\n\n leaves = []\n\n ndim = data.shape[0]\n ndata = data.shape[1]\n #print ndim\n #print ndata\n\n # find bounding hyper-rectangle\n hrect = numpy.zeros((2,data.shape[0]))\n hrect[0,:] = data.min(axis=1)\n hrect[1,:] = data.max(axis=1)\n\n # create root of kd-tree\n idx = numpy.argsort(data[0,:], kind='mergesort')\n data[:,:] = data[:,idx]\n splitval = data[0,ndata/2]\n\n left_hrect = hrect.copy()\n right_hrect = hrect.copy()\n left_hrect[1, 0] = splitval\n right_hrect[0, 0] = splitval\n\n tree = [(None, None, left_hrect, right_hrect, None, None)]\n\n stack = [(data[:,:ndata/2], idx[:ndata/2], 1, 0, True),\n (data[:,ndata/2:], idx[ndata/2:], 1, 0, False)]\n\n # recursively split data in halves using hyper-rectangles:\n while stack:\n\n # pop data off stack\n data, didx, depth, parent, leftbranch = stack.pop()\n ndata = data.shape[1]\n nodeptr = len(tree)\n\n # update parent node\n\n _didx, _data, _left_hrect, _right_hrect, left, right = tree[parent]\n\n tree[parent] = (_didx, _data, _left_hrect, _right_hrect, nodeptr, right) if leftbranch \\\n else (_didx, _data, _left_hrect, _right_hrect, left, nodeptr)\n\n # insert node in kd-tree\n\n # leaf node?\n if ndata <= leafsize:\n _didx = didx.copy()\n _data = data.copy()\n leaf = (_didx, _data, None, None, 0, 0)\n #leaf = (_data)\n tree.append(leaf)\n leaves.append(_didx)\n\n # not a leaf, split the data in two \n else:\n splitdim = depth % ndim\n idx = numpy.argsort(data[splitdim,:], kind='mergesort')\n data[:,:] = data[:,idx]\n didx = didx[idx]\n nodeptr = len(tree)\n stack.append((data[:,:ndata/2], didx[:ndata/2], depth+1, nodeptr, True))\n stack.append((data[:,ndata/2:], didx[ndata/2:], depth+1, nodeptr, False))\n splitval = data[splitdim,ndata/2]\n if leftbranch:\n left_hrect = _left_hrect.copy()\n right_hrect = _left_hrect.copy()\n else:\n left_hrect = _right_hrect.copy()\n right_hrect = _right_hrect.copy()\n left_hrect[1, splitdim] = splitval\n right_hrect[0, splitdim] = splitval\n\n #print data\n # append node to tree\n tree.append((None, None, left_hrect, right_hrect, None, None))\n\n return tree,leaves",
"def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err",
"def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"k\",\n type=int,\n help=\"the number of neighbors\")\n parser.add_argument(\"--xTrain\",\n default=\"q3xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q3yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q3xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q3yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n #create an instance of the model\n knn = Knn(args.k)\n knn.train(xTrain, yTrain['label'])\n # predict the training dataset\n yHatTrain = knn.predict(xTrain)\n trainAcc = accuracy(yHatTrain, yTrain['label'])\n # predict the test dataset\n yHatTest = knn.predict(xTest)\n testAcc = accuracy(yHatTest, yTest['label'])\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n\n # runs the KNN from 1 to K to compare the accuracy for different values of K.\n performance(xTrain, yTrain, xTest, yTest, args.k)",
"def __init__(self,data,pos,neg,bk,prior,target,max_depth=2):\n\n self.data = data\n self.examples = {}\n self.pos = pos\n self.neg = neg\n #initial model assumed P(target|data)=0.5 to target=1,0\n for ex in pos+neg:\n self.examples[ex] = prior[ex]\n self.bk = bk\n self.target = target\n self.max_depth = max_depth\n self.boosted_trees = []",
"def generate_knn(self,rating_data):\n\n algo = {}\n bcKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})\n algo['bcKNN'] = bcKNN\n\n wmKNN = KNNWithMeans(sim_options={'name': 'cosine', 'user_based': True})\n algo['wmKNN'] = wmKNN\n\n wzKNN = KNNWithZScore(sim_options={'name': 'cosine', 'user_based': True})\n algo['wzKNN'] = wzKNN\n\n blKNN = KNNBaseline(sim_options={'name': 'cosine', 'user_based': True})\n algo['blKNN'] = blKNN\n\n\n # tune param for knnBaseline, since it has best accuracy\n param_grid_bl = {'k': [10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 100]}\n best_params_bl = self.tune_and_find_parameter('blKNN', KNNBaseline, rating_data, param_grid_bl)\n\n blKNN_tuned = KNNBaseline(k=best_params_bl['k'])\n algo.update({'blKNN_tuned': blKNN_tuned})\n\n return algo",
"def generateKNNobj():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n #tdata=b[:,8:28].copy()\n # indx with only M22 without zero order\n m22idx = np.concatenate((np.arange(29,48),np.arange(49,68)))\n tdata=b[:,m22idx].copy()\n #tdata=b[:,9:28].copy() # remove the zero order zernike, i.e. remove the mean of the M20\n #-standardize the data. use this information in future validation data too.\n tmean = tdata.mean(axis=0)\n tstd = tdata.std(axis=0)\n tdata = (tdata - tmean)/tstd\n ttpara=b[:,0:5].copy()\n tpara = b[:,0:5].copy()\n tpara[:,3] = ttpara[:,3]*np.cos(np.deg2rad(ttpara[:,4]))\n tpara[:,4] = ttpara[:,3]*np.sin(np.deg2rad(ttpara[:,4]))\n knn = nb.KNeighborsRegressor(algorithm='ball_tree',n_neighbors=15)\n knn.fit(tdata,tpara)\n p.dump(knn,open('finerGridKnnObj_M22_remMean.cp','w'),2)\n p.dump([tmean,tstd],open('finerGridStdConst_M22_remMean.cp','w'),2)\n #np.savetxt('finerGridStdConst.txt',np.array([tmean,tstd]),fmt='%f10.5',delimiter = ',')\n return 'It is done !'",
"def build_knn_index(self, data, min_n_neighbors=MIN_N_NEIGHBORS, rho=RHO):\n # Add one extra neighbor because querying on the points that are part of the KNN index will result in\n # the neighbor set containing the queried point. This can be removed from the query result\n if self.shared_nearest_neighbors:\n k = max(1 + self.n_neighbors_snn, min_n_neighbors)\n else:\n k = max(1 + self.n_neighbors, min_n_neighbors)\n\n # KNN index based on the primary distance metric\n if self.approx_nearest_neighbors:\n params = {\n 'metric': self.metric,\n 'metric_kwds': self.metric_kwargs,\n 'n_neighbors': k,\n 'rho': rho,\n 'random_state': self.seed_rng,\n 'n_jobs': self.n_jobs,\n 'low_memory': self.low_memory\n }\n index_knn_primary = NNDescent(data, **params)\n\n self.nn_indices, self.nn_distances = remove_self_neighbors(index_knn_primary._neighbor_graph[0],\n index_knn_primary._neighbor_graph[1])\n else:\n # Exact KNN graph\n index_knn_primary = NearestNeighbors(\n n_neighbors=k,\n algorithm='brute',\n metric=self.metric,\n metric_params=self.metric_kwargs,\n n_jobs=self.n_jobs\n )\n index_knn_primary.fit(data)\n\n self.nn_indices, self.nn_distances = remove_self_neighbors(\n *self._query(data, index_knn_primary, k)\n )\n\n if self.shared_nearest_neighbors:\n # Construct a second KNN index that uses the shared nearest neighbor distance\n data_neighbors = self.nn_indices[:, 0:self.n_neighbors_snn]\n if self.approx_nearest_neighbors:\n params = {\n 'metric': distance_SNN,\n 'n_neighbors': max(1 + self.n_neighbors, min_n_neighbors),\n 'rho': rho,\n 'random_state': self.seed_rng,\n 'n_jobs': self.n_jobs,\n 'low_memory': self.low_memory\n }\n index_knn_secondary = NNDescent(data_neighbors, **params)\n\n # Save the nearest neighbor information of the data used to build the KNN index\n self.nn_indices, self.nn_distances = remove_self_neighbors(index_knn_secondary._neighbor_graph[0],\n index_knn_secondary._neighbor_graph[1])\n else:\n index_knn_secondary = NearestNeighbors(\n n_neighbors=(1 + self.n_neighbors),\n algorithm='brute',\n metric=distance_SNN,\n n_jobs=self.n_jobs\n )\n index_knn_secondary.fit(data_neighbors)\n\n # Save the nearest neighbor information of the data used to build the KNN index\n self.nn_indices, self.nn_distances = remove_self_neighbors(\n *self._query(data_neighbors, index_knn_secondary, 1 + self.n_neighbors)\n )\n\n index_knn = [index_knn_primary, index_knn_secondary]\n else:\n index_knn = [index_knn_primary]\n\n return index_knn",
"def bbknn(adata, batch_key='batch', save_knn=False, copy=False, **kwargs):\n try:\n from bbknn import bbknn\n except ImportError:\n raise ImportError('Please install bbknn: `pip install bbknn`.')\n params = locals()\n kwargs = params.pop('kwargs')\n return bbknn(**params, **kwargs)",
"def load_BindingDB_kd():\n affinity = pd.read_csv('./dataset/regression/BindingDB/BindingDB_Kd.txt', header=None)\n target = pd.read_csv('./dataset/regression/BindingDB/BindingDB_Target_Sequence_new.txt', header=None)\n drug = pd.read_csv('./dataset/regression/BindingDB/BindingDB_SMILES_new.txt', header=None)\n \n SMILES=[]\n Target=[]\n y=[]\n drugcnt=[]\n \n for i in range(len(target)):\n Target.append(target[0][i])\n y.append(affinity[0][i])\n SMILES.append(drug[0][i])\n\n aff=[]\n total=[]\n for i in range(len(target)):\n aff.insert(i, y[i].split(\" \"))\n for i in aff:\n total += i\n for i in range(len(SMILES)):\n drugcnt.insert(i, len(SMILES[i].split()))\n\n smile = []\n for segments in SMILES:\n for x in segments.split():\n smile.extend(x)\n #smile = [x for segments in SMILES for x in segments.split()]\n smiles_res=[]\n y_tmp=[]\n target_res=[]\n tmp=[]\n\n for i in range(len(drugcnt)):\n tmp.extend(repeat(Target[i], drugcnt[i]))\n for i in range(len(total)):\n if total[i] != '-1':\n y_tmp.append(total[i])\n smiles_res.append(smile[i])\n target_res.append(tmp[i])\n\n y_res = [float(i) for i in y_tmp]\n y_res = convert_y_unit(np.array(y_res), 'nM', 'p')\n return np.array(smiles_res), np.array(target_res), np.array(y_res)",
"def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight",
"def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward",
"def _build_init(self, kg_dir = \"./related_triples_by_relation/\"):\r\n if self.training:\r\n idx = np.random.choice(range(len(self.train_data))) \r\n self.entry = self.train_data[idx]\r\n else:\r\n idx = self.idx_to_test\r\n self.entry = self.test_data[idx]\r\n self.query = self.entry['s'] + ' ' + self.entry['p']\r\n self.text_list = self.entry['corpus']\r\n ######################################################\r\n ## obtain the answer from extraction system output ###\r\n ######################################################\r\n if self.training:\r\n self.answer_list = self.pred_train[self.entry['id']] \r\n else:\r\n self.answer_list = self.pred_test[self.entry['id']]\r\n\r\n assert len(self.text_list) == len(self.answer_list), \"Wrong, text length %d, answer length %d\" %(len(self.text_list), len(self.answer_list))\r\n\r\n self.text_answer = [[self.text_list[i], self.answer_list[i]] for i in range(len(self.text_list))]\r\n \r\n self.max_index = len(self.text_list)\r\n ### #####################################################################\r\n ## initialize the index of current/new candidate as 0/1 respectively. ###\r\n #########################################################################\r\n self.cur_index = 0\r\n self.new_index = 1\r\n self.cur = self.text_answer[self.cur_index]\r\n try:\r\n self.new = self.text_answer[self.new_index]\r\n except:\r\n ####################################################################\r\n ## exception would happen when size of raw text is less than 2. ####\r\n ## which cannot happen in preprocessed data ########################\r\n ####################################################################\r\n self.new = self.cur\r\n self.curans = self.cur[1][0]\r\n self.newans = self.new[1][0]\r\n self.answer_seen = self.cur[1][0]\r\n self.truth = \"\".join(self.entry['o'])\r\n\r\n #################################################################\r\n ## if do bert, we need to squeeze the space #####################\r\n #################################################################\r\n if self.do_bert:\r\n self.truth = token_word(self.truth)\r\n # get reference values\r\n #os.chdir('/content/drive/My Drive/Knowledge Extraction/related_triples_by_relation')\r\n filename = \"%s.csv\" % self.entry['p']\r\n related_triples_to_use = pd.read_csv(kg_dir + filename, sep='\\t', header = None)\r\n self.reference_values = related_triples_to_use[2].values",
"def train_nb(training_data):\n trainer = nbtrainer()\n trainer.train(training_data, keys)\n classifier = nbclassifier(trainer)\n return classifier",
"def cuckoo_search(n=None, nd=None, Lb=None, Ub=None, pa=None):\n\tif n is None:\n\t\tn =25\n\n\tif nd is None:\n\t\tnd=21\n\n\tif Lb is None:\n\t\tLb = np.ones(nd)*0\n\tif Ub is None:\n\t\tUb = np.ones(nd)*5\n\n\tif pa is None:\n\t\tpa = 0.25\n\n\t# creation of the list for parameter pairs \n\t\n\tstep = 1\n\n # initialization of the nests\n\tnests = np.zeros((n,nd))\n\tfor i in range(n):\n\t\tnests[i,:] = Lb + (Ub-Lb)*np.random.rand(len(Lb))\n\n\tfitness = 10**10 * np.ones((n,1))\n\tbest_nest, fmin, nest, fitness, N_iter = single_cuckoo_search(nests,fitness,Lb,Ub,pa,step) \n\n\treturn best_nest, fmin, nest, fitness, N_iter",
"def find_new_kbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n\n #---> j loop\n for j in range(Ly):\n self.kbl[j] = N #initialize search at top\n\n # in fortran k=N-1,1,-1\n for k in range(N-1,0,-1):\n #INDEX MAP\n k_w = k\n k_r = k-1\n \n for j in range(Ly):\n if z_u_w[j,k_w] > z_u_w[j,N] - self.hbls[j]:\n self.kbl[j] = k_w",
"def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class",
"def sample(self, root, tree, sample_num, for_d):\n\n # all_score = self.sess.run(self.generator.all_score)\n # all_score is a matrix with shape [n_node, n_node]\n all_score = self.generator.all_score\n samples = []\n paths = []\n n = 0\n\n while len(samples) < sample_num:\n current_node = root\n previous_node = -1\n paths.append([])\n is_root = True\n paths[n].append(current_node)\n while True:\n node_neighbor = tree[current_node][1:] if is_root else tree[current_node]\n # print(\"////\", tree[current_node])\n is_root = False\n if len(node_neighbor) == 0: # the tree only has a root\n return None, None\n if for_d: # skip 1-hop nodes (positive samples)\n if node_neighbor == [root]:\n # in current version, None is returned for simplicity\n return None, None\n if root in node_neighbor:\n node_neighbor.remove(root)\n\n # we retrieve embeddings corresponding to current node's neighbors\n # the multiply of g_v with shape (1, 50) and g_vi with shape(1, 50) is a scala\n # to calculate the multiply of g_v and g_vi: we calculate the \"multiplication\" (inner product) between embedding_matrix with shape(n_node, 50) and its transpose\n # then saved the result in self.score with shape (n_node, n_node) in dis_torch.py\n # all_score has the shape = (5254, 5254), each row is a list of scala, each scala is the \"multiplication\" (inner product) between a particular node to an other node in the graph\n # due to for each current_node, we have a list of its neighbors, saved in [node_neighbor]\n # we can retrieve a list of scalas that equal to the \"multiplications\" (inner product) between g_v(current node) to its neighbor g_vi\n # to do that, we have:\n relevance_probability = all_score[current_node][node_neighbor]\n\n # convert tensor to numpy array\n relevance_probability = relevance_probability.cpu().detach().numpy()\n\n # finally, applying softmax function, we get the relevance probability of current_node and its neighbors, as formed in the paper\n relevance_probability = utils.softmax(relevance_probability)\n \n # pick a random node from its neighbors based on relevance_probability\n next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0] # select next node\n # print(\"???\", next_node)\n paths[n].append(next_node)\n if next_node == previous_node: # terminating condition\n samples.append(current_node)\n break\n previous_node = current_node\n current_node = next_node\n n = n + 1 # n equal to sample_num\n return samples, paths # for each sample, we get one path from root to that sample"
]
| [
"0.6259887",
"0.56187296",
"0.5618197",
"0.5392091",
"0.53918236",
"0.53887093",
"0.5382357",
"0.5338102",
"0.5305068",
"0.52694744",
"0.5260001",
"0.5255998",
"0.52079123",
"0.5196971",
"0.5183346",
"0.51783186",
"0.5176576",
"0.51722944",
"0.5166057",
"0.5163322",
"0.5151925",
"0.5148887",
"0.5147413",
"0.5140987",
"0.51394033",
"0.51014227",
"0.50888443",
"0.5073309",
"0.5065503",
"0.50497824"
]
| 0.646793 | 0 |
Load the feature statistics. | def loadFeatureStats():
print 'Loading feature statistics...'
featurestats = np.loadtxt(os.path.abspath(args.featurestats), dtype=np.float32)
print 'Done.'
return featurestats | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_features(self, features):\n pass\n # self.features = features",
"def load(self):\n all_ = self._fetch_features()\n features = {f.name: f for f in all_}\n self._cache = {n: self._state(features.get(n))\n for n in FEATURES.keys()}",
"def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg",
"async def _load_features(self) -> int:\n for feature in self.config[\"Core\"].get(\"Modules\", []):\n try:\n await self.load_feature(feature)\n except ZeroBotModuleError as ex:\n self.logger.exception(ex)\n return len(self._features)",
"def load_features_mode(feature_path, mode='test',\n num_workers=10, batch_size=128):\n feature_dataset = load_features(os.path.join(feature_path, f'features_{mode}'))\n feature_loader = ch.utils.data.DataLoader(feature_dataset, \n num_workers=num_workers,\n batch_size=batch_size, \n shuffle=False)\n\n feature_metadata = ch.load(os.path.join(feature_path, f'metadata_train.pth'))\n feature_mean, feature_std = feature_metadata['X']['mean'], feature_metadata['X']['std']\n \n\n features = []\n\n for _, (feature, _) in tqdm(enumerate(feature_loader), total=len(feature_loader)):\n features.append(feature)\n \n features = ch.cat(features).numpy()\n return features, feature_mean, feature_std",
"def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError",
"def load_stats(self, fallback=None):\n stats_filepath = os.path.join(self.results_dir, f\"{self.model_name}statistics.json\")\n if os.path.exists(stats_filepath):\n with open(stats_filepath, 'r') as f:\n stats = json.load(f)\n else:\n stats = fallback\n return stats",
"def load_statistics(log_path, iteration_number=None, verbose=True):\n # If no iteration is specified, we'll look for the most recent.\n if iteration_number is None:\n iteration_number = get_latest_iteration(log_path)\n\n log_file = '%s/%s_%d' % (log_path, FILE_PREFIX, iteration_number)\n\n if verbose:\n # pylint: disable=superfluous-parens\n print('Reading statistics from: {}'.format(log_file))\n # pylint: enable=superfluous-parens\n\n with tf.io.gfile.GFile(log_file, 'rb') as f:\n return pickle.load(f), iteration_number",
"def load_feature_rates(self):\n if not self.feature_rates:\n return\n if not os.path.exists(self.feature_rates):\n raise ValueError(\"Could not find feature rate file %s.\" % self.feature_rates)\n with io.open(self.feature_rates, encoding=\"UTF-8\") as fp:\n self.feature_rates = {}\n for line in fp:\n feature, rate = line.split(\",\")\n rate = float(rate.strip())\n self.feature_rates[feature] = rate\n norm = sum(self.feature_rates.values()) / len(self.feature_rates.values())\n for f in self.feature_rates:\n self.feature_rates[f] /= norm",
"def stats(self):\n pass",
"def setup(self):\n for gen in self._feature_stats_generators:\n gen.setup()",
"def loadall(bot) :\n for feature in features :\n load(bot, feature)",
"def loadModel(self):\n for feature in self.features:\n featureName = feature[\"name\"]\n probabilities = repository.readProbabilities(self.modelName, featureName, self.modelClass)\n probabilities = probabilities.set_index(self.modelClass)\n\n modelForFeature = {\n \"probabilities\": probabilities\n }\n self.model[featureName] = modelForFeature",
"def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do",
"def __init__(self):\n \n self.csv_features = {} # Create dictionary to load the CSV features\n self.meta_features = [] # Create list to load the metadata features",
"def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))",
"def stats(self):",
"def _load(self):\n if os.path.exists(self.path):\n with open(self.path) as src:\n data = json.loads(src.read())\n else:\n data = {\n 'type': 'FeatureCollection',\n 'features': []}\n\n # Must be a FeatureCollection\n assert data['type'] == 'FeatureCollection'\n # All features must have ids, TODO must be unique strings\n assert all(f.get('id') for f in data['features'])\n\n return data",
"def load_dataset_stats(config):\n filename = None\n if config.data.dataset == 'CIFAR10':\n filename = 'assets/stats/cifar10_stats.npz'\n elif config.data.dataset == 'CELEBA':\n filename = 'assets/stats/celeba_stats.npz'\n elif config.data.dataset == 'LSUN':\n filename = f'assets/stats/lsun_{config.data.category}_{config.data.image_size}_stats.npz'\n else:\n raise ValueError(f'Dataset {config.data.dataset} stats not found.')\n\n with tf.io.gfile.GFile(filename, 'rb') as fin:\n stats = np.load(fin)\n return stats",
"def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()",
"def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def load_data(self):\n if self.debug:\n print(\"Loading data\")",
"def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')",
"def load_gltf(self):\n with open(str(self.path)) as fd:\n self.gltf = GLTFMeta(self.path, json.load(fd), self.meta)",
"def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")",
"def uses_statistics(self):\n return True",
"def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]",
"def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])",
"def load_attribute_data():\n global attr_value_counts, attr_counts, value_counts, \\\n attr_value_ratios, attrs\n\n print \"Loading extraction data...\"\n with open('./data/common_extractions.json') as f:\n place_data = json.loads(f.read())\n for place in place_data:\n for attr in place_data[place]:\n if attr not in attr_value_counts:\n attrs.add(attr)\n attr_value_counts[attr] = {}\n attr_counts[attr] = 0\n for value in place_data[place][attr]:\n c = place_data[place][attr][value]\n value_counts[value] = value_counts.get(value, 0) + c\n attr_counts[attr] += c\n attr_value_counts[attr][value] = \\\n attr_value_counts[attr].get(value, 0) + c\n \n for attr in attrs:\n attr_value_ratios[attr] = {}\n for value in attr_value_counts[attr]:\n attr_value_ratios[attr][value] = float(attr_value_counts[attr][value]) \\\n / attr_counts[attr]"
]
| [
"0.68637323",
"0.6641995",
"0.6630883",
"0.6143179",
"0.61423624",
"0.61005014",
"0.5939163",
"0.58294743",
"0.57920194",
"0.57680583",
"0.5766087",
"0.57518077",
"0.57271814",
"0.5694278",
"0.56627154",
"0.5630877",
"0.5625008",
"0.56058234",
"0.5586172",
"0.5585735",
"0.5582932",
"0.5550738",
"0.55431247",
"0.5536835",
"0.55239093",
"0.55123466",
"0.5508254",
"0.5504743",
"0.54840696",
"0.5482352"
]
| 0.8061477 | 0 |
Gets the asset tag of the given entity during upload. | def GetAssetTagsFromUploadRequest(self, entity, request):
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tag(self, tag, filename):\n return self.get_tag_batch(tag, [filename])[0]",
"def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n tags = entitiesDAO.list_entity_tags(currency, entity)\n return tags",
"def _get_tag(self):\n return self.__tag",
"def get_tag(self):\n return self.tag",
"def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n entity_stats = entitiesDAO.get_entity(currency, entity)\n if entity_stats:\n entity_stats['tags'] = entitiesDAO.\\\n list_entity_tags(currency, entity_stats['entity'])\n entity_stats['tag_coherence'] = compute_tag_coherence(\n entity_stats['tags'])\n return entity_stats\n abort(404,\n \"Entity {} not found in currency {}\".format(entity, currency))",
"def get_tag(self) -> int:\n return self.tag",
"def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname",
"def asset(self):\n\t\treturn self._asset",
"def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)",
"def tag(self) -> str:\n return pulumi.get(self, \"tag\")",
"def git_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"git_tag\")",
"def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")",
"def assetName(self):\n\t\treturn self._assetName",
"def name(self):\n\t\treturn self.asset.name",
"def tag(self):\n return self.tag_",
"def asset_id(self) -> str:\n return self.__asset_id",
"def get_asset(self, short_name):\n return self._assets[short_name]",
"def tag(self) -> 'genmod.Tag':\n return self._generation.tag",
"def asset_id(self) -> str:\n return self._asset_id",
"def tests_ti_file_get_tag(self, request: FixtureRequest):\n super().indicator_get_tag(request)",
"def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag",
"def tag(self):\n return self._tag",
"def image_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_tag\")",
"def GetEntity(self):\n return self.__entity",
"def tag(self):\n return self._tag",
"def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n\n def query_function(_):\n return (None, entitiesDAO.list_entity_tags(\n currency, int(entity)))\n return Response(to_csv(query_function), mimetype=\"text/csv\",\n headers=create_download_header('tags of entity {} '\n '({}).csv'\n .format(entity,\n currency\n .upper())))",
"def get_entity_by_name(self, entity_name):\n return Artifact.get_by_name(entity_name)",
"def get_name(self, asset):\n return self.get_name_and_meta(asset)[0]",
"def tag(self):\n\n return self._tag",
"def get_tag(self, index):\n\n model_index = self.GetItemData(index)\n return self._clientData.get(model_index, None)"
]
| [
"0.61146694",
"0.58778316",
"0.5813894",
"0.5805953",
"0.5650816",
"0.561922",
"0.56111354",
"0.56012976",
"0.55632",
"0.55189484",
"0.55128753",
"0.5499972",
"0.54612863",
"0.54450023",
"0.54363954",
"0.5431249",
"0.5408843",
"0.5406867",
"0.54059553",
"0.5365544",
"0.5364949",
"0.53571904",
"0.53416735",
"0.5328267",
"0.53235996",
"0.53165007",
"0.52956647",
"0.5290293",
"0.52884424",
"0.52806"
]
| 0.6771434 | 0 |
Fills InventoryServicePassphraseProperties for entity. | def FillInventoryServicePropertiesDuringEscrow(self, entity, request):
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init(self, passphrase: str, **kwargs):\n # Initialise the KDF\n self._vault_kdf = init_kdf(**self._kdf_defaults)\n\n # Set up an empty state\n self._state = {}\n self.save(passphrase)",
"def copy_from_entity(self, entity):\n for prop in entity._EndpointsPropertyItervalues():\n attr_name = prop._code_name\n value = getattr(entity, attr_name)\n if value is not None:\n if isinstance(prop, properties.EndpointsAliasProperty):\n value_set = getattr(self, attr_name) is not None\n elif isinstance(prop, ComputedProperty):\n value_set = True\n else:\n value_set = prop._name in self._values\n if not value_set:\n setattr(self, attr_name, value)",
"def setPassphrase( self , passphrase ):\n\t\tself.passphrase\t= passphrase\n\t\t\n\t\t# Generate and log the generated PMK.\n\t\tself.PMK = pbkdf2_bin( self.passphrase , self.ssid , 4096 , 32 )\n\t\tself.logger.logKey( 'Pairwise Master Key' , self.PMK )",
"def configure_service_password_encryption(device):\n\n try:\n device.configure(\"service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not configure service password encryption\"\n )",
"def get_host_vars(self, host, vault_password=None):\n use_keychain = host.get_variables().get(\"use_keychain\")\n hostname = host.get_variables().get('inventory_hostname')\n if '-l' in sys.argv:\n # Check if only limited set of hosts is required for this run and get password only for them\n # quite a dirty way to accomplish that...\n found = False\n for limit in sys.argv[sys.argv.index('-l')+1].split(\",\"):\n m = re.match(limit.replace(\"*\", \".*\"), hostname)\n if m is not None:\n found = True\n break\n if not found:\n return\n if use_keychain and use_keychain.lower() in ['true', 'yes']:\n if VarsModule.sudo_password_cache.get(hostname) is None:\n user, passwd = KeyChain.get_credentials(host.get_variables()['inventory_hostname'])\n if not user:\n # Maybe short hostname then?\n user, passwd = KeyChain.get_credentials(host.get_variables()['inventory_hostname_short'])\n\n if not passwd:\n print(\"Cannot get password for host %s from keychain\" % hostname)\n passwd = getpass.getpass(\"Password for host %s: \"% hostname)\n VarsModule.remote_username_cache[hostname] = user\n VarsModule.sudo_password_cache[hostname] = passwd\n if VarsModule.remote_username_cache[hostname]:\n host.set_variable('ansible_ssh_user', VarsModule.remote_username_cache[hostname])\n host.set_variable('ansible_sudo_pass', VarsModule.sudo_password_cache[hostname])",
"def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data",
"def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))",
"def __init__(self, passphrase=None):\n if not passphrase:\n passphrase = create_passphrase(bits_of_entropy=160)\n\n self._passphrase = passphrase",
"def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wc_api = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n price = 0.0\n if variant.variant_id:\n info = {'id': variant.variant_id, 'menu_order': variant.sequence}\n # Below are used to set the color in the metadata field.\n product_template_attribute_value = variant.product_id.product_template_attribute_value_ids.filtered(\n lambda attribute: attribute.display_type == 'color') or False\n if product_template_attribute_value and product_template_attribute_value.product_attribute_value_id.html_color:\n meta_data = []\n meta_data.append({'key': 'markersnpens-color-picker',\n 'value': product_template_attribute_value.product_attribute_value_id.html_color})\n info.update({'meta_data': meta_data})\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku': variant.default_code, 'weight': str(weight),\n \"manage_stock\": variant.woo_is_manage_stock})\n else:\n attributes = self.get_product_attribute(template.product_tmpl_id, instance, common_log_id, model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0, partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price': str(price), 'sale_price': str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku': variant.default_code, \"manage_stock\": variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price': str(price), 'sale_price': str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'update': woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % res.content\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % template.name)\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\": attributes})\n res = wc_api.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'create': variants_to_create})\n try:\n response = res.json()\n except Exception as error:\n message = \"Json Error : While update products to WooCommerce for instance %s. 
\\n%s\" % (\n instance.name, error)\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n else:\n variant_id = product.get(\"id\")\n variant = template.woo_product_ids.filtered(lambda x: x.default_code == product.get(\"sku\"))\n if variant:\n variant.write({\"variant_id\": variant_id, \"exported_in_woo\": True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag",
"def cookiepassphrase(self, cookiepassphrase) :\n\t\ttry :\n\t\t\tself._cookiepassphrase = cookiepassphrase\n\t\texcept Exception as e:\n\t\t\traise e",
"def prepare_properties(self):\n node = self.entities[0].node\n addr = self.entities[0].address\n\n if len(node.omni_listproperties()) > 2:\n AssertionError('There should not be more than two properties, MSC and TMSC, after a clean start')\n\n # tx: 50, ecosystem: 2, 9223372036854775807 indivisible tokens, \"TIndiv1\"\n node.omni_sendrawtx(addr, '0000003202000100000000000054496e646976310000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 9223372036854775807 indivisible tokens, \"TIndiv2\"\n node.omni_sendrawtx(addr, '0000003202000100000000000054496e646976320000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 9223372036854775807 indivisible tokens, \"TIndiv3\"\n node.omni_sendrawtx(addr, '0000003202000100000000000054496e646976330000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 9223372036854775807 indivisible tokens, \"TIndivMax\"\n node.omni_sendrawtx(addr, '0000003202000100000000000054496e6469764d61780000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 92233720368.54770000 divisible tokens, \"TDiv1\"\n node.omni_sendrawtx(addr, '0000003202000200000000000054446976310000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 92233720368.54770000 divisible tokens, \"TDiv2\"\n node.omni_sendrawtx(addr, '0000003202000200000000000054446976320000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 92233720368.54770000 divisible tokens, \"TDiv3\"\n node.omni_sendrawtx(addr, '0000003202000200000000000054446976330000007fffffffffffffff')\n # tx: 50, ecosystem: 2, 92233720368.54770000 divisible tokens, \"TDivMax\"\n node.omni_sendrawtx(addr, '00000032020002000000000000544469764d61780000007fffffffffffffff')\n # tx: 50, ecosystem: 1, 9223372036854775807 indivisible tokens, \"MIndiv1\"\n node.omni_sendrawtx(addr, '000000320100010000000000004d496e646976310000007fffffffffffffff')\n # tx: 50, ecosystem: 1, 92233720368.54770000 divisible tokens, \"MDiv1\"\n node.omni_sendrawtx(addr, '000000320100020000000000004d446976310000007fffffffffffffff')\n\n self.generate_block()\n self.check_balance(addr, TIndiv1, '9223372036854775807', '0')\n self.check_balance(addr, TIndiv2, '9223372036854775807', '0')\n self.check_balance(addr, TIndiv3, '9223372036854775807', '0')\n self.check_balance(addr, TIndivMax, '9223372036854775807', '0')\n self.check_balance(addr, TDiv1, '92233720368.54775807', '0.00000000')\n self.check_balance(addr, TDiv2, '92233720368.54775807', '0.00000000')\n self.check_balance(addr, TDiv3, '92233720368.54775807', '0.00000000')\n self.check_balance(addr, TDivMax, '92233720368.54775807', '0.00000000')\n self.check_balance(addr, MIndiv1, '9223372036854775807', '0')\n self.check_balance(addr, MDiv1, '92233720368.54775807', '0.00000000')",
"def populate(self, **kwargs):\n kwargs = _.omit(kwargs, Base.PUBLIC_PROPERTIES + ['key', 'id']) # We don't want to populate those properties\n kwargs = _.pick(kwargs, _.keys(self._properties)) # We want to populate only real model properties\n super(Base, self).populate(**kwargs)",
"def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wcapi = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n # var_url = ''\n price = 0.0\n if variant.variant_id:\n info = {'id':variant.variant_id}\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku':variant.default_code, 'weight':str(weight),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n attributes = \\\n self.get_product_attribute(template.product_tmpl_id, instance, common_log_id,\n model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0,\n partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price':str(price), 'sale_price':str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku':variant.default_code,\n \"manage_stock\":variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price':str(price), 'sale_price':str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'update':woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % (res.content)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % (template.name))\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\":attributes})\n res = wcapi.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'create':variants_to_create})\n try:\n response = res.json()\n except Exception as e:\n message = \"Json Error : While update products to WooCommerce for instance %s.\" \\\n \" \\n%s\" % (instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n else:\n variant_id = product.get(\"id\")\n sku = product.get(\"sku\")\n variant = template.woo_product_ids.filtered(lambda x:x.default_code == sku)\n if variant:\n variant.write({\"variant_id\":variant_id, \"exported_in_woo\":True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag",
"def __init__(self, name, partition, **properties):\n super(ApplicationService, self).__init__(name, partition)\n\n for key, value in list(self.properties.items()):\n if key == \"options\":\n if key in properties:\n self._data.update(properties.get(key, value))\n for opt in value:\n if opt in properties:\n self._data[opt] = properties.get(opt, value)\n elif key == \"template\":\n self._data[key] = properties.get(key, value)",
"def fillComputedValues(tv):\n if 'computed' not in tv:\n tv['computed'] = dict()\n fillUUIDFromDiscoverResponse(tv)\n fillNameFromDiscoverResponse(tv)\n fillModelFromDiscoverResponse(tv)\n fillManufacturerFromDiscoverResponse(tv)",
"def _init_vendor_properties(self):\n\n properties = {}\n\n if self.configuration.get('datera_debug_replica_count_override'):\n replica_count = 1\n else:\n replica_count = 3\n self._set_property(\n properties,\n \"DF:replica_count\",\n \"Datera Volume Replica Count\",\n _(\"Specifies number of replicas for each volume. Can only be \"\n \"increased once volume is created\"),\n \"integer\",\n minimum=1,\n default=replica_count)\n\n self._set_property(\n properties,\n \"DF:acl_allow_all\",\n \"Datera ACL Allow All\",\n _(\"True to set acl 'allow_all' on volumes created. Cannot be \"\n \"changed on volume once set\"),\n \"boolean\",\n default=False)\n\n self._set_property(\n properties,\n \"DF:ip_pool\",\n \"Datera IP Pool\",\n _(\"Specifies IP pool to use for volume\"),\n \"string\",\n default=\"default\")\n\n # ###### QoS Settings ###### #\n self._set_property(\n properties,\n \"DF:read_bandwidth_max\",\n \"Datera QoS Max Bandwidth Read\",\n _(\"Max read bandwidth setting for volume qos, \"\n \"use 0 for unlimited\"),\n \"integer\",\n minimum=0,\n default=0)\n\n self._set_property(\n properties,\n \"DF:default_storage_name\",\n \"Datera Default Storage Instance Name\",\n _(\"The name to use for storage instances created\"),\n \"string\",\n default=\"storage-1\")\n\n self._set_property(\n properties,\n \"DF:default_volume_name\",\n \"Datera Default Volume Name\",\n _(\"The name to use for volumes created\"),\n \"string\",\n default=\"volume-1\")\n\n self._set_property(\n properties,\n \"DF:write_bandwidth_max\",\n \"Datera QoS Max Bandwidth Write\",\n _(\"Max write bandwidth setting for volume qos, \"\n \"use 0 for unlimited\"),\n \"integer\",\n minimum=0,\n default=0)\n\n self._set_property(\n properties,\n \"DF:total_bandwidth_max\",\n \"Datera QoS Max Bandwidth Total\",\n _(\"Max total bandwidth setting for volume qos, \"\n \"use 0 for unlimited\"),\n \"integer\",\n minimum=0,\n default=0)\n\n self._set_property(\n properties,\n \"DF:read_iops_max\",\n \"Datera QoS Max iops Read\",\n _(\"Max read iops setting for volume qos, \"\n \"use 0 for unlimited\"),\n \"integer\",\n minimum=0,\n default=0)\n\n self._set_property(\n properties,\n \"DF:write_iops_max\",\n \"Datera QoS Max IOPS Write\",\n _(\"Max write iops setting for volume qos, \"\n \"use 0 for unlimited\"),\n \"integer\",\n minimum=0,\n default=0)\n\n self._set_property(\n properties,\n \"DF:total_iops_max\",\n \"Datera QoS Max IOPS Total\",\n _(\"Max total iops setting for volume qos, \"\n \"use 0 for unlimited\"),\n \"integer\",\n minimum=0,\n default=0)\n # ###### End QoS Settings ###### #\n\n return properties, 'DF'",
"def fill_version_hash(apps, schema_editor):\n SushiCredentials = apps.get_model('sushi', 'SushiCredentials')\n for credentials in SushiCredentials.objects.all():\n credentials.version_hash = get_hash(credentials)\n credentials.save()",
"def hydrate_password(self, bundle):\n bundle.data['password'] = make_password(bundle.data.get('password'))\n return bundle",
"def Secure(self,passphrase=None,public_attributes=[]):\n\n\t\tif passphrase == None:\n\t\t\treturn self.Dictify()\n\t\telse:\n\t\t\tself.data = Encrypting.Symmetric.Encrypt(json.dumps(self.Dictify()).encode('utf-8'),passphrase).decode('utf-8')\n\t\t\t\n\t\t#secure data and dictify\n\t\tmy_secure_dict = self.Dictify()\n\n\t\t#new obfuscated obj\n\t\tnew_me = {'data':my_secure_dict['data']}\n\n\t\tfor pub_att in public_attributes:\n\t\t\tnew_me[pub_att] = my_secure_dict[pub_att]\n\n\t\treturn new_me",
"def __init__(self, properties_dict):\n for k, v in properties_dict.items():\n self.__setattr__(k,v)",
"def update(self, data):\n # TODO: try not to use setattr\n for key, item in data.items():\n if key == \"password\":\n new_password = self.__generate_hash(item)\n setattr(self, key, new_password)\n else:\n setattr(self, key, item)\n\n super().update(data)\n db.session.commit()",
"def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)",
"def set_properties(self, property_dict):\n self.properties.update(property_dict)",
"def prepare_woo_variant_vals(self, woo_instance, variant, template_title=\"\"):\n variant_vals = super(WooProductTemplateEpt, self).prepare_woo_variant_vals(woo_instance, variant,\n template_title)\n variant_vals.update({\"sequence\": variant.get(\"menu_order\")})\n return variant_vals",
"def setUp(self):\n self.crypt = Crypt()",
"def fill_default_attributes(self, template_dictionary, escape_db_operations=False):\n template_dictionary = self._populate_user_and_project(template_dictionary, escape_db_operations)\n template_dictionary = self._populate_message(template_dictionary)\n template_dictionary = self._populate_menu(template_dictionary)\n\n if KEY_ERRORS not in template_dictionary:\n template_dictionary[KEY_ERRORS] = {}\n if KEY_FORM_DATA not in template_dictionary:\n template_dictionary[KEY_FORM_DATA] = {}\n if KEY_SUB_SECTION not in template_dictionary and KEY_SECTION in template_dictionary:\n template_dictionary[KEY_SUB_SECTION] = template_dictionary[KEY_SECTION]\n if KEY_SUBMENU_LIST not in template_dictionary:\n template_dictionary[KEY_SUBMENU_LIST] = None\n\n template_dictionary[KEY_CURRENT_VERSION] = cfg.BASE_VERSION\n return template_dictionary",
"def _fill_user_entries(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # For every enabled verification parameter, set its value in its corresponding entry.\n for param in self.verify_params.enabled:\n self._fill_user_entry(self.computer, param)",
"def fill_ingredient(self, ingredient: str, quantity: int) -> None:\n self.inventory_availability[ingredient] = quantity",
"def set_keys(self):\n self.inventory_dict['csah'] = {'hosts': '{}'.format(socket.getfqdn()), 'vars': {}}",
"def initSlotObjectDict(cls):\n restslotattributedict.update(dict({extension_tunnel: \"name\"}))\n restslotattributedict.update(dict({extension_circuit: \"name\"}))\n restslotattributedict.update(dict({extension_ip_interface: \"name\"}))\n restslotattributedict.update(dict({extension_ip_route: \"name\"}))\n restslotattributedict.update(dict({gigabitethernet: \"name\"}))\n restslotattributedict.update(dict({blade: \"slot_number\"}))"
]
| [
"0.5169703",
"0.48675728",
"0.47662935",
"0.44413066",
"0.44063464",
"0.4381093",
"0.43770158",
"0.43584442",
"0.43020535",
"0.429262",
"0.42704624",
"0.42688766",
"0.42578593",
"0.42572045",
"0.4243665",
"0.42318308",
"0.42251247",
"0.42053914",
"0.41994518",
"0.4174146",
"0.41588172",
"0.4156701",
"0.4152849",
"0.41417506",
"0.41369307",
"0.41344318",
"0.41283208",
"0.41275328",
"0.4125594",
"0.41230243"
]
| 0.66786087 | 0 |
The function extracts the https link from a given string (a regex-based alternative is sketched after this record) | def find_https(x):
    start = None
    end = None
    # take the first occurrence of "http" and stop at the next double quote
    for i in range(len(x)):
        if start is None and i <= len(x) - 4 and x[i:i + 4] == "http":
            start = i
        if start is not None and x[i] == '"':
            end = i
            break
    if start is None:
        return None
    return x[start:end] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_real_link(self, text):\n if text.startswith('https://www.google.com/url?'):\n return parse_qs(urlparse(text).query)['url'][0]\n\n return text",
"def https(url):\n if url[:8] == 'https://':\n return url\n if url[:7] != 'http://':\n return False\n return 'https://' + url[7:]",
"def extractUrl(self, href):\n url = ''\n pattern = re.compile(r'(http[s]?://[^&]+)&', re.U | re.M)\n url_match = pattern.search(href)\n if(url_match and url_match.lastindex > 0):\n url = url_match.group(1)\n\n return url",
"def href_from_link_text(url, headers, link_text):\n response = requests.get(url, headers=headers)\n html = response.text.replace(\"\\\"\", \"'\")\n link_text_pos = html.find(link_text)\n if link_text_pos == -1:\n issue_url = ('https://github.com/spacether/pycalculix/issues/new?title='\n 'CCX%20zip%20download%20fails%20on%20windows&body=Please%2'\n '0update%20the%20installer.py%20get_direct_url%20function.'\n '%20It%20no%20longer%20works')\n raise ValueError('Unable to download file because there was not a link '\n 'with text=\\'%s\\' on the page at url=%s\\nTo fix this, '\n 'please click the \\'Submit new issue\\' button '\n 'here:\\n%s' % (link_text, url, issue_url))\n href_pos = html[:link_text_pos].rfind('href')\n first_char = html.find(\"'\", href_pos)+1\n last_char = html.find(\"'\", first_char)\n return html[first_char:last_char]",
"def extract_url(log_entry):\n pattern = r\"https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+\"\n url = re.findall(pattern, log_entry)[0]\n\n if not url:\n raise URINotFound(log_entry)\n\n return urlparse(url)",
"def extract_url(input_string):\n if input_string:\n url_search = re.search(\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n input_string)\n\n if bool(url_search):\n return url_search.group()\n\n return None",
"def first_https_uri(xia):\n\n return first_uri_matching_prefix(xia, \"https://\")",
"def extract_url_from_anchor_tag(text):\n pattern = re.compile(r'(?<=href=\").*?(?=\")')\n matches = pattern.findall(text)\n return matches[0] if matches else ''",
"def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s",
"def link_extract(link_text, content):\n h = html5lib.parse(content, namespaceHTMLElements=False)\n candidates = h.findall(\".//a[.='%s']\" % link_text)\n if not candidates:\n return 'NOT MATCHED'\n try:\n return candidates[0].attrib['href']\n except:\n return 'NOT MATCHED'",
"def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url",
"def remove_url(text):\r\n url = re.sub('https?://[A-Za-z0-9./]+', '', text)\r\n return url",
"def get_href(text, base_url=None):\n m = re.search(r'href\\s*=\\s*[\"\\']?([^\"\\'> ]+)[\"\\'> ]', text, re.I)\n if not m:\n return None\n link = m.group(1).strip()\n if base_url and not link.lower().startswith(\"http\"):\n import urlparse\n link = urlparse.urljoin(base_url, link)\n return link",
"def check_for_url_in_text(self, string):\r\n has_link = False\r\n\r\n # Find all links in the string.\r\n links = re.findall(r'(https?://\\S+)', string)\r\n if len(links)>0:\r\n has_link = True\r\n\r\n # Autolink by wrapping links in anchor tags.\r\n for link in links:\r\n string = re.sub(link, self.generate_file_link_html_from_url(link, link), string)\r\n\r\n return has_link, string",
"def extract_urls(string):\n regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\" # noqa\n url = re.findall(regex, string)\n return [x[0] for x in url]",
"def getDomain(url):\n domain = string.replace(url,\"https://www.\",\"\")\n domain = string.replace(domain,\"http://www.\",\"\")\n domain = string.replace(domain,\"http://\",\"\")\n domain = string.replace(domain,\".com/\",\"\")\n domain = string.replace(domain,\".com\",\"\")\n return domain",
"def extract_url(message):\n # Returns the first url in a message. If there aren't any returns None\n url_re = \"(?:\\s|^)<(https?://[\\w./?+&+%$!#=\\-_]+)>(?:\\s|$)\"\n captures = re.search(url_re, message)\n\n if captures is not None:\n captures = captures.group(1).strip()\n\n return captures",
"def get_website_url(url_element):\n if not url_element:\n return ''\n\n web_url = unquote(url_element.find('a', first = True).attrs['href'])\n if 'url=' in web_url :\n web_url = web_url[web_url.find('url=')+4 : web_url.find('&')]\n\n return web_url",
"def get_protocol(url):\n result = re.search(r\"^https?://\", url)\n return result.group(0) if result else None",
"def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)",
"def extract_url(td):\n url = td.find('a',href=True)['href']\n return url",
"def extract_domain(link):\n domain = parse.urlsplit(link).netloc\n return domain",
"def ref_to_link(txt):\n text = txt.group(1) # because it was a match in a regular expression\n\n thecite, everythingelse = first_bracketed_string(text)\n thecite = thecite[1:-1] # strip curly brackets\n thecite = thecite.replace(\"\\\\\",\"\") # \\href --> href\n\n refs = thecite.split(\",\")\n ans = \"\"\n\n # print \"refs\",refs\n\n for ref in refs:\n ref = ref.strip() # because \\cite{A, B, C,D} can have spaces\n this_link = \"\"\n if ref.startswith(\"href\"):\n the_link = re.sub(r\".*{([^}]+)}{.*\", r\"\\1\", ref)\n click_on = re.sub(r\".*}{([^}]+)}\\s*\", r\"\\1\", ref)\n this_link = '{{ LINK_EXT(\"' + click_on + '\",\"' + the_link + '\") | safe}}'\n elif ref.startswith(\"doi\"):\n ref = ref.replace(\":\",\"\") # could be doi:: or doi: or doi\n the_doi = ref[3:] # remove the \"doi\"\n this_link = '{{ LINK_EXT(\"' + the_doi + '\",\"https://doi.org/' + the_doi + '\")| safe }}'\n elif ref.lower().startswith(\"mr\"):\n ref = ref.replace(\":\",\"\")\n the_mr = ref[2:] # remove the \"MR\"\n this_link = '{{ LINK_EXT(\"' + 'MR:' + the_mr + '\", '\n this_link += '\"http://www.ams.org/mathscinet/search/publdoc.html?pg1=MR&s1='\n this_link += the_mr + '\") | safe}}'\n elif ref.lower().startswith(\"arxiv\"):\n ref = ref.replace(\":\",\"\")\n the_arx = ref[5:] # remove the \"arXiv\"\n this_link = '{{ LINK_EXT(\"' + 'arXiv:' + the_arx + '\", '\n this_link += '\"http://arxiv.org/abs/'\n this_link += the_arx + '\")| safe}}'\n\n\n if this_link:\n if ans:\n ans += \", \"\n ans += this_link\n\n return '[' + ans + ']' + everythingelse",
"def contains_url(self, string):\n return re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)",
"def extract_link_str(self, link):\n if type(link) is str:\n # import pdb; pdb.set_trace()\n if re.match( r'^link:', link):\n # assume intending to specify a link, now match for rest of pattern \n matchObj = re.match( r'^link:([^ ]+)$', link)\n if matchObj:\n path = matchObj.group(1)\n node = self.get_node(path)\n link_info = {'node': node}\n return link_info\n else:\n print \"** Error, invalid path specified in link string, must not have spaces\"\n print \" link string is: '%s'\" % link\n traceback.print_stack()\n sys.exit(1)\n elif re.match( r'^extlink:', link):\n # assume intending to specify an external link, now match for rest of pattern\n matchObj = re.match( r'^extlink:([^ ]*[^ ,])[ ,]([^ ]+)$', link)\n if matchObj:\n file = matchObj.group(1)\n path = matchObj.group(2)\n link_info = {'extlink': (file, path)}\n return link_info\n else:\n print \"** Error, invalid file or path specified in extlink string\"\n print \" must not have spaces and file name must not end in comma\"\n print \"extlink string is: '%s'\"% link\n traceback.print_stack()\n sys.exit(1)\n return None",
"def normalize_link(link, split_url):\n url = link.get(\"href\", None)\n if not url:\n return None\n protocol = split_url.scheme + \"://\"\n netloc = split_url.netloc\n final_url = \"\"\n if not protocol in url: # Protocol doesn't exists, lets make sure that gets added.\n final_url += protocol\n if not netloc in url:\n final_url += netloc + \"/\"\n\n if url.startswith(\"/\"):\n final_url += url[1:]\n else:\n final_url += url\n\n return final_url",
"def get_hostname (surl):\n if str(surl).find('srm://'):\n surl = surl [str(surl).find('srm://'):]\n\n reg = re.search('[^:]+:(/)*([^:/]+)(:[0-9]+)?(/)?.*', surl)\n host = ''\n try:\n host = reg.group(2)\n except:\n pass\n \n return host",
"def __expandURL(self, link):\n try:\n return requests.get(link).url\n except Exception:\n return link",
"def href_to_link(href, domains=[\"\"]):\n\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\"\n }\n\n href = href.strip()\n\n # Some \"domains\" given might not actually be the root, this looks to add them\n for domain in domains:\n uri_parsed = urlparse(domain)\n tmp_domain = '{uri.scheme}://{uri.netloc}/'.format(uri=uri_parsed)\n\n if merge_link(domain, '/') != merge_link(tmp_domain, '/'):\n domains.append(tmp_domain)\n\n if domains[0] != \"\":\n domains.insert(0, \"\")\n\n for domain in domains:\n temp_url = merge_link(domain, href)\n print(temp_url)\n #temp_url = add_protocol(temp_url)\n\n # if not \".\" in temp_url:\n # continue\n\n try:\n r = requests.head(temp_url, headers=headers)\n print(r.status_code)\n if r.status_code == 200:\n return temp_url\n except:\n print(f\"{temp_url} failed\")\n pass\n\n try:\n print(switch_protocol(temp_url))\n r = requests.head(switch_protocol(temp_url), headers=headers)\n print(r.status_code)\n if r.status_code == 200:\n return switch_protocol(temp_url)\n except:\n print(f\"{switch_protocol(temp_url)} failed\")\n pass\n\n return href",
"def remove_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', '', text)\n return text"
]
| [
"0.75003695",
"0.7102347",
"0.6790707",
"0.6652766",
"0.65266573",
"0.65216005",
"0.64578366",
"0.64120406",
"0.63907766",
"0.637306",
"0.6370608",
"0.6312711",
"0.6296061",
"0.6275339",
"0.6269629",
"0.6259579",
"0.6188896",
"0.61413676",
"0.60936826",
"0.6092776",
"0.6079906",
"0.59634334",
"0.5954023",
"0.59520566",
"0.5945928",
"0.5917631",
"0.59125656",
"0.5905423",
"0.58966774",
"0.5882295"
]
| 0.7502399 | 0 |
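As an aside on the record above: the same extraction is more robustly done with a regular expression than with a hand-rolled character scan. A minimal sketch, assuming only the standard-library re module; the helper name and pattern below are illustrative, not taken from the source:

import re

def find_https_re(text):
    # first http(s) URL, terminated by a double quote or whitespace
    match = re.search(r'https?://[^"\s]+', text)
    return match.group(0) if match else None

find_https_re('<a href="https://example.org/page">')  # -> 'https://example.org/page'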
Function wrapper to find the maximum (or minimum) of a function using the scipy fmin-like minimisation routines, allowing some of the parameters to be fixed. This requires a messy wrapper because none of the parameters in the vector passed to the fmin functions can be fixed! LogLikelihood - function to optimise, of the form func(parameters, func_args); it doesn't need to be a log likelihood of course, that's just what I use it for. par - array of parameters to the func to optimise. func_args - additional arguments to the func, usually as a tuple. type - either max or min, to optimise the function. method - algorithm to use: NM Nelder-Mead (downhill simplex/amoeba), CG conjugate gradient, or P Powell's method; CG and P not working properly for some reason!! maxiter, maxfun - max iterations and function evaluations for the Nelder-Mead algorithm. (A standalone usage sketch follows this record.) | def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):
    # split the parameter vector into variable and fixed parts
    if fixed is None:
        var_par = np.copy(par)
        fixed_par = None  # no fixed parameters
    # otherwise construct the parameter vector from var_par and fixed_par_val
    else:
        par = np.array(par)
        fixed = np.array(fixed)  # ensure fixed is a np array
        # assign parameters to normal param vector
        fixed_par = par[np.where(fixed == True)]
        var_par = par[np.where(fixed != True)]
    # set the algorithm to use - CG and P not working (at least not well)
    add_kwords = {'verbose': verbose}
    if method == 'NM':
        Algorithm = NelderMead
        add_kwords = {'maxiter': maxiter, 'maxfun': maxfun, 'verbose': verbose}
    elif method == 'CG':
        print("warning: CG method didn't work properly during testing")
        Algorithm = ConjugateGradient
    elif method == 'P':
        print("warning: Powell algorithm didn't work properly during testing")
        Algorithm = Powell
    else:
        print("error: optimisation function not found")
        return par
    # set the optimisation function to pos or neg for the fmin functions
    if type == 'max':
        OptFunc = NegFixedPar_func
    elif type == 'min':
        OptFunc = FixedPar_func
    else:
        print("error: %s not a valid option" % type)
        return par
    # call the optimiser with the appropriate function
    fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood, func_args, fixed, fixed_par),
                           **add_kwords)
    # now return the params in the correct order...
    if fixed is None:
        return_par = fitted_par
    else:
        return_par = np.copy(par)
        return_par[np.where(fixed != True)] = fitted_par
    return return_par | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fmax(func_to_maximize, initial_guess=0.5*V):\n func_to_minimize = lambda x : -func_to_maximize(x)\n return fmin(func_to_minimize, initial_guess, disp=False)[0]",
"def fmax(func_to_maximize, initial_guess=0):\n func_to_minimize = lambda x : -func_to_maximize(x)\n return fmin(func_to_minimize, initial_guess, disp=False)[0]",
"def optimize_log_fmin(p0, data, model_func, pts, \n lower_bound=None, upper_bound=None,\n verbose=0, flush_delay=0.5, \n multinom=True, maxiter=None, \n full_output=False, func_args=[], \n func_kwargs={},\n fixed_params=None, output_file=None,nmarginals=1):\n #print p0\t\n if output_file:\n output_stream = file(output_file, 'w')\n else:\n output_stream = sys.stdout\n\t\n args = (data, model_func, pts, lower_bound, upper_bound, verbose,\n multinom, flush_delay, func_args, func_kwargs, fixed_params, 1.0,\n output_stream)\n #if nmarginals==1:\n #\tobject_fun=dadi.Inference._object_func_log\n #else:\n object_fun=_object_func_marginals_log\n \n p0 = dadi.Inference._project_params_down(p0, fixed_params)\n #print \"optimizing!\"\n \n #print object_fun\n #print numpy.log(p0)\n #print object_fun(p0,data,model_func,pts, lower_bound=lower_bound,upper_bound=upper_bound,verbose=0,multinom=multinom,flush_delay=flush_delay,func_args=func_args,func_kwargs=func_kwargs,fixed_params=fixed_params, ll_scale=1,output_stream=sys.stdout)\n \n outputs = scipy.optimize.fmin(object_fun, numpy.log(p0), args = args,\n disp=False, maxiter=maxiter, full_output=True)\n xopt, fopt, iter, funcalls, warnflag = outputs\n xopt = dadi.Inference._project_params_up(numpy.exp(xopt), fixed_params)\n\n if output_file:\n output_stream.close()\n\n if not full_output:\n return xopt\n else:\n return xopt, fopt, iter, funcalls, warnflag",
"def fmin(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, retall=0, callback=None, zdelt = 0.00025, nonzdelt = 0.05, \n holdfixed=None):\n # 2011-04-13 14:26 IJMC: Adding Keyword option\n # 2011-05-11 10:48 IJMC: Added the zdelt and nonzdelt options\n # 2011-05-30 15:36 IJMC: Added the holdfixed option\n\n def wrap_function(function, args, **kw):\n ncalls = [0]\n def function_wrapper(x):\n ncalls[0] += 1\n return function(x, *args, **kw)\n return ncalls, function_wrapper\n\n # Set up holdfixed arrays\n if holdfixed is not None:\n holdfixed = np.array(holdfixed)\n #x0[holdfixed] = x0[holdfixed]\n holdsome = True\n else:\n holdsome = False\n #holdfixed = np.zeros(params.size, dtype=bool)\n \n #if holdsome:\n # print \"holdfixed>>\", holdfixed\n\n fcalls, func = wrap_function(func, args, **kw)\n x0 = np.asfarray(x0).flatten()\n xoriginal = x0.copy()\n N = len(x0)\n rank = len(x0.shape)\n if not -1 < rank < 2:\n raise ValueError, \"Initial guess must be a scalar or rank-1 sequence.\"\n if maxiter is None:\n maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n if rank == 0:\n sim = np.zeros((N+1,), dtype=x0.dtype)\n else:\n sim = np.zeros((N+1,N), dtype=x0.dtype)\n fsim = np.zeros((N+1,), float)\n sim[0] = x0\n if retall:\n allvecs = [sim[0]]\n #print func.__name__\n #print x0\n fsim[0] = func(x0)\n for k in range(0,N):\n y = np.array(x0,copy=True)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n if holdsome and k in holdfixed:\n y[k] = xoriginal[k]\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = np.argsort(fsim)\n fsim = np.take(fsim,ind,0)\n # sort so sim[0,:] has the lowest function value\n sim = np.take(sim,ind,0)\n\n iterations = 1\n\n while (fcalls[0] < maxfun and iterations < maxiter):\n ### IJC Edit to understand fmin!\n ##print 'xtol>> ' + str(max(np.ravel(abs(sim[1:]-sim[0])))) + ' > ' + str(xtol)\n ##print 'ftol>> ' + str(max(abs(fsim[0]-fsim[1:]))) + ' > ' + str(ftol)\n if (max(np.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n and max(abs(fsim[0]-fsim[1:])) <= ftol):\n break\n\n xbar = np.add.reduce(sim[:-1],0) / N\n xr = (1+rho)*xbar - rho*sim[-1]\n if holdsome:\n xr[holdfixed] = xoriginal[holdfixed]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1+rho*chi)*xbar - rho*chi*sim[-1]\n if holdsome:\n xe[holdfixed] = xoriginal[holdfixed]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1+psi*rho)*xbar - psi*rho*sim[-1]\n if holdsome:\n xc[holdfixed] = xoriginal[holdfixed]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = (1-psi)*xbar + psi*sim[-1]\n if holdsome:\n xcc[holdfixed] = xoriginal[holdfixed]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma*(sim[j] - sim[0])\n if holdsome:\n sim[j, holdfixed] = xoriginal[holdfixed]\n fsim[j] = func(sim[j])\n\n ind = np.argsort(fsim)\n sim = np.take(sim,ind,0)\n fsim = np.take(fsim,ind,0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n if retall:\n allvecs.append(sim[0])\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n 
warnflag = 1\n if disp:\n print \"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\"\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n else:\n if disp:\n print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % iterations\n print \" Function evaluations: %d\" % fcalls[0]\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist",
"def fmin_powell(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None,\n maxfun=None, full_output=0, disp=1, retall=0, callback=None,\n direc=None, holdfixed=None):\n # 2010-07-01 11:17 IJC: Added keyword option\n\n from scipy import optimize\n from numpy import asarray, eye, pi, squeeze\n\n def wrap_function(function, args, **kw):\n ncalls = [0]\n def function_wrapper(x):\n ncalls[0] += 1\n return function(x, *args, **kw)\n return ncalls, function_wrapper\n\n def _linesearch_powell(func, p, xi, tol=1e-3):\n \"\"\"Line-search algorithm using fminbound.\n\n Find the minimium of the function ``func(x0+ alpha*direc)``.\n\n \"\"\"\n def myfunc(alpha):\n return func(p + alpha * xi)\n alpha_min, fret, iter, num = optimize.brent(myfunc, full_output=1, tol=tol)\n xi = alpha_min*xi\n return squeeze(fret), p+xi, xi\n\n\n # Set up holdfixed arrays\n if holdfixed is not None:\n holdfixed = np.array(holdfixed)\n #x0[holdfixed] = x0[holdfixed]\n holdsome = True\n else:\n holdsome = False\n #holdfixed = np.zeros(params.size, dtype=bool)\n\n # we need to use a mutable object here that we can update in the\n # wrapper function\n fcalls, func = wrap_function(func, args, **kw)\n x = asarray(x0).flatten()\n xoriginal = x.copy()\n if retall:\n allvecs = [x]\n N = len(x)\n rank = len(x.shape)\n if not -1 < rank < 2:\n raise ValueError, \"Initial guess must be a scalar or rank-1 sequence.\"\n if maxiter is None:\n maxiter = N * 1000\n if maxfun is None:\n maxfun = N * 1000\n\n\n if direc is None:\n direc = eye(N, dtype=float)\n else:\n direc = asarray(direc, dtype=float)\n\n fval = squeeze(func(x))\n x1 = x.copy()\n iter = 0;\n ilist = range(N)\n while True:\n fx = fval\n bigind = 0\n delta = 0.0\n for i in ilist:\n direc1 = direc[i]\n fx2 = fval\n if (not holdsome) or (i not in holdfixed):\n fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)\n if (fx2 - fval) > delta:\n delta = fx2 - fval\n bigind = i\n iter += 1\n if callback is not None:\n callback(x)\n if retall:\n allvecs.append(x)\n if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break\n if fcalls[0] >= maxfun: break\n if iter >= maxiter: break\n\n # Construct the extrapolated point\n direc1 = x - x1\n x2 = 2*x - x1\n if holdsome:\n x2[holdfixed] = xoriginal[holdfixed]\n x1 = x.copy()\n fx2 = squeeze(func(x2))\n\n if (fx > fx2):\n t = 2.0*(fx+fx2-2.0*fval)\n temp = (fx-fval-delta)\n t *= temp*temp\n temp = fx-fx2\n t -= delta*temp*temp\n if t < 0.0:\n fval, x, direc1 = _linesearch_powell(func, x, direc1,\n tol=xtol*100)\n if holdsome:\n x[holdfixed] = xoriginal[holdfixed]\n direc[bigind] = direc[-1]\n direc[-1] = direc1\n\n warnflag = 0\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n print \"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\"\n elif iter >= maxiter:\n warnflag = 2\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n else:\n if disp:\n print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % iter\n print \" Function evaluations: %d\" % fcalls[0]\n\n x = squeeze(x)\n\n if full_output:\n retlist = x, fval, direc, iter, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist",
"def _optimize_f(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda *args,**kwargs:self.f(*args,**kwargs)\n elif type == 'max':\n g=lambda *args,**kwargs:-1*self.f(*args,**kwargs)\n elif type == 'root':\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,tuple(self.parvals),**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,tuple(self.parvals),**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]",
"def maximize_scalar(max_func, *args, **kwargs):\n def min_func(*args):\n return -max_func(*args)\n\n res = minimize_scalar(min_func, *args, **kwargs)\n\n # we have to negate the function value before returning res\n res.fun = -res.fun\n return res",
"def hyperopt_fmin(\n fn,\n space,\n algo,\n max_evals=sys.maxsize,\n timeout=None,\n loss_threshold=None,\n trials=None,\n rstate=None,\n allow_trials_fmin=True,\n pass_expr_memo_ctrl=None,\n catch_eval_exceptions=False,\n verbose=True,\n return_argmin=True,\n points_to_evaluate=None,\n max_queue_len=1,\n show_progressbar=True,\n # early_stop_fn=None,\n):\n if rstate is None:\n env_rseed = os.environ.get(\"HYPEROPT_FMIN_SEED\", \"\")\n if env_rseed:\n rstate = np.random.RandomState(int(env_rseed))\n else:\n rstate = np.random.RandomState()\n\n validate_timeout(timeout)\n validate_loss_threshold(loss_threshold)\n\n if allow_trials_fmin and hasattr(trials, \"fmin\"):\n assert False\n # return trials.fmin(\n # fn,\n # space,\n # algo=algo,\n # max_evals=max_evals,\n # timeout=timeout,\n # loss_threshold=loss_threshold,\n # max_queue_len=max_queue_len,\n # rstate=rstate,\n # pass_expr_memo_ctrl=pass_expr_memo_ctrl,\n # verbose=verbose,\n # catch_eval_exceptions=catch_eval_exceptions,\n # return_argmin=return_argmin,\n # show_progressbar=show_progressbar,\n # early_stop_fn=early_stop_fn,\n # )\n\n if trials is None:\n if points_to_evaluate is None:\n trials = base.Trials()\n else:\n assert type(points_to_evaluate) == list\n trials = generate_trials_to_calculate(points_to_evaluate)\n\n domain = base.Domain(fn, space, pass_expr_memo_ctrl=pass_expr_memo_ctrl)\n\n rval = FMinIter(\n algo,\n domain,\n trials,\n max_evals=max_evals,\n timeout=timeout,\n loss_threshold=loss_threshold,\n rstate=rstate,\n verbose=verbose,\n max_queue_len=max_queue_len,\n show_progressbar=show_progressbar,\n # early_stop_fn=early_stop_fn,\n )\n rval.catch_eval_exceptions = catch_eval_exceptions\n\n # next line is where the fmin is actually executed\n rval.exhaust()\n\n if len(trials.trials) == 0:\n raise Exception(\n \"There are no evaluation tasks, cannot return argmin of task losses.\"\n )\n return trials",
"def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best",
"def optimize_log_fmin_coarse(p0, coarsenings,data, model_func, pts, \n lower_bound=None, upper_bound=None,\n verbose=0, flush_delay=0.5, \n multinom=True, maxiter=None, \n full_output=False, func_args=[], \n func_kwargs={},\n fixed_params=None, output_file=None,nmarginals=1):\n if output_file:\n output_stream = file(output_file, 'w')\n else:\n output_stream = sys.stdout\n\n args = (data, model_func, pts, lower_bound, upper_bound, verbose,\n multinom, flush_delay, func_args, func_kwargs, fixed_params, 1.0,\n output_stream)\n if nmarginals==1:\n \tobject_fun=_object_func_log\n else:\n \tobject_fun=_object_func_marginals_coarse_log(coarsenings)\n \t\n p0 = dadi.Inference._project_params_down(p0, fixed_params)\n outputs = scipy.optimize.fmin(object_fun, numpy.log(p0), args = args,\n disp=False, maxiter=maxiter, full_output=True)\n xopt, fopt, iter, funcalls, warnflag = outputs\n xopt = dadi.Inference._project_params_up(numpy.exp(xopt), fixed_params)\n\n if output_file:\n output_stream.close()\n\n if not full_output:\n return xopt\n else:\n return xopt, fopt, iter, funcalls, warnflag",
"def minimize_neldermead(func, x0, args=(), callback=None,\n maxiter=None, maxfev=None, disp=False,\n return_all=False, initial_simplex=None,\n xatol=1e-4, fatol=1e-4, **unknown_options):\n maxfun = maxfev\n retall = return_all\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n\n if initial_simplex is None:\n N = len(x0)\n\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n\n maxiter = 10\n maxfun = 10\n\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n # sort so sim[0,:] has the lowest function value\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n\n iterations = 1\n\n while iterations < maxiter:\n if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and\n numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0,\n status=warnflag, success=(warnflag == 0),\n message=None, x=x, final_simplex=(sim, fsim))\n return result",
"def find_fmin_on_grid(f, xs, args, full_output):\n Nx = len(xs)\n Jout = np.zeros(Nx)\n for k in range(Nx):\n Jout[k] = f(xs[k], *args)\n idx = np.nanargmin(Jout)\n if not full_output:\n return xs[idx], Jout[idx]\n return xs[idx], Jout[idx], xs, Jout",
"def find_max_f():\n fmax = fmin(g, 2)\n return fmax[0]",
"def findmin(f, ranges, args=(), Ns=None, full_output=False, method='brute',\n finish=False):\n if method == 'brute':\n Ns = Ns or 3\n x0, J0, xs, Jout = brute(f, ranges, args=args, Ns=Ns, full_output=True)\n elif method == 'monte carlos':\n Ns = Ns or 1000\n x0, J0, xs, Jout = monte_carlos(f, ranges, args=args, Ns=Ns, full_output=True)\n else:\n valid_methods = ('brute', 'monte carlos')\n raise ValueError('optimization method must be one of {0!r}'.format(\n ', '.join(valid_methods)))\n\n # Mask any values that are not finite\n mask = np.isfinite(Jout)\n xs = xs[mask]\n Jout = Jout[mask]\n if not len(xs):\n raise RuntimeError('Failed to find optimized parameters')\n\n if finish:\n import scipy.optimize\n res = scipy.optimize.fmin(f, x0, args=args, full_output=True)\n x0, J0 = res[0:2]\n\n if not full_output:\n return x0\n return x0, J0, xs, Jout",
"def fmin(evaluator, xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, callback=None):\n fcalls, func = wrap_function(evaluator.target)\n x0 = evaluator.x\n #x0 = asfarray(x0).flatten()\n N = len(x0)\n if maxiter is None:\n maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n sim = []\n fsim = [.0]*(N+1)\n for i in range(0,N+1):\n sim.append([.0]*(N+1))\n\n sim[0] = x0\n \n fsim[0] = func(x0)\n nonzdelt = 0.05\n zdelt = 0.00025\n for k in range(0,N):\n y = list(x0)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = sort_permutation(fsim)\n fsim = apply_permutation(fsim,ind)\n # sort so sim[0,:] has the lowest function value\n sim = apply_permutation(sim,ind)\n evaluator.x = sim[0]\n\n iterations = 1\n\n \n while (fcalls[0] < maxfun and iterations < maxiter):\n sim_size = max(map(lambda x : max(map(abs,map(operator.sub, x, sim[0]))),sim[1:]))\n #print \"The simplex size is %.6g(tol=%.6g)\"%(sim_size,xtol)\n fsim_size = max( map(lambda x: abs(x-fsim[0]), fsim[1:]))\n #print \"The simplex image size is %.6g(tol=%.6g)\"%(fsim_size, ftol)\n if ( sim_size <= xtol ) \\\n and fsim_size <=ftol:\n break\n# if (max(numpy.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n# and max(abs(fsim[0]-fsim[1:])) <= ftol):\n# break\n\n xbar = averageArrays(sim[:-1])\n xr = linearCombine((1+rho),xbar, - rho,sim[-1])\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = linearCombine((1+rho*chi),xbar, - rho*chi,sim[-1])\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = linearCombine((1+psi*rho),xbar, - psi*rho,sim[-1])\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = linearCombine((1-psi),xbar, psi,sim[-1])\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = linearCombine((1-sigma),sim[0] , sigma,sim[j])\n fsim[j] = func(sim[j])\n\n ind = sort_permutation(fsim)\n sim = apply_permutation(sim,ind)\n fsim = apply_permutation(fsim,ind)\n evaluator.x = sim[0]\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n printOut(\"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\")\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n printOut(\"Warning: Maximum number of iterations has been exceeded\")\n else:\n if disp:\n printOut(\"Optimization terminated successfully.\")\n printOut(\" Current function value: %f\" % fval)\n printOut(\" Iterations: %d\" % iterations)\n printOut(\" Function evaluations: %d\" % fcalls[0])\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n else:\n retlist = x\n\n return retlist",
"def maximize(func, grad_func, x, y, theta_0, alpha_0=0.01, max_it=100):\n return minimize(negate(func), negate_all(grad_func), x, y, theta_0, alpha_0=0.01, max_it=100)",
"def minimize(\n func: Callable,\n x0: Union[Array, BlockArray],\n args: Union[Tuple, Tuple[Any]] = (),\n method: str = \"L-BFGS-B\",\n hess: Optional[Union[Callable, str]] = None,\n hessp: Optional[Callable] = None,\n bounds: Optional[Union[Sequence, spopt.Bounds]] = None,\n constraints: Union[spopt.LinearConstraint, spopt.NonlinearConstraint, dict] = (),\n tol: Optional[float] = None,\n callback: Optional[Callable] = None,\n options: Optional[dict] = None,\n) -> spopt.OptimizeResult:\n\n if snp.util.is_complex_dtype(x0.dtype):\n # scipy minimize function requires real-valued arrays, so\n # we split x0 into a vector with real/imaginary parts stacked\n # and compose `func` with a `_join_real_imag`\n iscomplex = True\n func_ = lambda x: func(_join_real_imag(x))\n x0 = _split_real_imag(x0)\n else:\n iscomplex = False\n func_ = func\n\n x0_shape = x0.shape\n x0_dtype = x0.dtype\n x0 = x0.ravel() # if x0 is a BlockArray it will become a jax array here\n\n # Run the SciPy minimizer\n if method in (\n \"CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, \"\n \"trust-exact, trust-constr\"\n ).split(\n \", \"\n ): # uses gradient info\n min_func = _wrap_func_and_grad(func_, x0_shape, x0_dtype)\n jac = True # see scipy.minimize docs\n else: # does not use gradient info\n min_func = _wrap_func(func_, x0_shape, x0_dtype)\n jac = False\n\n res = spopt.OptimizeResult({\"x\": None})\n\n def fun(x0):\n nonlocal res # To use the external res and update side effect\n res = spopt.minimize(\n min_func,\n x0=x0,\n args=args,\n jac=jac,\n method=method,\n options=options,\n ) # Returns OptimizeResult with x0 as ndarray\n return res.x.astype(x0_dtype)\n\n # HCB call with side effects to get the OptimizeResult on the same device it was called\n res.x = hcb.call(\n fun,\n arg=x0,\n result_shape=x0, # From Jax-docs: This can be an object that has .shape and .dtype attributes\n )\n\n # un-vectorize the output array from spopt.minimize\n res.x = snp.reshape(\n res.x, x0_shape\n ) # if x0 was originally a BlockArray then res.x is converted back to one here\n\n if iscomplex:\n res.x = _join_real_imag(res.x)\n\n return res",
"def minimize(self, func, grad, x0, args=()):\n learning_rate = self._learning_rate\n best_x = x = x0\n best_value = func(x, *args)\n iters_without_improve = 0\n\n for iteration in range(self._max_iterations):\n gradient = grad(x, *args)\n\n # If absolute values of all partial derivatives are equal to 0 with specified accuracy, then parameters are\n # close enough to the minimum and there is no need to continue gradient descent.\n if np.abs(gradient).max() <= self._accuracy:\n break\n\n x = x - learning_rate * gradient\n\n # If new values of x haven't lead to decrease of the function value for the specified number of iteration,\n # the x is reverted to its previous best value and the learning rate is reduced\n value = func(x, *args)\n if value > best_value:\n iters_without_improve += 1\n if iters_without_improve >= self._lr_reduce_patience:\n x = best_x\n learning_rate *= self._lr_reduce_factor\n else:\n iters_without_improve = 0\n best_value = value\n best_x = x\n\n return best_x",
"def max_continuous(func: Callable[[Tuple], np.ndarray], over: Iterable[Tuple],\\\n state: Tuple[Union[int, float]]) -> Tuple[float, Tuple[Union[int, float]], None]:\n statebounds = tuple(zip(state, state))\n init = tuple([*state, *np.random.uniform(*zip(*over))])\n funcarg = lambda x: -func(np.asarray(x).reshape(1,-1))[0, 0]\n res = minimize(funcarg, x0=init, bounds=(*statebounds, *over))\n return (-funcarg(res.x), tuple(res.x[len(state):]), None)",
"def max_(*args, **kwargs):\n ...",
"def optimize_log(p0, data, model_func, pts, lower_bound=None, upper_bound=None,\n verbose=0, flush_delay=0.5, epsilon=1e-3, \n gtol=1e-5, multinom=True, maxiter=None, full_output=False,\n func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n output_file=None,nmarginals=1):\n if output_file:\n output_stream = file(output_file, 'w')\n else:\n output_stream = sys.stdout\n #print \"in opt,\"\n #print data.shape\n args = (data, model_func, pts, lower_bound, upper_bound, verbose,\n multinom, flush_delay, func_args, func_kwargs, fixed_params, \n ll_scale, output_stream)\n if nmarginals==1:\n \tobject_fun=dadi.Inference._object_func_log\n else:\n \tobject_fun=_object_func_marginals_log\n\n\n p0 = dadi.Inference._project_params_down(p0, fixed_params)\n outputs = scipy.optimize.fmin_bfgs(object_fun, \n numpy.log(p0), epsilon=epsilon,\n args = args, gtol=gtol, \n full_output=True,\n disp=False,\n maxiter=maxiter)\n xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs\n xopt = dadi.Inference._project_params_up(numpy.exp(xopt), fixed_params)\n\n if output_file:\n output_stream.close()\n\n if not full_output:\n return xopt\n else:\n return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag",
"def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)",
"def argmax(function, X, tiesolve=None):\n return argmin(lambda x: -function(x), X, tiesolve)",
"def fit_function(x_vals, y_vals, func, n_params, iterations=2):\n\n # internal function to minimize the error\n def f2min(a):\n #sum square deviation\n return ((func(x_vals, a) - y_vals)**2).sum()\n\n param_guess = array(range(n_params))\n for i in range(iterations):\n xopt = fmin(f2min, param_guess, disp=0)\n param_guess = xopt\n\n return xopt",
"def max_hybrid(func: Callable[[Tuple], np.ndarray], over: Tuple[Tuple],\\\n state: Tuple[Union[int, float]], cont: Tuple[bool],\\\n actions: Iterable[Tuple]) -> Tuple[float, Tuple[Union[int, float]], None]:\n best = -np.inf\n bestarg = None\n funcarg = lambda x: -func(np.asarray(x).reshape(1, -1))[0, 0]\n statebounds = tuple(zip(state, state))\n for act in actions:\n actbounds = [b if c else (a, a) for a, c, b in zip(act, cont, over)]\n init = tuple([*state, *np.random.uniform(*zip(*over))])\n res = minimize(funcarg, x0=init, bounds=(*statebounds, *actbounds))\n val = -funcarg(res.x)\n if val > best:\n best = val\n bestarg = res.x\n return (best, tuple([float(v) if c else int(v) for v, c in \\\n zip(bestarg[len(state):], cont)]), None)",
"def regmax(f, Bc=None):\n\n if Bc is None: Bc = secross()\n y = regmin(neg(f),Bc)\n return y",
"def argmax(seq, fn):\n return argmin(seq, lambda x: -fn(x))",
"def maximize(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'max',method,**kwargs)",
"def fmin(func, x0, sigma0=None, args=()\r\n # the follow string arguments are evaluated, besides the verb_filenameprefix\r\n , CMA_active='False # exponential negative update, conducted after the original update'\r\n , CMA_activefac='1 # learning rate multiplier for active update'\r\n , CMA_cmean='1 # learning rate for the mean value'\r\n , CMA_const_trace='False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero'\r\n , CMA_diagonal='0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always' # TODO 4/ccov_separable?\r\n , CMA_eigenmethod='np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)'\r\n , CMA_elitist='False # elitism likely impairs global search performance'\r\n , CMA_mirrors='popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used'\r\n , CMA_mu='None # parents selection parameter, default is popsize // 2'\r\n , CMA_on='True # False or 0 for no adaptation of the covariance matrix'\r\n , CMA_rankmu='True # False or 0 for omitting rank-mu update of covariance matrix'\r\n , CMA_rankmualpha='0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0'\r\n , CMA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere'\r\n , CMA_dampsvec_fac='np.Inf # tentative and subject to changes, 0.5 would be a \"default\" damping for sigma vector update'\r\n , CMA_dampsvec_fade='0.1 # tentative fading out parameter for sigma vector update'\r\n , CMA_teststds='None # factors for non-isotropic initial distr. mainly for test purpose, see scaling_...'\r\n , CMA_AII='False # not yet tested'\r\n , bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector'\r\n , eval_parallel='False # when True, func might be called with more than one solution as first argument'\r\n , eval_initial_x='False # '\r\n , fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized'\r\n , ftarget='-inf #v target function value, minimization'\r\n , incpopsize='2 # in fmin(): multiplier for increasing popsize before each restart'\r\n , maxfevals='inf #v maximum number of function evaluations'\r\n , maxiter='100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations'\r\n , mindx='0 #v minimal std in any direction, cave interference with tol*'\r\n , minstd='0 #v minimal std in any coordinate direction, cave interference with tol*'\r\n , noise_handling='False # maximal number of evaluations for noise treatment, only fmin'\r\n , noise_reevals=' 1.5 + popsize/20 # number of solution to be reevaluated for noise measurement, only fmin'\r\n , noise_eps='1e-7 # perturbation factor for noise handling reevaluations, only fmin'\r\n , noise_change_sigma='True # exponent to default sigma increment'\r\n , popsize='4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration'\r\n , randn='np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)'\r\n , restarts='0 # in fmin(): number of restarts'\r\n , restart_from_best='False'\r\n , scaling_of_variables='None # scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. 
Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is ones(N)'\r\n , seed='None # random number seed'\r\n , termination_callback='None #v a function returning True for termination, called after each iteration step and could be abused for side effects'\r\n , tolfacupx='1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0'\r\n , tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates \"creeping behavior\" with usually minor improvements'\r\n , tolfun='1e-11 #v termination criterion: tolerance in function value, quite useful'\r\n , tolfunhist='1e-12 #v termination criterion: tolerance in function value history'\r\n , tolstagnation='int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations'\r\n , tolx='1e-11 #v termination criterion: tolerance in x-changes'\r\n , transformation='None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno'\r\n , typical_x='None # used with scaling_of_variables'\r\n , updatecovwait='None #v number of iterations without distribution update, name is subject to future changes' # TODO: rename: iterwaitupdatedistribution?\r\n , verb_append='0 # initial evaluation counter, if append, do not overwrite output files'\r\n , verb_disp='100 #v verbosity: display console output every verb_disp iteration'\r\n , verb_filenameprefix='outcmaes # output filenames prefix'\r\n , verb_log='1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions'\r\n , verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration'\r\n , verb_time='True #v output timings on console'\r\n , vv='0 #? versatile variable for hacking purposes, value found in self.opts[\\'vv\\']'\r\n ): # style guides say there should be the above empty line\r\n try: # pass on KeyboardInterrupt\r\n opts = locals() # collect all local variables (i.e. 
arguments) in a dictionary\r\n del opts['func'] # remove those without a default value\r\n del opts['args']\r\n del opts['x0'] # is not optional, no default available\r\n del opts['sigma0'] # is not optional for the constructor CMAEvolutionStrategy\r\n if not func: # return available options in a dictionary\r\n return Options(opts, True) # these opts are by definition valid\r\n\r\n # TODO: this is very ugly:\r\n incpopsize = Options({'incpopsize':incpopsize}).eval('incpopsize')\r\n restarts = Options({'restarts':restarts}).eval('restarts')\r\n del opts['restarts']\r\n noise_handling = Options({'noise_handling': noise_handling}).eval('noise_handling')\r\n del opts['noise_handling']# otherwise CMA throws an error\r\n\r\n irun = 0\r\n best = BestSolution()\r\n while 1:\r\n # recover from a CMA object\r\n if irun == 0 and isinstance(x0, CMAEvolutionStrategy):\r\n es = x0\r\n x0 = es.inputargs['x0'] # for the next restarts\r\n if sigma0 is None or not np.isscalar(array(sigma0)):\r\n sigma0 = es.inputargs['sigma0'] # for the next restarts\r\n # ignore further input args and keep original options\r\n else: # default case\r\n if irun and opts['restart_from_best']:\r\n print('CAVE: restart_from_best is typically not useful')\r\n es = CMAEvolutionStrategy(best.x, sigma0, opts)\r\n else:\r\n es = CMAEvolutionStrategy(x0, sigma0, opts)\r\n if opts['eval_initial_x']:\r\n x = es.gp.pheno(es.mean, bounds=es.gp.bounds)\r\n es.best.update([x], None, [func(x, *args)], 1)\r\n es.countevals += 1\r\n\r\n opts = es.opts # processed options, unambiguous\r\n\r\n append = opts['verb_append'] or es.countiter > 0 or irun > 0\r\n logger = CMADataLogger(opts['verb_filenameprefix'], opts['verb_log'])\r\n logger.register(es, append).add() # initial values, not fitness values\r\n\r\n # if es.countiter == 0 and es.opts['verb_log'] > 0 and not es.opts['verb_append']:\r\n # logger = CMADataLogger(es.opts['verb_filenameprefix']).register(es)\r\n # logger.add()\r\n # es.writeOutput() # initial values for sigma etc\r\n\r\n noisehandler = NoiseHandler(es.N, noise_handling, np.median, opts['noise_reevals'], opts['noise_eps'], opts['eval_parallel'])\r\n while not es.stop():\r\n X, fit = es.ask_and_eval(func, args, evaluations=noisehandler.evaluations,\r\n aggregation=np.median) # treats NaN with resampling\r\n # TODO: check args and in case use args=(noisehandler.evaluations, )\r\n\r\n if 11 < 3 and opts['vv']: # inject a solution\r\n # use option check_point = [0]\r\n if 0 * np.random.randn() >= 0:\r\n X[0] = 0 + opts['vv'] * es.sigma**0 * np.random.randn(es.N)\r\n fit[0] = func(X[0], *args)\r\n # print fit[0]\r\n es.tell(X, fit) # prepare for next iteration\r\n if noise_handling:\r\n es.sigma *= noisehandler(X, fit, func, es.ask, args)**opts['noise_change_sigma']\r\n es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though\r\n\r\n es.disp()\r\n logger.add(more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],\r\n modulo=1 if es.stop() and logger.modulo else None)\r\n if opts['verb_log'] and opts['verb_plot'] and \\\r\n (es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop()):\r\n logger.plot(324, fontsize=10)\r\n\r\n # end while not es.stop\r\n mean_pheno = es.gp.pheno(es.mean, bounds=es.gp.bounds)\r\n fmean = func(mean_pheno, *args)\r\n es.countevals += 1\r\n\r\n es.best.update([mean_pheno], None, [fmean], es.countevals)\r\n best.update(es.best) # in restarted case\r\n\r\n # final message\r\n if opts['verb_disp']:\r\n srestarts = (' 
after %i restart' + ('s' if irun > 1 else '')) % irun if irun else ''\r\n for k, v in list(es.stop().items()):\r\n print('termination on %s=%s%s (%s)' % (k, str(v), srestarts, time.asctime()))\r\n\r\n print('final/bestever f-value = %e %e' % (es.best.last.f, best.f))\r\n if es.N < 9:\r\n print('mean solution: ' + str(es.gp.pheno(es.mean)))\r\n print('std deviation: ' + str(es.sigma * sqrt(es.dC) * es.gp.scales))\r\n else:\r\n print('mean solution: %s ...]' % (str(es.gp.pheno(es.mean)[:8])[:-1]))\r\n print('std deviations: %s ...]' % (str((es.sigma * sqrt(es.dC) * es.gp.scales)[:8])[:-1]))\r\n\r\n irun += 1\r\n if irun > restarts or 'ftarget' in es.stopdict or 'maxfunevals' in es.stopdict:\r\n break\r\n opts['verb_append'] = es.countevals\r\n opts['popsize'] = incpopsize * es.sp.popsize # TODO: use rather options?\r\n opts['seed'] += 1\r\n\r\n # while irun\r\n\r\n es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell\r\n if 1 < 3:\r\n return es.result() + (es.stop(), es, logger)\r\n\r\n else: # previously: to be removed\r\n return (best.x.copy(), best.f, es.countevals,\r\n dict((('stopdict', CMAStopDict(es.stopdict))\r\n ,('mean', es.gp.pheno(es.mean))\r\n ,('std', es.sigma * sqrt(es.dC) * es.gp.scales)\r\n ,('out', es.out)\r\n ,('opts', es.opts) # last state of options\r\n ,('cma', es)\r\n ,('inputargs', es.inputargs)\r\n ))\r\n )\r\n # TODO refine output, can #args be flexible?\r\n # is this well usable as it is now?\r\n except KeyboardInterrupt: # Exception, e:\r\n if opts['verb_disp'] > 0:\r\n print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')\r\n raise # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit\r",
"def argmax(seq, fn):\n return argmin(seq, lambda x: -fn(x))"
]
| [
"0.7085272",
"0.67883414",
"0.6785883",
"0.66284406",
"0.6580602",
"0.64613897",
"0.6409272",
"0.6262781",
"0.6251602",
"0.6249426",
"0.6222751",
"0.62062293",
"0.6168339",
"0.61213404",
"0.6112865",
"0.6097352",
"0.5974419",
"0.59578776",
"0.59462065",
"0.58917284",
"0.58890676",
"0.58856815",
"0.5882135",
"0.5880978",
"0.5869691",
"0.5866858",
"0.58482623",
"0.58114177",
"0.5785878",
"0.5773671"
]
| 0.68643045 | 1 |
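To make the wrapper pattern in the record above concrete, here is a minimal, self-contained usage sketch of the same idea - rebuild the full parameter vector from the variable and fixed parts inside a helper, and negate the objective so a minimiser performs a maximisation. It is written against scipy.optimize.fmin rather than the author's NelderMead/NegFixedPar_func helpers (which are not shown here), and every name in it is illustrative:

import numpy as np
from scipy.optimize import fmin

def loglik(p, data):
    # Gaussian log likelihood with parameters p = (mu, sigma)
    mu, sigma = p
    return -0.5 * np.sum((data - mu) ** 2 / sigma ** 2 + np.log(2 * np.pi * sigma ** 2))

def neg_loglik_fixed(var_par, fixed, fixed_par, data):
    # rebuild the full parameter vector, then negate for maximisation
    p = np.empty(len(fixed))
    p[~fixed] = var_par
    p[fixed] = fixed_par
    return -loglik(p, data)

data = np.random.normal(1.0, 2.0, 500)
fixed = np.array([False, True])            # hold sigma fixed at 2.0, fit only mu
best_mu = fmin(neg_loglik_fixed, [0.0], args=(fixed, [2.0], data), disp=False)
print(best_mu)                             # close to the sample mean of data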
A naive implementation of the forward pass for a max pooling layer. | def max_pool_forward_naive(x, pool_param):
out = None
#############################################################################
# TODO: Implement the max pooling forward pass #
#############################################################################
    # naive implementation: loop over every output location and take the max
    # of the corresponding pooling window (assumes numpy is imported as np)
    N, C, H, W = x.shape
    ph, pw = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']
    H_out, W_out = 1 + (H - ph) // stride, 1 + (W - pw) // stride
    out = np.zeros((N, C, H_out, W_out))
    for i in range(H_out):
        for j in range(W_out):
            window = x[:, :, i*stride:i*stride+ph, j*stride:j*stride+pw]
            out[:, :, i, j] = np.max(window, axis=(2, 3))
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, pool_param)
return out, cache | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_pool_forward_naive(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max pooling forward pass #\n ###########################################################################\n N,C,H,W = x.shape\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n #Compute output size.\n out_width = int((W - pool_width) / stride + 1)\n out_height = int((H - pool_height) / stride + 1) \n out = np.zeros((N,C,out_height,out_width))\n #Naive implementation:Loop over each training example and max pool.(Naive===lots of FOR)\n for i in range(N):\n #Counters for output indices.\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Get max in each depth.\n for c in range(C):\n out[i,c,a,b] += np.max(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n if (b == out_width - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def max_pool_forward_naive(x, pool_param):\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n out = np.random.randn(N, C, Hc, Wc)\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n out[i, c, hc, wc] = np.max(x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width])\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache",
"def max_pool_forward_naive(x, pool_param):\n\tout = None\n\t\n\tN, C, H, W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tout = np.zeros((N,C,Hp,Wp))\n\n\tfor n in range(N):\n\t\tfor j in range(Hp):\n\t\t\tfor i in range(Wp):\n\t\t\t\tout[n,:,j,i] = np.amax(x[n,:,j*stride:j*stride+HH,i*stride:i*stride+WW], axis=(-1,-2))\n\n\tcache = (x, pool_param)\n\treturn out, cache",
"def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n \n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n out = np.zeros((N,C,HH,WW))\n \n for n in range(N):\n for c in range(C):\n out_shape = (HH,WW,pool_height,pool_width)\n pool_blocks = np.lib.stride_tricks.as_strided(x[n][c],out_shape,strides+x_strides)\n out[n][c] = np.max(pool_blocks, axis=(2,3))\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n H_prime = int(1 + (H - pool_height) / stride)\n W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division\n \n out = np.zeros((N,C,H_prime,W_prime))\n \n for n in range(N):\n for i in range(H_prime):\n for j in range(W_prime):\n h_start = i * stride\n h_end = h_start + pool_height\n w_start = j * stride\n w_end = w_start + pool_width\n pool_window = x[n, :, h_start:h_end, w_start:w_end]\n pool_window = pool_window.reshape((C,-1))\n out[n,:,i,j] = np.max(pool_window, axis=1)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / stride)\n WW = int(1 + (W - pool_width) / stride)\n\n out = np.zeros((N, C, HH, WW))\n\n for n in range(N):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = x[n, :, h1:h2, w1:w2]\n out[n,:,h,w] = np.max(block.reshape((C, pool_height*pool_width)), axis=1)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out",
"def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)",
"def conv_relu_pool_forward_naive(x, w, b, conv_param, pool_param):\n\ta, conv_cache = conv_forward_naive(x, w, b, conv_param)\n\ts, relu_cache = relu_forward(a)\n\tout, pool_cache = max_pool_forward_naive(s, pool_param)\n\tcache = (conv_cache, relu_cache, pool_cache)\n\treturn out, cache",
"def _pool_step(\n X,\n pool_size, #TODO(mmd): Better name\n pooler = tf.nn.max_pool,\n):\n # TODO(mmd): Why all the expansion squeezing necessary?\n x = tf.expand_dims(x, 3) # num_samples x num_features x num_filters_in x 1\n x = pooler(x, ksize=[1,pool_size,1,1], strides=[1,pool_size,1,1], padding='SAME')\n #tf.maximum\n return tf.squeeze(x, [3]) # num_samples x num_features / p x num_filters",
"def forward(input, label, conv, maxpool, softmax):\n output = conv.forward((input / 255) - 0.5)\n output = maxpool.forward(output)\n output = softmax.forward(output)\n\n loss = -np.log(output[label])\n acc = 1 if np.argmax(output) == label else 0\n\n return output, loss, acc",
"def max_pool_backward_naive(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max pooling backward pass #\n ###########################################################################\n #Extract info from cache.\n x,pool_param = cache\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n N,C,H,W = x.shape\n\n #Start computing dx,same as forward pass loop with the correct stride over x.\n dx = np.zeros_like(x)\n for i in range(N):\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Go over all of the channels.\n for c in range(C):\n #Find max.\n max_index = np.argmax(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n #Conver flat index.\n index = np.unravel_index(max_index,(pool_height,pool_width))\n dx[i,c,t + index[0],k + index[1]] += dout[i,c,a,b]\n if (b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx",
"def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx",
"def max_pool_backward_naive(dout, cache):\n x, pool_params = cache\n N, C, H, W = x.shape\n\n pool_height = pool_params['pool_height']\n pool_width = pool_params['pool_width']\n stride = pool_params['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n\n dx = np.zeros(x.shape)\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n subx = x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n subdx = dx[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n max_value = np.max(subx)\n \n subdx += (subx == max_value) * dout[i, c, hc, wc]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx",
"def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x",
"def forward(self, *inputs):\n\n x = self.relu1(self.maxpool1(self.conv1(*inputs)))\n x = self.relu2(self.maxpool2(self.conv2_drop(self.conv2(x))))\n x = x.view(x.size(0), -1)\n x = self.relu3(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n return self.log_softmax(x)",
"def forward(self, input):\n x = self.emb(input)\n x = F.max_pool2d(x, kernel_size=x.shape[2:])\n x = x.view(x.shape[0:2])\n x = F.log_softmax(self.fc_final(x), dim=-1)\n return x",
"def forward(self, x):\n x = x.permute(0, 2, 1)\n if self.return_indices:\n x, indices = F.max_pool1d(x, self.kernel_size, return_indices=self.return_indices)\n else:\n x = F.max_pool1d(x)\n x = x.permute(0, 2, 1)\n\n if self.return_indices:\n output = x, indices\n else:\n output = x\n return output",
"def forward(self, inp: torch.Tensor) -> torch.Tensor:\n x = self.conv1(inp)\n x = self.maxpool(x)\n\n for i in range(self._num_layers):\n x = getattr(self, \"C%d\" % (i + 1))(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x",
"def forward(self, x, pool_size=(2, 2), pool_type=\"avg\"):\n\n x = F.relu_(self.norm1(self.conv1(x)))\n x = F.relu_(self.norm2(self.conv2(x)))\n if pool_type == \"max\":\n x = F.max_pool2d(x, kernel_size=pool_size)\n elif pool_type == \"avg\":\n x = F.avg_pool2d(x, kernel_size=pool_size)\n elif pool_type == \"avg+max\":\n x1 = F.avg_pool2d(x, kernel_size=pool_size)\n x2 = F.max_pool2d(x, kernel_size=pool_size)\n x = x1 + x2\n else:\n raise Exception(\"Incorrect pooling type!\")\n return x",
"def forward(self, inputData):\n weights = self.Weights\n biases = self.Biases\n poolParams = self.poolParams\n cache = [] #zmienna przechowujaca produkty warstw - pomocna do propagacji wstecznej\n #warstwa wejsciowa\n layer0 = np.asarray(inputData)\n cache.append(layer0)\n #pierwsza warstwa konwolucyjna\n layer1 = convolution_forward(np.asarray([layer0]),weights[0],biases[0])\n cache.append(layer1)\n #pierwsza warstwa max poolingu\n layer2 = maxpool_forward(layer1, poolParams[0][0], poolParams[0][1])\n cache.append(layer2)\n #druga warstwa konwolucyjna\n layer3 = convolution_forward(layer2,weights[1],biases[1])\n cache.append(layer3)\n #druga warstwa max poolingu\n layer4 = maxpool_forward(layer3, poolParams[1][0], poolParams[1][1])\n cache.append(layer4)\n #pierwsza warstwa fully connected zrealizowana jako warstwa konwolucyjna\n layer5 = convolution_forward( layer4,weights[2] ,biases[2] )\n cache.append(layer5)\n #druga warstwa fully connected z funkcja aktywacji typu ReLU\n layer6 = act.relu(np.dot(weights[3],layer5[:,0]).transpose() + biases[3]).transpose()\n cache.append(layer6)\n #softmax\n layer7 = np.dot( weights[4], layer6[:,0] ).transpose() + biases[4]\n layer7 -= np.max(layer7)\n layer7 = np.exp(layer7)/sum(np.exp(layer7))\n\n return (layer7, cache)",
"def conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache",
"def conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache",
"def max_pool(x,\n k_h,\n k_w,\n s_h,\n s_w,\n name,\n padding=\"VALID\"):\n with tf.name_scope(name):\n outputs = tf.nn.max_pool(x, [1, k_h, k_w, 1], [1, s_h, s_w, 1], padding)\n # Return layer's output\n return outputs",
"def forward(self, *inputs):\n\n x = functional.relu(functional.max_pool2d(self.conv1(*inputs), 2))\n x = functional.relu(functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = functional.relu(functional.max_pool2d(self.conv3(x), 2))\n x = x.view(x.size(0), -1)\n x = functional.relu(self.fc1(x))\n x = functional.dropout(x, training=self.training)\n x = self.fc2(x)\n return functional.log_softmax(x, dim=1)",
"def max_pool_backward_naive(dout, cache):\n\tdx = None\n\n\tx, pool_param = cache\n\tN,C,H,W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tdx = np.zeros_like(x)\n\n\tfor n in range(N):\n\t\tfor c in range(C):\n\t\t\tfor j in range(Hp):\n\t\t\t\tfor i in range(Wp):\n\t\t\t\t\tind = np.argmax(x[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW])\n\t\t\t\t\tind1, ind2 = np.unravel_index(ind, (HH,WW))\n\t\t\t\t\tdx[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW][ind1, ind2] = dout[n,c,j,i]\n\n\treturn dx",
"def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n dx = np.zeros(x.shape)\n \n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h_start = stride * h\n h_end = h_start + pool_height\n\n w_start = stride * w\n w_end = w_start + pool_width\n\n # get the pool window in the input x\n pool_window = x[n, c, h_start:h_end, w_start:w_end]\n \n m = np.max(pool_window)\n dx_window = np.where(pool_window == m, 1, 0)\n \n dx[n, c, h_start:h_end, w_start:w_end] += dx_window * dout[n, c, h, w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx",
"def maxpool(input, filter_h, filter_w, stride_h, stride_w, padding, name):\n with tf.name_scope(name):\n mp = tf.nn.max_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],\n padding=padding)\n # print(name + \" : \", str(mp.shape))\n return mp",
"def feed_forward_net(net_def, inputs):\n\n inp = inputs.copy()\n for n in range(0, len(net_def['layers'])):\n\n outputs = process_layer(net_def['layers'][n], inp)\n\n inp = outputs.copy()\n\n # Index of largest value\n return np.argmax(inp)"
]
| [
"0.8176104",
"0.7866907",
"0.7854871",
"0.76688087",
"0.763123",
"0.75106704",
"0.7398416",
"0.73303866",
"0.7267503",
"0.7173863",
"0.7142943",
"0.7042369",
"0.70236975",
"0.7022948",
"0.696239",
"0.68989724",
"0.689837",
"0.68944407",
"0.6884486",
"0.6883826",
"0.688134",
"0.6863874",
"0.68104345",
"0.68104345",
"0.67569786",
"0.6748131",
"0.6733848",
"0.6714257",
"0.671138",
"0.67109793"
]
| 0.7984787 | 1 |
A naive implementation of the backward pass for a max pooling layer. | def max_pool_backward_naive(dout, cache):
dx = None
#############################################################################
# TODO: Implement the max pooling backward pass #
#############################################################################
    # Route each upstream gradient back to the argmax position of its pooling window.
    x, pool_param = cache
    pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']
    dx = np.zeros_like(x)
    for n in range(x.shape[0]):
        for c in range(x.shape[1]):
            for i in range(dout.shape[2]):
                for j in range(dout.shape[3]):
                    window = x[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width]
                    dx[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width] += (window == np.max(window)) * dout[n, c, i, j]
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_pool_backward_naive(dout, cache):\n x, pool_params = cache\n N, C, H, W = x.shape\n\n pool_height = pool_params['pool_height']\n pool_width = pool_params['pool_width']\n stride = pool_params['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n\n dx = np.zeros(x.shape)\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n subx = x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n subdx = dx[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n max_value = np.max(subx)\n \n subdx += (subx == max_value) * dout[i, c, hc, wc]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx",
"def max_pool_backward_naive(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max pooling backward pass #\n ###########################################################################\n #Extract info from cache.\n x,pool_param = cache\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n N,C,H,W = x.shape\n\n #Start computing dx,same as forward pass loop with the correct stride over x.\n dx = np.zeros_like(x)\n for i in range(N):\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Go over all of the channels.\n for c in range(C):\n #Find max.\n max_index = np.argmax(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n #Conver flat index.\n index = np.unravel_index(max_index,(pool_height,pool_width))\n dx[i,c,t + index[0],k + index[1]] += dout[i,c,a,b]\n if (b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx",
"def max_pool_backward_naive(dout, cache):\n\tdx = None\n\n\tx, pool_param = cache\n\tN,C,H,W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tdx = np.zeros_like(x)\n\n\tfor n in range(N):\n\t\tfor c in range(C):\n\t\t\tfor j in range(Hp):\n\t\t\t\tfor i in range(Wp):\n\t\t\t\t\tind = np.argmax(x[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW])\n\t\t\t\t\tind1, ind2 = np.unravel_index(ind, (HH,WW))\n\t\t\t\t\tdx[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW][ind1, ind2] = dout[n,c,j,i]\n\n\treturn dx",
"def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n dx = np.zeros(x.shape)\n \n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h_start = stride * h\n h_end = h_start + pool_height\n\n w_start = stride * w\n w_end = w_start + pool_width\n\n # get the pool window in the input x\n pool_window = x[n, c, h_start:h_end, w_start:w_end]\n \n m = np.max(pool_window)\n dx_window = np.where(pool_window == m, 1, 0)\n \n dx[n, c, h_start:h_end, w_start:w_end] += dx_window * dout[n, c, h, w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx",
"def conv_relu_pool_backward_naive(dout, cache):\n\tconv_cache, relu_cache, pool_cache = cache\n\tds = max_pool_backward_naive(dout, pool_cache)\n\tda = relu_backward(ds, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db",
"def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n \n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n H_prime = int(1 + (H - pool_height) / stride)\n W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division\n \n dx = np.zeros((N, C, H, W))\n \n for n in range(N):\n for c in range(C):\n for i in range(H_prime):\n for j in range(W_prime):\n h_start = i * stride\n h_end = h_start + pool_height\n w_start = j * stride\n w_end = w_start + pool_width\n pool_window = x[n, c, h_start:h_end, w_start:w_end]\n maxValue = np.max(pool_window)\n dx[n,c,h_start:h_end,w_start:w_end] += dout[n,c,i,j] * (pool_window == maxValue)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx",
"def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n (x, pool_param) = cache\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / stride)\n WW = int(1 + (W - pool_width) / stride)\n\n dx = np.zeros_like(x)\n\n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = np.reshape(x[n, c, h1:h2, w1:w2], (pool_height*pool_width))\n mask = np.zeros_like(block)\n mask[np.argmax(block)] = 1\n dx[n,c,h1:h2,w1:w2] += np.reshape(mask,(pool_height,pool_width)) * dout[n,c,h,w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx",
"def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = layers.max_pool_backward_naive(dout, pool_cache)\n da = layers.relu_backward(ds, relu_cache)\n dx, dw, db = layers.conv_backward_naive(da, conv_cache)\n return dx, dw, db",
"def max_pool_forward_naive(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max pooling forward pass #\n ###########################################################################\n N,C,H,W = x.shape\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n #Compute output size.\n out_width = int((W - pool_width) / stride + 1)\n out_height = int((H - pool_height) / stride + 1) \n out = np.zeros((N,C,out_height,out_width))\n #Naive implementation:Loop over each training example and max pool.(Naive===lots of FOR)\n for i in range(N):\n #Counters for output indices.\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Get max in each depth.\n for c in range(C):\n out[i,c,a,b] += np.max(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n if (b == out_width - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def max_pool_forward_naive(x, pool_param):\n\tout = None\n\t\n\tN, C, H, W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tout = np.zeros((N,C,Hp,Wp))\n\n\tfor n in range(N):\n\t\tfor j in range(Hp):\n\t\t\tfor i in range(Wp):\n\t\t\t\tout[n,:,j,i] = np.amax(x[n,:,j*stride:j*stride+HH,i*stride:i*stride+WW], axis=(-1,-2))\n\n\tcache = (x, pool_param)\n\treturn out, cache",
"def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache",
"def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = max_pool_backward_fast(dout, pool_cache)\n da = relu_backward(ds, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db",
"def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = max_pool_backward_fast(dout, pool_cache)\n da = relu_backward(ds, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db",
"def max_pool_forward_naive(x, pool_param):\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n out = np.random.randn(N, C, Hc, Wc)\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n out[i, c, hc, wc] = np.max(x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width])\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache",
"def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)",
"def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n \n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n out = np.zeros((N,C,HH,WW))\n \n for n in range(N):\n for c in range(C):\n out_shape = (HH,WW,pool_height,pool_width)\n pool_blocks = np.lib.stride_tricks.as_strided(x[n][c],out_shape,strides+x_strides)\n out[n][c] = np.max(pool_blocks, axis=(2,3))\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n m = dA.shape[0]\n h_new = dA.shape[1]\n w_new = dA.shape[2]\n c = dA.shape[3]\n h_prev = A_prev.shape[1]\n w_prev = A_prev.shape[2]\n kh = kernel_shape[0]\n kw = kernel_shape[1]\n # image_num = np.arange(m)\n sh = stride[0]\n sw = stride[1]\n func = {'max': np.max, 'avg': np.mean}\n\n dA_prev = np.zeros(shape=A_prev.shape)\n\n if mode in ['max', 'avg']:\n for img_num in range(m):\n for k in range(c):\n for i in range(h_new):\n for j in range(w_new):\n window = A_prev[\n img_num,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw,\n k\n ]\n if mode == 'max':\n # maxpool returns the max\n # derivative of maxpool relative to the max is 1\n # derivative relative to any other element is 0\n # backpropagate 1 to the unit corresponding to max\n # backpropagate 0 for the other units\n # given these comments, define a mask of 1 and 0s\n mask = np.where(window == np.max(window), 1, 0)\n # print(mask)\n elif mode == 'avg':\n # define a mask weighted by the number of\n # elements in the pooling layer (kh * kw)\n mask = np.ones(shape=window.shape)\n mask /= (kh * kw)\n # print(mask)\n dA_prev[\n img_num,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw,\n k\n ] += mask * dA[\n img_num,\n i,\n j,\n k\n ]\n return dA_prev",
"def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n H_prime = int(1 + (H - pool_height) / stride)\n W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division\n \n out = np.zeros((N,C,H_prime,W_prime))\n \n for n in range(N):\n for i in range(H_prime):\n for j in range(W_prime):\n h_start = i * stride\n h_end = h_start + pool_height\n w_start = j * stride\n w_end = w_start + pool_width\n pool_window = x[n, :, h_start:h_end, w_start:w_end]\n pool_window = pool_window.reshape((C,-1))\n out[n,:,i,j] = np.max(pool_window, axis=1)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def backpropagation(self):\n\n print \"Backpropagation in pool layer\"\n deltasNext = self.__nextLayer.getDeltas()\n self.deltas = np.zeros(self.inputShape)\n\n\n # for para dar los valores del delta siguiente a los maximos\n idx = 0\n for n in range(self.inputShape[0]):\n for c in range(self.inputShape[1]):\n nh = 0\n for h in range(self.inputShape[2], self.inputShape[2] - self.kernelSize[0] + 1, self.stride[0]):\n nw = 0\n for w in range(self.inputShape[3], self.inputShape[3] - self.kernelSize[1] + 1, self.stride[1]):\n self.deltas[n, c, w + self.maxIdx[idx][0], h + self.maxIdx[idx][1]] = deltasNext[\n n, c,\n nh: nh + self.kernelSize[\n 0],\n nw:nw + self.kernelSize[\n 1]]\n idx += 1\n\n if self.__previousLayer is None:\n return self.deltas\n else:\n return self.__previousLayer.backpropagation()",
"def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / stride)\n WW = int(1 + (W - pool_width) / stride)\n\n out = np.zeros((N, C, HH, WW))\n\n for n in range(N):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = x[n, :, h1:h2, w1:w2]\n out[n,:,h,w] = np.max(block.reshape((C, pool_height*pool_width)), axis=1)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache",
"def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize",
"def conv_relu_pool_forward_naive(x, w, b, conv_param, pool_param):\n\ta, conv_cache = conv_forward_naive(x, w, b, conv_param)\n\ts, relu_cache = relu_forward(a)\n\tout, pool_cache = max_pool_forward_naive(s, pool_param)\n\tcache = (conv_cache, relu_cache, pool_cache)\n\treturn out, cache",
"def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n sh, sw = stride\n kh, kw = kernel_shape\n m, h_prev, w_prev, c_prev = A_prev.shape\n dm, h_new, w_new, c_new = dA.shape\n dA_prev = np.zeros(A_prev.shape)\n for i in range(m):\n for j in range(h_new):\n for k in range(w_new):\n jsh = j * sh\n ksw = k * sw\n for ll in range(c_new):\n pool = A_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll]\n if mode == 'max':\n maxp = np.amax(pool)\n mask = np.zeros(kernel_shape)\n np.place(mask, pool == maxp, 1)\n dA_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll] += \\\n mask * dA[i, j, k, ll]\n else:\n mask = np.ones(kernel_shape)\n dA_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll] += \\\n mask * dA[i, j, k, ll] / kh / kw\n return dA_prev",
"def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # 
dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n 
dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = 
torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return",
"def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()",
"def _pool(prev_layer, layer_name):\n with tf.name_scope(layer_name):\n return tf.nn.max_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')",
"def prop_max_pool(self, activation, relevance, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1]):\n act = tf.expand_dims(activation, 3) # N x M x F x 1\n z = tf.nn.max_pool(act, ksize, strides, padding='SAME') + self.epsilon\n with self.model.graph.as_default():\n rel = tf.expand_dims(relevance, 3)\n s = rel / z\n c = gen_nn_ops.max_pool_grad_v2(act, z, s, ksize, strides, padding='SAME')\n tmp = c * act\n return tf.squeeze(tmp, [3])",
"def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return",
"def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads",
"def _poputil_block_recompute_backward(op, grads):\n return grads"
]
| [
"0.77614725",
"0.76357096",
"0.7491863",
"0.7489109",
"0.74684924",
"0.7454527",
"0.7403634",
"0.7400141",
"0.7351165",
"0.72067535",
"0.71149665",
"0.71029645",
"0.71029645",
"0.7050689",
"0.6985986",
"0.693516",
"0.6903203",
"0.6857985",
"0.6800385",
"0.6765994",
"0.67657655",
"0.6604956",
"0.65426624",
"0.65281963",
"0.6526524",
"0.6511189",
"0.6500846",
"0.64529574",
"0.641435",
"0.6409269"
]
| 0.80068 | 0 |
Computes the forward pass for spatial batch normalization. | def spatial_batchnorm_forward(x, gamma, beta, bn_param):
out, cache = None, None
#############################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should #
# be very short; ours is less than five lines. #
#############################################################################
    # Fold N, H, W into one axis so each channel is a feature, reuse the vanilla
    # batch normalization defined above, then restore the original layout.
    N, C, H, W = x.shape
    x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
    out_flat, cache = batchnorm_forward(x_flat, gamma, beta, bn_param)
    out = out_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return out, cache | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n ###########################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n reshaped = np.reshape(x,(-1,x.shape[1]))\n batch_norm,cache = batchnorm_forward(reshaped,gamma,beta,bn_param)\n out = np.reshape(batch_norm,x.shape)\n cache = (cache,x.shape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return out, cache",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n #Compute mean and variance of each element of the data.\n sample_mean = np.mean(x,axis = 0)\n sample_var = np.var(x,axis = 0)\n #Normalize\n x_normalized = (x - sample_mean) / (np.sqrt(sample_var + eps))\n #scale and shift.\n out = x_normalized * gamma + beta\n #Update running mean and variance.\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(sample_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n #Normalize with running mean and var.\n x_normalized = (x - running_mean) / (np.sqrt(running_var + eps))\n #scale and shift.\n out = gamma * x_normalized + beta\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(running_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache",
"def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n\tout, cache = None, None\n\n\tN, C, H, W = x.shape\n\ty = x.transpose(0,2,3,1).reshape((N*H*W,C))\n\tout, cache = batchnorm_forward(y, gamma, beta, bn_param)\n\tout = out.reshape((N,H,W,C)).transpose(0,3,1,2)\n\t###########################################################################\n\t# END OF YOUR CODE #\n\t###########################################################################\n\n\treturn out, cache",
"def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n assert input.shape[1] == self.n_neurons, \"The shape of the input tensor is not correct.\"\n\n bn_fct = CustomBatchNormManualFunction()\n out = bn_fct.apply(input, self.gamma, self.beta, self.eps)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def forward_pass(self):\n # Compute the support set's mean and var and use these as the moments for\n # batch norm on the query set.\n train_embeddings = self.embedding_fn(self.episode.train_images,\n self.is_training)\n self.train_embeddings = train_embeddings['embeddings']\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = train_embeddings['moments']\n test_embeddings = self.embedding_fn(\n self.episode.test_images,\n self.is_training,\n moments=support_set_moments,\n backprop_through_moments=self.backprop_through_moments)\n self.test_embeddings = test_embeddings['embeddings']",
"def forward(ctx, input, gamma, beta, eps=1e-5):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n ####### Forward pass of batch normalization ######\n\n # In this section, we have to perform the forward pass of batch normalization\n # with more intermediate steps, since we want to propagate error terms. \n # To illustrate it better, we began from the bottom and follow our way to the top.\n # In that way, we unfolded every function step by step.\n\n # Step 3.2.3: Calculate variance\n var = input.var(dim=0, unbiased=False)\n\n # Step 3.2.2: add eps for numerical stability, then sqrt\n sqrt_var = torch.sqrt(var + eps)\n\n # Step 3.2: ivert sqrtwar\n inv_sqrt_var = 1./sqrt_var\n\n # Step 3.1.1: Calculate mean\n mean = input.mean(dim=0)\n\n # Step 3.1: subtract mean vector of every trainings example\n input_mean = input - mean\n\n # Step 3 - Execute normalization\n input_norm = input_mean * inv_sqrt_var \n\n # Step 2: Nor the two transformation steps\n scaled_input_norm = gamma * input_norm\n\n # Step 1: scale and shift\n out = scaled_input_norm + beta\n #################################################\n # store tensors and non-tensorial constants\n ctx.save_for_backward(gamma, inv_sqrt_var, mean, input)\n ctx.foo = eps\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache",
"def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)",
"def batchnorm_forward(x, gamma, beta, bn_param):\n\tmode = bn_param['mode']\n\teps = bn_param.get('eps', 1e-5)\n\tmomentum = bn_param.get('momentum', 0.9)\n\n\tN, D = x.shape\n\trunning_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n\trunning_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n\tout, cache = None, None\n\tif mode == 'train':\n\t\t# normalize data\n\t\tmu = np.mean(x, axis=0)\n\t\tvar = np.var(x, axis=0)\n\t\tnormalized = (x-mu)/np.sqrt(var+eps)\n\t\tout = gamma*normalized + beta\n\t\t# Update running mean and variance\n\t\trunning_mean = momentum*running_mean + (1 - momentum)*mu\n\t\trunning_var = momentum*running_var + (1 - momentum)*var\n\t\t# Cache for backwards pass\n\t\tcache = (x, normalized, gamma, beta, mu, var, eps)\n\telif mode == 'test':\n\t\t# normalize data using running mean and variance from training\n\t\tnormalized = (x - running_mean)/np.sqrt(running_var+eps)\n\t\tout = gamma*normalized + beta\n\telse:\n\t\traise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n\t# Store the updated running means back into bn_param\n\tbn_param['running_mean'] = running_mean\n\tbn_param['running_var'] = running_var\n\n\treturn out, cache",
"def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n x = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n x = self.relu(self.fc1(x))\n x = self.fc2(x)\n #x = F.log_softmax(x) #no need of log softmax here if we use cross entropy as loss\n #x = self.softmax(x)\n # normalize in the joint embedding space\n \n\n return x",
"def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)",
"def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')",
"def forward(self, X):\r\n N = X.size()[0]\r\n assert X.size() == (N, 3, 448, 448)\r\n X = self.features(X)\r\n assert X.size() == (N, 512, 28, 28)\r\n X = X.view(N, 512, 28**2)\r\n X = torch.bmm(X, torch.transpose(X, 1, 2)) / (28**2) # Bilinear\r\n assert X.size() == (N, 512, 512)\r\n X = X.view(N, 512**2)\r\n X = torch.sign(X)*torch.sqrt(torch.abs(X)+1e-12)\r\n # X = torch.sqrt(X + 1e-5)\r\n X = torch.nn.functional.normalize(X)\r\n X = self.fc(X)\r\n assert X.size() == (N, 11)\r\n return X",
"def forward_train(self, feat_T: torch.Tensor) -> torch.Tensor:\n feat_T = feat_T.view(feat_T.size(0), -1)\n return self.norm_fn(feat_T)",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n sample_mean = np.mean(x, axis = 0)\n sample_var = np.var(x , axis = 0)\n x_hat = (x - sample_mean) / (np.sqrt(sample_var + eps))\n out = gamma * x_hat + beta\n cache = (gamma, x, sample_mean, sample_var, eps, x_hat)\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n elif mode == 'test':\n scale = gamma / (np.sqrt(running_var + eps))\n out = x * scale + (beta - running_mean * scale)\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache",
"def forward(self, X):\n N = X.size()[0]\n assert X.size() == (N, 3, 448, 448)\n X = self.features(X)\n assert X.size() == (N, 512, 28, 28)\n X = X.view(N, 512, 28**2)\n X = torch.bmm(X, torch.transpose(X, 1, 2)) / (28**2) # Bilinear\n assert X.size() == (N, 512, 512)\n X = X.view(N, 512**2)\n X = torch.sqrt(X + 1e-5)\n X = torch.nn.functional.normalize(X)\n X = self.fc(X)\n assert X.size() == (N, 36)\n return X",
"def __call__(self, x, is_training=True):\n return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon,\n center=True, scale=True, is_training=is_training, scope=self.name)",
"def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta",
"def spatial_groupnorm_forward(x, gamma, beta, G, gn_param):\n out, cache = None, None\n eps = gn_param.get('eps',1e-5)\n ###########################################################################\n # TODO: Implement the forward pass for spatial group normalization. #\n # This will be extremely similar to the layer norm implementation. #\n # In particular, think about how you could transform the matrix so that #\n # the bulk of the code is similar to both train-time batch normalization #\n # and layer normalization! # \n ###########################################################################\n pass\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return out, cache",
"def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x",
"def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out",
"def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta",
"def forward(self, batch: torch.Tensor) -> torch.Tensor:\n x = self.conv1(batch)\n x = self.bn1(x)\n x = self.prelu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.bn2(x)\n x = self.dropout(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n x = self.features(x)\n\n if self.remove_bad_faces:\n # Remove bad quality faces, setting them to NaN\n x[torch.norm(x, dim=1) < self.magnitude_threshold] = float(\"nan\")\n\n return x",
"def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n images = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n features = self.fc(images)\n\n # normalize in the joint embedding space\n if not self.no_imgnorm:\n features = l2norm(features)\n\n # take the absolute value of embedding (used in order embeddings)\n if self.use_abs:\n features = torch.abs(features)\n\n return features",
"def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)",
"def forward_features(self, x):\n x_size = (x.shape[2], x.shape[3])\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x, x_size)\n\n x = self.norm(x) # B L C\n x = self.patch_unembed(x, x_size)\n\n return x",
"def compute_forward(self, batch, stage):\n batch = batch.to(self.device)\n wavs, lens = batch.sig\n\n # Feature extraction and normalization\n feats = self.modules.compute_features(wavs)\n feats = self.modules.mean_var_norm(feats, lens)\n\n # Embeddings + speaker classifier\n embeddings = self.modules.embedding_model(feats)\n outputs = self.modules.classifier(embeddings)\n\n return outputs",
"def _create_batchnorm(cls, onnx_node, inputs, opset_version):\n x = inputs[0]\n factor = onnx_node.getattr('momentum', 0.9)\n if x.device.id() == -1:\n handle = singa.BatchNormHandle(factor, x.data)\n else:\n handle = singa.CudnnBatchNormHandle(factor, x.data)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return handle, forward"
]
| [
"0.7394062",
"0.7037609",
"0.7005051",
"0.7002125",
"0.6987876",
"0.69365174",
"0.691629",
"0.68777937",
"0.6861998",
"0.6805032",
"0.68004656",
"0.67962664",
"0.678273",
"0.67058945",
"0.66437334",
"0.6576711",
"0.6567905",
"0.6561053",
"0.65598494",
"0.64757097",
"0.64390975",
"0.642709",
"0.64028186",
"0.6392953",
"0.63618076",
"0.6335805",
"0.6293106",
"0.6277744",
"0.62701005",
"0.6267411"
]
| 0.7471391 | 0 |
Computes the backward pass for spatial batch normalization. | def spatial_batchnorm_backward(dout, cache):
dx, dgamma, dbeta = None, None, None
#############################################################################
# TODO: Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should #
# be very short; ours is less than five lines. #
#############################################################################
    # Fold the spatial dimensions into the batch dimension, run the vanilla
    # batchnorm backward per channel, then restore the original (N, C, H, W) shape.
    N, C, H, W = dout.shape
    dout_reshaped = dout.transpose(0, 2, 3, 1).reshape(-1, C)
    dx, dgamma, dbeta = batchnorm_backward(dout_reshaped, cache)
    dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dgamma, dbeta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta",
"def spatial_batchnorm_backward(dout, cache):\r\n \tN, C, H, W = dout.shape\r\n dout_new = dout.transpose(0, 2, 3, 1).reshape(N*H*W, C)\r\n dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)\r\n dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)\r\n\r\n return dx, dgamma, dbeta",
"def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n ###########################################################################\n #Extract x mean variance gamma and beta from cache.\n x_norm,inv_var,gamma = cache\n N = x_norm.shape[0]\n #Compute gradients of gamma and beta first,these are the simplest.\n dgamma = np.sum(dout * (x_norm),axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Now run backprop to compute dx.\n dx_normalized = gamma * dout\n #Move another step backward in graph towards x-E[x],there are 2 cases in this backward pass\n #TODO-Write this shorter,now the code is trivial.\n '''NEW code'''\n #Compute derivate of mean from norm.\n sub_mean = x_norm / inv_var\n derived_mean = np.zeros_like(inv_var)\n derived_mean += -1 * np.sum(dx_normalized * inv_var,axis = 0)\n derived_var = np.sum((-0.5 * (sub_mean * np.power(inv_var,3))) * dx_normalized,axis = 0)\n derived_mean += (-2 / N) * np.sum(sub_mean,axis = 0)\n #End of computing the differentiation of the mean.\n final_dx = np.zeros_like(x_norm)\n #Derivation of x_norm by x.\n final_dx += dx_normalized * inv_var\n #mean by x.\n final_dx += derived_mean * (1 / N)\n final_dx += derived_var * (2 / N) * sub_mean\n '''End of experiment code.'''\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return final_dx, dgamma, dbeta",
"def spatial_batchnorm_backward(dout, cache):\n\tdx, dgamma, dbeta = None, None, None\n\n\tN, C, H, W = dout.shape\n\ty = dout.transpose(0,2,3,1).reshape((N*H*W,C))\n\tdx, dgamma, dbeta = batchnorm_backward(y, cache)\n\tdx = dx.reshape((N,H,W,C)).transpose(0,3,1,2)\n\n\treturn dx, dgamma, dbeta",
"def forward_backward(self, data_batch):\n self.forward(data_batch, is_train=True)\n self.backward()\n if self.use_l2norm_grad_clip:\n # 2-Norm Grad Clip\n self.l2norm_grad_clip()",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n #Compute mean and variance of each element of the data.\n sample_mean = np.mean(x,axis = 0)\n sample_var = np.var(x,axis = 0)\n #Normalize\n x_normalized = (x - sample_mean) / (np.sqrt(sample_var + eps))\n #scale and shift.\n out = x_normalized * gamma + beta\n #Update running mean and variance.\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(sample_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n #Normalize with running mean and var.\n x_normalized = (x - running_mean) / (np.sqrt(running_var + eps))\n #scale and shift.\n out = gamma * x_normalized + beta\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(running_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache",
"def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n \n Dshape = dout.shape\n x_norm, gamma, sigma = cache\n\n if len(Dshape) > 2: #deal with 2d inputs\n N,C,H,W = dout.shape\n dout = np.swapaxes(dout,1,3)\n D = C\n dout = np.reshape(dout,[N*H*W,C])\n else:\n dout = np.reshape(dout,[dout.shape[0],-1])\n N, D = x_norm.shape\n\n dgamma = np.sum(dout * x_norm, axis=0)\n dbeta = np.sum(dout, axis=0)\n dx = 1/N*(gamma/sigma)*(N*dout - dbeta - x_norm*dgamma)\n\n if len(Dshape) > 2:\n dx = np.reshape(dx,[N,W,H,C])\n dx = np.swapaxes(dx,1,3)\n else:\n dx = np.reshape(dx,Dshape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta",
"def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n x, mu, sigma, gamma, beta = cache\n N = dout.shape[0]\n X_mu = x - mu\n var_inv = 1./sigma\n \n dX_norm = dout * gamma\n dvar = np.sum(dX_norm * X_mu,axis=0) * -0.5 * sigma**(-3)\n dmu = np.sum(dX_norm * -var_inv ,axis=0) + dvar * 1/N * np.sum(-2.* X_mu, axis=0)\n\n dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu)\n dbeta = np.sum(dout, axis=0)\n dgamma = np.sum(dout * X_mu/sigma, axis=0)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta",
"def affine_batchnorm_relu_backward(dout, cache):\n fc_cache, norm_cache, relu_cache = cache\n d_norm_out = relu_backward(dout, relu_cache)\n d_affine_out, dgamma, dbeta = batchnorm_backward_alt(d_norm_out, norm_cache)\n dx, dw, db = affine_backward(d_affine_out, fc_cache)\n return dx, dw, db, dgamma, dbeta",
"def affine_batchnorm_relu_backward(dout, cache):\n af_cache, bf_cache, relu_cache = cache\n \n dbf_out = relu_backward(dout, relu_cache)\n daf_out, dgamma, dbeta = batchnorm_backward(dbf_out, bf_cache)\n dx, dw, db = affine_backward(daf_out, af_cache)\n return dx, dw, db, dgamma, dbeta",
"def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line.#\n ###########################################################################\n N = dout.shape[0]\n x_norm,inv_var,gamma = cache\n dgamma = np.sum(dout * x_norm,axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Simplified calculation of dx.\n dx_normalized = dout * gamma\n dx = (1 / N) * inv_var * (N * dx_normalized - np.sum(dx_normalized,axis = 0) \\\n - x_norm * np.sum(dx_normalized * x_norm,axis = 0)) \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta",
"def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n gamma, x_hat, num, denom, eps, sample_variance = cache\n N, D = dout.shape\n \n dbeta = np.sum(dout, axis=0)\n dyx_hat = dout\n dgamma = np.sum(dyx_hat*x_hat, axis=0)\n dx_hat = gamma*dyx_hat\n ddenom = np.sum(num*dx_hat, axis=0)\n dmu1 = (1/denom)*dx_hat\n dsqvar = ddenom*(-1)*(1/(denom**2))\n dvar = 0.5*((sample_variance+eps)**(-0.5))*dsqvar\n dsq = (1/N)*np.ones((N,D))*dvar\n dmu2 = 2*num*dsq\n dmu = (-1)*np.sum(dmu1+dmu2, axis=0)\n dx1 = dmu1 + dmu2\n dx2 = (1/N)*np.ones((N,D))*dmu\n dx = dx1+dx2\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta",
"def batchnorm_backward(dout, cache):\n\tdx, dgamma, dbeta = None, None, None\n\tx, normalized, gamma, beta, mu, var, eps = cache\n\tN,D = dout.shape\n\n\tdx_norm = dout * gamma\n\t\n\tdx = (1. / N) * (1/np.sqrt(var + eps)) * (N*dx_norm - np.sum(dx_norm, axis=0) - normalized*np.sum(dx_norm*normalized, axis=0))\n\t\n\tdgamma = (dout * normalized).sum(axis = 0)\n\tdbeta = dout.sum(axis = 0)\n\treturn dx, dgamma, dbeta",
"def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out, cache",
"def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n #############################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n return dx, dgamma, dbeta",
"def _backward(self, w=None):\n grad = self.w # Should be I * self.w . We keep a vector for simplicity\n\n # Left multiply input `w` with normalizer gradient\n return w * grad if w is not None else grad",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache",
"def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache",
"def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache",
"def batchnorm_backward(dout, cache):\r\n dx, dgamma, dbeta = None, None, None\r\n\r\n x, xc, var, std, xn, gamma, eps = cache\r\n N = x.shape[0]\r\n\r\n dbeta = np.sum(dout, axis=0)\r\n dgamma = np.sum(dout * xn, axis=0)\r\n dxn = dout * gamma\r\n\r\n dxc = dxn / std\r\n dstd = np.sum(-(xc * dxn) / (std * std), axis=0)\r\n dvar = 0.5 * dstd / std\r\n\r\n dxc += (2.0 / N) * xc * dvar\r\n dmu = -np.sum(dxc, axis=0)\r\n dx = dxc + dmu / N\r\n\r\n return dx, dgamma, dbeta",
"def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")",
"def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n ###########################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n reshaped = np.reshape(x,(-1,x.shape[1]))\n batch_norm,cache = batchnorm_forward(reshaped,gamma,beta,bn_param)\n out = np.reshape(batch_norm,x.shape)\n cache = (cache,x.shape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return out, cache",
"def spatial_groupnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial group normalization. #\n # This will be extremely similar to the layer norm implementation. #\n ###########################################################################\n pass\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dgamma, dbeta",
"def conv_bn_relu_backward(dout, cache):\n conv_cache, sbn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = spatial_batchnorm_backward(da, sbn_cache)\n dx, dw, db = conv_backward_fast(dan, conv_cache)\n return dx, dw, db, dgamma, dbeta",
"def _backward(loss):\n\n loss.backward()",
"def forward(ctx, input, gamma, beta, eps=1e-5):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n ####### Forward pass of batch normalization ######\n\n # In this section, we have to perform the forward pass of batch normalization\n # with more intermediate steps, since we want to propagate error terms. \n # To illustrate it better, we began from the bottom and follow our way to the top.\n # In that way, we unfolded every function step by step.\n\n # Step 3.2.3: Calculate variance\n var = input.var(dim=0, unbiased=False)\n\n # Step 3.2.2: add eps for numerical stability, then sqrt\n sqrt_var = torch.sqrt(var + eps)\n\n # Step 3.2: ivert sqrtwar\n inv_sqrt_var = 1./sqrt_var\n\n # Step 3.1.1: Calculate mean\n mean = input.mean(dim=0)\n\n # Step 3.1: subtract mean vector of every trainings example\n input_mean = input - mean\n\n # Step 3 - Execute normalization\n input_norm = input_mean * inv_sqrt_var \n\n # Step 2: Nor the two transformation steps\n scaled_input_norm = gamma * input_norm\n\n # Step 1: scale and shift\n out = scaled_input_norm + beta\n #################################################\n # store tensors and non-tensorial constants\n ctx.save_for_backward(gamma, inv_sqrt_var, mean, input)\n ctx.foo = eps\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha",
"def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 = N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. / N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None",
"def backward(ctx, grad_output):\n batch_size, n_dim = grad_output.shape\n sign_z, = ctx.saved_tensors\n device = grad_output.device\n S = sign_z != 0\n S[:, 0] = True\n sign_z[:, 0] = 0\n # XXX do clever computations\n L = torch.triu(torch.ones((n_dim, n_dim), dtype=torch.float64,\n device=device))\n\n grad_x, grad_lbda = [], []\n for i in range(batch_size):\n L_S = L[:, S[i]] # n_dim x |S|\n grad_u = grad_output[i].matmul(L_S) # 1 x |S|\n H_S = torch.inverse(L_S.t().matmul(L_S))\n grad_x.append(grad_u.matmul(H_S.matmul(L_S.t())))\n grad_lbda.append(grad_u.matmul(H_S.matmul(-sign_z[i][S[i]])))\n grad_x = torch.stack(grad_x)\n grad_lbda = torch.stack(grad_lbda)\n return (grad_x, grad_lbda)"
]
| [
"0.75364095",
"0.73951644",
"0.73047554",
"0.72159046",
"0.71643806",
"0.7082641",
"0.6987794",
"0.69790816",
"0.69370174",
"0.693398",
"0.6933647",
"0.6910771",
"0.6896396",
"0.6881379",
"0.6833048",
"0.6805785",
"0.68022525",
"0.6769649",
"0.6757795",
"0.6740955",
"0.6728636",
"0.6708606",
"0.66351277",
"0.66144127",
"0.65414304",
"0.6484158",
"0.6464822",
"0.6462691",
"0.644929",
"0.64155024"
]
| 0.78398657 | 0 |
Submit a cluster of 5 scheduler universe jobs with equal priority, and wait until they finish running. | def submit_equal_priority_jobs(default_condor, path_to_sleep):
cluster = default_condor.submit(
{
"executable": path_to_sleep,
"arguments": "1",
"universe": "scheduler",
"log": "scheduler_priority-equal.log",
},
count=NUM_JOBS,
)
cluster.wait(condition=ClusterState.all_terminal)
return cluster | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def submitJobs(numOfScript):\n submit = False\n\n for i in range(numOfScript):\n if submit == False:\n numOfRun = int(os.popen('apstat | grep wchen | wc -l').read())\n maxJobs = int(open('runJobs.conf').read())\n jobToSubmit = maxJobs - numOfRun\n print 'job to submit', jobToSubmit\n submit = True\n if submit == True:\n if jobToSubmit != 0 :\n print 'Job', i, 'submitted'\n os.system('qsub sat.pbs -v n='+str(i+1))\n jobToSubmit = jobToSubmit - 1\n else :\n submit = False\n time.sleep(10)",
"def _runJobs (self):\n\t\t# submit jobs\n\t\tdef sworker (q):\n\t\t\t\"\"\"\n\t\t\tThe worker to run jobs\n\t\t\t\"\"\"\n\t\t\twhile True:\n\t\t\t\t(run, i) = q.get()\n\t\t\t\tsleep (i)\n\t\t\t\tif run.isRunning():\n\t\t\t\t\tself.log (\"Job #%s is already running, skip submitting.\" % run.job.index, 'info')\n\t\t\t\telse:\n\t\t\t\t\trun.submit()\n\t\t\t\trun.wait() \n\t\t\t\trun.finish()\n\t\t\t\tq.task_done()\n\t\t\n\t\trunner = proc.RUNNERS[self.runner]\n\t\tmaxsubmit = self.forks\n\t\tif hasattr(runner, 'maxsubmit'): \n\t\t\tmaxsubmit = runner.maxsubmit\n\t\tinterval = .1\n\t\tif hasattr(runner, 'interval'): \n\t\t\tinterval = runner.interval\n\t\t\n\t\tsq = Queue()\n\t\tfor i in self.ncjobids:\n\t\t\trjob = runner (self.jobs[i])\n\t\t\ttm = int(i/maxsubmit) * interval\n\t\t\tsq.put ((rjob, tm))\n\n\t\t# submit jobs\n\t\tnojobs2submit = min (self.forks, len(self.ncjobids))\n\t\tfor i in range (nojobs2submit):\n\t\t\tt = threading.Thread(target = sworker, args = (sq, ))\n\t\t\tt.daemon = True\n\t\t\tt.start ()\n\t\t\n\t\tsq.join()",
"def submit_unequal_priority_jobs(default_condor, path_to_sleep):\n cluster = default_condor.submit(\n {\n \"executable\": path_to_sleep,\n \"arguments\": \"1\",\n \"universe\": \"scheduler\",\n \"log\": \"scheduler_priority-unequal.log\",\n \"priority\": \"$(process)\",\n },\n count=NUM_JOBS,\n )\n cluster.wait(condition=ClusterState.all_terminal)\n return cluster",
"def run_jobs(**kwargs): # pylint: disable=W0613\n\n root_nodes, job_instances_map = build_graph(ctx.nodes)\n monitor = Monitor(job_instances_map, ctx.logger)\n\n # Execution of first job instances\n tasks_list = []\n for root in root_nodes:\n tasks_list += root.queue_all_instances()\n monitor.add_node(root)\n wait_tasks_to_finish(tasks_list)\n\n # Monitoring and next executions loop\n while monitor.is_something_executing() and not api.has_cancel_request():\n # Monitor the infrastructure\n monitor.update_status()\n exec_nodes_finished = []\n new_exec_nodes = []\n for node_name, exec_node in monitor.get_executions_iterator():\n if exec_node.check_status():\n if exec_node.completed:\n exec_node.clean_all_instances()\n exec_nodes_finished.append(node_name)\n new_nodes_to_execute = exec_node.get_children_ready()\n for new_node in new_nodes_to_execute:\n new_exec_nodes.append(new_node)\n else:\n # Something went wrong in the node, cancel execution\n cancel_all(monitor.get_executions_iterator())\n return\n\n # remove finished nodes\n for node_name in exec_nodes_finished:\n monitor.finish_node(node_name)\n # perform new executions\n tasks_list = []\n for new_node in new_exec_nodes:\n tasks_list += new_node.queue_all_instances()\n monitor.add_node(new_node)\n wait_tasks_to_finish(tasks_list)\n\n if monitor.is_something_executing():\n cancel_all(monitor.get_executions_iterator())\n\n ctx.logger.info(\n \"------------------Workflow Finished-----------------------\")\n return",
"def schedule_jobs(self):\n for species in self.species_dict.values():\n if species.initial_xyz is None and species.final_xyz is None and species.conformers \\\n and any([e is not None for e in species.conformer_energies]):\n # The species has no xyz, but has conformers and at least one of the conformers has energy.\n self.determine_most_stable_conformer(species.label)\n if species.initial_xyz is not None:\n if self.composite_method:\n self.run_composite_job(species.label)\n else:\n self.run_opt_job(species.label, fine=self.fine_only)\n self.run_conformer_jobs()\n self.spawn_ts_jobs() # If all reactants/products are already known (Arkane yml or restart), spawn TS searches.\n while self.running_jobs != {}:\n self.timer = True\n for label in self.unique_species_labels:\n if self.output[label]['convergence'] is False:\n # Skip unconverged species.\n if label in self.running_jobs:\n del self.running_jobs[label]\n continue\n # Look for completed jobs and decide what jobs to run next.\n self.get_server_job_ids() # updates ``self.server_job_ids``\n self.get_completed_incore_jobs() # updates ``self.completed_incore_jobs``\n if label not in self.running_jobs.keys():\n continue\n job_list = self.running_jobs[label]\n for job_name in job_list:\n if 'conformer' in job_name:\n i = get_i_from_job_name(job_name)\n job = self.job_dict[label]['conformers'][i]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # this is a completed conformer job\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n troubleshooting_conformer = self.parse_conformer(job=job, label=label, i=i)\n if troubleshooting_conformer:\n break\n # Just terminated a conformer job.\n # Are there additional conformer jobs currently running for this species?\n for spec_jobs in job_list:\n if 'conformer' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All conformer jobs terminated.\n # Check isomorphism and run opt on most stable conformer geometry.\n logger.info(f'\\nConformer jobs for {label} successfully terminated.\\n')\n if self.species_dict[label].is_ts:\n self.determine_most_likely_ts_conformer(label)\n else:\n self.determine_most_stable_conformer(label) # also checks isomorphism\n if self.species_dict[label].initial_xyz is not None:\n # if initial_xyz is None, then we're probably troubleshooting conformers, don't opt\n if not self.composite_method:\n self.run_opt_job(label, fine=self.fine_only)\n else:\n self.run_composite_job(label)\n self.timer = False\n break\n if 'tsg' in job_name:\n job = self.job_dict[label]['tsg'][get_i_from_job_name(job_name)]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # This is a successfully completed tsg job. It may have resulted in several TSGuesses.\n self.end_job(job=job, label=label, job_name=job_name)\n if job.local_path_to_output_file.endswith('.yml'):\n for rxn in job.reactions:\n rxn.ts_species.process_completed_tsg_queue_jobs(yml_path=job.local_path_to_output_file)\n # Just terminated a tsg job.\n # Are there additional tsg jobs currently running for this species?\n for spec_jobs in job_list:\n if 'tsg' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All tsg jobs terminated. 
Spawn confs.\n logger.info(f'\\nTS guess jobs for {label} successfully terminated.\\n')\n self.run_conformer_jobs(labels=[label])\n self.timer = False\n break\n elif 'opt' in job_name:\n # val is 'opt1', 'opt2', etc., or 'optfreq1', optfreq2', etc.\n job = self.job_dict[label]['opt'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_opt_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'freq' in job_name:\n # this is NOT an 'optfreq' job\n job = self.job_dict[label]['freq'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_freq_job(label=label, job=job)\n self.timer = False\n break\n elif 'sp' in job_name:\n job = self.job_dict[label]['sp'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_sp_job(label=label, job=job)\n self.timer = False\n break\n elif 'composite' in job_name:\n job = self.job_dict[label]['composite'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_composite_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'directed_scan' in job_name:\n job = self.job_dict[label]['directed_scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_directed_scan_job(label=label, job=job)\n if 'cont' in job.directed_scan_type and job.job_status[1]['status'] == 'done':\n # This is a continuous restricted optimization, spawn the next job in the scan.\n xyz = parser.parse_xyz_from_file(job.local_path_to_output_file) \\\n if not hasattr(job, 'opt_xyz') else job.opt_xyz\n self.spawn_directed_scan_jobs(label=label, rotor_index=job.rotor_index, xyz=xyz)\n if 'brute_force' in job.directed_scan_type:\n # Just terminated a brute_force directed scan job.\n # Are there additional jobs of the same type currently running for this species?\n self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs'] -= 1\n if not self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs']:\n # All brute force scan jobs for these pivots terminated.\n logger.info(f'\\nAll brute force directed scan jobs for species {label} between '\n f'pivots {job.pivots} successfully terminated.\\n')\n self.process_directed_scans(label, pivots=job.pivots)\n shutil.rmtree(job.local_path, ignore_errors=True)\n self.timer = False\n break\n elif 'scan' in job_name and 'directed' not in job_name:\n job = self.job_dict[label]['scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = 
self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination \\\n and (job.directed_scan_type is None or job.directed_scan_type == 'ess'):\n self.check_scan_job(label=label, job=job)\n self.timer = False\n break\n elif 'irc' in job_name:\n job = self.job_dict[label]['irc'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.spawn_post_irc_jobs(label=label, job=job)\n self.timer = False\n break\n elif 'orbitals' in job_name:\n job = self.job_dict[label]['orbitals'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # copy the orbitals file to the species / TS output folder\n folder_name = 'rxns' if self.species_dict[label].is_ts else 'Species'\n orbitals_path = os.path.join(self.project_directory, 'output', folder_name, label,\n 'geometry', 'orbitals.fchk')\n if os.path.isfile(job.local_path_to_orbitals_file):\n try:\n shutil.copyfile(job.local_path_to_orbitals_file, orbitals_path)\n except shutil.SameFileError:\n pass\n self.timer = False\n break\n elif 'onedmin' in job_name:\n job = self.job_dict[label]['onedmin'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # Copy the lennard_jones file to the species output folder (TS's don't have L-J data).\n lj_output_path = os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat')\n if os.path.isfile(job.local_path_to_lj_file):\n try:\n shutil.copyfile(job.local_path_to_lj_file, lj_output_path)\n except shutil.SameFileError:\n pass\n self.output[label]['job_types']['onedmin'] = True\n self.species_dict[label].set_transport_data(\n lj_path=os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat'),\n opt_path=self.output[label]['paths']['geo'], bath_gas=job.bath_gas,\n opt_level=self.opt_level)\n self.timer = False\n break\n\n if not len(job_list):\n self.check_all_done(label)\n if not self.running_jobs[label]:\n # Delete the label only if it represents an empty entry.\n del self.running_jobs[label]\n\n if self.timer and len(job_list):\n time.sleep(30) # wait 30 sec before bugging the servers again.\n t = time.time() - self.report_time\n if t > 3600 and self.running_jobs:\n self.report_time = time.time()\n logger.info(f'Currently running jobs:\\n{pprint.pformat(self.running_jobs)}')\n\n # Generate a TS report:\n self.generate_final_ts_guess_report()",
"async def test_max_processes(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # 2 maximum tasks\n\n # 1 runs at 1 second\n # 2 runs at 2 seconds\n # 3 runs at 11 seconds\n # 4 runs at 12 seconds\n # 5 runs at 21 seconds\n # 6 runs at 22 seconds\n # 7 runs at 31 seconds\n # 8 runs at 32 seconds\n # Total: 6\n\n scheduler.max_running_tasks = 2 # set the maximum number of running tasks in parallel\n\n # Set interval schedule configuration\n interval_schedule = IntervalSchedule()\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.name = 'max active'\n interval_schedule.exclusive = False\n interval_schedule.process_name = 'sleep10'\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(30.3)\n scheduler.max_running_tasks = 0 # set the maximum number of running tasks in parallel\n\n tasks = await scheduler.get_tasks(10)\n assert len(tasks) == 6\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 2\n\n # They end...\n await asyncio.sleep(20)\n\n scheduler.max_running_tasks = 10\n\n await asyncio.sleep(11)\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 10\n\n await self.stop_scheduler(scheduler)",
"def submit_jobs(args, udf_command):\n hosts = []\n thread_list = []\n server_count_per_machine = 0\n\n # Get the host addresses of the cluster.\n ip_config = args.ip_config\n with open(ip_config) as f:\n for line in f:\n result = line.strip().split()\n if len(result) >= 3:\n ip = result[0]\n host = result[2]\n hosts.append((ip, host))\n else:\n raise RuntimeError(\"Format error of ip_config.\")\n server_count_per_machine = args.num_servers\n assert args.num_parts == len(hosts), \\\n 'The number of graph partitions has to match the number of machines in the cluster.'\n\n tot_num_clients = args.num_trainers * (1 + args.num_samplers) * len(hosts)\n # launch server tasks\n server_cmd = 'DGL_ROLE=server DGL_NUM_SAMPLER=' + str(args.num_samplers)\n server_cmd = server_cmd + ' ' + 'OMP_NUM_THREADS=' + str(args.num_server_threads)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n server_cmd = server_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n server_cmd = server_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n for i in range(len(hosts)*server_count_per_machine):\n _, pod_name = hosts[int(i / server_count_per_machine)]\n cmd = server_cmd + ' ' + 'DGL_SERVER_ID=' + str(i)\n cmd = cmd + ' ' + udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n # launch client tasks\n client_cmd = 'DGL_DIST_MODE=\"distributed\" DGL_ROLE=client DGL_NUM_SAMPLER=' + str(args.num_samplers)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n client_cmd = client_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n client_cmd = client_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n if os.environ.get('OMP_NUM_THREADS') is not None:\n client_cmd = client_cmd + ' ' + 'OMP_NUM_THREADS=' + os.environ.get('OMP_NUM_THREADS')\n if os.environ.get('PYTHONPATH') is not None:\n client_cmd = client_cmd + ' ' + 'PYTHONPATH=' + os.environ.get('PYTHONPATH')\n\n torch_cmd = '-m torch.distributed.launch'\n torch_cmd = torch_cmd + ' ' + '--nproc_per_node=' + str(args.num_trainers)\n torch_cmd = torch_cmd + ' ' + '--nnodes=' + str(len(hosts))\n torch_cmd = torch_cmd + ' ' + '--node_rank=' + str(0)\n torch_cmd = torch_cmd + ' ' + '--master_addr=' + str(hosts[0][0])\n torch_cmd = torch_cmd + ' ' + '--master_port=' + str(1234)\n for node_id, tu in enumerate(hosts):\n _, pod_name = tu\n new_torch_cmd = torch_cmd.replace('node_rank=0', 'node_rank='+str(node_id))\n if 'python3' in udf_command:\n new_udf_command = udf_command.replace('python3', 'python3 ' + new_torch_cmd)\n elif 'python2' in udf_command:\n new_udf_command = udf_command.replace('python2', 'python2 ' + new_torch_cmd)\n else:\n new_udf_command = udf_command.replace('python', 'python ' + new_torch_cmd)\n cmd = client_cmd + ' ' + new_udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n\n for thread in thread_list:\n thread.join()",
"def submit_cpucores():\n # TODO(soltesz): move static value to an external, inventory table.\n submit_generic(_root_hostname, 'cpu_cores', 'gauge', _CPU_COUNT)",
"def submit_cluster_batch_file(self, num_bundles):\n\n import os\n import re\n import getpass\n import commands\n from time import strftime\n from indi_schedulers import cluster_templates\n\n print \"Submitting cluster job to %s..\" % self._platform\n\n # Create cluster log dir\n cluster_files_dir = \\\n os.path.join(self._config[\"output_directory\"], \"cluster_files\")\n if not os.path.exists(cluster_files_dir):\n os.makedirs(cluster_files_dir)\n\n # Batch file variables\n timestamp = str(strftime(\"%Y_%m_%d_%H_%M_%S\"))\n shell = commands.getoutput('echo $SHELL')\n user_account = getpass.getuser()\n\n # Set up config dictionary\n config_dict = {'timestamp': timestamp,\n 'shell': shell,\n 'job_name': self._run_name,\n 'num_tasks': num_bundles,\n 'queue': \"all.q\",\n 'par_env': \"mpi_smp\",\n 'cores_per_task': self._config[\"num_processors\"],\n 'user': user_account,\n 'work_dir': cluster_files_dir}\n\n # Get string template for job scheduler\n if self._platform == \"PBS\":\n env_arr_idx = '$PBS_ARRAYID'\n batch_file_contents = cluster_templates.pbs_template\n confirm_str = '(?<=Your job-array )\\d+'\n exec_cmd = 'qsub'\n elif self._platform == \"SGE\":\n env_arr_idx = '$SGE_TASK_ID'\n batch_file_contents = cluster_templates.sge_template\n confirm_str = '(?<=Your job-array )\\d+'\n exec_cmd = 'qsub'\n elif self._platform == \"SLURM\":\n hrs_limit = 8 * num_bundles\n time_limit = '%d:00:00' % hrs_limit\n config_dict[\"time_limit\"] = time_limit\n env_arr_idx = '$SLURM_ARRAY_TASK_ID'\n batch_file_contents = cluster_templates.slurm_template\n confirm_str = '(?<=Submitted batch job )\\d+'\n exec_cmd = 'sbatch'\n\n config_dict['env_arr_idx'] = env_arr_idx\n config_dict['run_cmd'] = 'echo \"Running task: %s\"' % env_arr_idx\n\n # Populate string from config dict values\n batch_file_contents = batch_file_contents % config_dict\n\n run_str = \"qap_measures_pipeline.py --bundle_idx %s --log_dir %s %s \"\\\n \"%s\" % (env_arr_idx, self._run_log_dir,\n self._config[\"subject_list\"],\n self._config[\"pipeline_config_yaml\"])\n\n batch_file_contents = \"\\n\".join([batch_file_contents, run_str])\n\n batch_filepath = os.path.join(cluster_files_dir, 'cpac_submit_%s.%s'\n % (timestamp, self._platform))\n\n with open(batch_filepath, 'w') as f:\n f.write(batch_file_contents)\n\n print \"Batch file written to %s..\" % batch_filepath\n\n # Get output response from job submission\n out = commands.getoutput('%s %s' % (exec_cmd, batch_filepath))\n\n # Check for successful qsub submission\n if re.search(confirm_str, out) == None:\n err_msg = 'Error submitting QAP pipeline run to %s queue' \\\n % self._platform\n raise Exception(err_msg)\n\n print \"Batch job submitted to %s queue.\" % self._platform\n\n # Get pid and send to pid file\n pid = re.search(confirm_str, out).group(0)\n pid_file = os.path.join(cluster_files_dir, 'pid.txt')\n with open(pid_file, 'w') as f:\n f.write(pid)",
"def lantern_jobs():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not sending Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Sending Lantern jobs\".format(x=dates.now())\n LanternApi.make_new_jobs()",
"def submit(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be type list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n data = {'jobs': jobs}\n for j in data['jobs']:\n # generate a random UUID if absent\n if 'uuid' not in j:\n j['uuid'] = str(uuid1())\n\n # default missing fields\n j.update(dict(self._default_job_settings.items() + j.items()))\n\n self._job_schema.validate(jobs)\n\n try:\n self._api_post(self._scheduler_endpoint, data)\n return [j['uuid'] for j in data['jobs']]\n except HTTPError as e:\n raise JobClientError(e.message)",
"def submit(self):\n for async_femag in self.async_femags:\n async_femag.start()\n\n for task in self.job.tasks:\n self.queue.put(task)\n\n return len(self.job.tasks)",
"def _get_njobs_in_queue(self, username):",
"def parallel_provision_server(self, bodies, **kwargs):\n thrd_no = kwargs.get('count', 9)\n thrd_out = kwargs.get('timeout', 60)\n thrd_poll = kwargs.get('interval', 60)\n threads = list()\n\n # Identify time out for the keyword\n _t = 1 if len(bodies) < thrd_no else (len(bodies) / thrd_no) + 1\n time_out = datetime.now() + timedelta(minutes=thrd_out * _t)\n\n while time_out > datetime.now():\n LOG.publish_message(timeout=2)\n\n for index, item in enumerate(threads):\n if not item.is_alive():\n threads.pop(index)\n\n if len(threads) < thrd_no and len(bodies):\n t = Thread(target=self._parallel_deploy,\n args=(bodies.pop(), thrd_out, thrd_poll,))\n t.daemon = True\n t.start()\n threads.append(t)\n elif not len(bodies) and not len(threads):\n # No pending tasks or jobs\n break\n else:\n # There are some pending tasks or jobs\n sleep(5)\n\n LOG.publish_message(timeout=60)\n return self.results",
"def main(order_count):\n for id in range(MAX_ORDERS):\n while active_count() > MAX_QUEUE:\n print(\"..All permitted threads running: waiting\")\n sleep(LOOP_TIMEOUT)\n print(\"..Finished waiting\")\n o = Thread(target=order_gen, kwargs={\"id\": id})\n o.start()",
"def scheduler(self):\n while True:\n if self.sch.empty():\n self.log.info(\"No scheduled jobs detected. Entering idle state\")\n bits = bitarray()\n # generate random 7B bitarrays\n for _ in range(pow(self.cube_dim,3)):\n bits.append(bool(random.getrandbits(1)))\n self.sch.enter(self.transmit_freq, 4, self.transmit, argument=(0, bits), kwargs={})\n else:\n try:\n self.log.info(\"Scheduled jobs detected. Serving through scheduler runner\")\n self.sch.run()\n except IOError as exc:\n self.log.exception(\"\"\"Scheduler runner encountered an error while executing the \n top level event: %s\"\"\", exc)\n sys.exit(1) # exit with status code 1",
"def try_submit_jobs(output, verbose):\n cluster, promoted = Cluster.deserialize(\n output,\n try_promote_to_submitter=True,\n deserialize_jobs=True,\n )\n if not promoted:\n print(\"Another node is already the submitter.\")\n sys.exit(0)\n\n # Only create the logger if we get promoted.\n filename = os.path.join(output, \"submit_jobs.log\")\n event_filename = os.path.join(output, \"submit_jobs_events.log\")\n setup_event_logging(event_filename, mode=\"a\")\n level = logging.DEBUG if verbose else logging.INFO\n logger = setup_logging(__name__, filename, file_level=level, console_level=level, mode=\"a\")\n logger.info(get_cli_string())\n\n if cluster.is_complete():\n cluster.demote_from_submitter()\n logger.info(\"All jobs are already finished.\")\n sys.exit(0)\n\n ret = 1\n try:\n mgr = JobSubmitter.load(output)\n status = mgr.submit_jobs(cluster)\n if status == Status.IN_PROGRESS:\n check_cmd = f\"jade show-status -o {output}\"\n print(f\"Jobs are in progress. Run '{check_cmd}' for updates.\")\n ret = 0\n else:\n ret = status.value\n except Exception:\n logger.exception(\"Failed to try-submit-jobs\")\n raise\n finally:\n cluster.demote_from_submitter()\n\n sys.exit(ret)",
"async def _conquer_planets_using_buckets(\n context: Anacreon,\n planets: List[World],\n *,\n fleet_buckets: List[FleetBucket],\n) -> None:\n logger = logging.getLogger(\"Conquer planets\")\n\n # Step 1: ensure that we have ids for all the fleets\n logger.info(\"we are going to conquer the following planets\")\n fstr = (\n TermColors.BOLD\n + \"{0!s:60}\"\n + TermColors.ENDC\n + \"{1!s:10}{2!s:10}{3!s:10}{4!s:10}{5!s:10}\"\n )\n logger.info(fstr.format(\"name\", \"gf\", \"sf\", \"missilef\", \"mode\", \"id\"))\n\n # Step 2: Sort them into queues.\n for world in planets:\n if world.resources is not None:\n force = context.calculate_forces(world.resources)\n for bucket in fleet_buckets:\n if bucket.can_attack_world(world):\n bucket.add_world_to_queue(world)\n logger.info(\n fstr.format(\n world.name,\n force.ground_forces,\n force.space_forces,\n force.missile_forces,\n bucket.bucket_name,\n world.id,\n )\n )\n break # break out of bucket iteration loop\n\n input(\"Press [ENTER] to continue, or Ctrl+C to cancel\")\n # Step 3: fire up coroutines\n def future_callback(fut: asyncio.Future[Any]) -> None:\n logger.info(\"A future has completed!\")\n if (exc := fut.exception()) is not None:\n logger.error(\"Error occured on future!\", exc_info=exc)\n\n logger.info(\"Firing up coroutines . . .\")\n fleet_bucket_futures: \"List[asyncio.Task[None]]\" = []\n\n for bucket in fleet_buckets:\n fleet_bucket_futures.extend(bucket.send_fleets_to_attack())\n\n for future in fleet_bucket_futures:\n future.add_done_callback(future_callback)\n\n logger.info(\"Coroutines turned on, waiting for queues to empty . . .\")\n await asyncio.gather(*(bucket.queue.join() for bucket in fleet_buckets))\n\n logger.info(\n \"Queues are empty, waiting five minutes before forcefully cancelling futures\"\n )\n await asyncio.sleep(5 * 60)\n for future in fleet_bucket_futures:\n if not future.done():\n logger.warning(\"Had to cancel a coroutine ... why wasn't it done?\")\n future.cancel()",
"def do_jobs(self, job_list, wait_time=2, max_iters=100):\n for task_name in job_list:\n self.do_job(\n task_name,\n wait_time=wait_time,\n max_iters=max_iters\n )",
"def submit(fragment,njobs,nevts,outdir=\"\",first=None,indices=None,logdir=\"\",tag=\"\",dry=False,slc6=False,verb=0):\n print(\">>> Submitting...\")\n indir = os.path.dirname(fragment) or '.'\n fullfrag = os.path.abspath(fragment)\n ensuredir(os.path.join(indir,logdir)) # log directory\n ensuredir(outdir) # ensure output directory exists before submitting\n #args = f\"{outdir} {fullfrag} maxevts={nevts} index=$(ProcId) seed=$$([$(ProcId)+1])\" # start from 0\n args = f\"{outdir} {fullfrag} maxevts={nevts} index=$$([$(ProcId)+1]) seed=$$([$(ProcId)+1])\" # start from 1\n if tag:\n args += f\" tag={tag}\"\n if indices:\n indices_ = [ ]\n for index in indices:\n if isinstance(index,str) and index.count(':')==1:\n start, end = index.split(':') # e.g. '1:4' = [1, 2, 3, 4]\n for i in range(int(start),int(end)+1):\n indices_.append(i)\n else:\n indices_.append(int(index))\n args = args.replace('$(ProcId)','$(i)')\n queue = f\"-queue i in {', '.join(str(i) for i in indices_)}\"\n #queue = f\"-a 'queue i from ( {', '.join(str(i) for i in indices_)} )'\"\n elif first:\n args = args.replace('$(ProcId)','$(i)')\n queue = f\"-queue i from seq {first} {first+njobs-1} \\|\"\n #queue = f\"-a 'queue from seq {first} {njobs}|'\"\n else:\n queue = f\"-queue {njobs}\"\n name = f\"{os.path.basename(fragment).replace('.py','')}\"\n log = os.path.join(logdir,f\"submit_fragment{tag}.$(ClusterId).$(ProcId).log\")\n subcmd = f\"condor_submit submit_fragment.sub -a 'initialdir={indir}' -a 'mylogfile={log}'\"\n subcmd += f\" -a 'arguments={args}'\" # -a 'should_transfer_files=no'\n subcmd += f\" -batch-name {name} {queue}\" #-queue '{queue}'\n if slc6:\n subcmd += f\" -a 'requirements = (OpSysAndVer =?= \\\"SLCern6\\\")'\"\n if verb>=4:\n subcmd += \" -verbose\"\n print(\">>> \"+subcmd)\n if not dry:\n os.system(subcmd)",
"def run(self):\n\n logging.info(\"Pool scheduler started\")\n\n # do forever\n while True:\n\n try :\n # get job information about new jobs\n JobStatus.addNewJobs()\n\n # apply policy\n groups = self.applyPolicy()\n\n # any job to check?\n if len(groups) == 0:\n\n # no, wait for jobs to arrive\n logging.info( \"No work to do, \" + \\\n \"scheduler goes to sleep for \" + \\\n str(self.delay) + \" seconds\")\n sleep(self.delay)\n continue\n\n # new threads to start?\n if len(groups) >= self.threadsWorking:\n\n # yes, start threads\n for grp in groups:\n\n # but only for new groups\n if grp not in self.groupsUnderProcessing:\n\n # insert group ID into queue\n # to trigger thread start\n self.groupsUnderProcessing.add(grp)\n self.pool.enqueue(grp, grp)\n\n # wait for a thread to finish\n (group, result) = self.pool.dequeue()\n logging.info(\"Thread processing group \" + str(group) + \\\n \" has finished\")\n\n # decrement threads counter\n self.threadsWorking = self.threadsWorking - 1\n\n # remove its ID from groups\n self.groupsUnderProcessing.remove(group)\n\n # remove all finished jobs from this group\n JobStatus.removeFinishedJobs(group)\n\n except Exception, ex :\n import traceback\n logging.error( 'Error in PoolScheduler : [%s]' % str(ex) )\n logging.error( \"Traceback: %s\" % traceback.format_exc() )\n logging.error( \"PoolScheduler goes to sleep for \" + \\\n str(self.delay) + \" seconds\" )\n sleep(self.delay)",
"def _start_torque_workers(self):\n for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):\n resource_args = []\n\n request_cpus = self._compute_request_cpus(bundle)\n if request_cpus:\n resource_args.extend(['-l', 'nodes=1:ppn=%d' % request_cpus])\n\n request_memory = self._compute_request_memory(bundle)\n if request_memory:\n resource_args.extend(['-l', 'mem=%d' % request_memory])\n\n request_queue = bundle.metadata.request_queue or self._default_request_queue\n if request_queue:\n # Either host=<host-name> or <queue-name>, but not tag=<tag>\n m = re.match('host=(.+)', request_queue)\n tagm = re.match('tag=.+', request_queue)\n if m:\n resource_args.extend(['-l', 'host=' + m.group(1)])\n elif not tagm:\n resource_args.extend(['-q', request_queue])\n\n request_priority = bundle.metadata.request_priority or self._default_request_priority\n if request_priority:\n resource_args.extend(['-p', str(request_priority)])\n\n script_args = [\n '--server', self._torque_bundle_service_url,\n '--password-file', self._torque_password_file,\n '--shared-file-system',\n ]\n\n script_env = {\n 'LOG_DIR': self._torque_log_dir,\n 'WORKER_CODE_DIR': self._torque_worker_code_dir,\n # -v doesn't work with spaces, so we have to hack it.\n 'WORKER_ARGS': '|'.join(script_args),\n }\n\n command = self._torque_ssh_command(\n ['qsub',\n '-k', 'n', # do not keep stdout/stderr streams (we redirect them manually to the configured log_dir)\n '-d', '/tmp', # avoid chdir permission problems, worker won't do anything in working directory anyway\n '-v', ','.join([k + '=' + v for k, v in script_env.iteritems()])] +\n resource_args +\n ['-S', '/bin/bash', os.path.join(self._torque_worker_code_dir, 'worker.sh')])\n\n # Throttle Torque commands, sometimes scheduler has trouble keeping up\n elapsed = time.time() - self._last_qsub_time\n if elapsed < self._torque_min_seconds_between_qsub:\n time.sleep(self._torque_min_seconds_between_qsub - elapsed)\n\n try:\n job_handle = subprocess.check_output(command, stderr=subprocess.STDOUT).strip()\n except subprocess.CalledProcessError as e:\n failure_message = 'Failed to launch Torque job: ' + e.output\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})\n continue\n finally:\n self._last_qsub_time = time.time()\n\n logger.info('Started Torque worker for bundle %s, job handle %s', bundle.uuid, job_handle)\n self._model.set_waiting_for_worker_startup_bundle(bundle, job_handle)",
"def run_jobs(num_runs):\n\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"submit_run.*.sh\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'submit_run.*.sh' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE)# list SGE submit files\n out = p.stdout.read()\n \n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n\n if len(fnames) > 0: del fnames[-1]\n\n # determine whether 'qsub' command is available\n if (is_valid_command('qsub')): # run the commands jobs using qsub\n for fname in fnames:\n p = subprocess.Popen(\"qsub %s\" % fname, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out\n print \"Jobs submitted.\"\n else: # run the commands sequentially without using qsub\n print \"Error: 'qsub' is an invalid command.\"\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"run.*.py\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'run.*.py' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list SGE submit files\n out = p.stdout.read()\n\n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n if len(fnames) > 0: del fnames[-1]\n\n for fname in fnames:\n for i in range(num_runs):\n if verbose:\n print \"Executing command: python %s %d\" % (fname, i)\n p = subprocess.Popen(\"python %s %d\" % (fname, i), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out",
"def spawn_ts_jobs(self):\n for rxn in self.rxn_list:\n rxn.check_done_opt_r_n_p()\n if rxn.done_opt_r_n_p and not rxn.ts_species.tsg_spawned:\n if rxn.multiplicity is None:\n logger.info(f'Not spawning TS search jobs for reaction {rxn} for which the multiplicity is unknown.')\n else:\n rxn.ts_species.tsg_spawned = True\n tsg_index = 0\n for method in self.ts_adapters:\n if method in all_families_ts_adapters or \\\n (rxn.family is not None\n and rxn.family.label in list(ts_adapters_by_rmg_family.keys())\n and method in ts_adapters_by_rmg_family[rxn.family.label]):\n self.run_job(job_type='tsg',\n job_adapter=method,\n reactions=[rxn],\n tsg=tsg_index,\n )\n tsg_index += 1\n if all('user guess' in tsg.method for tsg in rxn.ts_species.ts_guesses):\n rxn.ts_species.tsg_spawned = True\n self.run_conformer_jobs(labels=[rxn.ts_label])",
"def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]",
"def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)",
"def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)",
"def __run_schedules():\n while True:\n __scheduler.run()",
"def simple_jobs(driver):\n free = driver.execute_script('return Math.ceil(game.resources.trimps.realMax()/2) - game.resources.trimps.employed')\n if free == 0:\n return\n\n total = free\n for job in JOBS:\n current_job_number = driver.execute_script('return game.jobs[\"%s\"].owned' % job)\n total += current_job_number\n\n for job in JOBS:\n diff = int(total * JOBS_RATIOS[job]) - driver.execute_script('return game.jobs[\"%s\"].owned' % job)\n if diff <= 0:\n # nothing to see here\n continue\n\n buy = min(diff, free)\n buy_jobs(driver, job, buy)",
"def invoke_all_and_wait(self):\n list_promise = []\n for thread in self.__list_thread:\n thread.start()\n list_promise.append(thread)\n for process in list_promise: process.join()"
]
| [
"0.67436427",
"0.6306559",
"0.6293074",
"0.5875799",
"0.5839556",
"0.5705015",
"0.56819105",
"0.5659902",
"0.55891097",
"0.55879664",
"0.55686307",
"0.55011",
"0.54970616",
"0.54581",
"0.54405975",
"0.5399429",
"0.5384534",
"0.5361854",
"0.53499293",
"0.53348976",
"0.53125435",
"0.53082865",
"0.52934957",
"0.52832764",
"0.52816445",
"0.5270491",
"0.5270491",
"0.5239827",
"0.52384514",
"0.52220774"
]
| 0.6325101 | 1 |
Submit a cluster of 5 scheduler universe jobs with unequal priority (proc 1 has priority = 1, proc 2 has priority = 2, etc.) and wait until they finish running. | def submit_unequal_priority_jobs(default_condor, path_to_sleep):
cluster = default_condor.submit(
{
"executable": path_to_sleep,
"arguments": "1",
"universe": "scheduler",
"log": "scheduler_priority-unequal.log",
"priority": "$(process)",
},
count=NUM_JOBS,
)
cluster.wait(condition=ClusterState.all_terminal)
return cluster | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def submitJobs(numOfScript):\n submit = False\n\n for i in range(numOfScript):\n if submit == False:\n numOfRun = int(os.popen('apstat | grep wchen | wc -l').read())\n maxJobs = int(open('runJobs.conf').read())\n jobToSubmit = maxJobs - numOfRun\n print 'job to submit', jobToSubmit\n submit = True\n if submit == True:\n if jobToSubmit != 0 :\n print 'Job', i, 'submitted'\n os.system('qsub sat.pbs -v n='+str(i+1))\n jobToSubmit = jobToSubmit - 1\n else :\n submit = False\n time.sleep(10)",
"def submit_equal_priority_jobs(default_condor, path_to_sleep):\n cluster = default_condor.submit(\n {\n \"executable\": path_to_sleep,\n \"arguments\": \"1\",\n \"universe\": \"scheduler\",\n \"log\": \"scheduler_priority-equal.log\",\n },\n count=NUM_JOBS,\n )\n cluster.wait(condition=ClusterState.all_terminal)\n return cluster",
"def _runJobs (self):\n\t\t# submit jobs\n\t\tdef sworker (q):\n\t\t\t\"\"\"\n\t\t\tThe worker to run jobs\n\t\t\t\"\"\"\n\t\t\twhile True:\n\t\t\t\t(run, i) = q.get()\n\t\t\t\tsleep (i)\n\t\t\t\tif run.isRunning():\n\t\t\t\t\tself.log (\"Job #%s is already running, skip submitting.\" % run.job.index, 'info')\n\t\t\t\telse:\n\t\t\t\t\trun.submit()\n\t\t\t\trun.wait() \n\t\t\t\trun.finish()\n\t\t\t\tq.task_done()\n\t\t\n\t\trunner = proc.RUNNERS[self.runner]\n\t\tmaxsubmit = self.forks\n\t\tif hasattr(runner, 'maxsubmit'): \n\t\t\tmaxsubmit = runner.maxsubmit\n\t\tinterval = .1\n\t\tif hasattr(runner, 'interval'): \n\t\t\tinterval = runner.interval\n\t\t\n\t\tsq = Queue()\n\t\tfor i in self.ncjobids:\n\t\t\trjob = runner (self.jobs[i])\n\t\t\ttm = int(i/maxsubmit) * interval\n\t\t\tsq.put ((rjob, tm))\n\n\t\t# submit jobs\n\t\tnojobs2submit = min (self.forks, len(self.ncjobids))\n\t\tfor i in range (nojobs2submit):\n\t\t\tt = threading.Thread(target = sworker, args = (sq, ))\n\t\t\tt.daemon = True\n\t\t\tt.start ()\n\t\t\n\t\tsq.join()",
"def run_jobs(**kwargs): # pylint: disable=W0613\n\n root_nodes, job_instances_map = build_graph(ctx.nodes)\n monitor = Monitor(job_instances_map, ctx.logger)\n\n # Execution of first job instances\n tasks_list = []\n for root in root_nodes:\n tasks_list += root.queue_all_instances()\n monitor.add_node(root)\n wait_tasks_to_finish(tasks_list)\n\n # Monitoring and next executions loop\n while monitor.is_something_executing() and not api.has_cancel_request():\n # Monitor the infrastructure\n monitor.update_status()\n exec_nodes_finished = []\n new_exec_nodes = []\n for node_name, exec_node in monitor.get_executions_iterator():\n if exec_node.check_status():\n if exec_node.completed:\n exec_node.clean_all_instances()\n exec_nodes_finished.append(node_name)\n new_nodes_to_execute = exec_node.get_children_ready()\n for new_node in new_nodes_to_execute:\n new_exec_nodes.append(new_node)\n else:\n # Something went wrong in the node, cancel execution\n cancel_all(monitor.get_executions_iterator())\n return\n\n # remove finished nodes\n for node_name in exec_nodes_finished:\n monitor.finish_node(node_name)\n # perform new executions\n tasks_list = []\n for new_node in new_exec_nodes:\n tasks_list += new_node.queue_all_instances()\n monitor.add_node(new_node)\n wait_tasks_to_finish(tasks_list)\n\n if monitor.is_something_executing():\n cancel_all(monitor.get_executions_iterator())\n\n ctx.logger.info(\n \"------------------Workflow Finished-----------------------\")\n return",
"def submit_cpucores():\n # TODO(soltesz): move static value to an external, inventory table.\n submit_generic(_root_hostname, 'cpu_cores', 'gauge', _CPU_COUNT)",
"async def test_max_processes(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # 2 maximum tasks\n\n # 1 runs at 1 second\n # 2 runs at 2 seconds\n # 3 runs at 11 seconds\n # 4 runs at 12 seconds\n # 5 runs at 21 seconds\n # 6 runs at 22 seconds\n # 7 runs at 31 seconds\n # 8 runs at 32 seconds\n # Total: 6\n\n scheduler.max_running_tasks = 2 # set the maximum number of running tasks in parallel\n\n # Set interval schedule configuration\n interval_schedule = IntervalSchedule()\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.name = 'max active'\n interval_schedule.exclusive = False\n interval_schedule.process_name = 'sleep10'\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(30.3)\n scheduler.max_running_tasks = 0 # set the maximum number of running tasks in parallel\n\n tasks = await scheduler.get_tasks(10)\n assert len(tasks) == 6\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 2\n\n # They end...\n await asyncio.sleep(20)\n\n scheduler.max_running_tasks = 10\n\n await asyncio.sleep(11)\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 10\n\n await self.stop_scheduler(scheduler)",
"def test_equal_priority_jobs_run_in_submit_order(\n self, equal_priority_execute_events\n ):\n for i in range(1, NUM_JOBS):\n assert (\n JobID.from_job_event(equal_priority_execute_events[i]).proc\n > JobID.from_job_event(equal_priority_execute_events[i - 1]).proc\n )",
"def submit_jobs(args, udf_command):\n hosts = []\n thread_list = []\n server_count_per_machine = 0\n\n # Get the host addresses of the cluster.\n ip_config = args.ip_config\n with open(ip_config) as f:\n for line in f:\n result = line.strip().split()\n if len(result) >= 3:\n ip = result[0]\n host = result[2]\n hosts.append((ip, host))\n else:\n raise RuntimeError(\"Format error of ip_config.\")\n server_count_per_machine = args.num_servers\n assert args.num_parts == len(hosts), \\\n 'The number of graph partitions has to match the number of machines in the cluster.'\n\n tot_num_clients = args.num_trainers * (1 + args.num_samplers) * len(hosts)\n # launch server tasks\n server_cmd = 'DGL_ROLE=server DGL_NUM_SAMPLER=' + str(args.num_samplers)\n server_cmd = server_cmd + ' ' + 'OMP_NUM_THREADS=' + str(args.num_server_threads)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n server_cmd = server_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n server_cmd = server_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n for i in range(len(hosts)*server_count_per_machine):\n _, pod_name = hosts[int(i / server_count_per_machine)]\n cmd = server_cmd + ' ' + 'DGL_SERVER_ID=' + str(i)\n cmd = cmd + ' ' + udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n # launch client tasks\n client_cmd = 'DGL_DIST_MODE=\"distributed\" DGL_ROLE=client DGL_NUM_SAMPLER=' + str(args.num_samplers)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n client_cmd = client_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n client_cmd = client_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n if os.environ.get('OMP_NUM_THREADS') is not None:\n client_cmd = client_cmd + ' ' + 'OMP_NUM_THREADS=' + os.environ.get('OMP_NUM_THREADS')\n if os.environ.get('PYTHONPATH') is not None:\n client_cmd = client_cmd + ' ' + 'PYTHONPATH=' + os.environ.get('PYTHONPATH')\n\n torch_cmd = '-m torch.distributed.launch'\n torch_cmd = torch_cmd + ' ' + '--nproc_per_node=' + str(args.num_trainers)\n torch_cmd = torch_cmd + ' ' + '--nnodes=' + str(len(hosts))\n torch_cmd = torch_cmd + ' ' + '--node_rank=' + str(0)\n torch_cmd = torch_cmd + ' ' + '--master_addr=' + str(hosts[0][0])\n torch_cmd = torch_cmd + ' ' + '--master_port=' + str(1234)\n for node_id, tu in enumerate(hosts):\n _, pod_name = tu\n new_torch_cmd = torch_cmd.replace('node_rank=0', 'node_rank='+str(node_id))\n if 'python3' in udf_command:\n new_udf_command = udf_command.replace('python3', 'python3 ' + new_torch_cmd)\n elif 'python2' in udf_command:\n new_udf_command = udf_command.replace('python2', 'python2 ' + new_torch_cmd)\n else:\n new_udf_command = udf_command.replace('python', 'python ' + new_torch_cmd)\n cmd = client_cmd + ' ' + new_udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n\n for thread in thread_list:\n thread.join()",
"def submit(fragment,njobs,nevts,outdir=\"\",first=None,indices=None,logdir=\"\",tag=\"\",dry=False,slc6=False,verb=0):\n print(\">>> Submitting...\")\n indir = os.path.dirname(fragment) or '.'\n fullfrag = os.path.abspath(fragment)\n ensuredir(os.path.join(indir,logdir)) # log directory\n ensuredir(outdir) # ensure output directory exists before submitting\n #args = f\"{outdir} {fullfrag} maxevts={nevts} index=$(ProcId) seed=$$([$(ProcId)+1])\" # start from 0\n args = f\"{outdir} {fullfrag} maxevts={nevts} index=$$([$(ProcId)+1]) seed=$$([$(ProcId)+1])\" # start from 1\n if tag:\n args += f\" tag={tag}\"\n if indices:\n indices_ = [ ]\n for index in indices:\n if isinstance(index,str) and index.count(':')==1:\n start, end = index.split(':') # e.g. '1:4' = [1, 2, 3, 4]\n for i in range(int(start),int(end)+1):\n indices_.append(i)\n else:\n indices_.append(int(index))\n args = args.replace('$(ProcId)','$(i)')\n queue = f\"-queue i in {', '.join(str(i) for i in indices_)}\"\n #queue = f\"-a 'queue i from ( {', '.join(str(i) for i in indices_)} )'\"\n elif first:\n args = args.replace('$(ProcId)','$(i)')\n queue = f\"-queue i from seq {first} {first+njobs-1} \\|\"\n #queue = f\"-a 'queue from seq {first} {njobs}|'\"\n else:\n queue = f\"-queue {njobs}\"\n name = f\"{os.path.basename(fragment).replace('.py','')}\"\n log = os.path.join(logdir,f\"submit_fragment{tag}.$(ClusterId).$(ProcId).log\")\n subcmd = f\"condor_submit submit_fragment.sub -a 'initialdir={indir}' -a 'mylogfile={log}'\"\n subcmd += f\" -a 'arguments={args}'\" # -a 'should_transfer_files=no'\n subcmd += f\" -batch-name {name} {queue}\" #-queue '{queue}'\n if slc6:\n subcmd += f\" -a 'requirements = (OpSysAndVer =?= \\\"SLCern6\\\")'\"\n if verb>=4:\n subcmd += \" -verbose\"\n print(\">>> \"+subcmd)\n if not dry:\n os.system(subcmd)",
"def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)",
"def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)",
"def schedule_jobs(self):\n for species in self.species_dict.values():\n if species.initial_xyz is None and species.final_xyz is None and species.conformers \\\n and any([e is not None for e in species.conformer_energies]):\n # The species has no xyz, but has conformers and at least one of the conformers has energy.\n self.determine_most_stable_conformer(species.label)\n if species.initial_xyz is not None:\n if self.composite_method:\n self.run_composite_job(species.label)\n else:\n self.run_opt_job(species.label, fine=self.fine_only)\n self.run_conformer_jobs()\n self.spawn_ts_jobs() # If all reactants/products are already known (Arkane yml or restart), spawn TS searches.\n while self.running_jobs != {}:\n self.timer = True\n for label in self.unique_species_labels:\n if self.output[label]['convergence'] is False:\n # Skip unconverged species.\n if label in self.running_jobs:\n del self.running_jobs[label]\n continue\n # Look for completed jobs and decide what jobs to run next.\n self.get_server_job_ids() # updates ``self.server_job_ids``\n self.get_completed_incore_jobs() # updates ``self.completed_incore_jobs``\n if label not in self.running_jobs.keys():\n continue\n job_list = self.running_jobs[label]\n for job_name in job_list:\n if 'conformer' in job_name:\n i = get_i_from_job_name(job_name)\n job = self.job_dict[label]['conformers'][i]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # this is a completed conformer job\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n troubleshooting_conformer = self.parse_conformer(job=job, label=label, i=i)\n if troubleshooting_conformer:\n break\n # Just terminated a conformer job.\n # Are there additional conformer jobs currently running for this species?\n for spec_jobs in job_list:\n if 'conformer' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All conformer jobs terminated.\n # Check isomorphism and run opt on most stable conformer geometry.\n logger.info(f'\\nConformer jobs for {label} successfully terminated.\\n')\n if self.species_dict[label].is_ts:\n self.determine_most_likely_ts_conformer(label)\n else:\n self.determine_most_stable_conformer(label) # also checks isomorphism\n if self.species_dict[label].initial_xyz is not None:\n # if initial_xyz is None, then we're probably troubleshooting conformers, don't opt\n if not self.composite_method:\n self.run_opt_job(label, fine=self.fine_only)\n else:\n self.run_composite_job(label)\n self.timer = False\n break\n if 'tsg' in job_name:\n job = self.job_dict[label]['tsg'][get_i_from_job_name(job_name)]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # This is a successfully completed tsg job. It may have resulted in several TSGuesses.\n self.end_job(job=job, label=label, job_name=job_name)\n if job.local_path_to_output_file.endswith('.yml'):\n for rxn in job.reactions:\n rxn.ts_species.process_completed_tsg_queue_jobs(yml_path=job.local_path_to_output_file)\n # Just terminated a tsg job.\n # Are there additional tsg jobs currently running for this species?\n for spec_jobs in job_list:\n if 'tsg' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All tsg jobs terminated. 
Spawn confs.\n logger.info(f'\\nTS guess jobs for {label} successfully terminated.\\n')\n self.run_conformer_jobs(labels=[label])\n self.timer = False\n break\n elif 'opt' in job_name:\n # val is 'opt1', 'opt2', etc., or 'optfreq1', optfreq2', etc.\n job = self.job_dict[label]['opt'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_opt_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'freq' in job_name:\n # this is NOT an 'optfreq' job\n job = self.job_dict[label]['freq'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_freq_job(label=label, job=job)\n self.timer = False\n break\n elif 'sp' in job_name:\n job = self.job_dict[label]['sp'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_sp_job(label=label, job=job)\n self.timer = False\n break\n elif 'composite' in job_name:\n job = self.job_dict[label]['composite'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_composite_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'directed_scan' in job_name:\n job = self.job_dict[label]['directed_scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_directed_scan_job(label=label, job=job)\n if 'cont' in job.directed_scan_type and job.job_status[1]['status'] == 'done':\n # This is a continuous restricted optimization, spawn the next job in the scan.\n xyz = parser.parse_xyz_from_file(job.local_path_to_output_file) \\\n if not hasattr(job, 'opt_xyz') else job.opt_xyz\n self.spawn_directed_scan_jobs(label=label, rotor_index=job.rotor_index, xyz=xyz)\n if 'brute_force' in job.directed_scan_type:\n # Just terminated a brute_force directed scan job.\n # Are there additional jobs of the same type currently running for this species?\n self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs'] -= 1\n if not self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs']:\n # All brute force scan jobs for these pivots terminated.\n logger.info(f'\\nAll brute force directed scan jobs for species {label} between '\n f'pivots {job.pivots} successfully terminated.\\n')\n self.process_directed_scans(label, pivots=job.pivots)\n shutil.rmtree(job.local_path, ignore_errors=True)\n self.timer = False\n break\n elif 'scan' in job_name and 'directed' not in job_name:\n job = self.job_dict[label]['scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = 
self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination \\\n and (job.directed_scan_type is None or job.directed_scan_type == 'ess'):\n self.check_scan_job(label=label, job=job)\n self.timer = False\n break\n elif 'irc' in job_name:\n job = self.job_dict[label]['irc'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.spawn_post_irc_jobs(label=label, job=job)\n self.timer = False\n break\n elif 'orbitals' in job_name:\n job = self.job_dict[label]['orbitals'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # copy the orbitals file to the species / TS output folder\n folder_name = 'rxns' if self.species_dict[label].is_ts else 'Species'\n orbitals_path = os.path.join(self.project_directory, 'output', folder_name, label,\n 'geometry', 'orbitals.fchk')\n if os.path.isfile(job.local_path_to_orbitals_file):\n try:\n shutil.copyfile(job.local_path_to_orbitals_file, orbitals_path)\n except shutil.SameFileError:\n pass\n self.timer = False\n break\n elif 'onedmin' in job_name:\n job = self.job_dict[label]['onedmin'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # Copy the lennard_jones file to the species output folder (TS's don't have L-J data).\n lj_output_path = os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat')\n if os.path.isfile(job.local_path_to_lj_file):\n try:\n shutil.copyfile(job.local_path_to_lj_file, lj_output_path)\n except shutil.SameFileError:\n pass\n self.output[label]['job_types']['onedmin'] = True\n self.species_dict[label].set_transport_data(\n lj_path=os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat'),\n opt_path=self.output[label]['paths']['geo'], bath_gas=job.bath_gas,\n opt_level=self.opt_level)\n self.timer = False\n break\n\n if not len(job_list):\n self.check_all_done(label)\n if not self.running_jobs[label]:\n # Delete the label only if it represents an empty entry.\n del self.running_jobs[label]\n\n if self.timer and len(job_list):\n time.sleep(30) # wait 30 sec before bugging the servers again.\n t = time.time() - self.report_time\n if t > 3600 and self.running_jobs:\n self.report_time = time.time()\n logger.info(f'Currently running jobs:\\n{pprint.pformat(self.running_jobs)}')\n\n # Generate a TS report:\n self.generate_final_ts_guess_report()",
"def submit_cluster_batch_file(self, num_bundles):\n\n import os\n import re\n import getpass\n import commands\n from time import strftime\n from indi_schedulers import cluster_templates\n\n print \"Submitting cluster job to %s..\" % self._platform\n\n # Create cluster log dir\n cluster_files_dir = \\\n os.path.join(self._config[\"output_directory\"], \"cluster_files\")\n if not os.path.exists(cluster_files_dir):\n os.makedirs(cluster_files_dir)\n\n # Batch file variables\n timestamp = str(strftime(\"%Y_%m_%d_%H_%M_%S\"))\n shell = commands.getoutput('echo $SHELL')\n user_account = getpass.getuser()\n\n # Set up config dictionary\n config_dict = {'timestamp': timestamp,\n 'shell': shell,\n 'job_name': self._run_name,\n 'num_tasks': num_bundles,\n 'queue': \"all.q\",\n 'par_env': \"mpi_smp\",\n 'cores_per_task': self._config[\"num_processors\"],\n 'user': user_account,\n 'work_dir': cluster_files_dir}\n\n # Get string template for job scheduler\n if self._platform == \"PBS\":\n env_arr_idx = '$PBS_ARRAYID'\n batch_file_contents = cluster_templates.pbs_template\n confirm_str = '(?<=Your job-array )\\d+'\n exec_cmd = 'qsub'\n elif self._platform == \"SGE\":\n env_arr_idx = '$SGE_TASK_ID'\n batch_file_contents = cluster_templates.sge_template\n confirm_str = '(?<=Your job-array )\\d+'\n exec_cmd = 'qsub'\n elif self._platform == \"SLURM\":\n hrs_limit = 8 * num_bundles\n time_limit = '%d:00:00' % hrs_limit\n config_dict[\"time_limit\"] = time_limit\n env_arr_idx = '$SLURM_ARRAY_TASK_ID'\n batch_file_contents = cluster_templates.slurm_template\n confirm_str = '(?<=Submitted batch job )\\d+'\n exec_cmd = 'sbatch'\n\n config_dict['env_arr_idx'] = env_arr_idx\n config_dict['run_cmd'] = 'echo \"Running task: %s\"' % env_arr_idx\n\n # Populate string from config dict values\n batch_file_contents = batch_file_contents % config_dict\n\n run_str = \"qap_measures_pipeline.py --bundle_idx %s --log_dir %s %s \"\\\n \"%s\" % (env_arr_idx, self._run_log_dir,\n self._config[\"subject_list\"],\n self._config[\"pipeline_config_yaml\"])\n\n batch_file_contents = \"\\n\".join([batch_file_contents, run_str])\n\n batch_filepath = os.path.join(cluster_files_dir, 'cpac_submit_%s.%s'\n % (timestamp, self._platform))\n\n with open(batch_filepath, 'w') as f:\n f.write(batch_file_contents)\n\n print \"Batch file written to %s..\" % batch_filepath\n\n # Get output response from job submission\n out = commands.getoutput('%s %s' % (exec_cmd, batch_filepath))\n\n # Check for successful qsub submission\n if re.search(confirm_str, out) == None:\n err_msg = 'Error submitting QAP pipeline run to %s queue' \\\n % self._platform\n raise Exception(err_msg)\n\n print \"Batch job submitted to %s queue.\" % self._platform\n\n # Get pid and send to pid file\n pid = re.search(confirm_str, out).group(0)\n pid_file = os.path.join(cluster_files_dir, 'pid.txt')\n with open(pid_file, 'w') as f:\n f.write(pid)",
"def run_jobs(num_runs):\n\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"submit_run.*.sh\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'submit_run.*.sh' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE)# list SGE submit files\n out = p.stdout.read()\n \n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n\n if len(fnames) > 0: del fnames[-1]\n\n # determine whether 'qsub' command is available\n if (is_valid_command('qsub')): # run the commands jobs using qsub\n for fname in fnames:\n p = subprocess.Popen(\"qsub %s\" % fname, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out\n print \"Jobs submitted.\"\n else: # run the commands sequentially without using qsub\n print \"Error: 'qsub' is an invalid command.\"\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"run.*.py\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'run.*.py' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list SGE submit files\n out = p.stdout.read()\n\n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n if len(fnames) > 0: del fnames[-1]\n\n for fname in fnames:\n for i in range(num_runs):\n if verbose:\n print \"Executing command: python %s %d\" % (fname, i)\n p = subprocess.Popen(\"python %s %d\" % (fname, i), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out",
"def run(self):\n\n logging.info(\"Pool scheduler started\")\n\n # do forever\n while True:\n\n try :\n # get job information about new jobs\n JobStatus.addNewJobs()\n\n # apply policy\n groups = self.applyPolicy()\n\n # any job to check?\n if len(groups) == 0:\n\n # no, wait for jobs to arrive\n logging.info( \"No work to do, \" + \\\n \"scheduler goes to sleep for \" + \\\n str(self.delay) + \" seconds\")\n sleep(self.delay)\n continue\n\n # new threads to start?\n if len(groups) >= self.threadsWorking:\n\n # yes, start threads\n for grp in groups:\n\n # but only for new groups\n if grp not in self.groupsUnderProcessing:\n\n # insert group ID into queue\n # to trigger thread start\n self.groupsUnderProcessing.add(grp)\n self.pool.enqueue(grp, grp)\n\n # wait for a thread to finish\n (group, result) = self.pool.dequeue()\n logging.info(\"Thread processing group \" + str(group) + \\\n \" has finished\")\n\n # decrement threads counter\n self.threadsWorking = self.threadsWorking - 1\n\n # remove its ID from groups\n self.groupsUnderProcessing.remove(group)\n\n # remove all finished jobs from this group\n JobStatus.removeFinishedJobs(group)\n\n except Exception, ex :\n import traceback\n logging.error( 'Error in PoolScheduler : [%s]' % str(ex) )\n logging.error( \"Traceback: %s\" % traceback.format_exc() )\n logging.error( \"PoolScheduler goes to sleep for \" + \\\n str(self.delay) + \" seconds\" )\n sleep(self.delay)",
"def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]",
"def test_priority_task():\n task_id = uuid.uuid4().hex\n\n high_priority_task.apply_async(queue=\"high_priority\")\n normal_task.apply_async(queue=\"default\")\n\n task1 = high_priority_task.apply_async(\n args=[\"high task 1\"], queue=\"default\", task_id=task_id\n )\n time.sleep(1)\n high_priority_task.apply_async(\n args=[\"high task 2\"], queue=\"default\", task_id=task_id\n )",
"def main(order_count):\n for id in range(MAX_ORDERS):\n while active_count() > MAX_QUEUE:\n print(\"..All permitted threads running: waiting\")\n sleep(LOOP_TIMEOUT)\n print(\"..Finished waiting\")\n o = Thread(target=order_gen, kwargs={\"id\": id})\n o.start()",
"def _get_njobs_in_queue(self, username):",
"def check_dispatch_one_job(backend):\r\n queue = list()\r\n\r\n def producer():\r\n for i in range(6):\r\n queue.append('Produced %i' % i)\r\n yield i\r\n\r\n Parallel(n_jobs=1, backend=backend)(\r\n delayed(consumer)(queue, x) for x in producer())\r\n nose.tools.assert_equal(queue,\r\n ['Produced 0', 'Consumed 0',\r\n 'Produced 1', 'Consumed 1',\r\n 'Produced 2', 'Consumed 2',\r\n 'Produced 3', 'Consumed 3',\r\n 'Produced 4', 'Consumed 4',\r\n 'Produced 5', 'Consumed 5']\r\n )\r\n nose.tools.assert_equal(len(queue), 12)",
"def execute_in_parallel(lambda_list, args, timeout_seconds = None, max_worker = 8):\n\tall_processes = []\n\tfor i, l in enumerate(lambda_list):\n\t\tp = Process(target=l, args = (args[i], ))\n\t\tall_processes.append(p)\n\t\tp.start()\n\n\tfor p in all_processes:\n\t\tp.join()",
"def lantern_jobs():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not sending Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Sending Lantern jobs\".format(x=dates.now())\n LanternApi.make_new_jobs()",
"def _launch_threads():\n from . import workqueue as lib\n from ctypes import CFUNCTYPE, c_int\n\n launch_threads = CFUNCTYPE(None, c_int)(lib.launch_threads)\n launch_threads(NUM_CPU)",
"def try_submit_jobs(output, verbose):\n cluster, promoted = Cluster.deserialize(\n output,\n try_promote_to_submitter=True,\n deserialize_jobs=True,\n )\n if not promoted:\n print(\"Another node is already the submitter.\")\n sys.exit(0)\n\n # Only create the logger if we get promoted.\n filename = os.path.join(output, \"submit_jobs.log\")\n event_filename = os.path.join(output, \"submit_jobs_events.log\")\n setup_event_logging(event_filename, mode=\"a\")\n level = logging.DEBUG if verbose else logging.INFO\n logger = setup_logging(__name__, filename, file_level=level, console_level=level, mode=\"a\")\n logger.info(get_cli_string())\n\n if cluster.is_complete():\n cluster.demote_from_submitter()\n logger.info(\"All jobs are already finished.\")\n sys.exit(0)\n\n ret = 1\n try:\n mgr = JobSubmitter.load(output)\n status = mgr.submit_jobs(cluster)\n if status == Status.IN_PROGRESS:\n check_cmd = f\"jade show-status -o {output}\"\n print(f\"Jobs are in progress. Run '{check_cmd}' for updates.\")\n ret = 0\n else:\n ret = status.value\n except Exception:\n logger.exception(\"Failed to try-submit-jobs\")\n raise\n finally:\n cluster.demote_from_submitter()\n\n sys.exit(ret)",
"def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):\n\tif cores == None:\n\t\tcores = multiprocessing.cpu_count()\n\tdef wrapper(f):\n\t\tdef execute(*multiargs):\n\t\t\tresults = []\n\t\t\tlen(list(zip(*multiargs)))\n\t\t\tN = len(multiargs[0])\n\t\t\tif info:\n\t\t\t\tprint(\"running %i jobs on %i cores\" % (N, cores))\n\t\t\ttaskQueue = queue.Queue(len(multiargs[0]))\n\t\t\t#for timenr in range(times):\n\t\t\t#\ttaskQueue.put(timenr)\n\t\t\tfor tasknr, _args in enumerate(zip(*multiargs)):\n\t\t\t\ttaskQueue.put((tasknr, list(_args)))\n\t\t\t#for timenr in range(times):\n\t\t\t#\tresult = f(*args, **kwargs)\n\t\t\t#\tresults.append(result)\n\t\t\texecutions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]\n\t\t\tif info:\n\t\t\t\tinfoobj = infoclass(len(multiargs[0]), executions)\n\t\t\t\tinfoobj.start()\n\t\t\tfor i, execution in enumerate(executions):\n\t\t\t\texecution.setName(\"T-%d\" % i)\n\t\t\t\texecution.start()\n\t\t\t#if 1:\n\t\t\t#\twatchdog = Watchdog(executions)\n\t\t\t#\twatchdog.start()\n\t\t\terror = False\n\t\t\tfor execution in executions:\n\t\t\t\tlog(\"joining:\",execution.getName())\n\t\t\t\ttry:\n\t\t\t\t\texecution.join()\n\t\t\t\texcept BaseException:\n\t\t\t\t\terror = True\n\t\t\t\tresults.extend(execution.results)\n\t\t\t\tif execution.error:\n\t\t\t\t\terror = True \n\t\t\tif info:\n\t\t\t\tinfoobj.join()\n\t\t\tif error:\n\t\t\t\tprint(\"error\", file=sys.stderr)\n\t\t\t\tresults = None\n\t\t\t\traise Exception(\"error in one or more of the executors\")\n\t\t\telse:\n\t\t\t\tresults.sort(cmp=lambda a, b: cmp(a[0], b[0]))\n\t\t\t\tresults = [k[1] for k in results]\n\t\t\t\t#print \"bla\", results\n\t\t\t\tif flatten:\n\t\t\t\t\tflatresults = []\n\t\t\t\t\tfor result in results:\n\t\t\t\t\t\tflatresults.extend(result)\n\t\t\t\t\tresults = flatresults\n\t\t\treturn results\n\t\treturn execute\n\treturn wrapper",
"def parallel_work(jobs, nr_of_threads):\n work_queue = Queue()\n result_queue = Queue()\n result = {}\n\n for job in jobs:\n work_queue.put(job)\n\n if nr_of_threads > len(jobs):\n nr_of_threads = len(jobs)\n\n for i in range(nr_of_threads):\n worker = Process(target=check_plugin, args=(work_queue,result_queue))\n worker.start()\n\n while len(result.keys()) < len(jobs):\n data = result_queue.get()\n\n if \" | \" in data[1]:\n (status, output) = data[1].split(\" | \")\n else:\n status = \"UNKNOWN\"\n output = data[1]\n\n result[data[0]] = {\"status\": status, \"output\": output}\n #print \"Host \" + data[0] + \" \" + status\n\n return result",
"def do_jobs(self, job_list, wait_time=2, max_iters=100):\n for task_name in job_list:\n self.do_job(\n task_name,\n wait_time=wait_time,\n max_iters=max_iters\n )",
"def submit(slurm_folder, nord=False):\r\n for files in slurm_folder:\r\n if not nord:\r\n call([\"sbatch\", \"{}\".format(files)])\r\n else:\r\n os.system(\"bsub < {}\".format(files))",
"def invoke_all_and_wait(self):\n list_promise = []\n for thread in self.__list_thread:\n thread.start()\n list_promise.append(thread)\n for process in list_promise: process.join()",
"def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Popen(cmd_array[int(sys.argv[1])-1], shell=True, stdout=subprocess.PIPE)\\n\"\n run += \"out = p.stdout.read()\"\n# run += \"print cmd_array[int(sys.argv[1])]\"\n\n script_name = \"test\"\n\n if verbose:\n print \"Writing array script: \" + \"run.\" + script_name + \".py\"\n f = open(os.path.join(fpath, \"run.\" + script_name + \".py\"), 'w')\n f.write(\"%s\\n\" % run)\n\n f = open(os.path.join(fpath, \"submit_run.\" + script_name + \".sh\"), 'w')\n submit_run = \"#!/bin/csh\\n\"\n submit_run += \"#$ -N %s\\n\" % (\"job_%d\" % num_runs)\n submit_run += \"#$ -t 1:%d\\n\" % (num_runs)\n submit_run += \"#$ -M %[email protected]\\n\\n\" % (netid)\n# submit_run += \"#$ -q short\"\n# submit_run += \"#$ -r y\"\n submit_run += \"python run.%s.py ${SGE_TASK_ID}\" % (script_name)\n\n if verbose:\n print \"Writing submit shell script: \" + \"submit_run.\" + script_name + \".sh\"\n f.write(\"%s\\n\" % submit_run)"
]
| [
"0.6602641",
"0.64124155",
"0.6274278",
"0.5718874",
"0.571672",
"0.56453025",
"0.55025905",
"0.5481149",
"0.54473895",
"0.54327667",
"0.54327667",
"0.5420412",
"0.5390543",
"0.5334475",
"0.53291273",
"0.5319377",
"0.53153",
"0.5302234",
"0.5265775",
"0.5192927",
"0.51382375",
"0.51356053",
"0.5128623",
"0.51233107",
"0.5114498",
"0.5109877",
"0.51082695",
"0.5092454",
"0.5077687",
"0.5065579"
]
| 0.6622799 | 0 |
Simple approach to retrieving execute events. Open the job event log, iterate over the events in order and add all execute events to a list. | def equal_priority_execute_events(submit_equal_priority_jobs):
jel = htcondor.JobEventLog("scheduler_priority-equal.log")
execute_events = []
for event in jel.events(0):
if event.type == htcondor.JobEventType.EXECUTE:
execute_events.append(event)
return execute_events | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateEvents(self, jobs):\n return []",
"def extract_execute_reports(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.EXECUTE:\n try:\n module = evt.execute.module_name.decode('utf-8')\n rep = evt.execute.execution_report.decode('utf-8')\n if len(rep) > 0:\n result += [(module, rep)]\n except UnicodeDecodeError:\n pass\n return result",
"def unequal_priority_execute_events(submit_unequal_priority_jobs):\n return submit_unequal_priority_jobs.event_log.filter(\n lambda event: event.type is htcondor.JobEventType.EXECUTE\n )",
"def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)",
"def get_event_list(self):\n pass",
"async def events(self) -> Iterable[Event]:",
"def get_events_list(self, opts, args):\n\n\t\timport events\n\n\t\tself.setup_listener_gettext()\n\n\t\t# we need to merge, because some events have only\n\t\t# handlers, and others have only callbacks.\n\t\tevents_names = set(events.events_handlers.keys()\n\t\t\t\t\t\t\t+ events.events_callbacks.keys())\n\t\tmax_name_len = max(len(x) for x in events_names)\n\n\t\tif opts.verbose >= verbose.INFO:\n\t\t\tremote_output(_(u'{0} distinct event(s), {1} handler(s) '\n\t\t\t\t\tu'and {2} callback(s)').format(len(events_names),\n\t\t\t\t\tsum(len(x) for x in events.events_handlers.itervalues()),\n\t\t\t\t\tsum(len(x) for x in events.events_callbacks.itervalues())\n\t\t\t\t\t) + u'\\n')\n\t\t\tfor event_name in events_names:\n\t\t\t\thandlers = events.events_handlers.get(event_name, ())\n\t\t\t\tcallbacks = events.events_callbacks.get(event_name, ())\n\n\t\t\t\tremote_output(_(u'Event: {0}\\n\\tHandlers:{1}{2}\\n'\n\t\t\t\t\t\tu'\\tCallbacks:{3}{4}\\n').format(\n\t\t\t\t\tstylize(ST_NAME, event_name),\n\t\t\t\t\tu'\\n\\t\\t' if len(handlers) else u'',\n\t\t\t\t\tu'\\n\\t\\t'.join(_(u'{0} in module {1}').format(\n\t\t\t\t\t\tstylize(ST_NAME, h.__name__),\n\t\t\t\t\t\tstylize(ST_COMMENT, h.__module__)) for h\n\t\t\t\t\t\t\tin handlers),\n\t\t\t\t\tu'\\n\\t\\t' if len(callbacks) else u'',\n\t\t\t\t\tu'\\n\\t\\t'.join(_(u'{0} in module {1}').format(\n\t\t\t\t\t\tstylize(ST_NAME, c.__name__),\n\t\t\t\t\t\tstylize(ST_COMMENT, c.__module__)) for c\n\t\t\t\t\t\t\tin callbacks),\n\t\t\t\t))\n\t\telse:\n\t\t\tfor event_name in events_names:\n\t\t\t\tremote_output(_(u'{0}: {1} handler(s), {2} callback(s).\\n').format(\n\t\t\t\t\t\t\tstylize(ST_NAME, event_name.rjust(max_name_len)),\n\t\t\t\t\t\t\tlen(events.events_handlers.get(event_name, ())),\n\t\t\t\t\t\t\tlen(events.events_callbacks.get(event_name, ())),\n\t\t\t\t\t\t))",
"def generateEvents(self, jobs):\n events = []\n for job in jobs:\n if job.status == BBJobStatus.WaitInput:\n evt = self.generateSubmittedEvents(job)\n events.append(evt)\n elif job.status == BBJobStatus.Outputing:\n evt = self.generateFinishOutput(job)\n events.append(evt)\n else:\n logging.warn('\\t Unable to generate events for %s' % str(job))\n\n for evt in events:\n logging.debug('\\t Generate %s' % str(evt))\n return events",
"def execute(self):\n results = []\n \n for callback in self.callback:\n results.append(callback(*self.args))\n \n return results",
"def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break",
"def get_event_list(self):\n event_list = []\n eventLocation = -1\n for element in self:\n eventLocation += 1\n if element.isChunk():\n event = element.embedded_event()\n if event:\n event_list.append((eventLocation, event.eid))\n return event_list",
"def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events",
"def get_all(self, q=None):\r\n q = q or []\r\n event_filter = _event_query_to_event_filter(q)\r\n return [Event(message_id=event.message_id,\r\n event_type=event.event_type,\r\n generated=event.generated,\r\n traits=event.traits)\r\n for event in\r\n pecan.request.storage_conn.get_events(event_filter)]",
"def get_executed_jobs(self):\n with self.__lock:\n return list(self.__executed_jobs)",
"def build_events(self) -> list:\n raise NotImplementedError()",
"def generateEvents(self, jobs):\n events = []\n for job in jobs:\n if job.status == BBJobStatus.WaitInput:\n evt = self.generateSubmittedEvents(job)\n events.append(evt)\n elif job.status == BBJobStatus.Inputing:\n evt = self.generateFinishInput(job)\n events.append(evt)\n elif job.status == BBJobStatus.Running:\n evt1 = self.generateReleaseInBB(job)\n events.append(evt1)\n evt2 = self.generateFinishRun(job)\n events.append(evt2)\n elif job.status == BBJobStatus.Outputing:\n evt1 = self.generateReleaseRunCN(job)\n events.append(evt1)\n evt2 = self.generateFinishOutput(job)\n events.append(evt2)\n else:\n logging.warn('\\t Unable to generate events for %s' % str(job))\n\n for evt in events:\n logging.debug('\\t Generate %s' % str(evt))\n return events",
"def executions(self, context: Any) -> list[Any]:\n pass",
"def list(self, jobguid=\"\", executionparams=None):",
"def event_list(self):\n return self._event_list",
"def list(self, jobguid=\"\", executionparams=dict()):",
"def fetch_events(self):\n while 1:\n try:\n self.events_local.append(self._q.get(False))\n except queue.Empty:\n break",
"def list_event(self, start_time=0, end_time=sys.maxsize):\n entities = []\n entities_j = self._get('events?startTime={}&endTime={}'.format(start_time, end_time))\n if entities_j:\n for entity_j in entities_j:\n entity = Event(entity_j['id'], entity_j['eventType'], entity_j['ctime'],\n entity_j['dataSource'], entity_j.get('dataId', None),\n entity_j['category'], entity_j['text'], entity_j.get('tags', None),\n entity_j.get('tenantId', None), entity_j.get('context', None))\n entities.append(entity)\n return entities",
"def get_events(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n\r\n query = \"\"\"\r\n SELECT DISTINCT E.eid, E1.ename, E1.description,\r\n E.category, E1.start_date, E1.end_date, E1.num_cap,\r\n E1.num_attending, L.lname, L.address_1, E.tag, L.lat, L.lon\r\n FROM {}.EventTags AS E, {}.UserTags AS U, {}.Events as E1, {}.Locations as L\r\n WHERE U.username='{}' AND\r\n E.tag = U.tag AND\r\n E1.eid = E.eid AND\r\n E1.lid = L.lid AND\r\n E1.start_date >= {}\r\n ORDER by E1.start_date\r\n \"\"\".format(\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n self.user.username,\r\n str(datetime.date.today())\r\n )\r\n\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n\r\n return [i for i in data]",
"def get_log_events(self):\n client = boto3.client('logs')\n\n # Set the timestamp we will start from next poll.\n # and limit current poll to.\n end_timestamp = self.get_timestamp()\n\n # Request LogEvents.\n\n # Check if LogStream was added while agent was running. If so, get LogEvents from LogStream creation time.\n # So we don't miss any.\n if self.added:\n self.last_event_check_timestamp = self.creation_time\n self.added = False\n\n log_events_response = client.get_log_events(\n startTime=self.last_event_check_timestamp,\n endTime=end_timestamp,\n logGroupName=self.log_group.name,\n logStreamName=self.name,\n limit=self.event_limit,\n startFromHead=True\n )\n\n # Create LogEvents list from response.\n events = [LogEvent(log_event_dict) for log_event_dict in log_events_response['events']]\n\n # Token used if another request is required to get all LogEvents.\n next_forward_token = log_events_response['nextForwardToken']\n\n event_count = len(events)\n\n # While we get LogEvents equal to event_limit, continue requesting.\n while event_count >= self.event_limit:\n log_events_response = client.get_log_events(\n startTime=self.last_event_check_timestamp,\n endTime=end_timestamp,\n logGroupName=self.log_group.name,\n logStreamName=self.name,\n limit=self.event_limit,\n nextToken=next_forward_token,\n startFromHead=True\n )\n\n # Set length and next forward token for while loop.\n event_count = len(log_events_response['events'])\n next_forward_token = log_events_response['nextForwardToken']\n\n # Add LogEvents to our event list.\n events += [LogEvent(log_event_dict) for log_event_dict in log_events_response['events']]\n\n # Set starting point for next poll\n self.last_event_check_timestamp = end_timestamp\n\n print('Found ' + str(len(events)) + ' LogEvents for LogStream ' + self.log_group.name + ' ' + self.name)\n return events",
"def swis_event_list_command(client: Client, args: Dict) -> CommandResults:\n\n query = validate_and_prepare_query_for_event_list(args)\n response = client.http_request(method=\"GET\", url_suffix=URL_SUFFIX[\"QUERY\"],\n params={\"query\": query})\n outputs = createContext(response.get(\"results\", []), removeNull=True)\n readable_outputs = convert_events_outputs_to_hr(outputs)\n return CommandResults(\n outputs_prefix=\"SolarWinds.Event\",\n outputs=outputs,\n readable_output=readable_outputs,\n raw_response=response,\n outputs_key_field=\"EventID\"\n )",
"def process(self) -> List['Event']:\n raise NotImplementedError",
"def eventList(self):\n return self._eventList",
"async def _e_list(self, ctx):\n event_list = self.database.get_guild_events(ctx.guild.id)\n if len(event_list) == 0:\n await ctx.send(\"This server has no custom events\")\n return\n out = \"```\\nServer Events:\\n\"\n for event in event_list:\n out += f\"{event.name} - {event.period}: {event.text}\\n\"\n out += \"```\"\n await ctx.send(out)",
"def list_jobs(exproot, **kwargs):\n for jobname, args, results in load_all(exproot):\n print jobname, args, results",
"def get_event_generation_commands():\n command_list = []\n\n for event_name in EVENT_NAMES_LIST:\n command_list.append(_create_event_statement(event_name))\n\n return command_list"
]
| [
"0.64143986",
"0.62423444",
"0.6215877",
"0.5903895",
"0.59004354",
"0.58682936",
"0.5826749",
"0.5744079",
"0.5743282",
"0.5716527",
"0.5693448",
"0.5693024",
"0.5689468",
"0.5675863",
"0.5658283",
"0.56387347",
"0.5615506",
"0.56138927",
"0.55930614",
"0.55862117",
"0.55836093",
"0.5580006",
"0.55605924",
"0.55561125",
"0.5554891",
"0.5520207",
"0.55065787",
"0.54944706",
"0.548973",
"0.5483333"
]
| 0.7135419 | 0 |
We expect equal priority jobs to run in the order they were submitted, which means they should run in jobidorder. Simple approach, just iterate over the list of events in a forloop and make sure proc ids appear in ascending order. | def test_equal_priority_jobs_run_in_submit_order(
self, equal_priority_execute_events
):
for i in range(1, NUM_JOBS):
assert (
JobID.from_job_event(equal_priority_execute_events[i]).proc
> JobID.from_job_event(equal_priority_execute_events[i - 1]).proc
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unequal_priority_jobs_run_in_priority_order(\n self, unequal_priority_execute_events\n ):\n assert (\n sorted(\n unequal_priority_execute_events,\n key=lambda event: JobID.from_job_event(event),\n reverse=True,\n )\n == unequal_priority_execute_events\n )",
"def equal_priority_execute_events(submit_equal_priority_jobs):\n jel = htcondor.JobEventLog(\"scheduler_priority-equal.log\")\n execute_events = []\n for event in jel.events(0):\n if event.type == htcondor.JobEventType.EXECUTE:\n execute_events.append(event)\n return execute_events",
"def __sort_by_priority(self, input_list):\n print(\"========================Start of __sort_by_priority() Method *\")\n # temp1 = input_list.sort(key=operator.attrgetter(\"submission_time\"))\n # temp1 = temp1.sort(key=operator.attrgetter(str(\"__req_start\")))\n\n # sending one item from list at a time to be enqueued ensuring sorted-nes\n for j in range(len(input_list)):\n self.current_queue.enqueue(input_list[j])\n # print(\"Enqueued the FF item from Input list :\" + input_list[j].showFlightInfo())\n # print(\"*De-queued the FF item from Queue :\" + self.current_queue.dequeue(j).showFlightInfo())\n \"\"\"\n if input_list[i].get_reqStart <= self.current_queue.first.get_reqStart:\n if input_list[i].get_submissionTime <= self.current_queue.first.get_submissionTime:\n temp = self.current_queue.first\n self.current_queue.first = input_list[i]\n self.current_queue.first.next = temp\"\"\"\n print(\"========================End of __sort_by_priority() Method *\")",
"def unequal_priority_execute_events(submit_unequal_priority_jobs):\n return submit_unequal_priority_jobs.event_log.filter(\n lambda event: event.type is htcondor.JobEventType.EXECUTE\n )",
"def processEvents(self):\n self.framelist = sorted(self.framelist, key=lambda event: event.timestamp, reverse=True)\n self.framequeue = sorted(self.framequeue, key=lambda event: event.timestamp, reverse=True)\n self.packetqueue = sorted(self.packetqueue, key=lambda event: event.timestamp, reverse=True)\n \n print len(self.framequeue)\n print len(self.packetqueue)\n \n while len(self.framequeue) > 0 or len(self.packetqueue) > 0:\n self.getNextEvent().processEvent(self, self.decisionAlg)",
"def _GetEpiOrder(self):\n self.epi_series.sort()\n for series in self.epi_series:\n self.GetEpiAcqTimes(series)\n self.AssignEpiNames()",
"def test_priority_add_many_ok(self):\n test_name = sys._getframe().f_code.co_name\n for i in xrange(11):\n self._execute('priority add p%s' % i)\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def sort_events(self):\n RPR.MIDI_Sort(self.id)",
"def scan_scripts ( self ):\n prios = collections.defaultdict ( list )\n for event, hook in self.iter_scripts():\n if hook.event is None:\n hook.event = event\n if hook.has_priority():\n prios [event].append ( hook.priority )\n # -- end for\n\n for event, priolist in prios.items():\n self._get_prio_gen ( event ).add_generated ( priolist )",
"def sort_by_npartfile(self, npfiles, events_by_npart):\n if len(npfiles) > 1:\n print \"Warning: Several .npart files detected. Using the first in list:\"\n print npfiles[0]\n with open(npfiles[0], 'r') as npf:\n for line in npf:\n data = line.split()\n try:\n jobid = data[0]\n npart = int(data[1])\n if npart >= self._npmin and npart <= self._npmax:\n events_by_npart.append(self.outputname(jobid))\n except ValueError:\n continue",
"def test_runner_uses_priority(monkeypatch, runner, example_tasks):\n order = []\n\n def _run(self, input_files):\n print(f'running task {self.name} with priority {self.priority}')\n order.append(self.priority)\n return [f'{self.name}_test.nc']\n\n monkeypatch.setattr(BaseTask, '_run', _run)\n monkeypatch.setattr(esmvalcore._task, 'Pool', ThreadPool)\n\n runner(example_tasks)\n print(order)\n assert len(order) == 12\n assert order == sorted(order)",
"def execute_order(self, event):\n raise NotImplementedError(\"Should implement execute_order()\")",
"def process_tuples_sorted(self):\n return sorted(self.process_tuples, key=lambda process_tuple: process_tuple[0].name)",
"def init_priority(self):\n arr = []\n priority_dict = dict()\n\n for p in self.processes:\n priority_dict[p.id] = int(p.period)\n\n for key, value in sorted(priority_dict.items(), key=lambda value: value[1]):\n arr.append(key)\n\n return arr",
"def test_priority_order_up_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('priority order critical up')\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END",
"def sortInputByEndTimeAndDay(jobList):\n jobList=sorted(jobList, key= attrgetter('day','endTime'))\n return jobList",
"def sort_by_bfile(self, bfiles, events_by_b):\n if len(bfiles) > 1:\n print \"Warning: Several .b files detected. Using the first in list:\"\n print bfiles[0]\n with open(bfiles[0], 'r') as bf:\n for line in bf:\n data = line.split()\n try:\n jobid = data[0]\n impb = float(data[1])\n if impb >= self._bmin and impb <= self._bmax:\n events_by_b.append(self.outputname(jobid))\n except ValueError:\n continue",
"def test99EventSplit(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n eventsPerJob = 99\n jobGroups = jobFactory(events_per_job=eventsPerJob,\n performance=self.performanceParams)\n\n assert len(jobGroups) == 1, \\\n \"ERROR: JobFactory didn't return one JobGroup.\"\n\n assert len(jobGroups[0].jobs) == 2, \\\n \"ERROR: JobFactory created %s jobs not two\" % len(jobGroups[0].jobs)\n\n firstEvents = []\n for job in jobGroups[0].jobs:\n assert job.getFiles(type=\"lfn\") == [\"/some/file/name\"], \\\n \"ERROR: Job contains unknown files.\"\n self.assertTrue(job[\"mask\"].getMaxEvents() in [eventsPerJob, 1],\n \"ERROR: Job's max events is incorrect.\")\n\n assert job[\"mask\"][\"FirstEvent\"] in [0, eventsPerJob], \\\n \"ERROR: Job's first event is incorrect.\"\n\n assert job[\"mask\"][\"FirstEvent\"] not in firstEvents, \\\n \"ERROR: Job's first event is repeated.\"\n firstEvents.append(job[\"mask\"][\"FirstEvent\"])\n\n return",
"def _place_orders_onto_queue(self, order_list: List[OrderEvent]):\n for order_event in order_list:\n self._events.add_event(order_event)",
"def operation_on_events(path_to_data_dir, list_of_test_id, operation, num_of_proc=1):\n\ttest_id = [\"test%s\"%i for i in list_of_test_id]\n\tpool = Pool(processes = num_of_proc)\n\tpath_to_final_selected_events = path_to_data_dir + \"final_selected_events.json\"\n\tif os.path.exists(path_to_final_selected_events):\n\t\tfinal_selected_events = json.load(open(path_to_final_selected_events,\"r\"))\n\t\tfinal_interested_events = []\n\t\tfor event in final_selected_events:\n\t\t\tif event[0] in test_id:\n\t\t\t\tfinal_interested_events.append(event)\n\telse:\n\t\tfinal_interested_events = []\n\t\tfor test in list_of_test_id:\n\t\t\tpath_to_curr_test = data_dir_to_test_dir(path_to_data_dir, test)\n\t\t\tpath_to_test_result = path_to_curr_test +\"/results\"\n\t\t\tpath_to_event_list = path_to_test_result + \"/selected_events.json\"\n\t\t\tif os.path.exists(path_to_event_list):\n\t\t\t\tevent_list = json.load(open(path_to_event_list,\"r\"))\n\t\t\t\tfor value in event_list.values():\n\t\t\t\t\tevent = [\"test%s\"%test,[value[0],value[1],value[2]]]\n\t\t\t\t\tfinal_interested_events.append(event)\n\t\t\telse:\n\t\t\t\tprint \"skip current test:\", \"test%s\"%test, \"there is no selected events\"\n\t\n\t# if function operation has no return value, it will return a list of Nones\n\tresult_list = pool.map(operation,final_interested_events)\n\treturn result_list",
"def sort(self):\n self.tasks = sorted(self.tasks, key=lambda k: k.priority, reverse=True)",
"def todo(self):\n # sort events with eventid using datetime string\n pass",
"def find_prod_ids(dist_list, no_prod):\n list_of_prod = []\n for worker in dist_list:\n\n if len(list_of_prod) < no_prod:\n list_of_prod.append(worker)\n\n else:\n\n for i in list_of_prod:\n\n if i[1] > worker[1]:\n list_of_prod.remove(i)\n list_of_prod.append(worker)\n worker = i\n\n else:\n\n continue\n PIDs = []\n for i in list_of_prod:\n PIDs.append(i[0])\n PIDs.sort()\n return (PIDs)",
"def test_priority_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def process_order(self, order_event : event.EventOrder) :\n pass",
"def test_priority_task():\n task_id = uuid.uuid4().hex\n\n high_priority_task.apply_async(queue=\"high_priority\")\n normal_task.apply_async(queue=\"default\")\n\n task1 = high_priority_task.apply_async(\n args=[\"high task 1\"], queue=\"default\", task_id=task_id\n )\n time.sleep(1)\n high_priority_task.apply_async(\n args=[\"high task 2\"], queue=\"default\", task_id=task_id\n )",
"def loadEvents(self, eventlist):\n for event in eventlist:\n event.ID = self.loadEvent(event.name, event.pid)",
"def priority(name):\n try:\n manager = Actions()\n priority = Priority[name]\n ordered_tasks = manager.order_by_priority(priority)\n click.echo(\"Ordered by priority:\" + click.style(name, bg='red', fg='white'))\n click.echo()\n console_utils.format_print_ordered(ordered_tasks)\n except IndexError as e:\n click.echo(\"IndexError: \"+e)\n except Exception as e:\n click.echo(e)",
"def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)"
]
| [
"0.74742603",
"0.7011296",
"0.6303329",
"0.6300982",
"0.59918326",
"0.5927309",
"0.5818697",
"0.58018124",
"0.5766598",
"0.56478244",
"0.56222457",
"0.5613897",
"0.55461574",
"0.54555684",
"0.54496646",
"0.54344493",
"0.53668815",
"0.5358708",
"0.53512025",
"0.5326702",
"0.53191566",
"0.53165257",
"0.53105205",
"0.53028387",
"0.52920187",
"0.5291146",
"0.52855974",
"0.5283049",
"0.52785885",
"0.5273802"
]
| 0.802852 | 0 |
We expect unequal priority jobs to run in the order of priority, which, for the setup above, means they should run in reverse-job-id-order. Josh's Pythonic approach using the sorted() function. | def test_unequal_priority_jobs_run_in_priority_order(
self, unequal_priority_execute_events
):
assert (
sorted(
unequal_priority_execute_events,
key=lambda event: JobID.from_job_event(event),
reverse=True,
)
== unequal_priority_execute_events
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort(self):\n self.tasks = sorted(self.tasks, key=lambda k: k.priority, reverse=True)",
"def test_equal_priority_jobs_run_in_submit_order(\n self, equal_priority_execute_events\n ):\n for i in range(1, NUM_JOBS):\n assert (\n JobID.from_job_event(equal_priority_execute_events[i]).proc\n > JobID.from_job_event(equal_priority_execute_events[i - 1]).proc\n )",
"def __sort_by_priority(self, input_list):\n print(\"========================Start of __sort_by_priority() Method *\")\n # temp1 = input_list.sort(key=operator.attrgetter(\"submission_time\"))\n # temp1 = temp1.sort(key=operator.attrgetter(str(\"__req_start\")))\n\n # sending one item from list at a time to be enqueued ensuring sorted-nes\n for j in range(len(input_list)):\n self.current_queue.enqueue(input_list[j])\n # print(\"Enqueued the FF item from Input list :\" + input_list[j].showFlightInfo())\n # print(\"*De-queued the FF item from Queue :\" + self.current_queue.dequeue(j).showFlightInfo())\n \"\"\"\n if input_list[i].get_reqStart <= self.current_queue.first.get_reqStart:\n if input_list[i].get_submissionTime <= self.current_queue.first.get_submissionTime:\n temp = self.current_queue.first\n self.current_queue.first = input_list[i]\n self.current_queue.first.next = temp\"\"\"\n print(\"========================End of __sort_by_priority() Method *\")",
"def test_runner_uses_priority(monkeypatch, runner, example_tasks):\n order = []\n\n def _run(self, input_files):\n print(f'running task {self.name} with priority {self.priority}')\n order.append(self.priority)\n return [f'{self.name}_test.nc']\n\n monkeypatch.setattr(BaseTask, '_run', _run)\n monkeypatch.setattr(esmvalcore._task, 'Pool', ThreadPool)\n\n runner(example_tasks)\n print(order)\n assert len(order) == 12\n assert order == sorted(order)",
"def get_job_list():\n\tdirlist = os.listdir(\".\")\n\tjoblist = [x for x in dirlist if \"job.sh\" in x and x in job_dict]\n\ttmplist = [x for x in dirlist if \"job.sh\" in x and x not in job_dict]\n\tdef compare_function(s: str):\n\t\treturn job_dict[s].order\n\tjoblist.sort(key=compare_function)\n\tjoblist.extend(tmplist)\n\treturn joblist",
"def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)",
"def sort_priority(todo_list):\n for index in range(len(todo_list)):\n smallest_index = find_index_of_smallest_after(todo_list, index)\n swap_values_at_indexes(todo_list, index, smallest_index)",
"def sort_in_jobs(self, in_jobs):\n if len(in_jobs) is 0:\n return in_jobs\n jobs_ordered_yx = sorted(\n in_jobs,\n key=lambda job: (job['y'] + job['height'], job['x'] + job['width']),\n reverse=True)\n\n return jobs_ordered_yx",
"def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher priority\n # The `end` should be further multiplied by\n # `_stats.active_shares` / `_stats.cpu_used`.\n # However, that gives the same value for all the jobs\n # and we only need the ordering, not the absolute value.\n return (end, camp.created, user.ID, camp.ID,\n job.submit, job.ID)",
"def priority(name):\n try:\n manager = Actions()\n priority = Priority[name]\n ordered_tasks = manager.order_by_priority(priority)\n click.echo(\"Ordered by priority:\" + click.style(name, bg='red', fg='white'))\n click.echo()\n console_utils.format_print_ordered(ordered_tasks)\n except IndexError as e:\n click.echo(\"IndexError: \"+e)\n except Exception as e:\n click.echo(e)",
"def sort_priors(self):\n return",
"def equal_priority_execute_events(submit_equal_priority_jobs):\n jel = htcondor.JobEventLog(\"scheduler_priority-equal.log\")\n execute_events = []\n for event in jel.events(0):\n if event.type == htcondor.JobEventType.EXECUTE:\n execute_events.append(event)\n return execute_events",
"def part_build_order(self):\n priority = []\n for geo_item in self.build_order:\n if geo_item in self.parts:\n priority += [geo_item]\n return priority",
"def apply_sorting(tasks, *conditions):\n return tasks.sort(conditions)",
"def sort_by(processes, key, reverse=False):\n return sorted(processes, key=lambda process: process[key], reverse=reverse)",
"def job_sorter(self, job):\n key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL)\n return key(job)",
"def test_priority_task():\n task_id = uuid.uuid4().hex\n\n high_priority_task.apply_async(queue=\"high_priority\")\n normal_task.apply_async(queue=\"default\")\n\n task1 = high_priority_task.apply_async(\n args=[\"high task 1\"], queue=\"default\", task_id=task_id\n )\n time.sleep(1)\n high_priority_task.apply_async(\n args=[\"high task 2\"], queue=\"default\", task_id=task_id\n )",
"def reorder( self ):\n self.sorted.sort(self.compareFunction)",
"def toposort(prereqs_d):\r\n\r\n# all1 = set(prereqs_d.keys())\r\n# all2 = set()\r\n# for x, y in prereqs_d.items():\r\n# all2.update(y)\r\n# print all1.difference(all2)\r\n\r\n seq = []\r\n done = set()\r\n postreqs_d = {}\r\n for x, prereqs in prereqs_d.items():\r\n for prereq in prereqs:\r\n postreqs_d.setdefault(prereq, set()).add(x)\r\n next = set([k for k in prereqs_d if not prereqs_d[k]])\r\n while next:\r\n bases = next\r\n next = set()\r\n for x in bases:\r\n done.add(x)\r\n seq.append(x)\r\n for x in bases:\r\n for postreq in postreqs_d.get(x, []):\r\n if not prereqs_d[postreq].difference(done):\r\n next.add(postreq)\r\n if len(prereqs_d) != len(seq):\r\n raise Exception(\"Cannot sort topologically: there might be cycles, \"\r\n \"prereqs_d does not have a key for each element or \"\r\n \"some orderings contain invalid elements.\")\r\n return seq",
"def sortInputByEndTimeAndDay(jobList):\n jobList=sorted(jobList, key= attrgetter('day','endTime'))\n return jobList",
"def getPriorityList(self):",
"def reversesort(self):\n ...",
"def test_priority_order_up_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('priority order critical up')\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)",
"def unequal_priority_execute_events(submit_unequal_priority_jobs):\n return submit_unequal_priority_jobs.event_log.filter(\n lambda event: event.type is htcondor.JobEventType.EXECUTE\n )",
"def process_tuples_sorted(self):\n return sorted(self.process_tuples, key=lambda process_tuple: process_tuple[0].name)",
"def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)",
"def init_priority(self):\n arr = []\n priority_dict = dict()\n\n for p in self.processes:\n priority_dict[p.id] = int(p.period)\n\n for key, value in sorted(priority_dict.items(), key=lambda value: value[1]):\n arr.append(key)\n\n return arr",
"def test_priority_order_down_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('priority order blocker down')\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def schedule_low_prio(self, job):\r\n assert(self.c.is_live())\r\n self.low_prio_jobs.put(job)"
]
| [
"0.7085913",
"0.6868466",
"0.6545962",
"0.6380382",
"0.6318971",
"0.6241187",
"0.62095785",
"0.6170921",
"0.6158344",
"0.60485375",
"0.60366434",
"0.6019585",
"0.5960103",
"0.5953529",
"0.59490013",
"0.59163475",
"0.59153193",
"0.5906476",
"0.58942324",
"0.58792806",
"0.58281004",
"0.5826781",
"0.581776",
"0.5794783",
"0.57699734",
"0.57542795",
"0.5744968",
"0.57293534",
"0.57256883",
"0.5718951"
]
| 0.7440909 | 0 |
Gets the app_version_id of this AppInstallVersion. | def app_version_id(self):
return self._app_version_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def version_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version_id\")",
"def get_version(self):\n data = self._get('app_version')\n return data['version']",
"def app_id(self):\n return self._app_id",
"def app_installation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_installation_id\")",
"def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")",
"def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])",
"def app_id(self) -> str:\n return self._app_id",
"def version_code(self):\n return self.proto.details.appDetails.versionCode",
"def version_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_id\")",
"def model_version_id(self) -> Optional[str]:\n return pulumi.get(self, \"model_version_id\")",
"def app_version(self) -> str:\n return pulumi.get(self, \"app_version\")",
"def appid(self):\n return self._item[\"appid\"]",
"def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")",
"def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")",
"def get_application_version(self):\n return self.connector.request('GET', '/app/version')",
"def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")",
"def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')",
"def version(self):\n return self.proto.details.appDetails.versionString",
"def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")",
"def VersionID(self, default=None):\n return self.data.get('version_id', default)",
"def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")",
"def getApplicationVersion(self) -> unicode:\n ...",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def version(self):\r\n return self.version_guid",
"def get_id(self, app_name):\n _id = []\n apps = [app for app in self.applications.response if app.name == app_name]\n if len(apps) > 0:\n return apps[0].id",
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")",
"def server_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_id\")"
]
| [
"0.73907787",
"0.73413086",
"0.7314593",
"0.728456",
"0.7267986",
"0.72030914",
"0.71844697",
"0.7167349",
"0.71540904",
"0.7137058",
"0.71128243",
"0.7077404",
"0.70418143",
"0.692636",
"0.69067955",
"0.6888486",
"0.6833583",
"0.6801371",
"0.674614",
"0.6674054",
"0.66592836",
"0.662662",
"0.65377825",
"0.65377825",
"0.65377825",
"0.6531648",
"0.65024596",
"0.64661235",
"0.64268905",
"0.6391483"
]
| 0.8765454 | 0 |
Sets the app_version_id of this AppInstallVersion. | def app_version_id(self, app_version_id):
self._app_version_id = app_version_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_version_id(self):\n return self._app_version_id",
"def app_id(self, app_id):\n\n self._app_id = app_id",
"def app_id(self, app_id):\n self._app_id = app_id",
"def setAppID(self, appid):\n\t\tself.config.APP_ID = appid",
"def __set_version_id(self):\r\n VersionId = self.client.factory.create('VersionId')\r\n VersionId.ServiceId = self._version_info['service_id']\r\n VersionId.Major = self._version_info['major']\r\n VersionId.Intermediate = self._version_info['intermediate']\r\n VersionId.Minor = self._version_info['minor']\r\n self.logger.debug(VersionId)\r\n self.VersionId = VersionId",
"def application_id(self, application_id):\n\n self._application_id = application_id",
"def crf_version_id(self, crf_version_id):\n\n self._crf_version_id = crf_version_id",
"def crf_version_id(self, crf_version_id):\n\n self._crf_version_id = crf_version_id",
"def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data",
"def target_version_id(self, target_version_id):\n\n self._target_version_id = target_version_id",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def setAppInfo(self, name, version, devMode=False):\n self._appName = name\n self._appVersion = version\n self._devMode = devMode",
"def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n except AttributeError :\n if len(version) == 2 : # 2-tuple\n self.version = version\n else :\n try :\n self.version = [int(p) for p in str(float(version)).split(\".\")]\n except :\n self.version = [int(p) for p in IPP_VERSION.split(\".\")]",
"def version(self, version: int):\n\n self._version = version",
"def set_application(self, app_id):\n if self._use_channel_info:\n self._channel = \"\"\n self._channel_name = app_id\n self._is_forced_val = True\n self._forced_count = 0",
"def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])",
"def version(self, app, args):\n app.put('\\n\\n%s\\n' % _version_str)",
"def version_code(self, version_code):\n\n self._version_code = version_code",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version):\n self._version = version",
"def version(self, version):\n self._version = version",
"async def slashtagset_appid(self, ctx: commands.Context, id: int = None):\n app_id = id or self.bot.user.id\n await self.config.application_id.set(app_id)\n self.application_id = app_id\n await ctx.send(f\"Application ID set to `{id}`.\")",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version"
]
| [
"0.6932447",
"0.6734854",
"0.6730428",
"0.65842754",
"0.6257902",
"0.6219609",
"0.6186703",
"0.6186703",
"0.5993434",
"0.5832336",
"0.5735562",
"0.57286",
"0.5630821",
"0.56066996",
"0.55946344",
"0.55857974",
"0.5575322",
"0.553692",
"0.5533564",
"0.5533564",
"0.5527333",
"0.5527333",
"0.5525864",
"0.55171835",
"0.55171835",
"0.55171835",
"0.55171835",
"0.55171835",
"0.55171835",
"0.55171835"
]
| 0.8455981 | 0 |
Gets the version_code of this AppInstallVersion. | def version_code(self):
return self._version_code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def version_code(self):\n return self.proto.details.appDetails.versionCode",
"def version_code(self) -> str:\n return pulumi.get(self, \"version_code\")",
"def version_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_code\")",
"def code_version(self):\n\n if hasattr(self, \"model_dict\"):\n return self.model_dict[\"code_version\"].item()\n\n raise AttributeError(\"No model is loaded.\")",
"def app_version_id(self):\n return self._app_version_id",
"def get_version(self):\n data = self._get('app_version')\n return data['version']",
"def version(self):\n return self.proto.details.appDetails.versionString",
"def getApplicationVersion(self) -> unicode:\n ...",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def code(self):\n return self._getCode()",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def Code(self):\n if self.force_auto_sync:\n self.get('Code')\n return self._Code",
"def app_version(self) -> str:\n return pulumi.get(self, \"app_version\")",
"def code(self) -> int:\n return self._code",
"def code(self) -> int:\n return self._code",
"def code(self):\n\n return self._code or self._default_code",
"def version_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version_id\")",
"def get_application_version(self):\n return self.connector.request('GET', '/app/version')",
"def code(self) -> str:\n return self._code",
"def code(self) -> str:\n return self._code",
"def get_version(self) -> str:\n return versioning.get_version()",
"def version(self):\n\n return self.manifest[\"version\"]",
"def code(self) -> str:\n return pulumi.get(self, \"code\")",
"def code(self) -> str:\n return pulumi.get(self, \"code\")"
]
| [
"0.85095084",
"0.81484383",
"0.77529484",
"0.74329233",
"0.6954358",
"0.68858784",
"0.6868219",
"0.68313825",
"0.6803722",
"0.6803722",
"0.6803722",
"0.6803722",
"0.67462796",
"0.6620829",
"0.6620829",
"0.6620829",
"0.6620829",
"0.6598778",
"0.6593169",
"0.65157115",
"0.65157115",
"0.6432776",
"0.64053226",
"0.63833576",
"0.6363461",
"0.6363461",
"0.6348473",
"0.6340292",
"0.6300514",
"0.6300514"
]
| 0.8613739 | 0 |
Sets the version_code of this AppInstallVersion. | def version_code(self, version_code):
self._version_code = version_code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_pkg_verif_code(self, doc, code):\n self.assert_package_exists()\n if not self.package_verif_set:\n self.package_verif_set = True\n match = self.VERIF_CODE_REGEX.match(code)\n if match:\n doc.package.verif_code = match.group(self.VERIF_CODE_CODE_GRP)\n if match.group(self.VERIF_CODE_EXC_FILES_GRP) is not None:\n doc.package.verif_exc_files = match.group(self.VERIF_CODE_EXC_FILES_GRP).split(',')\n return True\n else:\n raise SPDXValueError('Package::VerificationCode')\n else:\n raise CardinalityError('Package::VerificationCode')",
"def code(self, code: int):\n\n self._code = code",
"def update_code(self, new_code):\n\n self.code = new_code",
"def update_code(self, new_code):\n\n self.code = new_code",
"def update_code(self, new_code):\n self.code = new_code\n\n # Fill in the rest",
"def version_code(self):\n return self._version_code",
"def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code",
"def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code",
"def code(self, code):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n\n self._code = code",
"def code(self, code):\n\n self._code = code",
"def update_code(self, new_code):\n\n new_code = self.code",
"def version_code(self):\n return self.proto.details.appDetails.versionCode",
"def code(self, code: str):\n\n self._code = code",
"def set_code(self, code):\n self.set_payload(code)",
"def code(self, value: str) -> None:\n self._code = value",
"def version_code(self) -> str:\n return pulumi.get(self, \"version_code\")",
"def code(self, code: \"str\"):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n self._attrs[\"code\"] = code",
"def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data",
"def version_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_code\")",
"def set_vat_code(self, vat_code):\n self.set_value_into_input_field(self.vat_code_textbox_locator, vat_code)",
"def code_version(version):\r\n if not isinstance(version, tuple):\r\n raise TypeError('version must be tuple', version)\r\n\r\n def deco(f):\r\n f.code_version = version\r\n return f\r\n return deco",
"def code_version(version):\r\n if not isinstance(version, tuple):\r\n raise TypeError('version must be tuple', version)\r\n\r\n def deco(f):\r\n f.code_version = version\r\n return f\r\n return deco",
"def app_version_id(self, app_version_id):\n\n self._app_version_id = app_version_id",
"def update_code(self):\n print ('update code')\n self.query_dict.update({'code':code.value})",
"def setProgramVersion(self, *args):\n return _libsbml.SBMLWriter_setProgramVersion(self, *args)",
"def result_code(self, result_code):\n\n self._result_code = result_code",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1",
"def version(self, version):\n self._version = version",
"def version(self, version):\n self._version = version"
]
| [
"0.6724488",
"0.6579566",
"0.6568599",
"0.6568599",
"0.64402825",
"0.63804847",
"0.6379982",
"0.6379982",
"0.6379158",
"0.6373244",
"0.6364363",
"0.63410264",
"0.6326873",
"0.62825185",
"0.6040121",
"0.6036478",
"0.6006555",
"0.59921587",
"0.5891643",
"0.5890285",
"0.5869265",
"0.5869265",
"0.58469003",
"0.5791271",
"0.5789864",
"0.57829916",
"0.5642479",
"0.5566345",
"0.5565027",
"0.5565027"
]
| 0.84236884 | 0 |
Sets the build_number of this AppInstallVersion. | def build_number(self, build_number):
self._build_number = build_number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_api_version(self, build_api_version):\n\n self._build_api_version = build_api_version",
"def hxdp_build_version(self, hxdp_build_version):\n\n self._hxdp_build_version = hxdp_build_version",
"def builder_version(self, builder_version):\n\n self._builder_version = builder_version",
"def build_info(self, build_info):\n if build_info is None:\n raise ValueError(\"Invalid value for `build_info`, must not be `None`\")\n\n self._build_info = build_info",
"def set_build(self, build):\n self.build = build\n if not self.record:\n return\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':build}})",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def build_number(self):\n return self.get_data(\"build_number\")",
"def version(self, version: int):\n\n self._version = version",
"def version(self, version):\n self._version = version",
"def version(self, version):\n self._version = version",
"def version(self, version):\n \n self._version = version",
"def set_number(self, number):\n self.number = number",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"def build(self, build):\n\n self._build = build",
"def build_number(self):\n return self._build_number",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version"
]
| [
"0.68347824",
"0.6542784",
"0.63321733",
"0.61955804",
"0.6025173",
"0.57602996",
"0.5734798",
"0.56555605",
"0.56424505",
"0.56424505",
"0.56350744",
"0.561939",
"0.55984825",
"0.55984825",
"0.55925083",
"0.55788165",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151",
"0.5573151"
]
| 0.779044 | 0 |
Gets the hash_string of this AppInstallVersion. | def hash_string(self):
return self._hash_string | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hash(self) -> str:\n return self.__hash.hexdigest()",
"def hash(self) -> str:\n return pulumi.get(self, \"hash\")",
"def hash(self):\n return self._hash",
"def hex(self) -> str:\n return self.__hash.hexdigest()",
"def hash(self):\n return hashlib.sha256(self.to_json().encode()).hexdigest()",
"def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return sha256(data_str.encode('utf-8')).hexdigest()",
"def get_hash(self):\r\n return",
"def hexdigest(self):\n return self.hashObject.hexdigest()",
"def hash(self):\n return hashlib.sha1(str(self._dict))",
"def calculate_hash(self):\n return sha256_2_string(str(self.header()))",
"def calculate_hash(self):\n return sha256_2_string(str(self.header()))",
"def version_hash():\n git_hash = current_git_hash()\n return \"%s-%s\" % (__VERSION__, git_hash)",
"def get_hash(self):\n return hashlib.sha1(\"\".join(asset.get_hash() for asset in self._assets).encode(\"utf-8\")).hexdigest()",
"def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])",
"def hash(self):\n return os.popen('git rev-parse HEAD').read().strip()",
"def _digest(self):\n return self._hasher.hexdigest()",
"def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()",
"def hash(self):\n return Hash.dhash(bytes(self))",
"def get_hash(self, data: Optional[bytes] = None) -> str:\n return self.__handle__.hash",
"def getHash(self):\n if self.chash:\n return self.chash\n else:\n self.setHash()\n return self.chash",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def digest(self):\n return self._hash",
"def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())",
"def hash(self):\n hash_properties = self.artifacts\n return hashlib.md5(','.join(hash_properties).encode()).hexdigest()",
"def hash(self) -> bytes:",
"def get_hash(self):\n return self.__hash",
"def __str__(self) -> str:\n return self.hash",
"def hash(self) -> str:\r\n ...",
"def version(self):\n return self.proto.details.appDetails.versionString",
"def hash(self):\n return self.__hash__()"
]
| [
"0.7808271",
"0.75277394",
"0.7086749",
"0.7050276",
"0.6995493",
"0.69806534",
"0.6962277",
"0.69613755",
"0.69579947",
"0.68826467",
"0.68826467",
"0.6855191",
"0.68526024",
"0.68239284",
"0.6819731",
"0.6798659",
"0.6748978",
"0.67443216",
"0.672221",
"0.6707711",
"0.66975564",
"0.6686193",
"0.6672437",
"0.6670678",
"0.6660412",
"0.6651897",
"0.66505325",
"0.6619043",
"0.65740407",
"0.65705013"
]
| 0.75950706 | 1 |
Sets the hash_string of this AppInstallVersion. | def hash_string(self, hash_string):
self._hash_string = hash_string | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hash(self, hash):\n\n self._hash = hash",
"def hash(self, hash):\n\n self._hash = hash",
"def hash_id(self, hash_id):\n\n self._hash_id = hash_id",
"def extended_hash(self, extended_hash):\n\n self._extended_hash = extended_hash",
"def distro_hash(self, distro_hash):\n\n self._distro_hash = distro_hash",
"def hash_string(self):\n return self._hash_string",
"def hash(self, string):\n return self.__scaffydb.hash(string)",
"def hash_key(self, hash_key):\n\n self._hash_key = hash_key",
"def repo_hash(self, repo_hash):\n\n self._repo_hash = repo_hash",
"def set_hash(self, hash_name, data):\n self.hashes[hash_name] = data",
"def session_hash(self, session_hash):\n \n self._session_hash = session_hash",
"def set_string(string, hash):\r\n # Pad out string with 3 nulls\r\n string = string + ([NULL_STRING] * 3)\r\n\r\n # If the string now longer than STRING_LENGTH, cut it shorter\r\n if len(string) > STRING_LENGTH:\r\n string = string[:STRING_LENGTH]\r\n\r\n # If the string is still too short, pad out with the hash\r\n if len(string) < STRING_LENGTH:\r\n string = string + hash[len(string) : STRING_LENGTH]\r\n\r\n return string",
"def __init__(self, hash_str, salt):\n self.hash = hash_str\n self.salt = salt",
"def setHash(self):\n chash_string = str(self.code) + str(\"CAMPAIGN\") + str(self.created_at)\n chash = hashlib.sha1()\n chash.update(chash_string)\n \n self.chash = chash.hexdigest()\n self.save()",
"def hash_password(self):\n self.__password = self.str_to_hash(self.__password)",
"def fill_version_hash(apps, schema_editor):\n SushiCredentials = apps.get_model('sushi', 'SushiCredentials')\n for credentials in SushiCredentials.objects.all():\n credentials.version_hash = get_hash(credentials)\n credentials.save()",
"def hash(self, string):\n h = md5()\n h.update(string)\n return h.digest()",
"def set_primary_object_hash(self, hsh):\n self.hash = hsh",
"def set_password_hash(self, password):\n salt = bcrypt.gensalt()\n self.password_hash = bcrypt.hashpw(password.encode(), salt)",
"def get_hash(self) -> str:\n return self.__hash.hexdigest()",
"def src_hash(self, src_hash):\n\n self._src_hash = src_hash",
"def hash(self) -> str:\n return pulumi.get(self, \"hash\")",
"def progression_hash(self, progression_hash):\n\n self._progression_hash = progression_hash",
"def commit_hash(self, commit_hash):\n\n self._commit_hash = commit_hash",
"def _set_version(self) -> None:\n proc = subprocess.Popen([self.hmy_binary_path, \"version\"], env=self.environment,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n if not err:\n raise RuntimeError(f\"Could not get version.\\n\"\n f\"\\tGot exit code {proc.returncode}. Expected non-empty error message.\")\n self.version = err.decode().strip()",
"def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]",
"def set_hashes(self, url, hashes):",
"def setPassword(self, unhashPass):\n\t\tself.passHash = generate_password_hash(unhashPass)",
"def hash(self) -> str:\r\n ...",
"def hash_password(self, password):\n self.password_hash = generate_password_hash(password)"
]
| [
"0.6830537",
"0.6830537",
"0.61648875",
"0.5996138",
"0.59633523",
"0.58904546",
"0.57719904",
"0.57527906",
"0.5745238",
"0.57317674",
"0.57154334",
"0.56932443",
"0.56654274",
"0.56648725",
"0.56562525",
"0.565159",
"0.56116676",
"0.5534566",
"0.5495819",
"0.54931486",
"0.54924667",
"0.5455827",
"0.5431326",
"0.5387109",
"0.53850245",
"0.5357506",
"0.5322628",
"0.5301977",
"0.52978814",
"0.52978337"
]
| 0.7424539 | 0 |
Instantiates LaserStabilizer script object for stabilizing the laser | def __init__(self, config='toptica_laser_stabilization', ao_client=None, ai_client=None):
# Instantiate GUI
self.gui = Window(
gui_template='power_stabilizer',
host=get_ip()
)
self.widgets = get_gui_widgets(self.gui, p_setpoint=1, p_outputVoltage=1, label_power=1, config=1,
graph=2, legend=2, hardware_control=1, clear=1, start=1, stop=1)
self._ao_client = ao_client
self._ai_client = ai_client
self.widgets['config'].setText(config)
self._load_config_file(config)
#Now initialize control/output voltage to 0, and set up label
self._curr_output_voltage = self.widgets['p_outputVoltage'].value() #Stores current output voltage that is outputted by the AO
self.widgets['p_outputVoltage'].valueChanged.connect(self._set_output_voltage_from_label)
# self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)
#Update the input power label
self._last_power_text_update = 0
self._update_power_label()
self._initialize_graphs()
#Setting up hardware power control options
self.widgets['hardware_control'].enabled = False
self._under_hardware_control = False
self.widgets['hardware_control'].toggled.connect(self._update_hardware_control_from_gui)
#Finally hookup the final buttons
self.widgets['start'].clicked.connect(lambda: self.start(update_st_gui=True))
self.widgets['stop'].clicked.connect(self.stop)
self.widgets['clear'].clicked.connect(lambda: self._clear_data_plots(display_pts=5000))
#Initially the program starts in the "unlocked" phase
self._is_stabilizing = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()",
"def __init__(self, ai_game):\n super().__init__()\n self.screen = ai_game.screen\n self.settings = ai_game.settings\n self.color = self.settings.laser_color\n\n # Create a laser rect at (0, 0) then move to correct position.\n self.rect = pygame.Rect(0, 0, self.settings.laser_width, \n self.settings.laser_height)\n self.rect.midright = ai_game.friend.rect.midright\n\n # Store the laser's position as a decimal value.\n self.x = float(self.rect.x)",
"def __init__(self):\n #screen Settings\n self.screen_width = 1024\n self.screen_height = 768\n self.bg_color = (32, 32, 32)\n\n #rocket settings\n self.rocket_speed = 1\n\n #laser Settings\n self.laser_speed = 1.0\n self.laser_width = 3\n self.laser_height = 15\n self.laser_color = (0, 255, 255)\n self.lasers_allowed = 3",
"def __init__(self, namespace, waypoints, update_frequency=10.):\n self.current_mode = ''\n self.previous_mode = ''\n self.namespace = namespace['name']\n self.battery_rate_mean = 1.0\n self.battery_rate_std = 1.0\n self.battery_voltages = list()\n self.low_battery = False\n self.set_battery(namespace['max_fuel'], namespace['min_fuel'],\n namespace['fuel_rate'])\n self._cancel_action = False\n self.external_intervened = False\n self.state = State()\n self.home = HomePosition()\n self.global_pose = NavSatFix()\n self.local_pose = PoseStamped()\n self.heading = 0.0\n self.waypoints = [None]\n self._current_wp = -1\n self._radius = 1e-5\n self._rate = rospy.Rate(update_frequency)\n # UAV specific variables\n self.irr_name = namespace['irr_attached']\n self._irr_ready_to_be_picked = 0\n self.landed = True\n self.home_moved = False\n self.rel_alt = 0.\n self.rangefinder = -1.\n self._alt_radius = 0.5\n self._rel_alt = [0. for _ in range(5)]\n self._rangefinder = [-1. for _ in range(5)]\n self._min_range = -1.\n self.deploy_msg = Int64()\n self.target_heading = [0.0 for _ in range(5)]\n self.target_global_pose = [NavSatFix() for _ in range(5)]\n self.target_imu = [Imu() for _ in range(5)]\n # LHM Controller\n if namespace['retrieve_system'] and (\n \"simulation\" not in rospy.get_param(\"~scenario_type\",\n \"simulation\")):\n self.lhm = LHMExecutor(self.namespace, update_frequency)\n if \"simulation\" in rospy.get_param(\"~scenario_type\", \"simulation\"):\n self.blade_pose = [[0., 0., 0.] for _ in range(10)]\n rospy.Subscriber('/%s/edge_wt_detector' % self.namespace,\n PoseArray,\n self._wt_cb,\n queue_size=1)\n # simulated winch system\n self._lhm_pub = rospy.Publisher('/attach_plugin/attach',\n String,\n queue_size=3)\n\n # Subscribers\n rospy.Subscriber('/%s/mavros/state' % self.namespace,\n State,\n self._state_cb,\n queue_size=1)\n # halt until mavros is connected to a uav\n rospy.loginfo('Waiting for a connection to %s ...' % self.namespace)\n while (not self.state.connected):\n self._rate.sleep()\n rospy.Subscriber('/%s/mavros/home_position/home' % self.namespace,\n HomePosition,\n self._home_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/global_position/rel_alt' % self.namespace,\n Float64,\n self._relative_alt_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/modified_battery' % self.namespace,\n BatteryState,\n self._battery_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/global_position/raw/unfix' %\n self.namespace,\n NavSatFix,\n self._global_pose_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/local_position/pose' % self.namespace,\n PoseStamped,\n self._local_pose_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/rangefinder/rangefinder' % self.namespace,\n Range,\n self._rangefinder_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/global_position/compass_hdg' %\n self.namespace,\n Float64,\n self._heading_cb,\n queue_size=1)\n\n # Service proxies\n rospy.loginfo('Waiting for /%s/mavros/cmd/set_home ...' %\n self.namespace)\n rospy.wait_for_service('/%s/mavros/cmd/set_home' % self.namespace)\n self._set_home_proxy = rospy.ServiceProxy(\n '/%s/mavros/cmd/set_home' % self.namespace, CommandHome)\n\n rospy.loginfo('Waiting for /%s/mavros/set_mode ...' % self.namespace)\n rospy.wait_for_service('/%s/mavros/set_mode' % self.namespace)\n self._set_mode_proxy = rospy.ServiceProxy(\n '/%s/mavros/set_mode' % self.namespace, SetMode)\n rospy.loginfo('Waiting for /%s/mavros/cmd/takeoff ...' 
%\n self.namespace)\n rospy.wait_for_service('/%s/mavros/cmd/takeoff' % self.namespace)\n self._takeoff_proxy = rospy.ServiceProxy(\n '/%s/mavros/cmd/takeoff' % self.namespace, CommandTOL)\n # Publisher\n self._setpoint_pub = rospy.Publisher('/%s/mavros/setpoint_raw/global' %\n self.namespace,\n GlobalPositionTarget,\n queue_size=1)\n\n rospy.sleep(3)\n self.set_current_location_as_home()\n # Adding initial waypoints' configuration\n while self.waypoints[0] is None:\n self._rate.sleep()\n self.waypoints = self.waypoints + waypoints\n # Auto call functions\n rospy.Timer(self._rate.sleep_dur, self.update_wp_position)\n rospy.Timer(self._rate.sleep_dur, self.update_landing_status)\n rospy.Timer(10 * self._rate.sleep_dur, self.intervene_observer)\n # change mode just to fill self.current_mode and self.previous_mode\n self.guided_mode()",
"def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0",
"def __init__(self, portname, devicetype):\n if devicetype == DEVICE_DEBUG_BOARD:\n self.device = debugbox.DebugBox(portname)\n elif devicetype == DEVICE_SFP_BREAKOUT:\n self.device = sfpbreakout.SFP(portname)\n elif devicetype == DEVICE_DUMMY_BOARD:\n self.device = dummybox.DummyBox(portname)\n else:\n raise IOError(\"Invalid Device Type\")\n \n # Set up laser sections\n self.mirror1 = Testrig.LaserSection(self, 814, 10, 90, 728, 844, 860, 864)\n self.laser_phase = Testrig.LaserSection(self, 810, 11, 20, 726, 846, 858, 866)\n self.gain = Testrig.LaserSection(self, 818, 12, 180, 730, 848, None, None)\n# self.gain1.validator.setTop(150)\n self.mirror2 = Testrig.LaserSection(self, 826, 14, 90, 734, 850, 862, 864)\n# self.front.validator.setTop(60)\n self.soa1 = Testrig.LaserSection(self, 822, 15, 300, 736, 852, None, None)\n# self.soa1.validator.setTop(150)\n self.soa2 = Testrig.LaserSection(self, 830, 16, 100, 738, 854, None, None, self.to_display_current_section2, self.to_internal_current_section2)\n self.phase1 = Testrig.LaserSection(self, 834, 13, 100, 732, 856, None, None, self.to_display_current_section2, self.to_internal_current_section2)\n \n self.voltage_max = 2.5 # This is now a constant\n \n self.full_rig = True",
"def __init__(self):\n Thread.__init__(self)\n self.daemon = True\n self.running = True\n\n self.rate = rospy.Rate(10)\n self.point_head_goal = SimpleActionClient('/head_controller/point_head_action', PointHeadAction)\n\n camera_info_msg = rospy.wait_for_message('/xtion/rgb/camera_info', CameraInfo)\n self.camera_intrinsics = array(camera_info_msg.K).reshape((3, 3))\n\n self.looker = PointHeadGoal()\n\n self.looker.target.header.frame_id = '/base_link'\n self.looker.pointing_frame = '/head_2_link'\n\n self.looker.pointing_axis.x = 1.0\n self.looker.pointing_axis.y = 0.0\n self.looker.pointing_axis.z = 0.0\n self.looker.max_velocity = 0.3\n\n look_point = PointStamped()\n look_point.header.frame_id = '/base_link'\n look_point.point.x = 40.0\n look_point.point.y = 0.0\n look_point.point.z = 0.0\n\n self.looker.target = look_point\n self.start()",
"def __init__(self):\n self.last_reward_pos = 0\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0\n self.stump_spacing = 4.0\n self.stump_height = 1.0\n self.my_init({'leg_length': 35, 'walker_type': 'default'})",
"def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = np.zeros(12)",
"def init():\r\n\t# add grabber tools based on proxy tools\r\n\tfor proxyWrapper in vizconnect.getToolsWithMode('Proxy'):\r\n\t\tgrabberTool = tools.grabber.HandGrabber(usingPhysics=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tusingSprings=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\tplacementMode=tools.placer.MODE_DROP_DOWN)\r\n\t\t\r\n\t\tname = 'grabber_tool_based_on_'+proxyWrapper.getName()\r\n\t\tgrabberWrapper = vizconnect.addTool(raw=grabberTool,\r\n\t\t\t\t\t\t\t\t\t\t\tname=name,\r\n\t\t\t\t\t\t\t\t\t\t\tmake='Virtual',\r\n\t\t\t\t\t\t\t\t\t\t\tmodel='Grabber')\r\n\t\t# parent the grabber wrapper to the proxy's parent\r\n\t\tgrabberWrapper.setParent(proxyWrapper)\r\n\t\t\r\n\t\tgrabberTool.setItems(grabbableItems)\r\n\t\r\n\tviz.callback(viz.getEventID('RESET_THE_LOFT_LAYOUT'), lambda e: resetMovedObjects())",
"def __init__(self, location, angle):\n pg.sprite.Sprite.__init__(self)\n self.original_laser = TURRET.subsurface((150,0,150,150))\n self.angle = -math.radians(angle-135)\n self.image = pg.transform.rotate(self.original_laser, angle)\n self.rect = self.image.get_rect(center=location)\n self.move = [self.rect.x, self.rect.y]\n self.speed_magnitude = 5\n self.speed = (self.speed_magnitude*math.cos(self.angle),\n self.speed_magnitude*math.sin(self.angle))\n self.done = False",
"def run_laser_predictor(self, laser_arr):",
"def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()",
"def setup_stellar_aberration(self,observer_velocity_xyz):\n self.v_for_stellar_aberr = sp.vscl(recip_clight,observer_velocity_xyz)",
"def __init__(self):\n\n # initialize window\n self.win = graphics.GraphWin(\"Lunar Lander Game\", 300, 500)\n \n # transform coordinates\n self.win.setCoords(0, -10, 300, 600)\n\n self.surface_polygon = self.create_surface()\n self.surface_polygon.draw(self.win)\n self.background()\n \n\n self.lander_polygon = None\n # Draws two different thrust buttons\n self.b1 = Button(graphics.Point(100, 560), 80, 20, 'Thrust')\n self.b2 = Button(graphics.Point(200, 560), 80, 20, 'No Thrust')\n self.b1.draw(self.win)\n self.b2.draw(self.win)\n \n # Draws text values for altitude, velocity, and fuel\n self.alt_num = graphics.Text(graphics.Point(50, 400), 'Altitude: ')\n self.vel_num = graphics.Text(graphics.Point(50, 450), 'Velocity: ')\n self.fuel_num = graphics.Text(graphics.Point(50, 500), 'Fuel: ')\n self.alt_num.draw(self.win)\n self.vel_num.draw(self.win)\n self.fuel_num.draw(self.win)",
"def __init__(self):\n\n # Set a node name - something relevant\n rospy.init_node('waypoint_updater')\n\n # Most recent pose\n self.pose = None\n\n # Map waypoint list \n self.waypoints = None\n\n # Map waypoint list xy only \n self.waypoints_2d = None\n\n # Map waypoint list xy only as KDTree\n self.waypoint_tree = None\n\n # Index at which to stop the vehicle\n # Negative one is a sentinel meaning no stop is required\n self.stopline_waypoint_idx = -1\n\n # Add subscriptions and handlers for relevant messages\n rospy.Subscriber('/base_waypoints', Lane, self.base_waypoints_cb)\n rospy.Subscriber('/current_pose', PoseStamped, self.current_pose_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_waypoint_cb)\n\n # Create publisher for final waypoints\n self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)\n\n # Start loop\n self.loop()",
"def __init__(self, mylaser, PL, port = 7765):\n\t\tsocket.setdefaulttimeout(2)\n\n\t\t#print \"init\"\n\t\tself.mylaser = mylaser\n\t\t#print \"DAC\", self.mylaser, \"Handler process, connecting to\", gstt.lasersIPS[mylaser] \n\t\tself.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.connstatus = self.conn.connect_ex((gstt.lasersIPS[mylaser], port))\n\t\t#print \"Connection status for\", self.mylaser,\":\", self.connstatus\n\t\t#print 'debug', debug, gstt.debug\n\t\t# ipconn state is -1 at startup (see gstt) and modified here\n\t\tr.set('/lack/'+str(self.mylaser), self.connstatus)\n\t\tgstt.lstt_ipconn[self.mylaser] = self.connstatus\t\t\n\n\t\tself.buf = \"\"\n\t\t# Upper case PL is the Point List number\n\t\tself.PL = PL\n\n\t\t# Lower case pl is the actual point list coordinates\n\t\tself.pl = ast.literal_eval(r.get('/pl/'+str(self.mylaser)))\n\t\t#if self.mylaser ==0:\n\t\tprint \"Init Laser\", self.mylaser\n\t\t#print \"pl :\", self.pl\n\t\t#print \"EDH/\"+str(self.mylaser),r.get('/EDH/'+str(self.mylaser))\n\t\tgstt.EDH[self.mylaser] = np.array(ast.literal_eval(r.get('/EDH/'+str(self.mylaser))))\n\t\thomographyp.newEDH(self.mylaser)\n\t\t\n\t\t'''\n\t\td =homographyp.apply(gstt.EDH[self.mylaser],np.array([(300,400)]))\n\t\tprint ''\n\t\tprint \"d\",d\n\t\tprint \"d0\",d[0]\n\t\t#print \"d1\",len(d[1])\n\t\tprint \" \"\n\t\t'''\n\n\t\tself.xyrgb = self.xyrgb_prev = (0,0,0,0,0)\n\t\tself.newstream = self.OnePoint()\n\n\t\tprint \"Connection status for\", self.mylaser,\":\", self.connstatus\n\t\tprint 'debug', debug\n\t\tif self.connstatus != 0 and debug > 0:\n\t\t\tprint \"\"\n\t\t\tprint \"ERROR connection with laser :\", str(mylaser),str(gstt.lasersIPS[mylaser])\n\t\t\tprint \"first 10 points in PL\",self.PL, self.GetPoints(10)\n\n\t\t# Reference points \n\t\t# Read the \"hello\" message\n\t\tfirst_status = self.readresp(\"?\")\n\t\tfirst_status.dump()\n\t\tposition = []",
"def __init__(self, target_velocity, dt, model_type, robot_type):\n super(StraightEnv, self).__init__(\n target_velocity=target_velocity,\n dt=dt,\n model_type=model_type,\n robot_type=robot_type\n )\n\n # Reward function parameters\n self._lambda1 = 0.25\n\n # State in frame of target straight line. See get_initial_state()\n # function for info.\n self._current_state = np.zeros(6)\n\n # Target line to follow. See get_initial_state() function for info.\n self._target_y = 0\n self._target_yaw = 0\n\n # Initialize nodes and set up ROS topics\n rospy.init_node('rl_planner')\n self.publisher = rospy.Publisher('commands/keyboard',\n ackermann_msgs.msg.AckermannDriveStamped, queue_size=1)\n rospy.Subscriber('ekf_localization/odom', nav_msgs.msg.Odometry,\n self._odometry_callback)\n self._sensor_stamp = rospy.Time.now()\n\n # Wait this number of timesteps before computing another action (this\n # is similar to setting a larger dt during training)\n self._num_states_needed = 3\n self._num_states_received = 0",
"def init_stage2(self, simul):\n\n for k, v in self.queue:\n info(\n \" # Initializer (stage 2) parsing \" + str(k) + \" object.\",\n verbosity.high,\n )\n\n if k == \"gle\":\n # read thermostat parameters from file\n if not (hasattr(simul.ensemble, \"thermostat\")):\n raise ValueError(\n \"Ensemble does not have a thermostat to initialize\"\n )\n if not (hasattr(simul.ensemble.thermostat, \"s\")):\n raise ValueError(\n \"There is nothing to initialize in non-GLE thermostats\"\n )\n ssimul = simul.ensemble.thermostat.s\n if v.mode == \"manual\":\n sinput = v.value.copy()\n if sinput.size() != ssimul.size():\n raise ValueError(\n \"Size mismatch in thermostat initialization data\"\n )\n sinput.shape = ssimul.shape\n elif v.mode == \"chk\":\n rmotion = init_chk(v.value)[2]\n if not hasattr(rmotion, \"thermostat\") or not hasattr(\n rmotion.thermostat, \"s\"\n ):\n raise ValueError(\n \"Checkpoint file does not contain usable thermostat data\"\n )\n sinput = rmotion.thermostat.s.copy()\n if sinput.shape != ssimul.shape:\n raise ValueError(\n \"Shape mismatch in thermostat initialization data\"\n )\n\n # if all the preliminary checks are good, we can initialize the s's\n ssimul[:] = sinput",
"def __init__(self):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.image.load('assets/' + 'singleLaser.png')\n\n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()\n self.rect.center = (settings.SCREEN_WIDTH / 2, settings.SCREEN_HEIGHT / 2)",
"def __init__(self, max_step=-1):\n self.environment = mls.rl.common.Environment()\n self.environment.game = mls.rl.common.Game(max_step=max_step)\n self.environment.current_state = self.environment.game.init_state(self.environment)",
"def __init__(self):\n #Screen settings\n self.screen_width=1200\n self.screen_height=800\n self.bg_color=(230,230,230)\n #ship settings\n self.ship_limit=1\n #bullet settings\n self.bullet_width=300\n self.bullet_height=15\n self.bullet_color=(60,60,60)\n self.bullets_allowed=3\n #Alien settings\n self.fleet_drop_speed = 20\n \n \n #how quickly the game speeds up\n self.speedup_scale=1.1\n #how quickly the point values increase\n self.score_scale=1.5\n \n self.initialize_dynamic_settings()",
"def __init__(self, sl):\n threading.Thread.__init__(self)\n self.alpha = 0.5\n self.running = True\n self.slam = sl\n self.image = None\n self.scale = 5.0\n self.centre = np.zeros(2)\n self.mouse = np.zeros(2)\n self.dimensions = np.array([1920, 1080])\n self.tracking = TrackingMode.FREE\n self.map_mode = MapMode.DIST",
"def __init__(self, LightFun):\n self.setParameters()\n self.Light = LightFun",
"def __init__(self):\r\n self.observation_space = spaces.Box(low=0, high=255, shape=(119, 214))\r\n self.state = np.zeros((119, 214), dtype=np.uint8) \r\n \r\n self.action_space = spaces.Discrete(4)\r\n\t\t\r\n self.goal = \t[221.0, -9.0] # global xy coordinates\r\n\r\n self.episodeN = 0\r\n self.stepN = 0 \r\n \r\n self.allLogs = { 'reward':[0] }\r\n self.allLogs['distance'] = [221]\r\n self.allLogs['action'] = [1]\r\n \r\n self._seed()\r\n self.stallCount = 0\r\n global airgym\r\n airgym = myAirSimMultirotorClient()",
"def __init__(self, distance_from_goal, drill_name=None, goalie_name=None):\n\n super().__init__()\n self.run_drill = True\n\n # Flag for the first ball\n self.first_ball = True\n\n # Create and connect to Bluetooth button\n self.threaded_bt_helper = threaded_bt_helper()\n self.threaded_bt_helper.bt_button_click.connect(self.bt_button_click)\n\n self.drill_name = drill_name\n self.goalie_name = goalie_name\n self.distance_from_goal = distance_from_goal\n\n if self.drill_name is not None:\n # Get drill information and save it\n self.drill_info = self.get_profile_info()\n\n # Acquire Rate of Fire (ROF) of the drill\n self.rof = int(self.drill_info['1'][2])\n\n # Initialize Trajectory Algorithm Helper\n self.trajectory_algo = trajectory_algorithm.TrajectoryAlgorithm(\n self.distance_from_goal)\n\n # Initialize all motors\n self.bfm = motor_ball_feed_vel.MotorBallFeed()\n self.bqm = motor_ball_queue_turn_once.MotorBallQueue()\n self.fmt = motor_flywheel_top.MotorFlywheelTop()\n self.fmb = motor_flywheel_bottom.MotorFlywheelBottom()\n self.pm = motor_pitch.MotorPitch()\n self.ym = motor_yaw.MotorYaw()\n\n # Stores previous shot location\n self.prev_shot_loc = \"CM\"",
"def __init__(self, st, curr, end):\n self._stages = {EStage.START: st,\n EStage.CURRENT: curr,\n EStage.END: end}",
"def __init__(self, rexarm, planner, kinect):\n self.rexarm = rexarm\n self.tp = planner\n self.kinect = kinect\n self.status_message = \"State: Idle\"\n self.current_state = \"idle\"\n self.next_state = \"idle\"\n self.waypoints = [\n [0.0, 0.0, 0.0, 0.0],\n [np.pi * 0.1, 0.0, np.pi / 2, 0.0],\n [np.pi * 0.25, np.pi / 2, -np.pi / 2, np.pi / 2],\n [np.pi * 0.4, np.pi / 2, -np.pi / 2, 0.0],\n [np.pi * 0.55, 0, 0, 0],\n [np.pi * 0.7, 0.0, np.pi / 2, 0.0],\n [np.pi * 0.85, np.pi / 2, -np.pi / 2, np.pi / 2],\n [np.pi, np.pi / 2, -np.pi / 2, 0.0],\n [0.0, np.pi / 2, np.pi / 2, 0.0],\n [np.pi / 2, -np.pi / 2, np.pi / 2, 0.0]]\n self.learned_joints = []",
"def __init__(self):\n self._pipe = []\n self._group = None\n stages = ['on', 'off', 'color', 'transition', 'flash', 'callback',\n 'repeat', 'brightness', 'wait', 'temperature', 'white',\n 'white_up', 'white_down', 'red_up', 'red_down',\n 'green_up', 'green_down', 'blue_up', 'blue_down',\n 'night_light', 'link', 'unlink']\n for name in stages:\n self._add_stage(name)",
"def __init__(\n self,\n front_left_vertex,\n front_right_vertex,\n back_left_vertex,\n back_right_vertex,\n strength,\n ):\n\n self.front_left_vertex = front_left_vertex\n self.front_right_vertex = front_right_vertex\n self.back_left_vertex = back_left_vertex\n self.back_right_vertex = back_right_vertex\n self.strength = strength\n\n # Initialize the line vortices that make up the ring vortex.\n self.front_leg = LineVortex(\n origin=self.front_right_vertex,\n termination=self.front_left_vertex,\n strength=self.strength,\n )\n self.left_leg = LineVortex(\n origin=self.front_left_vertex,\n termination=self.back_left_vertex,\n strength=self.strength,\n )\n self.back_leg = LineVortex(\n origin=self.back_left_vertex,\n termination=self.back_right_vertex,\n strength=self.strength,\n )\n self.right_leg = LineVortex(\n origin=self.back_right_vertex,\n termination=self.front_right_vertex,\n strength=self.strength,\n )\n\n # Initialize a variable to hold the centroid of the ring vortex.\n self.center = ps.geometry.centroid_of_quadrilateral(\n self.front_left_vertex,\n self.front_right_vertex,\n self.back_left_vertex,\n self.back_right_vertex,\n )"
]
| [
"0.630868",
"0.5851401",
"0.5740374",
"0.57391673",
"0.5730571",
"0.5719369",
"0.569621",
"0.56436825",
"0.561999",
"0.56145674",
"0.5545288",
"0.554523",
"0.54974353",
"0.54517597",
"0.5440188",
"0.5434769",
"0.54197156",
"0.5411297",
"0.5384036",
"0.5377103",
"0.5363766",
"0.53572184",
"0.53349614",
"0.5334445",
"0.53331834",
"0.53304684",
"0.5329678",
"0.5326695",
"0.53139436",
"0.53104997"
]
| 0.6353341 | 0 |
This method turns on power stabilization. Set update_st_gui to False to not update the setpoint from the GUI | def start(self, update_st_gui=True, display_pts = 5000):
if update_st_gui:
#First, update the setpoint based on the text in the GUI
self._update_voltageSetpoint_fromGUI()
self._set_output_voltage_from_label()
#Update hte PID parameters, which will save the new setpoint to the PID object we use
self._update_PID()
#Reset the graphs
self._clear_data_plots(display_pts)
#Finally turn on the power stabilization
self._is_stabilizing = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_voltageSetpoint_fromGUI(self):\n self.voltageSetpoint = self.widgets['p_setpoint'].value()/self.gain",
"def setPowerIfNecessary(self):\n if self.p.power == 0 and self.p.powerDensity > 0:\n self.setPowerFromDensity()",
"def update_newly_set_ref_V_ampl(self):\n self.qlin_ref_V_ampl_RMS.setText(\n \"%.3f\" % self.alia.config.ref_V_ampl_RMS\n )\n self.qlin_ref_V_ampl.setText(\"%.3f\" % self.alia.config.ref_V_ampl)\n self.qlbl_ref_is_clipping.setText(self.get_clipping_text())\n # QApplication.processEvents()\n\n self.alia_qdev.state.reset()\n self.clear_curves_stage_1_and_2()",
"def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()",
"def power_on(self):\n pass",
"def on_showpointsToolbutton_toggled(self, button):\n\n self._state( 'showpoints', button.get_active() )\n self._refresh_ui()",
"def set_powerobject(self, boolean):\n if boolean == True:\n self.powerobject = 'P'",
"def _localSetState(self,pdict):\n self.apex = pdict.pop('apex')\n self.min = pdict.pop('min' )\n self.max = pdict.pop('max' )",
"def changeValue(self):\r\n # productive #onUpDnArrow\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value) + ' (pt: ' + str(self.ptNumber) + ')')\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)\r\n self.unlockControlPoints(widget.editNeedleTxtBox.value)\r\n widget.drawValidationNeedlesButton.text = \"Render Manual Needle \" + str(widget.editNeedleTxtBox.value)",
"def predeposition(self):\n# status = self.mks146.settings['read_MFC0_valve']\n# if status == 'O' or status == 'C':\n# self.mks146.settings['set_MFC0_valve'] = 'N'\n# time.sleep(1)\n# self.mks146.settings['set_MFC0_SP'] = 0.7\n self.settings['predeposition'] = True",
"def updateParameters(self):\r\n\r\n\t\tif self.approach.altered:\r\n\t\t\tself.transform.enabled = True\r\n\r\n\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\tself.predefined_pattern.enabled = False\r\n\t\t\t\tself.pattern_workspace.enabled = False\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telif self.approach.value == 'Locations in the DEM versus pre-defined pattern':\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_table.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telse: # seek pre-defined pattern in DEM\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_raster_workspace.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = False\r\n\t\t\t\tself.point_vectors.value = ''\r\n\t\t\t\tself.mapping_field.enabled = False\r\n\t\t\t\tself.move_to_max.enabled = False\r\n\t\t\t\tself.move_to_max.value = False\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_table.value = ''\r\n\r\n\t\tif self.mh_iteration.altered:\r\n\r\n\t\t\tif self.mh_iteration.value is True:\r\n\t\t\t\tself.mh_dil_start.enabled = True\r\n\t\t\t\tself.mh_dil_stop.enabled = True\r\n\t\t\t\tself.mh_dil_step.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\r\n\t\t\telse:\r\n\t\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.mh_dil_val.enabled = True\r\n\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\r\n\t\tif self.move_to_max.altered:\r\n\t\t\tif self.move_to_max.value is True:\r\n\t\t\t\tself.move_to_max_distance.enabled = True\r\n\t\t\telse:\r\n\t\t\t\tself.move_to_max_distance.enabled = False\r\n\t\t\t\tself.move_to_max_distance.value = 3\r\n\r\n\t\tif self.transform.altered:\r\n\t\t\tif self.transform.value == 'Work directly on the elevation matrix':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Perform a local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value 
== 'Compute slopes' or self.transform.value == \\\r\n\t\t\t\t\t'Compute slopes and perform local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = True\r\n\r\n\t\tif self.predefined_pattern.altered:\r\n\t\t\tif self.predefined_pattern.value == 'Custom pattern':\r\n\t\t\t\tself.pattern_workspace.enabled = True\r\n\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\telse:\r\n\t\t\t\tself.pattern_workspace.enabled = False",
"def standby(self, state):\r\n def toggle_gui(state):\r\n \"\"\"Toggles GUI components when standby is pressed\"\"\"\r\n self.mainWidget.standbyPushButton.setHidden(state)\r\n self.mainWidget.autoRecordPushButton.setHidden(state)\r\n self.mainWidget.recordPushButton.setVisible(state)\r\n self.mainWidget.recordPushButton.setEnabled(state)\r\n self.mainWidget.pauseToolButton.setVisible(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)\r\n\r\n if (state): # Prepare the pipelines\r\n if self.load_backend():\r\n toggle_gui(True)\r\n self.controller.pause()\r\n self.mainWidget.statusLabel.setText(\"{} {} --- {} \".format(self.freeSpaceString,\r\n get_free_space(self.config.videodir),\r\n self.readyString))\r\n else:\r\n toggle_gui(False)\r\n self.mainWidget.standbyPushButton.setChecked(False)\r\n else:\r\n toggle_gui(False)\r\n self.mainWidget.standbyPushButton.setChecked(False)\r\n\r\n self.mainWidget.playPushButton.setVisible(False)\r\n self.mainWidget.playPushButton.setEnabled(False)",
"def is_on(self) -> bool:\n return self._zone.data[\"mode\"] == \"override\" and self._zone.data[\"setpoint\"]",
"def updateParameters(self):\n\n if self.params[1].value:\n if arcpy.Exists(self.params[1].value):\n try:\n min_value = arcpy.GetRasterProperties_management(self.params[1].value, \"MINIMUM\")[0]\n\n if str(self.params[8].value) != str(self.params[1].value):\n self.params[7].value = True\n self.params[8].value = str(self.params[1].value)\n else:\n self.params[7].value = False\n\n if str(min_value) == \"0\":\n if self.params[7].value == True:\n self.params[2].value = True\n self.params[3].enabled = True\n self.params[7].value = False\n else:\n self.params[2].value = False\n self.params[3].enabled = False\n\n except arcpy.ExecuteError:\n pass\n\n if self.params[2].value == True:\n self.params[3].enabled = True\n else:\n self.params[3].enabled = False",
"def before_sweep(self):\r\n _debug('GUISignalGenerator: before_sweep()')\r\n self.window.sleep(0.05)",
"def turn_on(self) -> None:\n self._monoprice.set_power(self._zone_id, True)",
"def standby(self):\n pass",
"def SetStandbyLPMode(self):\n handler = self.get_command_object(\"SetStandbyLPMode\")\n handler()",
"def update_plotmon_adaptive_cma(self, force_update=False):\n\n if self._live_plot_enabled():\n try:\n if (time.time() - self.time_last_ad_plot_update >\n self.plotting_interval() or force_update):\n ##########################################\n # Main plotmon\n ##########################################\n i = 0\n nr_sweep_funcs = len(self.sweep_function_names)\n\n # best_idx -1 as we count from 0 and best eval\n # counts from 1.\n best_index = int(self.opt_res_dset[-1, -1] - 1)\n\n for j in range(len(self.detector_function.value_names)):\n y_ind = nr_sweep_funcs + j\n\n ##########################################\n # Main plotmon\n ##########################################\n for x_ind in range(nr_sweep_funcs):\n\n x = self.dset[:, x_ind]\n y = self.dset[:, y_ind]\n\n self.curves[i]['config']['x'] = x\n self.curves[i]['config']['y'] = y\n\n best_x = x[best_index]\n best_y = y[best_index]\n self.curves_best_ever[i]['config']['x'] = [best_x]\n self.curves_best_ever[i]['config']['y'] = [best_y]\n mean_x = self.opt_res_dset[:, 2+x_ind]\n # std_x is needed to implement errorbars on X\n # std_x = self.opt_res_dset[:, 2+nr_sweep_funcs+x_ind]\n # to be replaced with an actual mean\n mean_y = self.opt_res_dset[:, 2+2*nr_sweep_funcs]\n mean_y = get_generation_means(\n self.opt_res_dset[:, 1], y)\n # TODO: turn into errorbars\n self.curves_distr_mean[i]['config']['x'] = mean_x\n self.curves_distr_mean[i]['config']['y'] = mean_y\n i += 1\n ##########################################\n # Secondary plotmon\n ##########################################\n # Measured value vs function evaluation\n y = self.dset[:, y_ind]\n x = range(len(y))\n self.iter_traces[j]['config']['x'] = x\n self.iter_traces[j]['config']['y'] = y\n\n # generational means\n gen_idx = self.opt_res_dset[:, 1]\n self.iter_mean_traces[j]['config']['x'] = gen_idx\n self.iter_mean_traces[j]['config']['y'] = mean_y\n\n # This plots the best ever measured value vs iteration\n # number of evals column\n best_evals_idx = (\n self.opt_res_dset[:, -1] - 1).astype(int)\n best_func_val = y[best_evals_idx]\n self.iter_bever_traces[j]['config']['x'] = best_evals_idx\n self.iter_bever_traces[j]['config']['y'] = best_func_val\n\n self.main_QtPlot.update_plot()\n self.secondary_QtPlot.update_plot()\n\n self.time_last_ad_plot_update = time.time()\n\n except Exception as e:\n log.warning(traceback.format_exc())",
"def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()",
"def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"",
"def has_setpoint_changed(tstat, setpoint_data, zone, building):\n WARNING_MSG = \"WARNING. %s has been manually changed in zone %s. Setpoint is at %s from expected %s. \" \\\n \"Setting override to False and intiatiating program stop.\"\n flag_changed = False\n if tstat.cooling_setpoint != setpoint_data[\"cooling_setpoint\"]:\n flag_changed = True\n print(WARNING_MSG % (\"cooling setpoint\", zone, tstat.cooling_setpoint, setpoint_data[\"cooling_setpoint\"]))\n if tstat.heating_setpoint != setpoint_data[\"heating_setpoint\"]:\n flag_changed = True\n print(WARNING_MSG % (\"heating setpoint\", zone, tstat.heating_setpoint, setpoint_data[\"heating_setpoint\"]))\n\n return flag_changed",
"def _enable_disable_gui(self, state):\r\n self.mainWidget.standbyPushButton.setDisabled(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)",
"def poweron(self):\n raise NotImplementedError()",
"def segmentNeedle(self):\r\n # productive #event\r\n profprint()\r\n if self.fiducialButton.isEnabled():\r\n print \"new checked state: \", not self.fiducialButton.checked\r\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)",
"def _updateState(self):\n\n self.changeColorBtn.setEnabled(self.transformTypeCbx.isChecked() or self.shapeTypeCbx.isChecked())",
"def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())",
"def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)",
"def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()",
"def check_setpoints(self):\n # TODO: Can possibly put this in the CCBC Brains\n for heater in self.ard_data['heaters'].keys():\n current_temp = float(self.ard_data['tempsensors'][self.ard_data['heaters'][heater]['tsensor_name']]['value'])\n\n # Assign the pin_status the previous value from the previous iteration\n pin_status = self.ard_data['heaters'][heater]['status']\n\n if current_temp > self.ard_data['heaters'][heater]['upper limit']:\n pin_status = 'OFF'\n\n if current_temp < self.ard_data['heaters'][heater]['lower limit']:\n pin_status = 'ON'\n\n if current_temp >= self.ard_data['heaters'][heater]['maxtemp']:\n pin_status = 'OFF'\n\n self.ard_data['heaters'][heater]['status'] = pin_status\n\n for pump in self.ard_data['pumps'].keys():\n pressure = float(self.ard_data['presssensors'][self.ard_data['pumps'][pump]['psensor_name']]['pressure'])\n gallons = float(pressure * self.ard_data['pumps'][pump]['psi_to_gal_slope'] +\n self.ard_data['pumps'][pump]['psi_to_gal_intercept'])\n self.ard_data['pumps'][pump]['gallons'] = gallons\n\n # Assign the pin status the previous value from the previous cycle\n pin_status = self.ard_data['pumps'][pump]['status']\n\n if gallons > self.ard_data['pumps'][pump]['upper limit']:\n # Turn the pump off when the setpoint is above the setpoint\n pin_status = 'OFF'\n # TODO: Account for solenoid valve control when available\n\n if gallons < self.ard_data['pumps'][pump]['lower limit']:\n pin_status = 'ON'\n\n self.ard_data['pumps'][pump]['status'] = pin_status"
]
| [
"0.68056685",
"0.6062927",
"0.5977094",
"0.58374494",
"0.58165467",
"0.5786206",
"0.57856846",
"0.5702661",
"0.56750655",
"0.56670874",
"0.56664824",
"0.5656362",
"0.5651878",
"0.55967134",
"0.5574887",
"0.5557816",
"0.55409354",
"0.5536943",
"0.55261093",
"0.5524713",
"0.55161643",
"0.55151296",
"0.55021846",
"0.5498523",
"0.54577357",
"0.54449713",
"0.54436374",
"0.54392105",
"0.54249877",
"0.5403966"
]
| 0.6440012 | 1 |